From dc6421826d5a39dce58e37f54454c6769fe3d023 Mon Sep 17 00:00:00 2001 From: Dan Forbes Date: Fri, 24 Jul 2020 06:30:48 -0700 Subject: [PATCH 001/132] Augmented node template docs (#6721) --- bin/node-template/README.md | 96 ++++++++++++++++++- bin/node-template/node/src/chain_spec.rs | 34 ++++++- bin/node-template/pallets/template/Cargo.toml | 2 +- bin/node-template/pallets/template/src/lib.rs | 79 +++++++-------- .../pallets/template/src/mock.rs | 13 ++- .../pallets/template/src/tests.rs | 9 +- bin/node-template/runtime/src/lib.rs | 14 +-- 7 files changed, 173 insertions(+), 74 deletions(-) diff --git a/bin/node-template/README.md b/bin/node-template/README.md index a5929d21f35..ad514617ee3 100644 --- a/bin/node-template/README.md +++ b/bin/node-template/README.md @@ -8,8 +8,8 @@ Follow these steps to prepare a local Substrate development environment :hammer_ ### Simple Setup -Install all the required dependencies with a single command (be patient, this can take up -to 30 minutes). +Install all the required dependencies with a single command (be patient, this can take up to 30 +minutes). ```bash curl https://getsubstrate.io -sSf | bash -s -- --fast @@ -17,7 +17,8 @@ curl https://getsubstrate.io -sSf | bash -s -- --fast ### Manual Setup -Find manual setup instructions at the [Substrate Developer Hub](https://substrate.dev/docs/en/knowledgebase/getting-started/#manual-installation). +Find manual setup instructions at the +[Substrate Developer Hub](https://substrate.dev/docs/en/knowledgebase/getting-started/#manual-installation). ### Build @@ -54,8 +55,8 @@ RUST_LOG=debug RUST_BACKTRACE=1 ./target/release/node-template -lruntime=debug - ### Multi-Node Local Testnet To see the multi-node consensus algorithm in action, run a local testnet with two validator nodes, -Alice and Bob, that have been [configured](/bin/node-template/node/src/chain_spec.rs) as the -initial authorities of the `local` testnet chain and endowed with testnet units. +Alice and Bob, that have been [configured](/bin/node-template/node/src/chain_spec.rs) as the initial +authorities of the `local` testnet chain and endowed with testnet units. Note: this will require two terminal sessions (one for each node). @@ -92,6 +93,91 @@ cargo run -- \ Execute `cargo run -- --help` to learn more about the template node's CLI options. +## Template Structure + +A Substrate project such as this consists of a number of components that are spread across a few +directories. + +### Node + +A blockchain node is an application that allows users to participate in a blockchain network. +Substrate-based blockchain nodes expose a number of capabilities: + +- Networking: Substrate nodes use the [`libp2p`](https://libp2p.io/) networking stack to allow the + nodes in the network to communicate with one another. +- Consensus: Blockchains must have a way to come to + [consensus](https://substrate.dev/docs/en/knowledgebase/advanced/consensus) on the state of the + network. Substrate makes it possible to supply custom consensus engines and also ships with + several consensus mechanisms that have been built on top of Web3 Foundation research. +- RPC Server: A remote procedure call (RPC) server is used to interact with Substrate nodes. + +There are several files in the `node` directory - take special note of the following: + +- [`chain_spec.rs`](./node/src/chain_spec.rs): A + [chain specification](https://substrate.dev/docs/en/knowledgebase/integrate/chain-spec) is a + source code file that defines a Substrate chain's initial (genesis) state. 
Chain specifications + are useful for development and testing, and critical when architecting the launch of a + production chain. Take note of the `development_config` and `testnet_genesis` functions, which + are used to define the genesis state for the local development chain configuration. These + functions identify some + [well-known accounts](https://substrate.dev/docs/en/knowledgebase/integrate/subkey#well-known-keys) + and use them to configure the blockchain's initial state. +- [`service.rs`](./node/src/service.rs): This file defines the node implementation. Take note of + the libraries that this file imports and the names of the functions it invokes. In particular, + there are references to consensus-related topics, such as the + [longest chain rule](https://substrate.dev/docs/en/knowledgebase/advanced/consensus#longest-chain-rule), + the [Aura](https://substrate.dev/docs/en/knowledgebase/advanced/consensus#aura) block authoring + mechanism and the + [GRANDPA](https://substrate.dev/docs/en/knowledgebase/advanced/consensus#grandpa) finality + gadget. + +After the node has been [built](#build), refer to the embedded documentation to learn more about the +capabilities and configuration parameters that it exposes: + +```shell +./target/release/node-template --help +``` + +### Runtime + +The Substrate project in this repository uses the +[FRAME](https://substrate.dev/docs/en/knowledgebase/runtime/frame) framework to construct a +blockchain runtime. FRAME allows runtime developers to declare domain-specific logic in modules +called "pallets". At the heart of FRAME is a helpful +[macro language](https://substrate.dev/docs/en/knowledgebase/runtime/macros) that makes it easy to +create pallets and flexibly compose them to create blockchains that can address a variety of needs. + +Review the [FRAME runtime implementation](./runtime/src/lib.rs) included in this template and note +the following: + +- This file configures several pallets to include in the runtime. Each pallet configuration is + defined by a code block that begins with `impl $PALLET_NAME::Trait for Runtime`. +- The pallets are composed into a single runtime by way of the + [`construct_runtime!`](https://crates.parity.io/frame_support/macro.construct_runtime.html) + macro, which is part of the core + [FRAME Support](https://substrate.dev/docs/en/knowledgebase/runtime/frame#support-library) + library. + +### Pallets + +The runtime in this project is constructed using many FRAME pallets that ship with the +[core Substrate repository](https://github.com/paritytech/substrate/tree/master/frame) and a +template pallet that is [defined in the `pallets`](./pallets/template/src/lib.rs) directory. + +A FRAME pallet is compromised of a number of blockchain primitives: + +- Storage: FRAME defines a rich set of powerful + [storage abstractions](https://substrate.dev/docs/en/knowledgebase/runtime/storage) that makes + it easy to use Substrate's efficient key-value database to manage the evolving state of a + blockchain. +- Dispatchables: FRAME pallets define special types of functions that can be invoked (dispatched) + from outside of the runtime in order to update its state. +- Events: Substrate uses [events](https://substrate.dev/docs/en/knowledgebase/runtime/events) to + notify users of important changes in the runtime. +- Errors: When a dispatchable fails, it returns an error. +- Trait: The `Trait` configuration interface is used to define the types and parameters upon which + a FRAME pallet depends. 
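
As a rough illustration of how these primitives fit together, the sketch below condenses the template pallet found in `pallets/template/src/lib.rs`. The `Something` storage value, `do_something` dispatchable, `SomethingStored` event and `NoneValue` error are the template's own placeholder names; weights and the second dispatchable are trimmed here, so treat this as a sketch rather than the full implementation.

```rust
use frame_support::{decl_module, decl_storage, decl_event, decl_error, dispatch};
use frame_system::ensure_signed;

/// The configuration trait: the types and parameters the pallet depends on.
pub trait Trait: frame_system::Trait {
    type Event: From<Event<Self>> + Into<<Self as frame_system::Trait>::Event>;
}

decl_storage! {
    trait Store for Module<T: Trait> as TemplateModule {
        // Storage: a single optional `u32` value.
        Something get(fn something): Option<u32>;
    }
}

decl_event!(
    pub enum Event<T> where AccountId = <T as frame_system::Trait>::AccountId {
        // Event: emitted so users can observe important state changes. [value, who]
        SomethingStored(u32, AccountId),
    }
);

decl_error! {
    pub enum Error for Module<T: Trait> {
        // Error: returned when a dispatchable cannot complete.
        NoneValue,
    }
}

decl_module! {
    pub struct Module<T: Trait> for enum Call where origin: T::Origin {
        type Error = Error<T>;
        fn deposit_event() = default;

        // Dispatchable: callable from outside the runtime via a signed extrinsic.
        #[weight = 10_000]
        pub fn do_something(origin, something: u32) -> dispatch::DispatchResult {
            let who = ensure_signed(origin)?;
            Something::put(something);
            Self::deposit_event(RawEvent::SomethingStored(something, who));
            Ok(())
        }
    }
}
```

The full version in this repository additionally accounts for database weight in the `#[weight]` annotations and includes a second dispatchable, `cause_error`, that demonstrates custom errors.
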
+ ## Generate a Custom Node Template Generate a Substrate node template based on a particular commit by running the following commands: diff --git a/bin/node-template/node/src/chain_spec.rs b/bin/node-template/node/src/chain_spec.rs index 3edef794686..e49457b0b9a 100644 --- a/bin/node-template/node/src/chain_spec.rs +++ b/bin/node-template/node/src/chain_spec.rs @@ -8,13 +8,13 @@ use sp_finality_grandpa::AuthorityId as GrandpaId; use sp_runtime::traits::{Verify, IdentifyAccount}; use sc_service::ChainType; -// Note this is the URL for the telemetry server -//const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; +// The URL for the telemetry server. +// const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; /// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. pub type ChainSpec = sc_service::GenericChainSpec; -/// Helper function to generate a crypto pair from seed +/// Generate a crypto pair from seed. pub fn get_from_seed(seed: &str) -> ::Public { TPublic::Pair::from_string(&format!("//{}", seed), None) .expect("static values are valid; qed") @@ -23,14 +23,14 @@ pub fn get_from_seed(seed: &str) -> ::Pu type AccountPublic = ::Signer; -/// Helper function to generate an account ID from seed +/// Generate an account ID from seed. pub fn get_account_id_from_seed(seed: &str) -> AccountId where AccountPublic: From<::Public> { AccountPublic::from(get_from_seed::(seed)).into_account() } -/// Helper function to generate an authority key for Aura +/// Generate an Aura authority key. pub fn authority_keys_from_seed(s: &str) -> (AuraId, GrandpaId) { ( get_from_seed::(s), @@ -42,15 +42,20 @@ pub fn development_config() -> Result { let wasm_binary = WASM_BINARY.ok_or("Development wasm binary not available".to_string())?; Ok(ChainSpec::from_genesis( + // Name "Development", + // ID "dev", ChainType::Development, move || testnet_genesis( wasm_binary, + // Initial PoA authorities vec![ authority_keys_from_seed("Alice"), ], + // Sudo account get_account_id_from_seed::("Alice"), + // Pre-funded accounts vec![ get_account_id_from_seed::("Alice"), get_account_id_from_seed::("Bob"), @@ -59,10 +64,15 @@ pub fn development_config() -> Result { ], true, ), + // Bootnodes vec![], + // Telemetry None, + // Protocol ID None, + // Properties None, + // Extensions None, )) } @@ -71,16 +81,21 @@ pub fn local_testnet_config() -> Result { let wasm_binary = WASM_BINARY.ok_or("Development wasm binary not available".to_string())?; Ok(ChainSpec::from_genesis( + // Name "Local Testnet", + // ID "local_testnet", ChainType::Local, move || testnet_genesis( wasm_binary, + // Initial PoA authorities vec![ authority_keys_from_seed("Alice"), authority_keys_from_seed("Bob"), ], + // Sudo account get_account_id_from_seed::("Alice"), + // Pre-funded accounts vec![ get_account_id_from_seed::("Alice"), get_account_id_from_seed::("Bob"), @@ -97,14 +112,20 @@ pub fn local_testnet_config() -> Result { ], true, ), + // Bootnodes vec![], + // Telemetry None, + // Protocol ID None, + // Properties None, + // Extensions None, )) } +/// Configure initial storage state for FRAME modules. fn testnet_genesis( wasm_binary: &[u8], initial_authorities: Vec<(AuraId, GrandpaId)>, @@ -114,10 +135,12 @@ fn testnet_genesis( ) -> GenesisConfig { GenesisConfig { system: Some(SystemConfig { + // Add Wasm runtime to storage. 
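// (The blob set here ends up under the well-known `:code` storage key, which is also what later forkless runtime upgrades overwrite.)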
code: wasm_binary.to_vec(), changes_trie_config: Default::default(), }), balances: Some(BalancesConfig { + // Configure endowed accounts with initial balance of 1 << 60. balances: endowed_accounts.iter().cloned().map(|k|(k, 1 << 60)).collect(), }), aura: Some(AuraConfig { @@ -127,6 +150,7 @@ fn testnet_genesis( authorities: initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect(), }), sudo: Some(SudoConfig { + // Assign network admin rights. key: root_key, }), } diff --git a/bin/node-template/pallets/template/Cargo.toml b/bin/node-template/pallets/template/Cargo.toml index fd1766c112b..a025a9b929e 100644 --- a/bin/node-template/pallets/template/Cargo.toml +++ b/bin/node-template/pallets/template/Cargo.toml @@ -6,7 +6,7 @@ version = "2.0.0-rc5" license = "Unlicense" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" -description = "FRAME pallet template" +description = "FRAME pallet template for defining custom runtime logic." [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/bin/node-template/pallets/template/src/lib.rs b/bin/node-template/pallets/template/src/lib.rs index efcce785cce..729a71278aa 100644 --- a/bin/node-template/pallets/template/src/lib.rs +++ b/bin/node-template/pallets/template/src/lib.rs @@ -1,13 +1,8 @@ #![cfg_attr(not(feature = "std"), no_std)] -/// A FRAME pallet template with necessary imports - -/// Feel free to remove or edit this file as needed. -/// If you change the name of this file, make sure to update its references in runtime/src/lib.rs -/// If you remove this file, you can remove those references - -/// For more guidance on Substrate FRAME, see the example pallet -/// https://github.com/paritytech/substrate/blob/master/frame/example/src/lib.rs +/// Edit this file to define custom logic or remove it if it is not needed. +/// Learn more about FRAME and the core library of Substrate FRAME pallets: +/// https://substrate.dev/docs/en/knowledgebase/runtime/frame use frame_support::{decl_module, decl_storage, decl_event, decl_error, dispatch, traits::Get}; use frame_system::ensure_signed; @@ -18,89 +13,87 @@ mod mock; #[cfg(test)] mod tests; -/// The pallet's configuration trait. +/// Configure the pallet by specifying the parameters and types on which it depends. pub trait Trait: frame_system::Trait { - // Add other types and constants required to configure this pallet. - - /// The overarching event type. + /// Because this pallet emits events, it depends on the runtime's definition of an event. type Event: From> + Into<::Event>; } -// This pallet's storage items. +// The pallet's runtime storage items. +// https://substrate.dev/docs/en/knowledgebase/runtime/storage decl_storage! { - // It is important to update your storage name so that your pallet's - // storage items are isolated from other pallets. + // A unique name is used to ensure that the pallet's storage items are isolated. + // This name may be updated, but each pallet in the runtime must use a unique name. // ---------------------------------vvvvvvvvvvvvvv trait Store for Module as TemplateModule { - // Just a dummy storage item. 
- // Here we are declaring a StorageValue, `Something` as a Option - // `get(fn something)` is the default getter which returns either the stored `u32` or `None` if nothing stored + // Learn more about declaring storage items: + // https://substrate.dev/docs/en/knowledgebase/runtime/storage#declaring-storage-items Something get(fn something): Option; } } -// The pallet's events +// Pallets use events to inform users when important changes are made. +// https://substrate.dev/docs/en/knowledgebase/runtime/events decl_event!( pub enum Event where AccountId = ::AccountId { - /// Just a dummy event. - /// Event `Something` is declared with a parameter of the type `u32` and `AccountId` - /// To emit this event, we call the deposit function, from our runtime functions - /// [something, who] + /// Event documentation should end with an array that provides descriptive names for event + /// parameters. [something, who] SomethingStored(u32, AccountId), } ); -// The pallet's errors +// Errors inform users that something went wrong. decl_error! { pub enum Error for Module { - /// Value was None + /// Error names should be descriptive. NoneValue, - /// Value reached maximum and cannot be incremented further + /// Errors should have helpful documentation associated with them. StorageOverflow, } } -// The pallet's dispatchable functions. +// Dispatchable functions allows users to interact with the pallet and invoke state changes. +// These functions materialize as "extrinsics", which are often compared to transactions. +// Dispatchable functions must be annotated with a weight and must return a DispatchResult. decl_module! { - /// The module declaration. pub struct Module for enum Call where origin: T::Origin { - // Initializing errors - // this includes information about your errors in the node's metadata. - // it is needed only if you are using errors in your pallet + // Errors must be initialized if they are used by the pallet. type Error = Error; - // Initializing events - // this is needed only if you are using events in your pallet + // Events must be initialized if they are used by the pallet. fn deposit_event() = default; - /// Just a dummy entry point. - /// function that can be called by the external world as an extrinsics call - /// takes a parameter of the type `AccountId`, stores it, and emits an event + /// An example dispatchable that takes a singles value as a parameter, writes the value to + /// storage and emits an event. This function must be dispatched by a signed extrinsic. #[weight = 10_000 + T::DbWeight::get().writes(1)] pub fn do_something(origin, something: u32) -> dispatch::DispatchResult { - // Check it was signed and get the signer. See also: ensure_root and ensure_none + // Check that the extrinsic was signed and get the signer. + // This function will return an error if the extrinsic is not signed. + // https://substrate.dev/docs/en/knowledgebase/runtime/origin let who = ensure_signed(origin)?; - // Code to execute when something calls this. - // For example: the following line stores the passed in u32 in the storage + // Update storage. Something::put(something); - // Here we are raising the Something event + // Emit an event. Self::deposit_event(RawEvent::SomethingStored(something, who)); + // Return a successful DispatchResult Ok(()) } - /// Another dummy entry point. - /// takes no parameters, attempts to increment storage value, and possibly throws an error + /// An example dispatchable that may throw a custom error. 
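/// It reads `Something` from storage and fails with `NoneValue` if no value has been set,
/// or with `StorageOverflow` if incrementing the stored value would overflow a `u32`.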
#[weight = 10_000 + T::DbWeight::get().reads_writes(1,1)] pub fn cause_error(origin) -> dispatch::DispatchResult { - // Check it was signed and get the signer. See also: ensure_root and ensure_none let _who = ensure_signed(origin)?; + // Read a value from storage. match Something::get() { + // Return an error if the value has not been set. None => Err(Error::::NoneValue)?, Some(old) => { + // Increment the value read from storage; will error in the event of overflow. let new = old.checked_add(1).ok_or(Error::::StorageOverflow)?; + // Update the value in storage with the incremented result. Something::put(new); Ok(()) }, diff --git a/bin/node-template/pallets/template/src/mock.rs b/bin/node-template/pallets/template/src/mock.rs index 130a782bb7b..8c3bf2b4047 100644 --- a/bin/node-template/pallets/template/src/mock.rs +++ b/bin/node-template/pallets/template/src/mock.rs @@ -1,5 +1,3 @@ -// Creating mock runtime here - use crate::{Module, Trait}; use sp_core::H256; use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; @@ -12,9 +10,8 @@ impl_outer_origin! { pub enum Origin for Test {} } -// For testing the pallet, we construct most of a mock runtime. This means -// first constructing a configuration type (`Test`) which `impl`s each of the -// configuration traits of pallets we want to use. +// Configure a mock runtime to test the pallet. + #[derive(Clone, Eq, PartialEq)] pub struct Test; parameter_types! { @@ -23,6 +20,7 @@ parameter_types! { pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); } + impl system::Trait for Test { type BaseCallFilter = (); type Origin = Origin; @@ -50,13 +48,14 @@ impl system::Trait for Test { type OnKilledAccount = (); type SystemWeightInfo = (); } + impl Trait for Test { type Event = (); } + pub type TemplateModule = Module; -// This function basically just builds a genesis storage key/value store according to -// our desired mockup. +// Build genesis storage according to the mock runtime. pub fn new_test_ext() -> sp_io::TestExternalities { system::GenesisConfig::default().build_storage::().unwrap().into() } diff --git a/bin/node-template/pallets/template/src/tests.rs b/bin/node-template/pallets/template/src/tests.rs index ec123a50c7c..3356b29ff35 100644 --- a/bin/node-template/pallets/template/src/tests.rs +++ b/bin/node-template/pallets/template/src/tests.rs @@ -1,15 +1,12 @@ -// Tests to be written here - use crate::{Error, mock::*}; use frame_support::{assert_ok, assert_noop}; #[test] fn it_works_for_default_value() { new_test_ext().execute_with(|| { - // Just a dummy test for the dummy function `do_something` - // calling the `do_something` function with a value 42 + // Dispatch a signed extrinsic. assert_ok!(TemplateModule::do_something(Origin::signed(1), 42)); - // asserting that the stored value is equal to what we stored + // Read pallet storage and assert an expected result. assert_eq!(TemplateModule::something(), Some(42)); }); } @@ -17,7 +14,7 @@ fn it_works_for_default_value() { #[test] fn correct_error_for_none_value() { new_test_ext().execute_with(|| { - // Ensure the correct error is thrown on None value + // Ensure the expected error is thrown when no value is present. 
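// (`assert_noop!` checks both that the call returns this error and that it leaves storage unchanged.)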
assert_noop!( TemplateModule::cause_error(Origin::signed(1)), Error::::NoneValue diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index 04acf8fa7a9..c46d515a3ef 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -1,5 +1,3 @@ -//! The Substrate Node Template runtime. This can be compiled with `#[no_std]`, ready for Wasm. - #![cfg_attr(not(feature = "std"), no_std)] // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. #![recursion_limit="256"] @@ -40,7 +38,7 @@ pub use frame_support::{ }, }; -/// Importing a template pallet +/// Import the template pallet. pub use template; /// An index to a block. @@ -93,7 +91,6 @@ pub mod opaque { } } -/// This runtime version. pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("node-template"), impl_name: create_runtime_str!("node-template"), @@ -108,7 +105,7 @@ pub const MILLISECS_PER_BLOCK: u64 = 6000; pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; -// These time units are defined in number of blocks. +// Time is measured by number of blocks. pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); pub const HOURS: BlockNumber = MINUTES * 60; pub const DAYS: BlockNumber = HOURS * 24; @@ -134,6 +131,8 @@ parameter_types! { pub const Version: RuntimeVersion = VERSION; } +// Configure FRAME pallets to include in runtime. + impl system::Trait for Runtime { /// The basic call filter to use in dispatchable. type BaseCallFilter = (); @@ -258,11 +257,12 @@ impl sudo::Trait for Runtime { type Call = Call; } -/// Used for the module template in `./template.rs` +/// Configure the pallet template in pallets/template. impl template::Trait for Runtime { type Event = Event; } +// Create the runtime by composing the FRAME pallets that were previously configured. construct_runtime!( pub enum Runtime where Block = Block, @@ -277,7 +277,7 @@ construct_runtime!( Balances: balances::{Module, Call, Storage, Config, Event}, TransactionPayment: transaction_payment::{Module, Storage}, Sudo: sudo::{Module, Call, Config, Storage, Event}, - // Used for the module template in `./template.rs` + // Include the custom logic from the template pallet in the runtime. 
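// Each entry names the pallet and the parts (module, calls, storage, events, ...) it exposes to the runtime.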
TemplateModule: template::{Module, Call, Storage, Event}, } ); -- GitLab From fd07710c54497b4fd1a92463df0b294027212635 Mon Sep 17 00:00:00 2001 From: Benjamin Kampmann Date: Fri, 24 Jul 2020 16:45:20 +0200 Subject: [PATCH 002/132] Switching from git back to released versions for wasmtime, fix cargo-unleash (#6722) * Switching from git back to released versions for wasmtime * filter out cratelift_codegen messages-a Co-authored-by: NikVolf --- Cargo.lock | 49 +++++++++++++++++++---------- client/cli/src/lib.rs | 1 + client/executor/wasmtime/Cargo.toml | 10 +++--- 3 files changed, 38 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e8a21ea6dd4..9f4efcf59bb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -815,7 +815,8 @@ checksum = "8aebca1129a03dc6dc2b127edd729435bbc4a37e1d5f4d7513165089ceb02634" [[package]] name = "cranelift-bforest" version = "0.66.0" -source = "git+https://github.com/paritytech/wasmtime?branch=update-upstream#f744c4e564b40a4cfce6a7090f093ec1726c68e9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8dcc286b052ee24a1e5a222e7c1125e6010ad35b0f248709b9b3737a8fedcfdf" dependencies = [ "cranelift-entity", ] @@ -823,7 +824,8 @@ dependencies = [ [[package]] name = "cranelift-codegen" version = "0.66.0" -source = "git+https://github.com/paritytech/wasmtime?branch=update-upstream#f744c4e564b40a4cfce6a7090f093ec1726c68e9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d9badfe36176cb653506091693bc2bb1970c9bddfcd6ec7fac404f7eaec6f38" dependencies = [ "byteorder", "cranelift-bforest", @@ -842,7 +844,8 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" version = "0.66.0" -source = "git+https://github.com/paritytech/wasmtime?branch=update-upstream#f744c4e564b40a4cfce6a7090f093ec1726c68e9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c3f460031861e4f4ad510be62b2ae50bba6cc886b598a36f9c0a970feab9598" dependencies = [ "cranelift-codegen-shared", "cranelift-entity", @@ -851,12 +854,14 @@ dependencies = [ [[package]] name = "cranelift-codegen-shared" version = "0.66.0" -source = "git+https://github.com/paritytech/wasmtime?branch=update-upstream#f744c4e564b40a4cfce6a7090f093ec1726c68e9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76ad12409e922e7697cd0bdc7dc26992f64a77c31880dfe5e3c7722f4710206d" [[package]] name = "cranelift-entity" version = "0.66.0" -source = "git+https://github.com/paritytech/wasmtime?branch=update-upstream#f744c4e564b40a4cfce6a7090f093ec1726c68e9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d97cdc58972ea065d107872cfb9079f4c92ade78a8af85aaff519a65b5d13f71" dependencies = [ "serde", ] @@ -864,7 +869,8 @@ dependencies = [ [[package]] name = "cranelift-frontend" version = "0.66.0" -source = "git+https://github.com/paritytech/wasmtime?branch=update-upstream#f744c4e564b40a4cfce6a7090f093ec1726c68e9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ef419efb4f94ecc02e5d9fbcc910d2bb7f0040e2de570e63a454f883bc891d6" dependencies = [ "cranelift-codegen", "log", @@ -875,7 +881,8 @@ dependencies = [ [[package]] name = "cranelift-native" version = "0.66.0" -source = "git+https://github.com/paritytech/wasmtime?branch=update-upstream#f744c4e564b40a4cfce6a7090f093ec1726c68e9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e69d44d59826eef6794066ac2c0f4ad3975f02d97030c60dbc04e3886adf36e" dependencies = [ "cranelift-codegen", 
"raw-cpuid", @@ -885,7 +892,8 @@ dependencies = [ [[package]] name = "cranelift-wasm" version = "0.66.0" -source = "git+https://github.com/paritytech/wasmtime?branch=update-upstream#f744c4e564b40a4cfce6a7090f093ec1726c68e9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "979df666b1304624abe99738e9e0e7c7479ee5523ba4b8b237df9ff49996acbb" dependencies = [ "cranelift-codegen", "cranelift-entity", @@ -5829,9 +5837,9 @@ dependencies = [ [[package]] name = "regalloc" -version = "0.0.28" +version = "0.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3598bed0895fe0f72a9e0b00ef9e3a3c8af978a8401b2f2046dec5927de6364a" +checksum = "b9ba8aaf5fe7cf307c6dbdaeed85478961d29e25e3bee5169e11b92fa9f027a8" dependencies = [ "log", "rustc-hash", @@ -8631,7 +8639,8 @@ version = "1.0.6" [[package]] name = "substrate-wasmtime" version = "0.19.0" -source = "git+https://github.com/paritytech/wasmtime?branch=update-upstream#f744c4e564b40a4cfce6a7090f093ec1726c68e9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d75a69f5b3afef86e3e372529bf3fb1f7219b20287c4490e4cb4b4e91970f4f5" dependencies = [ "anyhow", "backtrace", @@ -9703,7 +9712,8 @@ checksum = "a950e6a618f62147fd514ff445b2a0b53120d382751960797f85f058c7eda9b9" [[package]] name = "wasmtime-debug" version = "0.19.0" -source = "git+https://github.com/paritytech/wasmtime?branch=update-upstream#f744c4e564b40a4cfce6a7090f093ec1726c68e9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e634af9067a3af6cf2c7d33dc3b84767ddaf5d010ba68e80eecbcea73d4a349" dependencies = [ "anyhow", "gimli 0.21.0", @@ -9718,7 +9728,8 @@ dependencies = [ [[package]] name = "wasmtime-environ" version = "0.19.0" -source = "git+https://github.com/paritytech/wasmtime?branch=update-upstream#f744c4e564b40a4cfce6a7090f093ec1726c68e9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08f85619a94ee4034bd5bb87fc3dcf71fd2237b81c840809da1201061eec9ab3" dependencies = [ "anyhow", "base64 0.12.3", @@ -9748,7 +9759,8 @@ dependencies = [ [[package]] name = "wasmtime-jit" version = "0.19.0" -source = "git+https://github.com/paritytech/wasmtime?branch=update-upstream#f744c4e564b40a4cfce6a7090f093ec1726c68e9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e914c013c7a9f15f4e429d5431f2830fb8adb56e40567661b69c5ec1d645be23" dependencies = [ "anyhow", "cfg-if", @@ -9776,7 +9788,8 @@ dependencies = [ [[package]] name = "wasmtime-obj" version = "0.19.0" -source = "git+https://github.com/paritytech/wasmtime?branch=update-upstream#f744c4e564b40a4cfce6a7090f093ec1726c68e9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e81d8e02e9bc9fe2da9b6d48bbc217f96e089f7df613f11a28a3958abc44641e" dependencies = [ "anyhow", "more-asserts", @@ -9789,7 +9802,8 @@ dependencies = [ [[package]] name = "wasmtime-profiling" version = "0.19.0" -source = "git+https://github.com/paritytech/wasmtime?branch=update-upstream#f744c4e564b40a4cfce6a7090f093ec1726c68e9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e8d4d1af8dd5f7096cfcc89dd668d358e52980c38cce199643372ffd6590e27" dependencies = [ "anyhow", "cfg-if", @@ -9807,7 +9821,8 @@ dependencies = [ [[package]] name = "wasmtime-runtime" version = "0.19.0" -source = "git+https://github.com/paritytech/wasmtime?branch=update-upstream#f744c4e564b40a4cfce6a7090f093ec1726c68e9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3a25f140bbbaadb07c531cba99ce1a966dba216138dc1b2a0ddecec851a01a93" dependencies = [ "backtrace", "cc", diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index c7f48d27214..7899e48b0a2 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -236,6 +236,7 @@ pub fn init_logger(pattern: &str) { // Disable info logging by default for some modules: builder.filter(Some("ws"), log::LevelFilter::Off); builder.filter(Some("yamux"), log::LevelFilter::Off); + builder.filter(Some("cranelift_codegen"), log::LevelFilter::Off); builder.filter(Some("hyper"), log::LevelFilter::Warn); builder.filter(Some("cranelift_wasm"), log::LevelFilter::Warn); // Always log the special target `sc_tracing`, overrides global level diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index e4ee9794071..b7891c5affc 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -21,11 +21,11 @@ sp-wasm-interface = { version = "2.0.0-rc5", path = "../../../primitives/wasm-in sp-runtime-interface = { version = "2.0.0-rc5", path = "../../../primitives/runtime-interface" } sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } sp-allocator = { version = "2.0.0-rc5", path = "../../../primitives/allocator" } -wasmtime = { package = "substrate-wasmtime", git = "https://github.com/paritytech/wasmtime", branch = "update-upstream" } -wasmtime-runtime = { git = "https://github.com/paritytech/wasmtime", branch = "update-upstream" } -wasmtime-environ = { git = "https://github.com/paritytech/wasmtime", branch = "update-upstream" } -cranelift-wasm = { git = "https://github.com/paritytech/wasmtime", branch = "update-upstream" } -cranelift-codegen = { git = "https://github.com/paritytech/wasmtime", branch = "update-upstream" } +wasmtime = { package = "substrate-wasmtime", version = "0.19.0" } +wasmtime-runtime = { version = "0.19.0" } +wasmtime-environ = { version = "0.19.0" } +cranelift-wasm = { version = "0.66.0" } +cranelift-codegen = { version = "0.66.0" } [dev-dependencies] -- GitLab From e00d78cb1c354001d868fa66938a827a432dc530 Mon Sep 17 00:00:00 2001 From: Benjamin Kampmann Date: Fri, 24 Jul 2020 18:10:00 +0200 Subject: [PATCH 003/132] adding changelog (#6728) --- docs/CHANGELOG.md | 48 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 78fc85acc62..333733d1aee 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -6,6 +6,54 @@ The format is based on [Keep a Changelog]. ## Unreleased +## 2.0.0-rc4 -> 2.0.0-rc5 – River Dolphin + +Runtime +------- + +* Support using system storage directly for EVM balance and nonce (#6659) +* Properly filter out duplicate voters in elections. (#6693) +* Treasury burning can be directed (#6671) +* identity: Don't let subs be re-registered (#6667) +* Regression test to ensure we don't break deterministic builds in wasm (#6597) +* allow to specify schedule time as a relative value (#6578) +* Make signature batching use specialized methods (#6616) +* Rename `CheckEra` to `CheckMortality` (#6619) +* Add `WeightInfo` to all pallets with benchmarks. (#6575) +* Don't require module name in inherents (#6576) +* pallet-evm: return Ok(()) when EVM execution fails (#6493) +* Make the encoded-Call Vec explicitly so in metadata (#6566) +* Allow specify schedule dispatch origin (#6387) +* pallet-evm: customizable chain id (#6537) +* Refactor as_sub to make things clearer. 
(#6503) + +Client +------ + +* Update wasmtime to (almost) lastest master (#6662) +* Update to latest sysinfo prevents leaking fd-handlers (#6708) +* Tracing values (#6679) +* Graceful shutdown for the task manager (#6654) +* Update substrate-networking Grafana dashboard (#6649) +* *: Update to libp2p v0.21.1 (#6559) +* Send Status message on all newly-opened legacy substreams (#6593) +* babe: report equivocations (#6362) +* Support synching of blocks that are not `new_best` (#6508) +* Remove the service, replacing it with a struct of individual chain components (#6352) +* Fix tx-pool returning the same transaction multiple times (#6535) + +API +--- + +* Better handling of stable-only build (#6569) +* Remove the service builder (#6557) +* seal: Prevent contracts from going below subsistence (#6623) +* seal: Rework contracts API (#6573) +* Make evm errors public (#6598) +* Add log rotation (#6564) +* decl_module! macro: use 'frame_system' instead of `system` as default ident (#6500) +* Restrict `Protected` to some heap types. (#6471) + ## 2.0.0-rc3 -> 2.0.0-rc4 (Rhinoceros) Runtime -- GitLab From 342deb787275b50539a6c3f05019489fbf0790f4 Mon Sep 17 00:00:00 2001 From: Benjamin Kampmann Date: Fri, 24 Jul 2020 18:37:17 +0200 Subject: [PATCH 004/132] fixing CI --- .gitlab-ci.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 9fd6bb74e10..f97d857e2b8 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -229,6 +229,7 @@ cargo-check-subkey: test-deterministic-wasm: stage: test <<: *docker-env + <<: *docker-env-only variables: <<: *default-vars except: @@ -452,6 +453,7 @@ build-linux-subkey: &build-subkey stage: build <<: *collect-artifacts <<: *docker-env + <<: *docker-env-only <<: *build-only needs: - job: cargo-check-subkey -- GitLab From 1502626e45a704d1e2852e38ad669687ef02f68b Mon Sep 17 00:00:00 2001 From: Benjamin Kampmann Date: Fri, 24 Jul 2020 18:48:35 +0200 Subject: [PATCH 005/132] remove breaking excepts --- .gitlab-ci.yml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f97d857e2b8..82368843d11 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -219,8 +219,6 @@ cargo-check-subkey: stage: test <<: *docker-env <<: *docker-env-only - except: - - /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 script: - cd ./bin/utils/subkey - BUILD_DUMMY_WASM_BINARY=1 time cargo check --release @@ -302,8 +300,6 @@ test-frame-examples-compile-to-wasm: test-linux-stable-int: <<: *test-linux except: - refs: - - /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 variables: - $DEPLOY_TAG script: @@ -327,8 +323,6 @@ check-web-wasm: stage: test <<: *docker-env <<: *docker-env-only - except: - - /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 script: # WASM support is in progress. As more and more crates support WASM, we # should add entries here. 
See https://github.com/paritytech/substrate/issues/2416 -- GitLab From bf0e1ec1006e55b080b6d7314107a6ee571072e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Fri, 24 Jul 2020 22:02:12 +0100 Subject: [PATCH 006/132] grandpa: allow noting that the set has stalled (#6725) * grandpa: remove unused methods to convert digest * grandpa: add root extrinsic for scheduling forced change * grandpa: add benchmark for schedule_forced_change * grandpa: don't take authority weight in schedule_forced_change * grandpa: add const for default forced change delay * grandpa: adjust weights after benchmark on ref hardware * grandpa: fix cleanup of forced changes on standard change application * grandpa: replace schedule_forced_change with note_stalled * grandpa: always trigger a session change when the set is stalled * grandpa: fix bug on set id mutation after failed scheduled change * grandpa: take delay as parameter in note_stalled * grandpa: fix tests * grandpa: fix cleanup of forced changes * grandpa: add test for forced changes cleanup * grandpa: add test for session rotation set id * grandpa: add test for scheduling of forced changes on new session --- client/finality-grandpa/src/authorities.rs | 138 ++++++++++++++++++++- frame/grandpa/src/benchmarking.rs | 13 +- frame/grandpa/src/lib.rs | 92 +++++++------- frame/grandpa/src/mock.rs | 27 ++-- frame/grandpa/src/tests.rs | 121 ++++++++++++++---- 5 files changed, 304 insertions(+), 87 deletions(-) diff --git a/client/finality-grandpa/src/authorities.rs b/client/finality-grandpa/src/authorities.rs index b4cb254864d..117f5cad5e3 100644 --- a/client/finality-grandpa/src/authorities.rs +++ b/client/finality-grandpa/src/authorities.rs @@ -423,9 +423,21 @@ where fork_tree::FinalizationResult::Changed(change) => { status.changed = true; - // if we are able to finalize any standard change then we can - // discard all pending forced changes (on different forks) - self.pending_forced_changes.clear(); + let pending_forced_changes = std::mem::replace( + &mut self.pending_forced_changes, + Vec::new(), + ); + + // we will keep all forced change for any later blocks and that are a + // descendent of the finalized block (i.e. they are from this fork). + for change in pending_forced_changes { + if change.effective_number() > finalized_number && + is_descendent_of(&finalized_hash, &change.canon_hash) + .map_err(fork_tree::Error::Client)? 
+ { + self.pending_forced_changes.push(change) + } + } if let Some(change) = change { afg_log!(initial_sync, @@ -1163,4 +1175,124 @@ mod tests { Err(Error::InvalidAuthoritySet) )); } + + #[test] + fn cleans_up_stale_forced_changes_when_applying_standard_change() { + let current_authorities = vec![(AuthorityId::from_slice(&[1; 32]), 1)]; + + let mut authorities = AuthoritySet { + current_authorities: current_authorities.clone(), + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + }; + + let new_set = current_authorities.clone(); + + // Create the following pending changes tree: + // + // [#C3] + // / + // /- (#C2) + // / + // (#A) - (#B) - [#C1] + // \ + // (#C0) - [#D] + // + // () - Standard change + // [] - Forced change + + let is_descendent_of = { + let hashes = vec!["B", "C0", "C1", "C2", "C3", "D"]; + is_descendent_of(move |base, hash| match (*base, *hash) { + ("B", "B") => false, // required to have the simpler case below + ("A", b) | ("B", b) => hashes.iter().any(|h| *h == b), + ("C0", "D") => true, + _ => false, + }) + }; + + let mut add_pending_change = |canon_height, canon_hash, forced| { + let change = PendingChange { + next_authorities: new_set.clone(), + delay: 0, + canon_height, + canon_hash, + delay_kind: if forced { + DelayKind::Best { + median_last_finalized: 0, + } + } else { + DelayKind::Finalized + }, + }; + + authorities + .add_pending_change(change, &is_descendent_of) + .unwrap(); + }; + + add_pending_change(5, "A", false); + add_pending_change(10, "B", false); + add_pending_change(15, "C0", false); + add_pending_change(15, "C1", true); + add_pending_change(15, "C2", false); + add_pending_change(15, "C3", true); + add_pending_change(20, "D", true); + + println!( + "pending_changes: {:?}", + authorities + .pending_changes() + .map(|c| c.canon_hash) + .collect::>() + ); + + // applying the standard change at A should not prune anything + // other then the change that was applied + authorities + .apply_standard_changes("A", 5, &is_descendent_of, false) + .unwrap(); + println!( + "pending_changes: {:?}", + authorities + .pending_changes() + .map(|c| c.canon_hash) + .collect::>() + ); + + assert_eq!(authorities.pending_changes().count(), 6); + + // same for B + authorities + .apply_standard_changes("B", 10, &is_descendent_of, false) + .unwrap(); + + assert_eq!(authorities.pending_changes().count(), 5); + + let authorities2 = authorities.clone(); + + // finalizing C2 should clear all forced changes + authorities + .apply_standard_changes("C2", 15, &is_descendent_of, false) + .unwrap(); + + assert_eq!(authorities.pending_forced_changes.len(), 0); + + // finalizing C0 should clear all forced changes but D + let mut authorities = authorities2; + authorities + .apply_standard_changes("C0", 15, &is_descendent_of, false) + .unwrap(); + + assert_eq!(authorities.pending_forced_changes.len(), 1); + assert_eq!( + authorities + .pending_forced_changes + .first() + .unwrap() + .canon_hash, + "D" + ); + } } diff --git a/frame/grandpa/src/benchmarking.rs b/frame/grandpa/src/benchmarking.rs index 18f6f62fa44..048f99fff7a 100644 --- a/frame/grandpa/src/benchmarking.rs +++ b/frame/grandpa/src/benchmarking.rs @@ -19,8 +19,9 @@ #![cfg_attr(not(feature = "std"), no_std)] -use super::*; +use super::{*, Module as Grandpa}; use frame_benchmarking::benchmarks; +use frame_system::RawOrigin; use sp_core::H256; benchmarks! { @@ -62,6 +63,15 @@ benchmarks! 
{ } verify { assert!(sp_finality_grandpa::check_equivocation_proof(equivocation_proof2)); } + + note_stalled { + let delay = 1000.into(); + let best_finalized_block_number = 1.into(); + + }: _(RawOrigin::Root, delay, best_finalized_block_number) + verify { + assert!(Grandpa::::stalled().is_some()); + } } #[cfg(test)] @@ -74,6 +84,7 @@ mod tests { fn test_benchmarks() { new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| { assert_ok!(test_benchmark_check_equivocation_proof::()); + assert_ok!(test_benchmark_note_stalled::()); }) } diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index c903e081e72..961c0994607 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -43,9 +43,10 @@ use frame_support::{ decl_error, decl_event, decl_module, decl_storage, storage, traits::KeyOwnerProofSystem, Parameter, }; -use frame_system::{ensure_none, ensure_signed, DigestOf}; +use frame_system::{ensure_none, ensure_root, ensure_signed}; +use pallet_finality_tracker::OnFinalizationStalled; use sp_runtime::{ - generic::{DigestItem, OpaqueDigestItemId}, + generic::DigestItem, traits::Zero, DispatchResult, KeyTypeId, }; @@ -205,7 +206,7 @@ decl_storage! { State get(fn state): StoredState = StoredState::Live; /// Pending change: (signaled at, scheduled change). - PendingChange: Option>; + PendingChange get(fn pending_change): Option>; /// next block number where we can force a change. NextForced get(fn next_forced): Option; @@ -280,6 +281,24 @@ decl_module! { )?; } + /// Note that the current authority set of the GRANDPA finality gadget has + /// stalled. This will trigger a forced authority set change at the beginning + /// of the next session, to be enacted `delay` blocks after that. The delay + /// should be high enough to safely assume that the block signalling the + /// forced change will not be re-orged (e.g. 1000 blocks). The GRANDPA voters + /// will start the new authority set using the given finalized block as base. + /// Only callable by root. + #[weight = weight_for::note_stalled::()] + fn note_stalled( + origin, + delay: T::BlockNumber, + best_finalized_block_number: T::BlockNumber, + ) { + ensure_root(origin)?; + + Self::on_stalled(delay, best_finalized_block_number) + } + fn on_finalize(block_number: T::BlockNumber) { // check for scheduled pending authority set changes if let Some(pending_change) = >::get() { @@ -295,7 +314,7 @@ decl_module! { )) } else { Self::deposit_log(ConsensusLog::ScheduledChange( - ScheduledChange{ + ScheduledChange { delay: pending_change.delay, next_authorities: pending_change.next_authorities.clone(), } @@ -377,6 +396,11 @@ mod weight_for { // fetching set id -> session index mappings .saturating_add(T::DbWeight::get().reads(2)) } + + pub fn note_stalled() -> Weight { + (3 * WEIGHT_PER_MICROS) + .saturating_add(T::DbWeight::get().writes(1)) + } } impl Module { @@ -580,42 +604,6 @@ impl Module { } } -impl Module { - /// Attempt to extract a GRANDPA log from a generic digest. - pub fn grandpa_log(digest: &DigestOf) -> Option> { - let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); - digest.convert_first(|l| l.try_to::>(id)) - } - - /// Attempt to extract a pending set-change signal from a digest. - pub fn pending_change(digest: &DigestOf) - -> Option> - { - Self::grandpa_log(digest).and_then(|signal| signal.try_into_change()) - } - - /// Attempt to extract a forced set-change signal from a digest. 
- pub fn forced_change(digest: &DigestOf) - -> Option<(T::BlockNumber, ScheduledChange)> - { - Self::grandpa_log(digest).and_then(|signal| signal.try_into_forced_change()) - } - - /// Attempt to extract a pause signal from a digest. - pub fn pending_pause(digest: &DigestOf) - -> Option - { - Self::grandpa_log(digest).and_then(|signal| signal.try_into_pause()) - } - - /// Attempt to extract a resume signal from a digest. - pub fn pending_resume(digest: &DigestOf) - -> Option - { - Self::grandpa_log(digest).and_then(|signal| signal.try_into_resume()) - } -} - impl sp_runtime::BoundToRuntimeAppPublic for Module { type Public = AuthorityId; } @@ -638,14 +626,26 @@ impl pallet_session::OneSessionHandler for Module // Always issue a change if `session` says that the validators have changed. // Even if their session keys are the same as before, the underlying economic // identities have changed. - let current_set_id = if changed { + let current_set_id = if changed || >::exists() { let next_authorities = validators.map(|(_, k)| (k, 1)).collect::>(); - if let Some((further_wait, median)) = >::take() { - let _ = Self::schedule_change(next_authorities, further_wait, Some(median)); + + let res = if let Some((further_wait, median)) = >::take() { + Self::schedule_change(next_authorities, further_wait, Some(median)) + } else { + Self::schedule_change(next_authorities, Zero::zero(), None) + }; + + if res.is_ok() { + CurrentSetId::mutate(|s| { + *s += 1; + *s + }) } else { - let _ = Self::schedule_change(next_authorities, Zero::zero(), None); + // either the session module signalled that the validators have changed + // or the set was stalled. but since we didn't successfully schedule + // an authority set change we do not increment the set id. + Self::current_set_id() } - CurrentSetId::mutate(|s| { *s += 1; *s }) } else { // nothing's changed, neither economic conditions nor session keys. update the pointer // of the current set. @@ -663,7 +663,7 @@ impl pallet_session::OneSessionHandler for Module } } -impl pallet_finality_tracker::OnFinalizationStalled for Module { +impl OnFinalizationStalled for Module { fn on_stalled(further_wait: T::BlockNumber, median: T::BlockNumber) { // when we record old authority sets, we can use `pallet_finality_tracker::median` // to figure out _who_ failed. until then, we can't meaningfully guard diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index 6291a2f82f1..684712df7d0 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -365,23 +365,18 @@ pub fn new_test_ext_raw_authorities(authorities: AuthorityList) -> sp_io::TestEx } pub fn start_session(session_index: SessionIndex) { - let mut parent_hash = System::parent_hash(); - for i in Session::current_index()..session_index { + System::on_finalize(System::block_number()); + Session::on_finalize(System::block_number()); Staking::on_finalize(System::block_number()); - System::set_block_number((i + 1).into()); - Timestamp::set_timestamp(System::block_number() * 6000); + Grandpa::on_finalize(System::block_number()); - // In order to be able to use `System::parent_hash()` in the tests - // we need to first get it via `System::finalize` and then set it - // the `System::initialize`. However, it is needed to be taken into - // consideration that finalizing will prune some data in `System` - // storage including old values `BlockHash` if that reaches above - // `BlockHashCount` capacity. 
- if System::block_number() > 1 { + let parent_hash = if System::block_number() > 1 { let hdr = System::finalize(); - parent_hash = hdr.hash(); - } + hdr.hash() + } else { + System::parent_hash() + }; System::initialize( &(i as u64 + 1), @@ -390,9 +385,13 @@ pub fn start_session(session_index: SessionIndex) { &Default::default(), Default::default(), ); + System::set_block_number((i + 1).into()); + Timestamp::set_timestamp(System::block_number() * 6000); - Session::on_initialize(System::block_number()); System::on_initialize(System::block_number()); + Session::on_initialize(System::block_number()); + Staking::on_initialize(System::block_number()); + Grandpa::on_initialize(System::block_number()); } assert_eq!(Session::current_index(), session_index); diff --git a/frame/grandpa/src/tests.rs b/frame/grandpa/src/tests.rs index f4b353c0fa0..9eca2cc3813 100644 --- a/frame/grandpa/src/tests.rs +++ b/frame/grandpa/src/tests.rs @@ -28,6 +28,7 @@ use frame_support::{ traits::{Currency, OnFinalize}, }; use frame_system::{EventRecord, Phase}; +use pallet_session::OneSessionHandler; use sp_core::H256; use sp_keyring::Ed25519Keyring; use sp_runtime::testing::Digest; @@ -342,14 +343,15 @@ fn report_equivocation_current_set_works() { start_era(1); let authorities = Grandpa::grandpa_authorities(); + let validators = Session::validators(); - // make sure that all authorities have the same balance - for i in 0..authorities.len() { - assert_eq!(Balances::total_balance(&(i as u64)), 10_000_000); - assert_eq!(Staking::slashable_balance_of(&(i as u64)), 10_000); + // make sure that all validators have the same balance + for validator in &validators { + assert_eq!(Balances::total_balance(validator), 10_000_000); + assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( - Staking::eras_stakers(1, i as u64), + Staking::eras_stakers(1, validator), pallet_staking::Exposure { total: 10_000, own: 10_000, @@ -388,11 +390,12 @@ fn report_equivocation_current_set_works() { start_era(2); // check that the balance of 0-th validator is slashed 100%. - assert_eq!(Balances::total_balance(&0), 10_000_000 - 10_000); - assert_eq!(Staking::slashable_balance_of(&0), 0); + let equivocation_validator_id = validators[equivocation_authority_index]; + assert_eq!(Balances::total_balance(&equivocation_validator_id), 10_000_000 - 10_000); + assert_eq!(Staking::slashable_balance_of(&equivocation_validator_id), 0); assert_eq!( - Staking::eras_stakers(2, 0), + Staking::eras_stakers(2, equivocation_validator_id), pallet_staking::Exposure { total: 0, own: 0, @@ -401,12 +404,16 @@ fn report_equivocation_current_set_works() { ); // check that the balances of all other validators are left intact. 
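// (only the offending validator was slashed above; the loop below skips it and asserts everyone else still has the full stake)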
- for i in 1..authorities.len() { - assert_eq!(Balances::total_balance(&(i as u64)), 10_000_000); - assert_eq!(Staking::slashable_balance_of(&(i as u64)), 10_000); + for validator in &validators { + if *validator == equivocation_validator_id { + continue; + } + + assert_eq!(Balances::total_balance(validator), 10_000_000); + assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( - Staking::eras_stakers(2, i as u64), + Staking::eras_stakers(2, validator), pallet_staking::Exposure { total: 10_000, own: 10_000, @@ -425,6 +432,7 @@ fn report_equivocation_old_set_works() { start_era(1); let authorities = Grandpa::grandpa_authorities(); + let validators = Session::validators(); let equivocation_authority_index = 0; let equivocation_key = &authorities[equivocation_authority_index].0; @@ -436,12 +444,12 @@ fn report_equivocation_old_set_works() { start_era(2); // make sure that all authorities have the same balance - for i in 0..authorities.len() { - assert_eq!(Balances::total_balance(&(i as u64)), 10_000_000); - assert_eq!(Staking::slashable_balance_of(&(i as u64)), 10_000); + for validator in &validators { + assert_eq!(Balances::total_balance(validator), 10_000_000); + assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( - Staking::eras_stakers(2, i as u64), + Staking::eras_stakers(2, validator), pallet_staking::Exposure { total: 10_000, own: 10_000, @@ -474,11 +482,13 @@ fn report_equivocation_old_set_works() { start_era(3); // check that the balance of 0-th validator is slashed 100%. - assert_eq!(Balances::total_balance(&0), 10_000_000 - 10_000); - assert_eq!(Staking::slashable_balance_of(&0), 0); + let equivocation_validator_id = validators[equivocation_authority_index]; + + assert_eq!(Balances::total_balance(&equivocation_validator_id), 10_000_000 - 10_000); + assert_eq!(Staking::slashable_balance_of(&equivocation_validator_id), 0); assert_eq!( - Staking::eras_stakers(3, 0), + Staking::eras_stakers(3, equivocation_validator_id), pallet_staking::Exposure { total: 0, own: 0, @@ -487,12 +497,16 @@ fn report_equivocation_old_set_works() { ); // check that the balances of all other validators are left intact. - for i in 1..authorities.len() { - assert_eq!(Balances::total_balance(&(i as u64)), 10_000_000); - assert_eq!(Staking::slashable_balance_of(&(i as u64)), 10_000); + for validator in &validators { + if *validator == equivocation_validator_id { + continue; + } + + assert_eq!(Balances::total_balance(validator), 10_000_000); + assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( - Staking::eras_stakers(3, i as u64), + Staking::eras_stakers(3, validator), pallet_staking::Exposure { total: 10_000, own: 10_000, @@ -767,3 +781,64 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { ); }); } + +#[test] +fn on_new_session_doesnt_start_new_set_if_schedule_change_failed() { + new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| { + assert_eq!(Grandpa::current_set_id(), 0); + + // starting a new era should lead to a change in the session + // validators and trigger a new set + start_era(1); + assert_eq!(Grandpa::current_set_id(), 1); + + // we schedule a change delayed by 2 blocks, this should make it so that + // when we try to rotate the session at the beginning of the era we will + // fail to schedule a change (there's already one pending), so we should + // not increment the set id. 
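// (`schedule_change` refuses to queue a change while one is already pending, and `on_new_session` only bumps the set id when scheduling succeeds)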
+ Grandpa::schedule_change(to_authorities(vec![(1, 1)]), 2, None).unwrap(); + start_era(2); + assert_eq!(Grandpa::current_set_id(), 1); + + // everything should go back to normal after. + start_era(3); + assert_eq!(Grandpa::current_set_id(), 2); + + // session rotation might also fail to schedule a change if it's for a + // forced change (i.e. grandpa is stalled) and it is too soon. + >::put(1000); + >::put((30, 1)); + + // NOTE: we cannot go through normal era rotation since having `Stalled` + // defined will also trigger a new set (regardless of whether the + // session validators changed) + Grandpa::on_new_session(true, std::iter::empty(), std::iter::empty()); + assert_eq!(Grandpa::current_set_id(), 2); + }); +} + +#[test] +fn always_schedules_a_change_on_new_session_when_stalled() { + new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| { + start_era(1); + + assert!(Grandpa::pending_change().is_none()); + assert_eq!(Grandpa::current_set_id(), 1); + + // if the session handler reports no change then we should not schedule + // any pending change + Grandpa::on_new_session(false, std::iter::empty(), std::iter::empty()); + + assert!(Grandpa::pending_change().is_none()); + assert_eq!(Grandpa::current_set_id(), 1); + + // if grandpa is stalled then we should **always** schedule a forced + // change on a new session + >::put((10, 1)); + Grandpa::on_new_session(false, std::iter::empty(), std::iter::empty()); + + assert!(Grandpa::pending_change().is_some()); + assert!(Grandpa::pending_change().unwrap().forced.is_some()); + assert_eq!(Grandpa::current_set_id(), 2); + }); +} -- GitLab From 8180062d6c1c542015988527fecf320974b21051 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sun, 26 Jul 2020 14:56:17 +0200 Subject: [PATCH 007/132] Name all the tasks! 
(#6726) * Remove any implementation of `Spawn` or `Executor` from our task executors * Fix compilation * Rename `SpawnBlockingExecutor` * Update primitives/core/src/traits.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Fix tests Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- Cargo.lock | 28 ++++++++ bin/node/bench/src/txpool.rs | 2 +- bin/node/cli/src/service.rs | 2 +- bin/node/testing/src/bench.rs | 20 +++--- client/api/src/lib.rs | 2 +- .../basic-authorship/src/basic_authorship.rs | 8 +-- client/basic-authorship/src/lib.rs | 2 +- client/consensus/manual-seal/src/lib.rs | 6 +- client/light/src/call_executor.rs | 15 ++-- client/light/src/fetcher.rs | 16 +++-- client/light/src/lib.rs | 5 +- client/network/src/service/tests.rs | 2 +- client/network/test/src/block_import.rs | 2 +- client/network/test/src/lib.rs | 4 +- client/offchain/src/api.rs | 2 +- client/offchain/src/lib.rs | 2 +- client/rpc/src/author/tests.rs | 2 +- client/rpc/src/lib.rs | 26 +++++++ client/service/src/builder.rs | 10 +-- client/service/src/client/call_executor.rs | 13 ++-- client/service/src/client/client.rs | 8 +-- client/service/src/client/light.rs | 11 ++- client/service/src/lib.rs | 2 +- client/service/src/task_manager/mod.rs | 26 ------- client/service/test/src/client/light.rs | 28 ++++---- client/service/test/src/client/mod.rs | 18 ++--- primitives/api/test/tests/runtime_calls.rs | 4 +- primitives/core/Cargo.toml | 2 + primitives/core/src/lib.rs | 2 - primitives/core/src/tasks.rs | 57 ---------------- primitives/core/src/testing.rs | 8 +-- primitives/core/src/traits.rs | 27 +++++--- primitives/externalities/src/extensions.rs | 6 +- primitives/io/src/batch_verifier.rs | 68 +++++++++++-------- primitives/io/src/lib.rs | 15 ++-- primitives/runtime/src/lib.rs | 6 +- primitives/state-machine/src/basic.rs | 18 ++--- primitives/state-machine/src/lib.rs | 47 +++++++------ primitives/state-machine/src/testing.rs | 5 +- test-utils/client/src/lib.rs | 11 ++- test-utils/runtime/client/src/lib.rs | 7 +- utils/frame/benchmarking-cli/src/command.rs | 7 +- utils/frame/rpc/system/src/lib.rs | 8 +-- 43 files changed, 280 insertions(+), 280 deletions(-) delete mode 100644 primitives/core/src/tasks.rs diff --git a/Cargo.lock b/Cargo.lock index 9f4efcf59bb..40332c7e9e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1202,6 +1202,33 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" +[[package]] +name = "dyn-clonable" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e9232f0e607a262ceb9bd5141a3dfb3e4db6994b31989bbfd845878cba59fd4" +dependencies = [ + "dyn-clonable-impl", + "dyn-clone", +] + +[[package]] +name = "dyn-clonable-impl" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" +dependencies = [ + "proc-macro2", + "quote 1.0.6", + "syn 1.0.33", +] + +[[package]] +name = "dyn-clone" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c53dc3a653e0f64081026e4bf048d48fec9fce90c66e8326ca7292df0ff2d82" + [[package]] name = "ed25519" version = "1.0.1" @@ -7814,6 +7841,7 @@ dependencies = [ "byteorder", "criterion 0.2.11", "derive_more", + "dyn-clonable", "ed25519-dalek", "futures 0.3.5", "hash-db", diff --git a/bin/node/bench/src/txpool.rs 
b/bin/node/bench/src/txpool.rs index 8ac0633ae65..9e579587957 100644 --- a/bin/node/bench/src/txpool.rs +++ b/bin/node/bench/src/txpool.rs @@ -71,7 +71,7 @@ impl core::Benchmark for PoolBenchmark { std::thread::park_timeout(std::time::Duration::from_secs(3)); } - let executor = sp_core::testing::SpawnBlockingExecutor::new(); + let executor = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), Arc::new(FullChainApi::new(context.client.clone(), None)), diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 46a7318333d..65f2fdb64a8 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -407,7 +407,7 @@ pub fn new_light_base(config: Configuration) -> Result<( } /// Builds a new service for a light client. -pub fn new_light(config: Configuration) -> Result { +pub fn new_light(config: Configuration) -> Result { new_light_base(config).map(|(task_manager, _, _, _, _)| { task_manager }) diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index 6eaf0c625dd..8242886fe95 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -50,7 +50,7 @@ use node_runtime::{ AccountId, Signature, }; -use sp_core::{ExecutionContext, blake2_256, traits::CloneableSpawn}; +use sp_core::{ExecutionContext, blake2_256, traits::SpawnNamed, Pair, Public, sr25519, ed25519}; use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder; use sp_inherents::InherentData; @@ -58,9 +58,8 @@ use sc_client_api::{ ExecutionStrategy, BlockBackend, execution_extensions::{ExecutionExtensions, ExecutionStrategies}, }; -use sp_core::{Pair, Public, sr25519, ed25519}; use sc_block_builder::BlockBuilderProvider; -use futures::{executor, task}; +use futures::executor; /// Keyring full of accounts for benching. 
/// @@ -145,7 +144,7 @@ impl BlockType { pub fn to_content(self, size: Option) -> BlockContent { BlockContent { block_type: self, - size: size, + size, } } } @@ -197,16 +196,13 @@ impl TaskExecutor { } } -impl task::Spawn for TaskExecutor { - fn spawn_obj(&self, future: task::FutureObj<'static, ()>) - -> Result<(), task::SpawnError> { - self.pool.spawn_obj(future) +impl SpawnNamed for TaskExecutor { + fn spawn(&self, _: &'static str, future: futures::future::BoxFuture<'static, ()>) { + self.pool.spawn_ok(future); } -} -impl CloneableSpawn for TaskExecutor { - fn clone(&self) -> Box { - Box::new(Clone::clone(self)) + fn spawn_blocking(&self, _: &'static str, future: futures::future::BoxFuture<'static, ()>) { + self.pool.spawn_ok(future); } } diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs index bad61f7687a..67706693633 100644 --- a/client/api/src/lib.rs +++ b/client/api/src/lib.rs @@ -37,7 +37,7 @@ pub use light::*; pub use notifications::*; pub use proof_provider::*; -pub use sp_state_machine::{StorageProof, ExecutionStrategy, CloneableSpawn}; +pub use sp_state_machine::{StorageProof, ExecutionStrategy}; /// Usage Information Provider interface /// diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index dd01f8893d2..78bafb2f126 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -358,7 +358,7 @@ mod tests { fn should_cease_building_block_when_deadline_is_reached() { // given let client = Arc::new(substrate_test_runtime_client::new()); - let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let spawner = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), Arc::new(FullChainApi::new(client.clone(), None)), @@ -412,7 +412,7 @@ mod tests { #[test] fn should_not_panic_when_deadline_is_reached() { let client = Arc::new(substrate_test_runtime_client::new()); - let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let spawner = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), Arc::new(FullChainApi::new(client.clone(), None)), @@ -448,7 +448,7 @@ mod tests { fn proposed_storage_changes_should_match_execute_block_storage_changes() { let (client, backend) = TestClientBuilder::new().build_with_backend(); let client = Arc::new(client); - let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let spawner = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), Arc::new(FullChainApi::new(client.clone(), None)), @@ -511,7 +511,7 @@ mod tests { fn should_not_remove_invalid_transactions_when_skipping() { // given let mut client = Arc::new(substrate_test_runtime_client::new()); - let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let spawner = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), Arc::new(FullChainApi::new(client.clone(), None)), diff --git a/client/basic-authorship/src/lib.rs b/client/basic-authorship/src/lib.rs index 68356d0a28f..09536044fb8 100644 --- a/client/basic-authorship/src/lib.rs +++ b/client/basic-authorship/src/lib.rs @@ -31,7 +31,7 @@ //! # }; //! # use sc_transaction_pool::{BasicPool, FullChainApi}; //! # let client = Arc::new(substrate_test_runtime_client::new()); -//! # let spawner = sp_core::testing::SpawnBlockingExecutor::new(); +//! # let spawner = sp_core::testing::TaskExecutor::new(); //! # let txpool = BasicPool::new_full( //! 
# Default::default(), //! # Arc::new(FullChainApi::new(client.clone(), None)), diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index 77fb5043c5d..2799a498c1f 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -220,7 +220,7 @@ mod tests { let (client, select_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); let inherent_data_providers = InherentDataProviders::new(); - let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let spawner = sp_core::testing::TaskExecutor::new(); let pool = Arc::new(BasicPool::with_revalidation_type( Options::default(), api(), None, RevalidationType::Full, spawner, )); @@ -288,7 +288,7 @@ mod tests { let (client, select_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); let inherent_data_providers = InherentDataProviders::new(); - let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let spawner = sp_core::testing::TaskExecutor::new(); let pool = Arc::new(BasicPool::with_revalidation_type( Options::default(), api(), None, RevalidationType::Full, spawner, )); @@ -360,7 +360,7 @@ mod tests { let client = Arc::new(client); let inherent_data_providers = InherentDataProviders::new(); let pool_api = api(); - let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let spawner = sp_core::testing::TaskExecutor::new(); let pool = Arc::new(BasicPool::with_revalidation_type( Options::default(), pool_api.clone(), None, RevalidationType::Full, spawner, )); diff --git a/client/light/src/call_executor.rs b/client/light/src/call_executor.rs index 81be65339b6..fa0f02cd5ae 100644 --- a/client/light/src/call_executor.rs +++ b/client/light/src/call_executor.rs @@ -23,14 +23,17 @@ use std::{ }; use codec::{Encode, Decode}; -use sp_core::{convert_hash, NativeOrEncoded, traits::CodeExecutor, offchain::storage::OffchainOverlayedChanges}; +use sp_core::{ + convert_hash, NativeOrEncoded, traits::{CodeExecutor, SpawnNamed}, + offchain::storage::OffchainOverlayedChanges, +}; use sp_runtime::{ generic::BlockId, traits::{One, Block as BlockT, Header as HeaderT, HashFor}, }; use sp_externalities::Extensions; use sp_state_machine::{ self, Backend as StateBackend, OverlayedChanges, ExecutionStrategy, create_proof_check_backend, - execution_proof_check_on_trie_backend, ExecutionManager, StorageProof, CloneableSpawn, + execution_proof_check_on_trie_backend, ExecutionManager, StorageProof, }; use hash_db::Hasher; @@ -220,7 +223,7 @@ pub fn prove_execution( /// Proof should include both environment preparation proof and method execution proof. pub fn check_execution_proof( executor: &E, - spawn_handle: Box, + spawn_handle: Box, request: &RemoteCallRequest
 	remote_proof: StorageProof,
 ) -> ClientResult>
@@ -251,7 +254,7 @@ pub fn check_execution_proof(
 /// Proof should include both environment preparation proof and method execution proof.
 pub fn check_execution_proof_with_make_header(
 	executor: &E,
-	spawn_handle: Box<dyn CloneableSpawn>,
+	spawn_handle: Box<dyn SpawnNamed>,
 	request: &RemoteCallRequest
, remote_proof: StorageProof, make_next_header: MakeNextHeader, @@ -275,7 +278,7 @@ pub fn check_execution_proof_with_make_header( let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&trie_backend); let runtime_code = backend_runtime_code.runtime_code()?; - execution_proof_check_on_trie_backend::( + execution_proof_check_on_trie_backend::( &trie_backend, &mut changes, executor, @@ -286,7 +289,7 @@ pub fn check_execution_proof_with_make_header( )?; // execute method - execution_proof_check_on_trie_backend::( + execution_proof_check_on_trie_backend::( &trie_backend, &mut changes, executor, diff --git a/client/light/src/fetcher.rs b/client/light/src/fetcher.rs index 88d20cafc90..33113c2fc7d 100644 --- a/client/light/src/fetcher.rs +++ b/client/light/src/fetcher.rs @@ -24,8 +24,7 @@ use std::marker::PhantomData; use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; use codec::{Decode, Encode}; -use sp_core::{convert_hash, traits::CodeExecutor}; -use sp_core::storage::{ChildInfo, ChildType}; +use sp_core::{convert_hash, traits::{CodeExecutor, SpawnNamed}, storage::{ChildInfo, ChildType}}; use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, Hash, HashFor, NumberFor, AtLeast32Bit, CheckedConversion, @@ -33,7 +32,7 @@ use sp_runtime::traits::{ use sp_state_machine::{ ChangesTrieRootsStorage, ChangesTrieAnchorBlockId, ChangesTrieConfigurationRange, InMemoryChangesTrieStorage, TrieBackend, read_proof_check, key_changes_proof_check_with_db, - read_child_proof_check, CloneableSpawn, + read_child_proof_check, }; pub use sp_state_machine::StorageProof; use sp_blockchain::{Error as ClientError, Result as ClientResult}; @@ -46,20 +45,23 @@ pub use sc_client_api::{ }, cht, }; -use crate::blockchain::Blockchain; -use crate::call_executor::check_execution_proof; +use crate::{blockchain::Blockchain, call_executor::check_execution_proof}; /// Remote data checker. pub struct LightDataChecker> { blockchain: Arc>, executor: E, - spawn_handle: Box, + spawn_handle: Box, _hasher: PhantomData<(B, H)>, } impl> LightDataChecker { /// Create new light data checker. - pub fn new(blockchain: Arc>, executor: E, spawn_handle: Box) -> Self { + pub fn new( + blockchain: Arc>, + executor: E, + spawn_handle: Box, + ) -> Self { Self { blockchain, executor, spawn_handle, _hasher: PhantomData } diff --git a/client/light/src/lib.rs b/client/light/src/lib.rs index deea642bd39..899d1ae31a3 100644 --- a/client/light/src/lib.rs +++ b/client/light/src/lib.rs @@ -19,9 +19,8 @@ //! Light client components. 
use sp_runtime::traits::{Block as BlockT, HashFor}; -use sc_client_api::CloneableSpawn; use std::sync::Arc; -use sp_core::traits::CodeExecutor; +use sp_core::traits::{CodeExecutor, SpawnNamed}; pub mod backend; pub mod blockchain; @@ -34,7 +33,7 @@ pub use {backend::*, blockchain::*, call_executor::*, fetcher::*}; pub fn new_fetch_checker>( blockchain: Arc>, executor: E, - spawn_handle: Box, + spawn_handle: Box, ) -> LightDataChecker, B, S> where E: CodeExecutor, diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index 17d9553fa66..0b02153d3d8 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -88,7 +88,7 @@ fn build_test_full_node(config: config::NetworkConfiguration) Box::new(client.clone()), None, None, - &sp_core::testing::SpawnBlockingExecutor::new(), + &sp_core::testing::TaskExecutor::new(), None, )); diff --git a/client/network/test/src/block_import.rs b/client/network/test/src/block_import.rs index 6762b74b6b8..1d2cd3d687d 100644 --- a/client/network/test/src/block_import.rs +++ b/client/network/test/src/block_import.rs @@ -98,7 +98,7 @@ fn import_single_good_block_without_header_fails() { #[test] fn async_import_queue_drops() { - let executor = sp_core::testing::SpawnBlockingExecutor::new(); + let executor = sp_core::testing::TaskExecutor::new(); // Perform this test multiple times since it exhibits non-deterministic behavior. for _ in 0..100 { let verifier = PassThroughVerifier::new(true); diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 30508711a6a..35587cbdc08 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -648,7 +648,7 @@ pub trait TestNetFactory: Sized { Box::new(block_import.clone()), justification_import, finality_proof_import, - &sp_core::testing::SpawnBlockingExecutor::new(), + &sp_core::testing::TaskExecutor::new(), None, )); @@ -728,7 +728,7 @@ pub trait TestNetFactory: Sized { Box::new(block_import.clone()), justification_import, finality_proof_import, - &sp_core::testing::SpawnBlockingExecutor::new(), + &sp_core::testing::TaskExecutor::new(), None, )); diff --git a/client/offchain/src/api.rs b/client/offchain/src/api.rs index 0aa5d4ad788..5287ac8251e 100644 --- a/client/offchain/src/api.rs +++ b/client/offchain/src/api.rs @@ -334,7 +334,7 @@ mod tests { // Compare. 
assert!(timestamp.unix_millis() > 0); - assert_eq!(timestamp.unix_millis(), d); + assert!(timestamp.unix_millis() >= d); } #[test] diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index f2fb8a8e978..bb2965c6589 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -247,7 +247,7 @@ mod tests { let _ = env_logger::try_init(); let client = Arc::new(substrate_test_runtime_client::new()); - let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let spawner = sp_core::testing::TaskExecutor::new(); let pool = TestPool(BasicPool::new_full( Default::default(), Arc::new(FullChainApi::new(client.clone(), None)), diff --git a/client/rpc/src/author/tests.rs b/client/rpc/src/author/tests.rs index 37edcbe2632..7736ea0c864 100644 --- a/client/rpc/src/author/tests.rs +++ b/client/rpc/src/author/tests.rs @@ -61,7 +61,7 @@ impl Default for TestSetup { let client_builder = substrate_test_runtime_client::TestClientBuilder::new(); let client = Arc::new(client_builder.set_keystore(keystore.clone()).build()); - let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full( Default::default(), Arc::new(FullChainApi::new(client.clone(), None)), diff --git a/client/rpc/src/lib.rs b/client/rpc/src/lib.rs index 53a63b449c8..4b2bd200a84 100644 --- a/client/rpc/src/lib.rs +++ b/client/rpc/src/lib.rs @@ -22,6 +22,11 @@ #![warn(missing_docs)] +use futures::{compat::Future01CompatExt, FutureExt}; +use rpc::futures::future::{Executor, ExecuteError, Future}; +use sp_core::traits::SpawnNamed; +use std::sync::Arc; + mod metadata; pub use sc_rpc_api::DenyUnsafe; @@ -35,3 +40,24 @@ pub mod state; pub mod system; #[cfg(test)] mod testing; + +/// Task executor that is being used by RPC subscriptions. +#[derive(Clone)] +pub struct SubscriptionTaskExecutor(Arc); + +impl SubscriptionTaskExecutor { + /// Create a new `Self` with the given spawner. 
+ pub fn new(spawn: impl SpawnNamed + 'static) -> Self { + Self(Arc::new(spawn)) + } +} + +impl Executor + Send>> for SubscriptionTaskExecutor { + fn execute( + &self, + future: Box + Send>, + ) -> Result<(), ExecuteError + Send>>> { + self.0.spawn("substrate_rpc_subscription", future.compat().map(drop).boxed()); + Ok(()) + } +} diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index c71746f48a8..a262e6488bb 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -24,8 +24,7 @@ use crate::{ config::{Configuration, KeystoreConfig, PrometheusConfig, OffchainWorkerConfig}, }; use sc_client_api::{ - light::RemoteBlockchain, ForkBlocks, BadBlocks, CloneableSpawn, UsageProvider, - ExecutorProvider, + light::RemoteBlockchain, ForkBlocks, BadBlocks, UsageProvider, ExecutorProvider, }; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; use sc_chain_spec::get_extension; @@ -55,7 +54,7 @@ use sc_telemetry::{telemetry, SUBSTRATE_INFO}; use sp_transaction_pool::MaintainedTransactionPool; use prometheus_endpoint::Registry; use sc_client_db::{Backend, DatabaseSettings}; -use sp_core::traits::CodeExecutor; +use sp_core::traits::{CodeExecutor, SpawnNamed}; use sp_runtime::BuildStorage; use sc_client_api::{ BlockBackend, BlockchainEvents, @@ -334,7 +333,7 @@ pub fn new_client( fork_blocks: ForkBlocks, bad_blocks: BadBlocks, execution_extensions: ExecutionExtensions, - spawn_handle: Box, + spawn_handle: Box, prometheus_registry: Option, config: ClientConfig, ) -> Result<( @@ -750,7 +749,8 @@ fn gen_handler( chain_type: config.chain_spec.chain_type(), }; - let subscriptions = SubscriptionManager::new(Arc::new(spawn_handle)); + let task_executor = sc_rpc::SubscriptionTaskExecutor::new(spawn_handle); + let subscriptions = SubscriptionManager::new(Arc::new(task_executor)); let (chain, state, child_state) = if let (Some(remote_blockchain), Some(on_demand)) = (remote_blockchain, on_demand) { diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index 049bd888b13..1919c76ff48 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -27,9 +27,12 @@ use sp_state_machine::{ }; use sc_executor::{RuntimeVersion, RuntimeInfo, NativeVersion}; use sp_externalities::Extensions; -use sp_core::{NativeOrEncoded, NeverNativeValue, traits::CodeExecutor, offchain::storage::OffchainOverlayedChanges}; +use sp_core::{ + NativeOrEncoded, NeverNativeValue, traits::{CodeExecutor, SpawnNamed}, + offchain::storage::OffchainOverlayedChanges, +}; use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; -use sc_client_api::{backend, call_executor::CallExecutor, CloneableSpawn}; +use sc_client_api::{backend, call_executor::CallExecutor}; use super::client::ClientConfig; /// Call executor that executes methods locally, querying all required @@ -37,7 +40,7 @@ use super::client::ClientConfig; pub struct LocalCallExecutor { backend: Arc, executor: E, - spawn_handle: Box, + spawn_handle: Box, client_config: ClientConfig, } @@ -46,7 +49,7 @@ impl LocalCallExecutor { pub fn new( backend: Arc, executor: E, - spawn_handle: Box, + spawn_handle: Box, client_config: ClientConfig, ) -> Self { LocalCallExecutor { @@ -242,7 +245,7 @@ where method: &str, call_data: &[u8] ) -> Result<(Vec, StorageProof), sp_blockchain::Error> { - sp_state_machine::prove_execution_on_trie_backend::<_, _, NumberFor, _>( + sp_state_machine::prove_execution_on_trie_backend::<_, 
_, NumberFor, _, _>( trie_state, overlay, &self.executor, diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 2f101465d51..b152415a4a8 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -92,8 +92,8 @@ use rand::Rng; #[cfg(feature="test-helpers")] use { - sp_core::traits::CodeExecutor, - sc_client_api::{CloneableSpawn, in_mem}, + sp_core::traits::{CodeExecutor, SpawnNamed}, + sc_client_api::in_mem, sc_executor::RuntimeInfo, super::call_executor::LocalCallExecutor, }; @@ -149,7 +149,7 @@ pub fn new_in_mem( genesis_storage: &S, keystore: Option, prometheus_registry: Option, - spawn_handle: Box, + spawn_handle: Box, config: ClientConfig, ) -> sp_blockchain::Result, @@ -189,7 +189,7 @@ pub fn new_with_backend( executor: E, build_genesis_storage: &S, keystore: Option, - spawn_handle: Box, + spawn_handle: Box, prometheus_registry: Option, config: ClientConfig, ) -> sp_blockchain::Result, Block, RA>> diff --git a/client/service/src/client/light.rs b/client/service/src/client/light.rs index 8b9b65fc2fa..e8e1286eccd 100644 --- a/client/service/src/client/light.rs +++ b/client/service/src/client/light.rs @@ -21,17 +21,14 @@ use std::sync::Arc; use sc_executor::RuntimeInfo; -use sp_core::traits::CodeExecutor; +use sp_core::traits::{CodeExecutor, SpawnNamed}; use sp_runtime::BuildStorage; use sp_runtime::traits::{Block as BlockT, HashFor}; use sp_blockchain::Result as ClientResult; use prometheus_endpoint::Registry; -use super::call_executor::LocalCallExecutor; -use super::client::{Client,ClientConfig}; -use sc_client_api::{ - light::Storage as BlockchainStorage, CloneableSpawn, -}; +use super::{call_executor::LocalCallExecutor, client::{Client, ClientConfig}}; +use sc_client_api::light::Storage as BlockchainStorage; use sc_light::{Backend, GenesisCallExecutor}; @@ -40,7 +37,7 @@ pub fn new_light( backend: Arc>>, genesis_storage: &dyn BuildStorage, code_executor: E, - spawn_handle: Box, + spawn_handle: Box, prometheus_registry: Option, ) -> ClientResult< Client< diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 1eef6493e77..ff7573f5fac 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -556,7 +556,7 @@ mod tests { // given let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); let client = Arc::new(client); - let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full( Default::default(), Arc::new(FullChainApi::new(client.clone(), None)), diff --git a/client/service/src/task_manager/mod.rs b/client/service/src/task_manager/mod.rs index bd4b2cfde73..e0e8699ce1d 100644 --- a/client/service/src/task_manager/mod.rs +++ b/client/service/src/task_manager/mod.rs @@ -19,8 +19,6 @@ use log::{debug, error}; use futures::{ Future, FutureExt, StreamExt, future::{select, Either, BoxFuture}, - compat::*, - task::{Spawn, FutureObj, SpawnError}, sink::SinkExt, }; use prometheus_endpoint::{ @@ -28,7 +26,6 @@ use prometheus_endpoint::{ PrometheusError, CounterVec, HistogramOpts, HistogramVec, Opts, Registry, U64 }; -use sc_client_api::CloneableSpawn; use sp_utils::mpsc::{TracingUnboundedSender, TracingUnboundedReceiver, tracing_unbounded}; use crate::{config::{TaskExecutor, TaskType, JoinFuture}, Error}; @@ -132,14 +129,6 @@ impl SpawnTaskHandle { } } -impl Spawn for SpawnTaskHandle { - fn spawn_obj(&self, future: FutureObj<'static, ()>) - -> Result<(), SpawnError> { - 
self.spawn("unnamed", future); - Ok(()) - } -} - impl sp_core::traits::SpawnNamed for SpawnTaskHandle { fn spawn_blocking(&self, name: &'static str, future: BoxFuture<'static, ()>) { self.spawn_blocking(name, future); @@ -150,21 +139,6 @@ impl sp_core::traits::SpawnNamed for SpawnTaskHandle { } } -impl sc_client_api::CloneableSpawn for SpawnTaskHandle { - fn clone(&self) -> Box { - Box::new(Clone::clone(self)) - } -} - -type Boxed01Future01 = Box + Send + 'static>; - -impl futures01::future::Executor for SpawnTaskHandle { - fn execute(&self, future: Boxed01Future01) -> Result<(), futures01::future::ExecuteError>{ - self.spawn("unnamed", future.compat().map(drop)); - Ok(()) - } -} - /// A wrapper over `SpawnTaskHandle` that will notify a receiver whenever any /// task spawned through it fails. The service should be on the receiver side /// and will shut itself down whenever it receives any message, i.e. an diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index e72c290d43b..031c234c1ab 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -37,9 +37,9 @@ use substrate_test_runtime_client::{ runtime::{Hash, Block, Header}, TestClient, ClientBlockImportExt, }; use sp_api::{InitializeBlock, StorageTransactionCache, ProofRecorder, OffchainOverlayedChanges}; -use sp_consensus::{BlockOrigin}; +use sp_consensus::BlockOrigin; use sc_executor::{NativeExecutor, WasmExecutionMethod, RuntimeVersion, NativeVersion}; -use sp_core::{H256, tasks::executor as tasks_executor, NativeOrEncoded}; +use sp_core::{H256, NativeOrEncoded, testing::TaskExecutor}; use sc_client_api::{ blockchain::Info, backend::NewBlockState, Backend as ClientBackend, ProofProvider, in_mem::{Backend as InMemBackend, Blockchain as InMemoryBlockchain}, @@ -317,7 +317,7 @@ fn execution_proof_is_generated_and_checked() { // check remote execution proof locally let local_result = check_execution_proof::<_, _, BlakeTwo256>( &local_executor(), - tasks_executor(), + Box::new(TaskExecutor::new()), &RemoteCallRequest { block: substrate_test_runtime_client::runtime::Hash::default(), header: remote_header, @@ -345,7 +345,7 @@ fn execution_proof_is_generated_and_checked() { // check remote execution proof locally let execution_result = check_execution_proof_with_make_header::<_, _, BlakeTwo256, _>( &local_executor(), - tasks_executor(), + Box::new(TaskExecutor::new()), &RemoteCallRequest { block: substrate_test_runtime_client::runtime::Hash::default(), header: remote_header, @@ -479,7 +479,7 @@ fn prepare_for_read_proof_check() -> (TestChecker, Header, StorageProof, u32) { let local_checker = LightDataChecker::new( Arc::new(DummyBlockchain::new(DummyStorage::new())), local_executor(), - tasks_executor(), + Box::new(TaskExecutor::new()), ); (local_checker, remote_block_header, remote_read_proof, heap_pages) } @@ -527,7 +527,7 @@ fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, V let local_checker = LightDataChecker::new( Arc::new(DummyBlockchain::new(DummyStorage::new())), local_executor(), - tasks_executor(), + Box::new(TaskExecutor::new()), ); (local_checker, remote_block_header, remote_read_proof, child_value) } @@ -558,7 +558,7 @@ fn prepare_for_header_proof_check(insert_cht: bool) -> (TestChecker, Hash, Heade let local_checker = LightDataChecker::new( Arc::new(DummyBlockchain::new(DummyStorage::new())), local_executor(), - tasks_executor(), + Box::new(TaskExecutor::new()), ); (local_checker, local_cht_root, 
remote_block_header, remote_header_proof) } @@ -642,7 +642,7 @@ fn changes_proof_is_generated_and_checked_when_headers_are_not_pruned() { let local_checker = TestChecker::new( Arc::new(DummyBlockchain::new(DummyStorage::new())), local_executor(), - tasks_executor(), + Box::new(TaskExecutor::new()), ); let local_checker = &local_checker as &dyn FetchChecker; let max = remote_client.chain_info().best_number; @@ -717,7 +717,7 @@ fn changes_proof_is_generated_and_checked_when_headers_are_pruned() { let local_checker = TestChecker::new( Arc::new(DummyBlockchain::new(local_storage)), local_executor(), - tasks_executor(), + Box::new(TaskExecutor::new()), ); // check proof on local client @@ -752,7 +752,7 @@ fn check_changes_proof_fails_if_proof_is_wrong() { let local_checker = TestChecker::new( Arc::new(DummyBlockchain::new(DummyStorage::new())), local_executor(), - tasks_executor(), + Box::new(TaskExecutor::new()), ); let local_checker = &local_checker as &dyn FetchChecker; let max = remote_client.chain_info().best_number; @@ -840,7 +840,7 @@ fn check_changes_tries_proof_fails_if_proof_is_wrong() { let local_checker = TestChecker::new( Arc::new(DummyBlockchain::new(DummyStorage::new())), local_executor(), - tasks_executor(), + Box::new(TaskExecutor::new()), ); assert!(local_checker.check_changes_tries_proof(4, &remote_proof.roots, remote_proof.roots_proof.clone()).is_err()); @@ -851,7 +851,7 @@ fn check_changes_tries_proof_fails_if_proof_is_wrong() { let local_checker = TestChecker::new( Arc::new(DummyBlockchain::new(local_storage)), local_executor(), - tasks_executor(), + Box::new(TaskExecutor::new()), ); let result = local_checker.check_changes_tries_proof( 4, &remote_proof.roots, StorageProof::empty() @@ -869,7 +869,7 @@ fn check_body_proof_faulty() { let local_checker = TestChecker::new( Arc::new(DummyBlockchain::new(DummyStorage::new())), local_executor(), - tasks_executor(), + Box::new(TaskExecutor::new()), ); let body_request = RemoteBodyRequest { @@ -893,7 +893,7 @@ fn check_body_proof_of_same_data_should_succeed() { let local_checker = TestChecker::new( Arc::new(DummyBlockchain::new(DummyStorage::new())), local_executor(), - tasks_executor(), + Box::new(TaskExecutor::new()), ); let body_request = RemoteBodyRequest { diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 2124f0ced41..8d073df272f 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -40,8 +40,7 @@ use sp_runtime::traits::{ use substrate_test_runtime::TestAPI; use sp_state_machine::backend::Backend as _; use sp_api::{ProvideRuntimeApi, OffchainOverlayedChanges}; -use sp_core::tasks::executor as tasks_executor; -use sp_core::{H256, ChangesTrieConfiguration, blake2_256}; +use sp_core::{H256, ChangesTrieConfiguration, blake2_256, testing::TaskExecutor}; use std::collections::{HashMap, HashSet}; use std::sync::Arc; use sp_consensus::{ @@ -165,6 +164,7 @@ fn construct_block( let mut offchain_overlay = OffchainOverlayedChanges::default(); let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend); let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); + let task_executor = Box::new(TaskExecutor::new()); StateMachine::new( backend, @@ -176,7 +176,7 @@ fn construct_block( &header.encode(), Default::default(), &runtime_code, - tasks_executor(), + task_executor.clone() as Box<_>, ).execute( ExecutionStrategy::NativeElseWasm, ).unwrap(); @@ -192,7 +192,7 @@ fn construct_block( 
&tx.encode(), Default::default(), &runtime_code, - tasks_executor(), + task_executor.clone() as Box<_>, ).execute( ExecutionStrategy::NativeElseWasm, ).unwrap(); @@ -208,7 +208,7 @@ fn construct_block( &[], Default::default(), &runtime_code, - tasks_executor(), + task_executor.clone() as Box<_>, ).execute( ExecutionStrategy::NativeElseWasm, ).unwrap(); @@ -262,7 +262,7 @@ fn construct_genesis_should_work_with_native() { &b1data, Default::default(), &runtime_code, - tasks_executor(), + TaskExecutor::new(), ).execute( ExecutionStrategy::NativeElseWasm, ).unwrap(); @@ -298,7 +298,7 @@ fn construct_genesis_should_work_with_wasm() { &b1data, Default::default(), &runtime_code, - tasks_executor(), + TaskExecutor::new(), ).execute( ExecutionStrategy::AlwaysWasm, ).unwrap(); @@ -334,7 +334,7 @@ fn construct_genesis_with_bad_transaction_should_panic() { &b1data, Default::default(), &runtime_code, - tasks_executor(), + TaskExecutor::new(), ).execute( ExecutionStrategy::NativeElseWasm, ); @@ -1743,7 +1743,7 @@ fn cleans_up_closed_notification_sinks_on_block_import() { &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), None, None, - sp_core::tasks::executor(), + Box::new(TaskExecutor::new()), Default::default(), ) .unwrap(); diff --git a/primitives/api/test/tests/runtime_calls.rs b/primitives/api/test/tests/runtime_calls.rs index 6717ab7a3bb..d72872959ce 100644 --- a/primitives/api/test/tests/runtime_calls.rs +++ b/primitives/api/test/tests/runtime_calls.rs @@ -197,11 +197,11 @@ fn record_proof_works() { None, 8, ); - execution_proof_check_on_trie_backend::<_, u64, _>( + execution_proof_check_on_trie_backend::<_, u64, _, _>( &backend, &mut overlay, &executor, - sp_core::tasks::executor(), + sp_core::testing::TaskExecutor::new(), "Core_execute_block", &block.encode(), &runtime_code, diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index fcf614fd63c..6787efbd845 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -39,6 +39,7 @@ sp-externalities = { version = "0.8.0-rc5", optional = true, path = "../external sp-storage = { version = "2.0.0-rc5", default-features = false, path = "../storage" } parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } futures = { version = "0.3.1", optional = true } +dyn-clonable = { version = "0.9.0", optional = true } # full crypto ed25519-dalek = { version = "1.0.0-pre.4", default-features = false, features = ["u64_backend", "alloc"], optional = true } @@ -111,6 +112,7 @@ std = [ "futures", "futures/thread-pool", "libsecp256k1/std", + "dyn-clonable", ] # This feature enables all crypto primitives for `no_std` builds like microcontrollers diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index 27f59f4fba7..7e52efd52e9 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -72,8 +72,6 @@ mod changes_trie; pub mod traits; pub mod testing; #[cfg(feature = "std")] -pub mod tasks; -#[cfg(feature = "std")] pub mod vrf; pub use self::hash::{H160, H256, H512, convert_hash}; diff --git a/primitives/core/src/tasks.rs b/primitives/core/src/tasks.rs deleted file mode 100644 index 731e51d2470..00000000000 --- a/primitives/core/src/tasks.rs +++ /dev/null @@ -1,57 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Module for low-level asynchronous processing. - -use crate::traits::CloneableSpawn; -use futures::{executor, task}; - -/// Simple task executor. -/// -/// Uses single thread for scheduling tasks. Can be cloned and used in -/// runtime host (implements `CloneableSpawn`). -#[derive(Debug, Clone)] -pub struct Executor { - pool: executor::ThreadPool, -} - -impl Executor { - fn new() -> Self { - Self { - pool: executor::ThreadPool::builder().pool_size(1).create() - .expect("Failed to create task executor") - } - } -} - -impl task::Spawn for Executor { - fn spawn_obj(&self, future: task::FutureObj<'static, ()>) - -> Result<(), task::SpawnError> { - self.pool.spawn_obj(future) - } -} - -impl CloneableSpawn for Executor { - fn clone(&self) -> Box { - Box::new(Clone::clone(self)) - } -} - -/// Create tasks executor. -pub fn executor() -> Box { - Box::new(Executor::new()) -} diff --git a/primitives/core/src/testing.rs b/primitives/core/src/testing.rs index e512d3a39e2..5c4af736c4f 100644 --- a/primitives/core/src/testing.rs +++ b/primitives/core/src/testing.rs @@ -359,16 +359,16 @@ macro_rules! wasm_export_functions { }; } -/// An executor that supports spawning blocking futures in tests. +/// A task executor that can be used in tests. /// /// Internally this just wraps a `ThreadPool` with a pool size of `8`. This /// should ensure that we have enough threads in tests for spawning blocking futures. #[cfg(feature = "std")] #[derive(Clone)] -pub struct SpawnBlockingExecutor(futures::executor::ThreadPool); +pub struct TaskExecutor(futures::executor::ThreadPool); #[cfg(feature = "std")] -impl SpawnBlockingExecutor { +impl TaskExecutor { /// Create a new instance of `Self`. pub fn new() -> Self { let mut builder = futures::executor::ThreadPoolBuilder::new(); @@ -377,7 +377,7 @@ impl SpawnBlockingExecutor { } #[cfg(feature = "std")] -impl crate::traits::SpawnNamed for SpawnBlockingExecutor { +impl crate::traits::SpawnNamed for TaskExecutor { fn spawn_blocking(&self, _: &'static str, future: futures::future::BoxFuture<'static, ()>) { self.0.spawn_ok(future); } diff --git a/primitives/core/src/traits.rs b/primitives/core/src/traits.rs index 4481145818f..ab409b60d9c 100644 --- a/primitives/core/src/traits.rs +++ b/primitives/core/src/traits.rs @@ -352,26 +352,21 @@ impl CallInWasmExt { } } -/// Something that can spawn tasks and also can be cloned. -pub trait CloneableSpawn: futures::task::Spawn + Send + Sync { - /// Clone as heap-allocated handle. - fn clone(&self) -> Box; -} - sp_externalities::decl_extension! { /// Task executor extension. - pub struct TaskExecutorExt(Box); + pub struct TaskExecutorExt(Box); } impl TaskExecutorExt { /// New instance of task executor extension. - pub fn new(spawn_handle: Box) -> Self { - Self(spawn_handle) + pub fn new(spawn_handle: impl SpawnNamed + Send + 'static) -> Self { + Self(Box::new(spawn_handle)) } } -/// Something that can spawn futures (blocking and non-blocking) with am assigned name. -pub trait SpawnNamed { +/// Something that can spawn futures (blocking and non-blocking) with an assigned name. 
+#[dyn_clonable::clonable] +pub trait SpawnNamed: Clone + Send + Sync { /// Spawn the given blocking future. /// /// The given `name` is used to identify the future in tracing. @@ -381,3 +376,13 @@ pub trait SpawnNamed { /// The given `name` is used to identify the future in tracing. fn spawn(&self, name: &'static str, future: futures::future::BoxFuture<'static, ()>); } + +impl SpawnNamed for Box { + fn spawn_blocking(&self, name: &'static str, future: futures::future::BoxFuture<'static, ()>) { + (**self).spawn_blocking(name, future) + } + + fn spawn(&self, name: &'static str, future: futures::future::BoxFuture<'static, ()>) { + (**self).spawn(name, future) + } +} diff --git a/primitives/externalities/src/extensions.rs b/primitives/externalities/src/extensions.rs index c75877e67db..08d81e46c88 100644 --- a/primitives/externalities/src/extensions.rs +++ b/primitives/externalities/src/extensions.rs @@ -108,9 +108,9 @@ pub struct Extensions { } impl std::fmt::Debug for Extensions { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "Extensions: ({})", self.extensions.len()) - } + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Extensions: ({})", self.extensions.len()) + } } impl Extensions { diff --git a/primitives/io/src/batch_verifier.rs b/primitives/io/src/batch_verifier.rs index 642e77504d0..39229b1200b 100644 --- a/primitives/io/src/batch_verifier.rs +++ b/primitives/io/src/batch_verifier.rs @@ -17,9 +17,9 @@ //! Batch/parallel verification. -use sp_core::{ed25519, sr25519, ecdsa, crypto::Pair, traits::CloneableSpawn}; +use sp_core::{ed25519, sr25519, ecdsa, crypto::Pair, traits::SpawnNamed}; use std::sync::{Arc, atomic::{AtomicBool, Ordering as AtomicOrdering}}; -use futures::{future::FutureExt, task::FutureObj, channel::oneshot}; +use futures::{future::FutureExt, channel::oneshot}; #[derive(Debug, Clone)] struct Sr25519BatchItem { @@ -35,14 +35,14 @@ struct Sr25519BatchItem { /// call `verify_and_clear to get a result. After that, batch verifier is ready for the /// next batching job. pub struct BatchVerifier { - scheduler: Box, + scheduler: Box, sr25519_items: Vec, invalid: Arc, pending_tasks: Vec>, } impl BatchVerifier { - pub fn new(scheduler: Box) -> Self { + pub fn new(scheduler: Box) -> Self { BatchVerifier { scheduler, sr25519_items: Default::default(), @@ -56,7 +56,9 @@ impl BatchVerifier { /// Returns `false` if there was already an invalid verification or if /// the verification could not be spawned. fn spawn_verification_task( - &mut self, f: impl FnOnce() -> bool + Send + 'static, + &mut self, + f: impl FnOnce() -> bool + Send + 'static, + name: &'static str, ) -> bool { // there is already invalid transaction encountered if self.invalid.load(AtomicOrdering::Relaxed) { return false; } @@ -65,7 +67,8 @@ impl BatchVerifier { let (sender, receiver) = oneshot::channel(); self.pending_tasks.push(receiver); - self.scheduler.spawn_obj(FutureObj::new( + self.scheduler.spawn( + name, async move { if !f() { invalid_clone.store(true, AtomicOrdering::Relaxed); @@ -75,15 +78,10 @@ impl BatchVerifier { log::warn!("Verification halted while result was pending"); invalid_clone.store(true, AtomicOrdering::Relaxed); } - }.boxed() - )) - .map_err(|_| { - log::debug!( - target: "runtime", - "Batch-verification returns false because failed to spawn background task.", - ) - }) - .is_ok() + }.boxed(), + ); + + true } /// Push ed25519 signature to verify. 
@@ -96,7 +94,10 @@ impl BatchVerifier { pub_key: ed25519::Public, message: Vec, ) -> bool { - self.spawn_verification_task(move || ed25519::Pair::verify(&signature, &message, &pub_key)) + self.spawn_verification_task( + move || ed25519::Pair::verify(&signature, &message, &pub_key), + "substrate_ed25519_verify", + ) } /// Push sr25519 signature to verify. @@ -114,7 +115,10 @@ impl BatchVerifier { if self.sr25519_items.len() >= 128 { let items = std::mem::take(&mut self.sr25519_items); - self.spawn_verification_task(move || Self::verify_sr25519_batch(items)) + self.spawn_verification_task( + move || Self::verify_sr25519_batch(items), + "substrate_sr25519_verify", + ) } else { true } @@ -130,7 +134,10 @@ impl BatchVerifier { pub_key: ecdsa::Public, message: Vec, ) -> bool { - self.spawn_verification_task(move || ecdsa::Pair::verify(&signature, &message, &pub_key)) + self.spawn_verification_task( + move || ecdsa::Pair::verify(&signature, &message, &pub_key), + "substrate_ecdsa_verify", + ) } fn verify_sr25519_batch(items: Vec) -> bool { @@ -161,23 +168,24 @@ impl BatchVerifier { if pending.len() > 0 { let (sender, receiver) = std::sync::mpsc::channel(); - if self.scheduler.spawn_obj(FutureObj::new(async move { - futures::future::join_all(pending).await; - sender.send(()) - .expect("Channel never panics if receiver is live. \ - Receiver is always live until received this data; qed. "); - }.boxed())).is_err() { - log::debug!( + self.scheduler.spawn( + "substrate_batch_verify_join", + async move { + futures::future::join_all(pending).await; + sender.send(()) + .expect("Channel never panics if receiver is live. \ + Receiver is always live until received this data; qed. "); + }.boxed(), + ); + + if receiver.recv().is_err() { + log::warn!( target: "runtime", - "Batch-verification returns false because failed to spawn background task.", + "Haven't received async result from verification task. Returning false.", ); return false; } - if receiver.recv().is_err() { - log::warn!(target: "runtime", "Haven't received async result from verification task. 
Returning false."); - return false; - } } log::trace!( diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 6c99a5c7519..59d1c4f37ef 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -1206,9 +1206,10 @@ pub type SubstrateHostFunctions = ( #[cfg(test)] mod tests { use super::*; - use sp_core::map; use sp_state_machine::BasicExternalities; - use sp_core::storage::Storage; + use sp_core::{ + storage::Storage, map, traits::TaskExecutorExt, testing::TaskExecutor, + }; use std::any::TypeId; #[test] @@ -1274,7 +1275,9 @@ mod tests { #[test] fn batch_verify_start_finish_works() { - let mut ext = BasicExternalities::with_tasks_executor(); + let mut ext = BasicExternalities::default(); + ext.register_extension(TaskExecutorExt::new(TaskExecutor::new())); + ext.execute_with(|| { crypto::start_batch_verify(); }); @@ -1290,7 +1293,8 @@ mod tests { #[test] fn long_sr25519_batching() { - let mut ext = BasicExternalities::with_tasks_executor(); + let mut ext = BasicExternalities::default(); + ext.register_extension(TaskExecutorExt::new(TaskExecutor::new())); ext.execute_with(|| { let pair = sr25519::Pair::generate_with_phrase(None).0; crypto::start_batch_verify(); @@ -1320,7 +1324,8 @@ mod tests { #[test] fn batching_works() { - let mut ext = BasicExternalities::with_tasks_executor(); + let mut ext = BasicExternalities::default(); + ext.register_extension(TaskExecutorExt::new(TaskExecutor::new())); ext.execute_with(|| { // invalid ed25519 signature crypto::start_batch_verify(); diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 02031a2df9a..5d65c13c664 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -871,7 +871,11 @@ mod tests { #[test] #[should_panic(expected = "Signature verification has not been called")] fn batching_still_finishes_when_not_called_directly() { - let mut ext = sp_state_machine::BasicExternalities::with_tasks_executor(); + let mut ext = sp_state_machine::BasicExternalities::default(); + ext.register_extension( + sp_core::traits::TaskExecutorExt::new(sp_core::testing::TaskExecutor::new()), + ); + ext.execute_with(|| { let _batching = SignatureBatching::start(); sp_io::crypto::sr25519_verify( diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 6f1d2a4b5ad..3ddf79dbd91 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -33,7 +33,7 @@ use sp_core::{ }; use log::warn; use codec::Encode; -use sp_externalities::Extensions; +use sp_externalities::{Extensions, Extension}; /// Simple Map-based Externalities impl. #[derive(Debug)] @@ -53,17 +53,6 @@ impl BasicExternalities { Self::new(Storage::default()) } - /// New basic extternalities with tasks executor. - pub fn with_tasks_executor() -> Self { - let mut extensions = Extensions::default(); - extensions.register(sp_core::traits::TaskExecutorExt(sp_core::tasks::executor())); - - Self { - inner: Storage::default(), - extensions, - } - } - /// Insert key/value pub fn insert(&mut self, k: StorageKey, v: StorageValue) -> Option { self.inner.top.insert(k, v) @@ -107,6 +96,11 @@ impl BasicExternalities { pub fn extensions(&mut self) -> &mut Extensions { &mut self.extensions } + + /// Register an extension. 
+ pub fn register_extension(&mut self, ext: impl Extension) { + self.extensions.register(ext); + } } impl PartialEq for BasicExternalities { diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index e5e48bc47cd..ee0980f59b9 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -26,7 +26,7 @@ use codec::{Decode, Encode, Codec}; use sp_core::{ offchain::storage::OffchainOverlayedChanges, storage::ChildInfo, NativeOrEncoded, NeverNativeValue, hexdisplay::HexDisplay, - traits::{CodeExecutor, CallInWasmExt, RuntimeCode}, + traits::{CodeExecutor, CallInWasmExt, RuntimeCode, SpawnNamed}, }; use sp_externalities::Extensions; @@ -77,7 +77,6 @@ pub use trie_backend::TrieBackend; pub use error::{Error, ExecutionError}; pub use in_memory_backend::new_in_mem; pub use stats::{UsageInfo, UsageUnit, StateMachineStats}; -pub use sp_core::traits::CloneableSpawn; const PROOF_CLOSE_TRANSACTION: &str = "\ Closing a transaction that was started in this function. Client initiated transactions @@ -233,7 +232,7 @@ impl<'a, B, H, N, Exec> StateMachine<'a, B, H, N, Exec> where call_data: &'a [u8], mut extensions: Extensions, runtime_code: &'a RuntimeCode, - spawn_handle: Box, + spawn_handle: impl SpawnNamed + Send + 'static, ) -> Self { extensions.register(CallInWasmExt::new(exec.clone())); extensions.register(sp_core::traits::TaskExecutorExt::new(spawn_handle)); @@ -463,11 +462,11 @@ impl<'a, B, H, N, Exec> StateMachine<'a, B, H, N, Exec> where } /// Prove execution using the given state backend, overlayed changes, and call executor. -pub fn prove_execution( +pub fn prove_execution( mut backend: B, overlay: &mut OverlayedChanges, exec: &Exec, - spawn_handle: Box, + spawn_handle: Spawn, method: &str, call_data: &[u8], runtime_code: &RuntimeCode, @@ -478,10 +477,11 @@ where H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + Clone + 'static, N: crate::changes_trie::BlockNumber, + Spawn: SpawnNamed + Send + 'static, { let trie_backend = backend.as_trie_backend() .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; - prove_execution_on_trie_backend::<_, _, N, _>( + prove_execution_on_trie_backend::<_, _, N, _, _>( trie_backend, overlay, exec, @@ -501,11 +501,11 @@ where /// /// Note: changes to code will be in place if this call is made again. For running partial /// blocks (e.g. a transaction at a time), ensure a different method is used. -pub fn prove_execution_on_trie_backend( +pub fn prove_execution_on_trie_backend( trie_backend: &TrieBackend, overlay: &mut OverlayedChanges, exec: &Exec, - spawn_handle: Box, + spawn_handle: Spawn, method: &str, call_data: &[u8], runtime_code: &RuntimeCode, @@ -516,6 +516,7 @@ where H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + 'static + Clone, N: crate::changes_trie::BlockNumber, + Spawn: SpawnNamed + Send + 'static, { let mut offchain_overlay = OffchainOverlayedChanges::default(); let proving_backend = proving_backend::ProvingBackend::new(trie_backend); @@ -541,12 +542,12 @@ where } /// Check execution proof, generated by `prove_execution` call. 
-pub fn execution_proof_check( +pub fn execution_proof_check( root: H::Out, proof: StorageProof, overlay: &mut OverlayedChanges, exec: &Exec, - spawn_handle: Box, + spawn_handle: Spawn, method: &str, call_data: &[u8], runtime_code: &RuntimeCode, @@ -556,9 +557,10 @@ where Exec: CodeExecutor + Clone + 'static, H::Out: Ord + 'static + codec::Codec, N: crate::changes_trie::BlockNumber, + Spawn: SpawnNamed + Send + 'static, { let trie_backend = create_proof_check_backend::(root.into(), proof)?; - execution_proof_check_on_trie_backend::<_, N, _>( + execution_proof_check_on_trie_backend::<_, N, _, _>( &trie_backend, overlay, exec, @@ -570,11 +572,11 @@ where } /// Check execution proof on proving backend, generated by `prove_execution` call. -pub fn execution_proof_check_on_trie_backend( +pub fn execution_proof_check_on_trie_backend( trie_backend: &TrieBackend, H>, overlay: &mut OverlayedChanges, exec: &Exec, - spawn_handle: Box, + spawn_handle: Spawn, method: &str, call_data: &[u8], runtime_code: &RuntimeCode, @@ -584,6 +586,7 @@ where H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + Clone + 'static, N: crate::changes_trie::BlockNumber, + Spawn: SpawnNamed + Send + 'static, { let mut offchain_overlay = OffchainOverlayedChanges::default(); let mut sm = StateMachine::<_, H, N, Exec>::new( @@ -765,7 +768,9 @@ mod tests { use super::*; use super::ext::Ext; use super::changes_trie::Configuration as ChangesTrieConfig; - use sp_core::{map, traits::{Externalities, RuntimeCode}}; + use sp_core::{ + map, traits::{Externalities, RuntimeCode}, testing::TaskExecutor, + }; use sp_runtime::traits::BlakeTwo256; #[derive(Clone)] @@ -859,7 +864,7 @@ mod tests { &[], Default::default(), &wasm_code, - sp_core::tasks::executor(), + TaskExecutor::new(), ); assert_eq!( @@ -891,7 +896,7 @@ mod tests { &[], Default::default(), &wasm_code, - sp_core::tasks::executor(), + TaskExecutor::new(), ); assert_eq!(state_machine.execute(ExecutionStrategy::NativeElseWasm).unwrap(), vec![66]); @@ -920,7 +925,7 @@ mod tests { &[], Default::default(), &wasm_code, - sp_core::tasks::executor(), + TaskExecutor::new(), ); assert!( @@ -947,23 +952,23 @@ mod tests { // fetch execution proof from 'remote' full node let remote_backend = trie_backend::tests::test_trie(); let remote_root = remote_backend.storage_root(std::iter::empty()).0; - let (remote_result, remote_proof) = prove_execution::<_, _, u64, _>( + let (remote_result, remote_proof) = prove_execution::<_, _, u64, _, _>( remote_backend, &mut Default::default(), &executor, - sp_core::tasks::executor(), + TaskExecutor::new(), "test", &[], &RuntimeCode::empty(), ).unwrap(); // check proof locally - let local_result = execution_proof_check::( + let local_result = execution_proof_check::( remote_root, remote_proof, &mut Default::default(), &executor, - sp_core::tasks::executor(), + TaskExecutor::new(), "test", &[], &RuntimeCode::empty(), diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index cccb044f7e3..be7dc6df9de 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -39,6 +39,8 @@ use sp_core::{ well_known_keys::{CHANGES_TRIE_CONFIG, CODE, HEAP_PAGES, is_child_storage_key}, Storage, }, + traits::TaskExecutorExt, + testing::TaskExecutor, }; use codec::Encode; use sp_externalities::{Extensions, Extension}; @@ -109,8 +111,7 @@ impl TestExternalities let offchain_overlay = OffchainOverlayedChanges::enabled(); let mut extensions = Extensions::default(); - 
extensions.register(sp_core::traits::TaskExecutorExt(sp_core::tasks::executor())); - + extensions.register(TaskExecutorExt::new(TaskExecutor::new())); let offchain_db = TestPersistentOffchainDB::new(); diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index fd5b0e29192..060d4879675 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -23,7 +23,7 @@ pub mod client_ext; pub use sc_client_api::{ execution_extensions::{ExecutionStrategies, ExecutionExtensions}, - ForkBlocks, BadBlocks, CloneableSpawn, + ForkBlocks, BadBlocks, }; pub use sc_client_db::{Backend, self}; pub use sp_consensus; @@ -33,7 +33,7 @@ pub use sp_keyring::{ ed25519::Keyring as Ed25519Keyring, sr25519::Keyring as Sr25519Keyring, }; -pub use sp_core::{traits::BareCryptoStorePtr, tasks::executor as tasks_executor}; +pub use sp_core::traits::BareCryptoStorePtr; pub use sp_runtime::{Storage, StorageChild}; pub use sp_state_machine::ExecutionStrategy; pub use sc_service::{RpcHandlers, RpcSession, client}; @@ -254,7 +254,12 @@ impl TestClientBuilder< let executor = executor.into().unwrap_or_else(|| NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) ); - let executor = LocalCallExecutor::new(self.backend.clone(), executor, tasks_executor(), Default::default()); + let executor = LocalCallExecutor::new( + self.backend.clone(), + executor, + Box::new(sp_core::testing::TaskExecutor::new()), + Default::default(), + ); self.build_with_executor(executor) } diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index 97cf13ed2ae..5b343f7748e 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -350,7 +350,12 @@ pub fn new_light() -> ( let blockchain = Arc::new(sc_light::Blockchain::new(storage)); let backend = Arc::new(LightBackend::new(blockchain)); let executor = new_native_executor(); - let local_call_executor = client::LocalCallExecutor::new(backend.clone(), executor, sp_core::tasks::executor(), Default::default()); + let local_call_executor = client::LocalCallExecutor::new( + backend.clone(), + executor, + Box::new(sp_core::testing::TaskExecutor::new()), + Default::default(), + ); let call_executor = LightExecutor::new( backend.clone(), local_call_executor, diff --git a/utils/frame/benchmarking-cli/src/command.rs b/utils/frame/benchmarking-cli/src/command.rs index 550d2c12c83..7df23f8dbfc 100644 --- a/utils/frame/benchmarking-cli/src/command.rs +++ b/utils/frame/benchmarking-cli/src/command.rs @@ -24,11 +24,8 @@ use sc_executor::NativeExecutor; use sp_state_machine::StateMachine; use sp_externalities::Extensions; use sc_service::{Configuration, NativeExecutionDispatch}; -use sp_runtime::{ - traits::{Block as BlockT, Header as HeaderT, NumberFor}, -}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use sp_core::{ - tasks, testing::KeyStore, traits::KeystoreExt, offchain::{OffchainExt, testing::TestOffchainExt}, @@ -81,7 +78,7 @@ impl BenchmarkCmd { ).encode(), extensions, &sp_state_machine::backend::BackendRuntimeCode::new(&state).runtime_code()?, - tasks::executor(), + sp_core::testing::TaskExecutor::new(), ) .execute(strategy.into()) .map_err(|e| format!("Error executing runtime benchmark: {:?}", e))?; diff --git a/utils/frame/rpc/system/src/lib.rs b/utils/frame/rpc/system/src/lib.rs index 32042362366..99e38aaac03 100644 --- a/utils/frame/rpc/system/src/lib.rs +++ b/utils/frame/rpc/system/src/lib.rs @@ -298,7 +298,7 @@ mod tests { // given let client = 
Arc::new(substrate_test_runtime_client::new()); - let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full( Default::default(), Arc::new(FullChainApi::new(client.clone(), None)), @@ -338,7 +338,7 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); - let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full( Default::default(), Arc::new(FullChainApi::new(client.clone(), None)), @@ -362,7 +362,7 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); - let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full( Default::default(), Arc::new(FullChainApi::new(client.clone(), None)), @@ -395,7 +395,7 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); - let spawner = sp_core::testing::SpawnBlockingExecutor::new(); + let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full( Default::default(), Arc::new(FullChainApi::new(client.clone(), None)), -- GitLab From 8f3d88cd994c573f3ee75cc079dcc3912320ec63 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Mon, 27 Jul 2020 11:34:02 +0200 Subject: [PATCH 008/132] Add the Substrate Service Tasks dashboard (#6665) --- .../substrate-service-tasks.json | 1232 +++++++++++++++++ 1 file changed, 1232 insertions(+) create mode 100644 .maintain/monitoring/grafana-dashboards/substrate-service-tasks.json diff --git a/.maintain/monitoring/grafana-dashboards/substrate-service-tasks.json b/.maintain/monitoring/grafana-dashboards/substrate-service-tasks.json new file mode 100644 index 00000000000..245071c210b --- /dev/null +++ b/.maintain/monitoring/grafana-dashboards/substrate-service-tasks.json @@ -0,0 +1,1232 @@ +{ + "__inputs": [ + { + "name": "VAR_METRIC_NAMESPACE", + "type": "constant", + "label": "Prefix of the metrics", + "value": "polkadot", + "description": "" + } + ], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "6.7.3" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "limit": 100, + "name": "Annotations & Alerts", + "showIn": 0, + "type": "dashboard" + }, + { + "datasource": "$data_source", + "enable": true, + "expr": "increase(${metric_namespace}_tasks_ended_total{reason=\"panic\", instance=~\"${nodename}\"}[5m])", + "hide": true, + "iconColor": "rgba(255, 96, 96, 1)", + "limit": 100, + "name": "Task panics", + "rawQuery": "SELECT\n extract(epoch from time_column) AS time,\n text_column as text,\n tags_column as tags\nFROM\n metric_table\nWHERE\n $__timeFilter(time_column)\n", + "showIn": 0, + "step": "", + "tags": [], + "textFormat": "{{instance}} - {{task_name}}", + "titleFormat": "Panic!", + "type": "tags" + }, + { + "datasource": "$data_source", + "enable": true, + "expr": "changes(${metric_namespace}_process_start_time_seconds{instance=~\"${nodename}\"}[5m])", + "hide": false, + "iconColor": "#8AB8FF", + "name": "Node reboots", + "showIn": 0, + "step": "", + "textFormat": "{{instance}}", + "titleFormat": "Reboots" + } + ] 
+ }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "iteration": 1594822742772, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 25, + "panels": [], + "title": "CPU & Memory", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$data_source", + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 1 + }, + "hiddenSeries": false, + "id": 9, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "stddev-above", + "fillBelowTo": "stddev-below", + "hideTooltip": true, + "lines": false + }, + { + "alias": "stddev-below", + "hideTooltip": true, + "lines": false + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "avg(${metric_namespace}_cpu_usage_percentage{instance=~\"${nodename}\"})", + "interval": "", + "legendFormat": "cpu-usage", + "refId": "A" + }, + { + "expr": "avg(${metric_namespace}_cpu_usage_percentage{instance=~\"${nodename}\"}) - stddev(${metric_namespace}_cpu_usage_percentage{instance=~\"${nodename}\"})", + "interval": "", + "legendFormat": "stddev-below", + "refId": "B" + }, + { + "expr": "avg(${metric_namespace}_cpu_usage_percentage{instance=~\"${nodename}\"}) + stddev(${metric_namespace}_cpu_usage_percentage{instance=~\"${nodename}\"})", + "interval": "", + "legendFormat": "stddev-above", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Average CPU usage and standard deviation", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percent", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$data_source", + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 7 + }, + "hiddenSeries": false, + "id": 20, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "${metric_namespace}_memory_usage_bytes{instance=~\"${nodename}\"}", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory usage", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": 
null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "decbytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 13 + }, + "id": 29, + "panels": [], + "title": "Tasks", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$data_source", + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 14 + }, + "hiddenSeries": false, + "id": 11, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "avg(increase(${metric_namespace}_tasks_polling_duration_sum{instance=~\"${nodename}\"}[$__interval])) by (task_name) * 1000 / $__interval_ms", + "interval": "", + "legendFormat": "{{task_name}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU time spent on each task (average per node)", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$data_source", + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 20 + }, + "hiddenSeries": false, + "id": 30, + "interval": "", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "avg(rate(${metric_namespace}_tasks_polling_duration_count{instance=~\"${nodename}\"}[5m])) by (task_name)", + "interval": "", + "legendFormat": "{{task_name}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Task polling rate per second (average per node)", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "cps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": 
null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$data_source", + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 26 + }, + "hiddenSeries": false, + "id": 31, + "interval": "", + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "max(rate(${metric_namespace}_tasks_polling_duration_count{instance=~\"${nodename}\"}[5m])) by (task_name)", + "interval": "", + "legendFormat": "{{task_name}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Task polling rate per second (maximum per node)", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "cps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$data_source", + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 32 + }, + "hiddenSeries": false, + "id": 15, + "interval": "", + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null as zero", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "avg by(task_name) (irate(${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"}[5m]))", + "interval": "", + "legendFormat": "{{task_name}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Number of tasks started per second (average per node)", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 10, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$data_source", + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 38 + }, + "hiddenSeries": false, + "id": 16, + "interval": "", + "legend": { + "alignAsTable": 
false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null as zero", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "max by(task_name) (irate(${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"}[5m]))", + "interval": "", + "legendFormat": "{{task_name}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Number of tasks started per second (maximum over all nodes)", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 10, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$data_source", + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 44 + }, + "hiddenSeries": false, + "id": 2, + "interval": "", + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null as zero", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "avg by(task_name) (${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"} - sum(${metric_namespace}_tasks_ended_total{instance=~\"${nodename}\"}) without(reason))", + "interval": "", + "legendFormat": "{{task_name}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Number of tasks running (average per node)", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 10, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$data_source", + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 50 + }, + "hiddenSeries": false, + "id": 3, + "interval": "", + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null as zero", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": 
false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "max by(task_name) (${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"} - sum(${metric_namespace}_tasks_ended_total{instance=~\"${nodename}\"}) without(reason))", + "interval": "", + "legendFormat": "{{task_name}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Number of tasks running (maximum over all nodes)", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 10, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$data_source", + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 56 + }, + "hiddenSeries": false, + "id": 7, + "interval": "", + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null as zero", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": true, + "targets": [ + { + "expr": "avg(\n rate(${metric_namespace}_tasks_polling_duration_bucket{instance=~\"${nodename}\", le=\"+Inf\"}[1m])\n - ignoring(le)\n rate(${metric_namespace}_tasks_polling_duration_bucket{instance=~\"${nodename}\", le=\"1.024\"}[1m])\n) by (task_name) > 0", + "interval": "", + "legendFormat": "{{task_name}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Calls to `Future::poll` that took more than one second (average per node)", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "cps", + "label": "Calls to `Future::poll`/second", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 62 + }, + "id": 27, + "panels": [], + "title": "Misc", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$data_source", + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 63 + }, + "hiddenSeries": false, + "id": 23, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 
2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "${metric_namespace}_threads{instance=~\"${nodename}\"}", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Number of threads", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "30s", + "schemaVersion": 22, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": null, + "current": {}, + "datasource": "$data_source", + "definition": "${metric_namespace}_cpu_usage_percentage", + "hide": 0, + "includeAll": true, + "index": -1, + "label": "Instance filter", + "multi": true, + "name": "nodename", + "options": [], + "query": "${metric_namespace}_cpu_usage_percentage", + "refresh": 1, + "regex": "/instance=\"(.*?)\"/", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "current": { + "value": "${VAR_METRIC_NAMESPACE}", + "text": "${VAR_METRIC_NAMESPACE}" + }, + "hide": 2, + "label": "Prefix of the metrics", + "name": "metric_namespace", + "options": [ + { + "value": "${VAR_METRIC_NAMESPACE}", + "text": "${VAR_METRIC_NAMESPACE}" + } + ], + "query": "${VAR_METRIC_NAMESPACE}", + "skipUrlSync": false, + "type": "constant" + }, + { + "current": { + "selected": false, + "text": "prometheus.parity-mgmt", + "value": "prometheus.parity-mgmt" + }, + "hide": 0, + "includeAll": false, + "label": "Source of all the data", + "multi": false, + "name": "data_source", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + } + ] + }, + "time": { + "from": "now-12h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Substrate Service Tasks", + "uid": "3LA6XNqZz", + "variables": { + "list": [] + }, + "version": 44 +} -- GitLab From c619555df731289df3d34e6072dd52fb6305046c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Mon, 27 Jul 2020 11:45:00 +0200 Subject: [PATCH 009/132] seal: Fail instantiate if new contract is below subsistence threshold (#6719) * seal: Fail instantiate if new contract is below subsistence threshold We need each contract that exists to be above the subsistence threshold in order to keep up the guarantuee that we always leave a tombstone behind with the exception of a contract that called `ext_terminate`. 
* Fixup executor test * Bump runtime --- bin/node/executor/tests/basic.rs | 4 +++- bin/node/runtime/src/lib.rs | 2 +- frame/contracts/src/exec.rs | 13 ++++++++----- frame/contracts/src/lib.rs | 4 ++-- frame/contracts/src/tests.rs | 7 ++++--- frame/contracts/src/wasm/runtime.rs | 8 +++++--- 6 files changed, 23 insertions(+), 15 deletions(-) diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index 79160ebb9e8..f6dc1c3e7ea 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -591,6 +591,8 @@ fn deploying_wasm_contract_should_work() { &charlie(), ); + let subsistence = pallet_contracts::Config::::subsistence_threshold_uncached(); + let b = construct_block( &mut new_test_ext(compact_code_unwrap(), false), 1, @@ -610,7 +612,7 @@ fn deploying_wasm_contract_should_work() { signed: Some((charlie(), signed_extra(1, 0))), function: Call::Contracts( pallet_contracts::Call::instantiate::( - 1 * DOLLARS, + 1 * DOLLARS + subsistence, 500_000_000, transfer_ch, Vec::new() diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index f514e1819f1..ad748c45740 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -105,7 +105,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 256, + spec_version: 257, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index f6327f7f2d9..e8965692aa2 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -426,7 +426,10 @@ where )?; // Error out if insufficient remaining balance. - if T::Currency::free_balance(&dest) < nested.config.existential_deposit { + // We need each contract that exists to be above the subsistence threshold + // in order to keep up the guarantuee that we always leave a tombstone behind + // with the exception of a contract that called `ext_terminate`. + if T::Currency::free_balance(&dest) < nested.config.subsistence_threshold() { Err("insufficient remaining balance")? } @@ -1016,7 +1019,7 @@ mod tests { let mut gas_meter = GasMeter::::new(GAS_LIMIT); - let result = ctx.instantiate(1, &mut gas_meter, &code, vec![]); + let result = ctx.instantiate(cfg.subsistence_threshold(), &mut gas_meter, &code, vec![]); assert_matches!(result, Ok(_)); let mut toks = gas_meter.tokens().iter(); @@ -1306,7 +1309,7 @@ mod tests { set_balance(&ALICE, 100); let result = ctx.instantiate( - 1, + cfg.subsistence_threshold(), &mut GasMeter::::new(GAS_LIMIT), &input_data_ch, vec![1, 2, 3, 4], @@ -1549,7 +1552,7 @@ mod tests { // Instantiate a contract and save it's address in `instantiated_contract_address`. let (address, output) = ctx.ext.instantiate( &dummy_ch, - 15u64, + Config::::subsistence_threshold_uncached(), ctx.gas_meter, vec![] ).unwrap(); @@ -1679,7 +1682,7 @@ mod tests { set_balance(&ALICE, 100); let result = ctx.instantiate( - 1, + cfg.subsistence_threshold(), &mut GasMeter::::new(GAS_LIMIT), &rent_allowance_ch, vec![], diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 4b3a48119f2..003853102d6 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -743,7 +743,7 @@ impl Config { /// than the subsistence threshold in order to guarantee that a tombstone is created. 
/// /// The only way to completely kill a contract without a tombstone is calling `ext_terminate`. - fn subsistence_threshold(&self) -> BalanceOf { + pub fn subsistence_threshold(&self) -> BalanceOf { self.existential_deposit.saturating_add(self.tombstone_deposit) } @@ -751,7 +751,7 @@ impl Config { /// /// This is for cases where this value is needed in rent calculation rather than /// during contract execution. - fn subsistence_threshold_uncached() -> BalanceOf { + pub fn subsistence_threshold_uncached() -> BalanceOf { T::Currency::minimum_balance().saturating_add(T::TombstoneDeposit::get()) } } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 9051a81cc8a..0d2a2f7a314 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -385,13 +385,14 @@ fn instantiate_and_call_and_deposit_event() { .build() .execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let subsistence = super::Config::::subsistence_threshold_uncached(); assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); // Check at the end to get hash on error easily let creation = Contracts::instantiate( Origin::signed(ALICE), - 100, + subsistence, GAS_LIMIT, code_hash.into(), vec![], @@ -421,14 +422,14 @@ fn instantiate_and_call_and_deposit_event() { EventRecord { phase: Phase::Initialization, event: MetaEvent::balances( - pallet_balances::RawEvent::Endowed(BOB, 100) + pallet_balances::RawEvent::Endowed(BOB, subsistence) ), topics: vec![], }, EventRecord { phase: Phase::Initialization, event: MetaEvent::balances( - pallet_balances::RawEvent::Transfer(ALICE, BOB, 100) + pallet_balances::RawEvent::Transfer(ALICE, BOB, subsistence) ), topics: vec![], }, diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index a221e3c7cf4..0f07f2f4278 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -617,10 +617,12 @@ define_env!(Env, , // This function creates an account and executes the constructor defined in the code specified // by the code hash. The address of this new account is copied to `address_ptr` and its length // to `address_len_ptr`. The constructors output buffer is copied to `output_ptr` and its - // length to `output_len_ptr`. + // length to `output_len_ptr`. The copy of the output buffer and address can be skipped by + // supplying the sentinel value of `u32::max_value()` to `output_ptr` or `address_ptr`. // - // The copy of the output buffer and address can be skipped by supplying the sentinel value - // of `u32::max_value()` to `output_ptr` or `address_ptr`. + // After running the constructor it is verfied that the contract account holds at + // least the subsistence threshold. If that is not the case the instantion fails and + // the contract is not created. 
// // # Parameters // -- GitLab From 1bb7bb47eba17b9d0ecc2c8d5e4b9166434dce11 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Mon, 27 Jul 2020 16:31:35 +0200 Subject: [PATCH 010/132] Remove Unpin requirement for Slots (#6711) --- client/consensus/slots/src/slots.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/client/consensus/slots/src/slots.rs b/client/consensus/slots/src/slots.rs index 17a931b7c41..32316c56c9f 100644 --- a/client/consensus/slots/src/slots.rs +++ b/client/consensus/slots/src/slots.rs @@ -109,7 +109,7 @@ impl Slots { } } -impl Stream for Slots { +impl Stream for Slots { type Item = Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { @@ -165,3 +165,6 @@ impl Stream for Slots { } } } + +impl Unpin for Slots { +} -- GitLab From 21c02bcb1eda963bed645416bd725ae07bef3b11 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 27 Jul 2020 17:30:03 +0200 Subject: [PATCH 011/132] pallet-evm: add support for tuple-based precompile declarations (#6681) * pallet-evm: add support for tuple-based precompile declarations * Add missing license header * Switch to use impl_for_tuples * Remove unnecessary impl for () --- Cargo.lock | 1 + frame/evm/Cargo.toml | 1 + frame/evm/src/lib.rs | 26 ++------------ frame/evm/src/precompiles.rs | 69 ++++++++++++++++++++++++++++++++++++ 4 files changed, 73 insertions(+), 24 deletions(-) create mode 100644 frame/evm/src/precompiles.rs diff --git a/Cargo.lock b/Cargo.lock index 40332c7e9e8..866201ce378 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4389,6 +4389,7 @@ dependencies = [ "evm", "frame-support", "frame-system", + "impl-trait-for-tuples", "pallet-balances", "pallet-timestamp", "parity-scale-codec", diff --git a/frame/evm/Cargo.toml b/frame/evm/Cargo.toml index 768d85bc94b..05f44f6ec02 100644 --- a/frame/evm/Cargo.toml +++ b/frame/evm/Cargo.toml @@ -26,6 +26,7 @@ primitive-types = { version = "0.7.0", default-features = false, features = ["rl rlp = { version = "0.4", default-features = false } evm = { version = "0.17", default-features = false } sha3 = { version = "0.8", default-features = false } +impl-trait-for-tuples = "0.1" [features] default = ["std"] diff --git a/frame/evm/src/lib.rs b/frame/evm/src/lib.rs index 68e86a9b1f0..0cbeac6fe2d 100644 --- a/frame/evm/src/lib.rs +++ b/frame/evm/src/lib.rs @@ -21,8 +21,10 @@ #![cfg_attr(not(feature = "std"), no_std)] mod backend; +mod precompiles; mod tests; +pub use crate::precompiles::{Precompile, Precompiles}; pub use crate::backend::{Account, Log, Vicinity, Backend}; use sp_std::vec::Vec; @@ -175,30 +177,6 @@ impl> AddressMapping for HashedAddressMapping - ) -> Option, usize), ExitError>>; -} - -impl Precompiles for () { - fn execute( - _address: H160, - _input: &[u8], - _target_gas: Option - ) -> Option, usize), ExitError>> { - None - } -} - /// Substrate system chain ID. pub struct SystemChainId; diff --git a/frame/evm/src/precompiles.rs b/frame/evm/src/precompiles.rs new file mode 100644 index 00000000000..a6a10d45a20 --- /dev/null +++ b/frame/evm/src/precompiles.rs @@ -0,0 +1,69 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use sp_std::vec::Vec; +use sp_core::H160; +use evm::{ExitError, ExitSucceed}; +use impl_trait_for_tuples::impl_for_tuples; + +/// Custom precompiles to be used by EVM engine. +pub trait Precompiles { + /// Try to execute the code address as precompile. If the code address is not + /// a precompile or the precompile is not yet available, return `None`. + /// Otherwise, calculate the amount of gas needed with given `input` and + /// `target_gas`. Return `Some(Ok(status, output, gas_used))` if the execution + /// is successful. Otherwise return `Some(Err(_))`. + fn execute( + address: H160, + input: &[u8], + target_gas: Option, + ) -> Option, usize), ExitError>>; +} + +/// One single precompile used by EVM engine. +pub trait Precompile { + /// Try to execute the precompile. Calculate the amount of gas needed with given `input` and + /// `target_gas`. Return `Ok(status, output, gas_used)` if the execution is + /// successful. Otherwise return `Err(_)`. + fn execute( + input: &[u8], + target_gas: Option, + ) -> core::result::Result<(ExitSucceed, Vec, usize), ExitError>; +} + +#[impl_for_tuples(16)] +#[tuple_types_no_default_trait_bound] +impl Precompiles for Tuple { + for_tuples!( where #( Tuple: Precompile )* ); + + fn execute( + address: H160, + input: &[u8], + target_gas: Option, + ) -> Option, usize), ExitError>> { + let mut index = 0; + + for_tuples!( #( + index += 1; + if address == H160::from_low_u64_be(index) { + return Some(Tuple::execute(input, target_gas)) + } + )* ); + + None + } +} -- GitLab From f1c483a47c5a5e38a278bc1f16b92003c06e7e93 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Tue, 28 Jul 2020 11:13:11 +0200 Subject: [PATCH 012/132] prometheus: don't use protobuf feature (#6744) --- Cargo.lock | 11 ++--------- primitives/utils/Cargo.toml | 2 +- utils/prometheus/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 866201ce378..6a8d9c09054 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5442,14 +5442,13 @@ dependencies = [ [[package]] name = "prometheus" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0575e258dab62268e7236d7307caa38848acbda7ec7ab87bd9093791e999d20" +checksum = "dd0ced56dee39a6e960c15c74dc48849d614586db2eaada6497477af7c7811cd" dependencies = [ "cfg-if", "fnv", "lazy_static", - "protobuf", "spin", "thiserror", ] @@ -5505,12 +5504,6 @@ dependencies = [ "prost", ] -[[package]] -name = "protobuf" -version = "2.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e86d370532557ae7573551a1ec8235a0f8d6cb276c7c9e6aa490b511c447485" - [[package]] name = "pwasm-utils" version = "0.12.0" diff --git a/primitives/utils/Cargo.toml b/primitives/utils/Cargo.toml index 41d7e4cf977..a554a44ce44 100644 --- a/primitives/utils/Cargo.toml +++ b/primitives/utils/Cargo.toml @@ -12,7 +12,7 @@ description = "I/O for Substrate runtimes" futures = "0.3.4" futures-core = "0.3.4" lazy_static = "1.4.0" -prometheus = "0.8.0" +prometheus = { version = "0.9.0", default-features = false } futures-timer = "3.0.2" 
[features] diff --git a/utils/prometheus/Cargo.toml b/utils/prometheus/Cargo.toml index 6a2e993a49a..805ea19cdc6 100644 --- a/utils/prometheus/Cargo.toml +++ b/utils/prometheus/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.8" -prometheus = "0.8" +prometheus = { version = "0.9", default-features = false } futures-util = { version = "0.3.1", default-features = false, features = ["io"] } derive_more = "0.99" -- GitLab From f488598287b48c79cba9bc7c0da748d3d852862a Mon Sep 17 00:00:00 2001 From: Ashley Date: Tue, 28 Jul 2020 14:38:38 +0200 Subject: [PATCH 013/132] Use node_template_runtime::opaque::Block instead of node_template_runtime::Block (#6737) --- bin/node-template/node/src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 4c41e988d0a..7a90a7b48a8 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use std::time::Duration; use sc_client_api::{ExecutorProvider, RemoteBackend}; -use node_template_runtime::{self, Block, RuntimeApi}; +use node_template_runtime::{self, opaque::Block, RuntimeApi}; use sc_service::{error::Error as ServiceError, Configuration, ServiceComponents, TaskManager}; use sp_inherents::InherentDataProviders; use sc_executor::native_executor_instance; -- GitLab From b066029a23fdc0e6b7224a3c241c16f5122dd31b Mon Sep 17 00:00:00 2001 From: Ashley Date: Tue, 28 Jul 2020 15:21:33 +0200 Subject: [PATCH 014/132] Various small improvements to service construction. (#6738) * Remove service components and add build_network, build_offchain_workers etc * Improve transaction pool api * Remove commented out line * Add PartialComponents * Add BuildNetworkParams, documentation * Remove unused imports in tests * Apply suggestions from code review Co-authored-by: Nikolay Volf * Remove unused imports in node-bench Co-authored-by: Nikolay Volf --- Cargo.lock | 1 + bin/node-template/node/src/command.rs | 8 +- bin/node-template/node/src/service.rs | 187 +++++++------- bin/node/bench/src/txpool.rs | 5 +- bin/node/cli/src/command.rs | 8 +- bin/node/cli/src/service.rs | 175 ++++++++------ bin/node/rpc/src/lib.rs | 2 +- .../basic-authorship/src/basic_authorship.rs | 6 +- client/basic-authorship/src/lib.rs | 1 - client/finality-grandpa/src/finality_proof.rs | 12 + client/offchain/src/lib.rs | 1 - client/rpc/src/author/tests.rs | 1 - client/service/Cargo.toml | 1 + client/service/src/builder.rs | 228 ++++++++++-------- client/service/src/lib.rs | 54 +++-- client/transaction-pool/src/lib.rs | 37 +-- utils/frame/rpc/system/src/lib.rs | 6 +- 17 files changed, 404 insertions(+), 329 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6a8d9c09054..2ce99bfbbf8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7039,6 +7039,7 @@ dependencies = [ "sp-core", "sp-externalities", "sp-finality-grandpa", + "sp-inherents", "sp-io", "sp-runtime", "sp-session", diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index 81b3ce779ac..b3f1cfaf11f 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -19,8 +19,8 @@ use crate::chain_spec; use crate::cli::Cli; use crate::service; use sc_cli::{SubstrateCli, RuntimeVersion, Role, ChainSpec}; -use sc_service::ServiceParams; -use crate::service::new_full_params; +use sc_service::PartialComponents; +use crate::service::new_partial; impl SubstrateCli for Cli { fn 
impl_name() -> String { @@ -70,8 +70,8 @@ pub fn run() -> sc_cli::Result<()> { Some(subcommand) => { let runner = cli.create_runner(subcommand)?; runner.run_subcommand(subcommand, |config| { - let (ServiceParams { client, backend, task_manager, import_queue, .. }, ..) - = new_full_params(config)?; + let PartialComponents { client, backend, task_manager, import_queue, .. } + = new_partial(&config)?; Ok((client, backend, import_queue, task_manager)) }) } diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 7a90a7b48a8..599560355ec 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -4,14 +4,12 @@ use std::sync::Arc; use std::time::Duration; use sc_client_api::{ExecutorProvider, RemoteBackend}; use node_template_runtime::{self, opaque::Block, RuntimeApi}; -use sc_service::{error::Error as ServiceError, Configuration, ServiceComponents, TaskManager}; +use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; use sp_inherents::InherentDataProviders; use sc_executor::native_executor_instance; pub use sc_executor::NativeExecutor; use sp_consensus_aura::sr25519::{AuthorityPair as AuraPair}; -use sc_finality_grandpa::{ - FinalityProofProvider as GrandpaFinalityProofProvider, StorageAndProofProvider, SharedVoterState, -}; +use sc_finality_grandpa::{FinalityProofProvider as GrandpaFinalityProofProvider, SharedVoterState}; // Our native executor instance. native_executor_instance!( @@ -24,18 +22,15 @@ type FullClient = sc_service::TFullClient; type FullBackend = sc_service::TFullBackend; type FullSelectChain = sc_consensus::LongestChain; -pub fn new_full_params(config: Configuration) -> Result<( - sc_service::ServiceParams< - Block, FullClient, - sc_consensus_aura::AuraImportQueue, - sc_transaction_pool::FullPool, - (), FullBackend, - >, - FullSelectChain, - sp_inherents::InherentDataProviders, - sc_finality_grandpa::GrandpaBlockImport, - sc_finality_grandpa::LinkHalf -), ServiceError> { +pub fn new_partial(config: &Configuration) -> Result, + sc_transaction_pool::FullPool, + ( + sc_finality_grandpa::GrandpaBlockImport, + sc_finality_grandpa::LinkHalf + ) +>, ServiceError> { let inherent_data_providers = sp_inherents::InherentDataProviders::new(); let (client, backend, keystore, task_manager) = @@ -44,12 +39,8 @@ pub fn new_full_params(config: Configuration) -> Result<( let select_chain = sc_consensus::LongestChain::new(backend.clone()); - let pool_api = sc_transaction_pool::FullChainApi::new( - client.clone(), config.prometheus_registry(), - ); let transaction_pool = sc_transaction_pool::BasicPool::new_full( config.transaction_pool.clone(), - std::sync::Arc::new(pool_api), config.prometheus_registry(), task_manager.spawn_handle(), client.clone(), @@ -74,56 +65,62 @@ pub fn new_full_params(config: Configuration) -> Result<( config.prometheus_registry(), )?; - let provider = client.clone() as Arc>; - let finality_proof_provider = - Arc::new(GrandpaFinalityProofProvider::new(backend.clone(), provider)); - - let params = sc_service::ServiceParams { - backend, client, import_queue, keystore, task_manager, transaction_pool, - config, - block_announce_validator_builder: None, - finality_proof_request_builder: None, - finality_proof_provider: Some(finality_proof_provider), - on_demand: None, - remote_blockchain: None, - rpc_extensions_builder: Box::new(|_| ()), - }; - - Ok(( - params, select_chain, inherent_data_providers, - grandpa_block_import, grandpa_link, - )) + Ok(sc_service::PartialComponents { 
+ client, backend, task_manager, import_queue, keystore, select_chain, transaction_pool, + inherent_data_providers, + other: (grandpa_block_import, grandpa_link), + }) } /// Builds a new service for a full client. pub fn new_full(config: Configuration) -> Result { - let ( - params, select_chain, inherent_data_providers, - block_import, grandpa_link, - ) = new_full_params(config)?; - - let ( - role, force_authoring, name, enable_grandpa, prometheus_registry, - client, transaction_pool, keystore, - ) = { - let sc_service::ServiceParams { - config, client, transaction_pool, keystore, .. - } = ¶ms; - - ( - config.role.clone(), - config.force_authoring, - config.network.node_name.clone(), - !config.disable_grandpa, - config.prometheus_registry().cloned(), - - client.clone(), transaction_pool.clone(), keystore.clone(), - ) - }; + let sc_service::PartialComponents { + client, backend, mut task_manager, import_queue, keystore, select_chain, transaction_pool, + inherent_data_providers, + other: (block_import, grandpa_link), + } = new_partial(&config)?; + + let finality_proof_provider = + GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); + + let (network, network_status_sinks, system_rpc_tx) = + sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + on_demand: None, + block_announce_validator_builder: None, + finality_proof_request_builder: None, + finality_proof_provider: Some(finality_proof_provider.clone()), + })?; + + if config.offchain_worker.enabled { + sc_service::build_offchain_workers( + &config, backend.clone(), task_manager.spawn_handle(), client.clone(), network.clone(), + ); + } + + let role = config.role.clone(); + let force_authoring = config.force_authoring; + let name = config.network.node_name.clone(); + let enable_grandpa = !config.disable_grandpa; + let prometheus_registry = config.prometheus_registry().cloned(); + let telemetry_connection_sinks = sc_service::TelemetryConnectionSinks::default(); - let ServiceComponents { - task_manager, network, telemetry_on_connect_sinks, .. - } = sc_service::build(params)?; + sc_service::spawn_tasks(sc_service::SpawnTasksParams { + network: network.clone(), + client: client.clone(), + keystore: keystore.clone(), + task_manager: &mut task_manager, + transaction_pool: transaction_pool.clone(), + telemetry_connection_sinks: telemetry_connection_sinks.clone(), + rpc_extensions_builder: Box::new(|_| ()), + on_demand: None, + remote_blockchain: None, + backend, network_status_sinks, system_rpc_tx, config, + })?; if role.is_authority() { let proposer = sc_basic_authorship::ProposerFactory::new( @@ -183,7 +180,7 @@ pub fn new_full(config: Configuration) -> Result { link: grandpa_link, network, inherent_data_providers, - telemetry_on_connect: Some(telemetry_on_connect_sinks.on_connect_stream()), + telemetry_on_connect: Some(telemetry_connection_sinks.on_connect_stream()), voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(), prometheus_registry, shared_voter_state: SharedVoterState::empty(), @@ -208,18 +205,16 @@ pub fn new_full(config: Configuration) -> Result { /// Builds a new service for a light client. 
pub fn new_light(config: Configuration) -> Result { - let (client, backend, keystore, task_manager, on_demand) = + let (client, backend, keystore, mut task_manager, on_demand) = sc_service::new_light_parts::(&config)?; - - let transaction_pool_api = Arc::new(sc_transaction_pool::LightChainApi::new( - client.clone(), on_demand.clone(), - )); - let transaction_pool = sc_transaction_pool::BasicPool::new_light( + + let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( config.transaction_pool.clone(), - transaction_pool_api, config.prometheus_registry(), task_manager.spawn_handle(), - ); + client.clone(), + on_demand.clone(), + )); let grandpa_block_import = sc_finality_grandpa::light_block_import( client.clone(), backend.clone(), &(client.clone() as Arc<_>), @@ -241,16 +236,42 @@ pub fn new_light(config: Configuration) -> Result { )?; let finality_proof_provider = - Arc::new(GrandpaFinalityProofProvider::new(backend.clone(), client.clone() as Arc<_>)); + GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); - sc_service::build(sc_service::ServiceParams { - block_announce_validator_builder: None, - finality_proof_request_builder: Some(finality_proof_request_builder), - finality_proof_provider: Some(finality_proof_provider), - on_demand: Some(on_demand), + let (network, network_status_sinks, system_rpc_tx) = + sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + on_demand: Some(on_demand.clone()), + block_announce_validator_builder: None, + finality_proof_request_builder: Some(finality_proof_request_builder), + finality_proof_provider: Some(finality_proof_provider), + })?; + + if config.offchain_worker.enabled { + sc_service::build_offchain_workers( + &config, backend.clone(), task_manager.spawn_handle(), client.clone(), network.clone(), + ); + } + + sc_service::spawn_tasks(sc_service::SpawnTasksParams { remote_blockchain: Some(backend.remote_blockchain()), + transaction_pool, + task_manager: &mut task_manager, + on_demand: Some(on_demand), rpc_extensions_builder: Box::new(|_| ()), - transaction_pool: Arc::new(transaction_pool), - config, client, import_queue, keystore, backend, task_manager - }).map(|ServiceComponents { task_manager, .. }| task_manager) + telemetry_connection_sinks: sc_service::TelemetryConnectionSinks::default(), + config, + client, + keystore, + backend, + network, + network_status_sinks, + system_rpc_tx, + })?; + + Ok(task_manager) } diff --git a/bin/node/bench/src/txpool.rs b/bin/node/bench/src/txpool.rs index 9e579587957..d6e5578192e 100644 --- a/bin/node/bench/src/txpool.rs +++ b/bin/node/bench/src/txpool.rs @@ -21,11 +21,11 @@ //! The goal of this benchmark is to figure out time needed to fill //! the transaction pool for the next block. 
-use std::{borrow::Cow, sync::Arc}; +use std::borrow::Cow; use node_testing::bench::{BenchDb, Profile, BlockType, KeyTypes, DatabaseType}; -use sc_transaction_pool::{BasicPool, FullChainApi}; +use sc_transaction_pool::BasicPool; use sp_runtime::generic::BlockId; use sp_transaction_pool::{TransactionPool, TransactionSource}; @@ -74,7 +74,6 @@ impl core::Benchmark for PoolBenchmark { let executor = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), - Arc::new(FullChainApi::new(context.client.clone(), None)), None, executor, context.client.clone(), diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index 7615aef3d26..69d9a029865 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -20,8 +20,8 @@ use crate::{chain_spec, service, Cli, Subcommand}; use node_executor::Executor; use node_runtime::{Block, RuntimeApi}; use sc_cli::{Result, SubstrateCli, RuntimeVersion, Role, ChainSpec}; -use sc_service::ServiceParams; -use crate::service::new_full_params; +use sc_service::PartialComponents; +use crate::service::new_partial; impl SubstrateCli for Cli { fn impl_name() -> String { @@ -96,8 +96,8 @@ pub fn run() -> Result<()> { Some(Subcommand::Base(subcommand)) => { let runner = cli.create_runner(subcommand)?; runner.run_subcommand(subcommand, |config| { - let (ServiceParams { client, backend, import_queue, task_manager, .. }, ..) - = new_full_params(config)?; + let PartialComponents { client, backend, task_manager, import_queue, ..} + = new_partial(&config)?; Ok((client, backend, import_queue, task_manager)) }) } diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 65f2fdb64a8..322e9bf1d8a 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -22,14 +22,12 @@ use std::sync::Arc; use sc_consensus_babe; -use grandpa::{ - self, FinalityProofProvider as GrandpaFinalityProofProvider, StorageAndProofProvider, -}; +use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}; use node_primitives::Block; use node_runtime::RuntimeApi; use sc_service::{ config::{Role, Configuration}, error::{Error as ServiceError}, - RpcHandlers, ServiceComponents, TaskManager, + RpcHandlers, TaskManager, }; use sp_inherents::InherentDataProviders; use sc_network::{Event, NetworkService}; @@ -46,34 +44,28 @@ type FullGrandpaBlockImport = grandpa::GrandpaBlockImport; type LightClient = sc_service::TLightClient; -pub fn new_full_params(config: Configuration) -> Result<( - sc_service::ServiceParams< - Block, FullClient, - sc_consensus_babe::BabeImportQueue, - sc_transaction_pool::FullPool, node_rpc::IoHandler, - FullBackend - >, +pub fn new_partial(config: &Configuration) -> Result, + sc_transaction_pool::FullPool, ( - sc_consensus_babe::BabeBlockImport, - grandpa::LinkHalf, - sc_consensus_babe::BabeLink, - ), - grandpa::SharedVoterState, - FullSelectChain, - InherentDataProviders -), ServiceError> { + impl Fn(node_rpc::DenyUnsafe) -> node_rpc::IoHandler, + ( + sc_consensus_babe::BabeBlockImport, + grandpa::LinkHalf, + sc_consensus_babe::BabeLink, + ), + grandpa::SharedVoterState, + ) +>, ServiceError> { let (client, backend, keystore, task_manager) = sc_service::new_full_parts::(&config)?; let client = Arc::new(client); let select_chain = sc_consensus::LongestChain::new(backend.clone()); - let pool_api = sc_transaction_pool::FullChainApi::new( - client.clone(), config.prometheus_registry(), - ); let transaction_pool = sc_transaction_pool::BasicPool::new_full( 
config.transaction_pool.clone(), - std::sync::Arc::new(pool_api), config.prometheus_registry(), task_manager.spawn_handle(), client.clone(), @@ -122,7 +114,7 @@ pub fn new_full_params(config: Configuration) -> Result<( let select_chain = select_chain.clone(); let keystore = keystore.clone(); - let rpc_extensions_builder = Box::new(move |deny_unsafe| { + let rpc_extensions_builder = move |deny_unsafe| { let deps = node_rpc::FullDeps { client: client.clone(), pool: pool.clone(), @@ -140,26 +132,16 @@ pub fn new_full_params(config: Configuration) -> Result<( }; node_rpc::create_full(deps) - }); + }; (rpc_extensions_builder, rpc_setup) }; - let provider = client.clone() as Arc>; - let finality_proof_provider = - Arc::new(grandpa::FinalityProofProvider::new(backend.clone(), provider)); - - let params = sc_service::ServiceParams { - config, backend, client, import_queue, keystore, task_manager, rpc_extensions_builder, - transaction_pool, - block_announce_validator_builder: None, - finality_proof_request_builder: None, - finality_proof_provider: Some(finality_proof_provider), - on_demand: None, - remote_blockchain: None, - }; - - Ok((params, import_setup, rpc_setup, select_chain, inherent_data_providers)) + Ok(sc_service::PartialComponents { + client, backend, task_manager, keystore, select_chain, import_queue, transaction_pool, + inherent_data_providers, + other: (rpc_extensions_builder, import_setup, rpc_setup) + }) } /// Creates a full service from the configuration. @@ -174,31 +156,56 @@ pub fn new_full_base( Arc::Hash>>, Arc>, ), ServiceError> { - let (params, import_setup, rpc_setup, select_chain, inherent_data_providers) - = new_full_params(config)?; + let sc_service::PartialComponents { + client, backend, mut task_manager, import_queue, keystore, select_chain, transaction_pool, + inherent_data_providers, + other: (rpc_extensions_builder, import_setup, rpc_setup), + } = new_partial(&config)?; - let ( - role, force_authoring, name, enable_grandpa, prometheus_registry, - client, transaction_pool, keystore, - ) = { - let sc_service::ServiceParams { - config, client, transaction_pool, keystore, .. - } = ¶ms; + let finality_proof_provider = + GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); + + let (network, network_status_sinks, system_rpc_tx) = + sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + on_demand: None, + block_announce_validator_builder: None, + finality_proof_request_builder: None, + finality_proof_provider: Some(finality_proof_provider.clone()), + })?; - ( - config.role.clone(), - config.force_authoring, - config.network.node_name.clone(), - !config.disable_grandpa, - config.prometheus_registry().cloned(), + if config.offchain_worker.enabled { + sc_service::build_offchain_workers( + &config, backend.clone(), task_manager.spawn_handle(), client.clone(), network.clone(), + ); + } - client.clone(), transaction_pool.clone(), keystore.clone(), - ) - }; + let role = config.role.clone(); + let force_authoring = config.force_authoring; + let name = config.network.node_name.clone(); + let enable_grandpa = !config.disable_grandpa; + let prometheus_registry = config.prometheus_registry().cloned(); + let telemetry_connection_sinks = sc_service::TelemetryConnectionSinks::default(); - let ServiceComponents { - task_manager, network, telemetry_on_connect_sinks, .. 
- } = sc_service::build(params)?; + sc_service::spawn_tasks(sc_service::SpawnTasksParams { + config, + backend: backend.clone(), + client: client.clone(), + keystore: keystore.clone(), + network: network.clone(), + rpc_extensions_builder: Box::new(rpc_extensions_builder), + transaction_pool: transaction_pool.clone(), + task_manager: &mut task_manager, + on_demand: None, + remote_blockchain: None, + telemetry_connection_sinks: telemetry_connection_sinks.clone(), + network_status_sinks, + system_rpc_tx, + })?; let (block_import, grandpa_link, babe_link) = import_setup; let shared_voter_state = rpc_setup; @@ -295,7 +302,7 @@ pub fn new_full_base( link: grandpa_link, network: network.clone(), inherent_data_providers: inherent_data_providers.clone(), - telemetry_on_connect: Some(telemetry_on_connect_sinks.on_connect_stream()), + telemetry_on_connect: Some(telemetry_connection_sinks.on_connect_stream()), voting_rule: grandpa::VotingRulesBuilder::default().build(), prometheus_registry, shared_voter_state, @@ -331,20 +338,17 @@ pub fn new_light_base(config: Configuration) -> Result<( Arc::Hash>>, Arc>> ), ServiceError> { - let (client, backend, keystore, task_manager, on_demand) = + let (client, backend, keystore, mut task_manager, on_demand) = sc_service::new_light_parts::(&config)?; let select_chain = sc_consensus::LongestChain::new(backend.clone()); - let transaction_pool_api = Arc::new(sc_transaction_pool::LightChainApi::new( - client.clone(), - on_demand.clone(), - )); let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( config.transaction_pool.clone(), - transaction_pool_api, config.prometheus_registry(), task_manager.spawn_handle(), + client.clone(), + on_demand.clone(), )); let grandpa_block_import = grandpa::light_block_import( @@ -376,10 +380,27 @@ pub fn new_light_base(config: Configuration) -> Result<( config.prometheus_registry(), )?; - // GenesisAuthoritySetProvider is implemented for StorageAndProofProvider - let provider = client.clone() as Arc>; let finality_proof_provider = - Arc::new(GrandpaFinalityProofProvider::new(backend.clone(), provider)); + GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); + + let (network, network_status_sinks, system_rpc_tx) = + sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + on_demand: Some(on_demand.clone()), + block_announce_validator_builder: None, + finality_proof_request_builder: Some(finality_proof_request_builder), + finality_proof_provider: Some(finality_proof_provider), + })?; + + if config.offchain_worker.enabled { + sc_service::build_offchain_workers( + &config, backend.clone(), task_manager.spawn_handle(), client.clone(), network.clone(), + ); + } let light_deps = node_rpc::LightDeps { remote_blockchain: backend.remote_blockchain(), @@ -390,17 +411,17 @@ pub fn new_light_base(config: Configuration) -> Result<( let rpc_extensions = node_rpc::create_light(light_deps); - let ServiceComponents { task_manager, rpc_handlers, network, .. 
} = - sc_service::build(sc_service::ServiceParams { - block_announce_validator_builder: None, - finality_proof_request_builder: Some(finality_proof_request_builder), - finality_proof_provider: Some(finality_proof_provider), + let rpc_handlers = + sc_service::spawn_tasks(sc_service::SpawnTasksParams { on_demand: Some(on_demand), remote_blockchain: Some(backend.remote_blockchain()), rpc_extensions_builder: Box::new(sc_service::NoopRpcExtensionBuilder(rpc_extensions)), client: client.clone(), transaction_pool: transaction_pool.clone(), - config, import_queue, keystore, backend, task_manager, + config, keystore, backend, network_status_sinks, system_rpc_tx, + network: network.clone(), + telemetry_connection_sinks: sc_service::TelemetryConnectionSinks::default(), + task_manager: &mut task_manager, })?; Ok((task_manager, rpc_handlers, client, network, transaction_pool)) diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index 142e7fb124b..7f1457356d9 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -45,8 +45,8 @@ use sc_consensus_babe::{Config, Epoch}; use sc_consensus_babe_rpc::BabeRpcHandler; use sc_finality_grandpa::{SharedVoterState, SharedAuthoritySet}; use sc_finality_grandpa_rpc::GrandpaRpcHandler; -use sc_rpc_api::DenyUnsafe; use sp_block_builder::BlockBuilder; +pub use sc_rpc_api::DenyUnsafe; /// Light client extra dependencies. pub struct LightDeps { diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 78bafb2f126..3c56bdd33db 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -326,7 +326,7 @@ mod tests { prelude::*, TestClientBuilder, runtime::{Extrinsic, Transfer}, TestClientBuilderExt, }; use sp_transaction_pool::{ChainEvent, MaintainedTransactionPool, TransactionSource}; - use sc_transaction_pool::{BasicPool, FullChainApi}; + use sc_transaction_pool::BasicPool; use sp_api::Core; use sp_blockchain::HeaderBackend; use sp_runtime::traits::NumberFor; @@ -361,7 +361,6 @@ mod tests { let spawner = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), - Arc::new(FullChainApi::new(client.clone(), None)), None, spawner, client.clone(), @@ -415,7 +414,6 @@ mod tests { let spawner = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), - Arc::new(FullChainApi::new(client.clone(), None)), None, spawner, client.clone(), @@ -451,7 +449,6 @@ mod tests { let spawner = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), - Arc::new(FullChainApi::new(client.clone(), None)), None, spawner, client.clone(), @@ -514,7 +511,6 @@ mod tests { let spawner = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), - Arc::new(FullChainApi::new(client.clone(), None)), None, spawner, client.clone(), diff --git a/client/basic-authorship/src/lib.rs b/client/basic-authorship/src/lib.rs index 09536044fb8..b405fc6de0f 100644 --- a/client/basic-authorship/src/lib.rs +++ b/client/basic-authorship/src/lib.rs @@ -34,7 +34,6 @@ //! # let spawner = sp_core::testing::TaskExecutor::new(); //! # let txpool = BasicPool::new_full( //! # Default::default(), -//! # Arc::new(FullChainApi::new(client.clone(), None)), //! # None, //! # spawner, //! 
# client.clone(), diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index 55f6376579d..f334ddde2b9 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -169,6 +169,18 @@ impl FinalityProofProvider { FinalityProofProvider { backend, authority_provider: Arc::new(authority_provider) } } + + /// Create new finality proof provider for the service using: + /// + /// - backend for accessing blockchain data; + /// - storage_and_proof_provider, which is generally a client. + pub fn new_for_service( + backend: Arc, + storage_and_proof_provider: Arc>, + ) -> Arc { + Arc::new(Self::new(backend, storage_and_proof_provider)) + } + } impl sc_network::config::FinalityProofProvider for FinalityProofProvider diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index bb2965c6589..3b17c14f196 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -250,7 +250,6 @@ mod tests { let spawner = sp_core::testing::TaskExecutor::new(); let pool = TestPool(BasicPool::new_full( Default::default(), - Arc::new(FullChainApi::new(client.clone(), None)), None, spawner, client.clone(), diff --git a/client/rpc/src/author/tests.rs b/client/rpc/src/author/tests.rs index 7736ea0c864..5a8c58520ac 100644 --- a/client/rpc/src/author/tests.rs +++ b/client/rpc/src/author/tests.rs @@ -64,7 +64,6 @@ impl Default for TestSetup { let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full( Default::default(), - Arc::new(FullChainApi::new(client.clone(), None)), None, spawner, client.clone(), diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 3511c290382..c5ccd442282 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -53,6 +53,7 @@ sp-session = { version = "2.0.0-rc5", path = "../../primitives/session" } sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } sp-application-crypto = { version = "2.0.0-rc5", path = "../../primitives/application-crypto" } sp-consensus = { version = "0.8.0-rc5", path = "../../primitives/consensus/common" } +sp-inherents = { version = "2.0.0-rc5", path = "../../primitives/inherents" } sc-network = { version = "0.8.0-rc5", path = "../network" } sc-chain-spec = { version = "2.0.0-rc5", path = "../chain-spec" } sc-light = { version = "2.0.0-rc5", path = "../light" } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index a262e6488bb..4c7c1f57ee0 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -18,37 +18,35 @@ use crate::{ NetworkStatus, NetworkState, error::Error, DEFAULT_PROTOCOL_ID, MallocSizeOfWasm, + TelemetryConnectionSinks, RpcHandlers, NetworkStatusSinks, start_rpc_servers, build_network_future, TransactionPoolAdapter, TaskManager, SpawnTaskHandle, status_sinks, metrics::MetricsService, client::{light, Client, ClientConfig}, - config::{Configuration, KeystoreConfig, PrometheusConfig, OffchainWorkerConfig}, + config::{Configuration, KeystoreConfig, PrometheusConfig}, }; use sc_client_api::{ light::RemoteBlockchain, ForkBlocks, BadBlocks, UsageProvider, ExecutorProvider, }; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sc_chain_spec::get_extension; use sp_consensus::{ block_validation::{BlockAnnounceValidator, DefaultBlockAnnounceValidator, Chain}, import_queue::ImportQueue, }; -use 
futures::{ - Future, FutureExt, StreamExt, - future::ready, -}; +use futures::{FutureExt, StreamExt, future::ready}; use jsonrpc_pubsub::manager::SubscriptionManager; use sc_keystore::Store as Keystore; use log::{info, warn, error}; use sc_network::config::{Role, FinalityProofProvider, OnDemand, BoxFinalityProofRequestBuilder}; use sc_network::NetworkService; -use parking_lot::{Mutex, RwLock}; +use parking_lot::RwLock; use sp_runtime::generic::BlockId; use sp_runtime::traits::{ Block as BlockT, SaturatedConversion, HashFor, Zero, BlockIdTo, }; use sp_api::{ProvideRuntimeApi, CallApiAt}; use sc_executor::{NativeExecutor, NativeExecutionDispatch, RuntimeInfo}; -use std::{collections::HashMap, sync::Arc, pin::Pin}; +use std::{collections::HashMap, sync::Arc}; use wasm_timer::SystemTime; use sc_telemetry::{telemetry, SUBSTRATE_INFO}; use sp_transaction_pool::MaintainedTransactionPool; @@ -63,7 +61,6 @@ use sc_client_api::{ execution_extensions::ExecutionExtensions }; use sp_blockchain::{HeaderMetadata, HeaderBackend}; -use crate::{ServiceComponents, TelemetryOnConnectSinks, RpcHandlers, NetworkStatusSinks}; /// A utility trait for building an RPC extension given a `DenyUnsafe` instance. /// This is useful since at service definition time we don't know whether the @@ -371,7 +368,7 @@ pub fn new_client( } /// Parameters to pass into `build`. -pub struct ServiceParams { +pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { /// The service configuration. pub config: Configuration, /// A shared client returned by `new_full_parts`/`new_light_parts`. @@ -379,17 +376,11 @@ pub struct ServiceParams { /// A shared backend returned by `new_full_parts`/`new_light_parts`. pub backend: Arc, /// A task manager returned by `new_full_parts`/`new_light_parts`. - pub task_manager: TaskManager, + pub task_manager: &'a mut TaskManager, /// A shared keystore returned by `new_full_parts`/`new_light_parts`. pub keystore: Arc>, /// An optional, shared data fetcher for light clients. pub on_demand: Option>>, - /// An import queue. - pub import_queue: TImpQu, - /// An optional finality proof request builder. - pub finality_proof_request_builder: Option>, - /// An optional, shared finality proof request provider. - pub finality_proof_provider: Option>>, /// A shared transaction pool. pub transaction_pool: Arc, /// A RPC extension builder. Use `NoopRpcExtensionBuilder` if you just want to pass in the @@ -397,15 +388,61 @@ pub struct ServiceParams { pub rpc_extensions_builder: Box + Send>, /// An optional, shared remote blockchain instance. Used for light clients. pub remote_blockchain: Option>>, - /// A block annouce validator builder. - pub block_announce_validator_builder: - Option) -> Box + Send> + Send>>, + /// A shared network instance. + pub network: Arc::Hash>>, + /// Sinks to propagate network status updates. + pub network_status_sinks: NetworkStatusSinks, + /// A Sender for RPC requests. + pub system_rpc_tx: TracingUnboundedSender>, + /// Shared Telemetry connection sinks, + pub telemetry_connection_sinks: TelemetryConnectionSinks, } -/// Put together the components of a service from the parameters. -pub fn build( - builder: ServiceParams, -) -> Result, Error> +/// Build a shared offchain workers instance. 
+pub fn build_offchain_workers( + config: &Configuration, + backend: Arc, + spawn_handle: SpawnTaskHandle, + client: Arc, + network: Arc::Hash>>, +) -> Option>> + where + TBl: BlockT, TBackend: sc_client_api::Backend, + >::OffchainStorage: 'static, + TCl: Send + Sync + ProvideRuntimeApi + BlockchainEvents + 'static, + >::Api: sc_offchain::OffchainWorkerApi, +{ + let offchain_workers = match backend.offchain_storage() { + Some(db) => { + Some(Arc::new(sc_offchain::OffchainWorkers::new(client.clone(), db))) + }, + None => { + warn!("Offchain workers disabled, due to lack of offchain storage support in backend."); + None + }, + }; + + // Inform the offchain worker about new imported blocks + if let Some(offchain) = offchain_workers.clone() { + spawn_handle.spawn( + "offchain-notifications", + sc_offchain::notification_future( + config.role.is_authority(), + client.clone(), + offchain, + Clone::clone(&spawn_handle), + network.clone() + ) + ); + } + + offchain_workers +} + +/// Spawn the tasks that are required to run a node. +pub fn spawn_tasks( + params: SpawnTasksParams, +) -> Result, Error> where TCl: ProvideRuntimeApi + HeaderMetadata + Chain + BlockBackend + BlockIdTo + ProofProvider + @@ -421,26 +458,23 @@ pub fn build( sp_api::ApiExt, TBl: BlockT, TBackend: 'static + sc_client_api::backend::Backend + Send, - TImpQu: 'static + ImportQueue, TExPool: MaintainedTransactionPool::Hash> + MallocSizeOfWasm + 'static, TRpc: sc_rpc::RpcExtension { - let ServiceParams { + let SpawnTasksParams { mut config, - mut task_manager, + task_manager, client, on_demand, backend, keystore, - import_queue, - finality_proof_request_builder, - finality_proof_provider, transaction_pool, rpc_extensions_builder, remote_blockchain, - block_announce_validator_builder, - } = builder; + network, network_status_sinks, system_rpc_tx, + telemetry_connection_sinks, + } = params; let chain_info = client.usage_info().chain; @@ -458,57 +492,14 @@ pub fn build( "best" => ?chain_info.best_hash ); - let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc"); - - let (network, network_status_sinks, network_future) = build_network( - &config, client.clone(), transaction_pool.clone(), task_manager.spawn_handle(), - on_demand.clone(), block_announce_validator_builder, finality_proof_request_builder, - finality_proof_provider, system_rpc_rx, import_queue - )?; - let spawn_handle = task_manager.spawn_handle(); - // The network worker is responsible for gathering all network messages and processing - // them. This is quite a heavy task, and at the time of the writing of this comment it - // frequently happens that this future takes several seconds or in some situations - // even more than a minute until it has processed its entire queue. This is clearly an - // issue, and ideally we would like to fix the network future to take as little time as - // possible, but we also take the extra harm-prevention measure to execute the networking - // future using `spawn_blocking`. - spawn_handle.spawn_blocking("network-worker", network_future); - - let offchain_storage = backend.offchain_storage(); - let offchain_workers = match (config.offchain_worker.clone(), offchain_storage.clone()) { - (OffchainWorkerConfig {enabled: true, .. }, Some(db)) => { - Some(Arc::new(sc_offchain::OffchainWorkers::new(client.clone(), db))) - }, - (OffchainWorkerConfig {enabled: true, .. 
}, None) => { - warn!("Offchain workers disabled, due to lack of offchain storage support in backend."); - None - }, - _ => None, - }; - // Inform the tx pool about imported and finalized blocks. spawn_handle.spawn( "txpool-notifications", sc_transaction_pool::notification_future(client.clone(), transaction_pool.clone()), ); - // Inform the offchain worker about new imported blocks - if let Some(offchain) = offchain_workers.clone() { - spawn_handle.spawn( - "offchain-notifications", - sc_offchain::notification_future( - config.role.is_authority(), - client.clone(), - offchain, - task_manager.spawn_handle(), - network.clone() - ) - ); - } - spawn_handle.spawn( "on-transaction-imported", transaction_notifications(transaction_pool.clone(), network.clone()), @@ -545,14 +536,12 @@ pub fn build( let gen_handler = |deny_unsafe: sc_rpc::DenyUnsafe| gen_handler( deny_unsafe, &config, task_manager.spawn_handle(), client.clone(), transaction_pool.clone(), keystore.clone(), on_demand.clone(), remote_blockchain.clone(), &*rpc_extensions_builder, - offchain_storage.clone(), system_rpc_tx.clone() + backend.offchain_storage(), system_rpc_tx.clone() ); let rpc = start_rpc_servers(&config, gen_handler)?; // This is used internally, so don't restrict access to unsafe RPC let rpc_handlers = Arc::new(RpcHandlers(gen_handler(sc_rpc::DenyUnsafe::No))); - let telemetry_connection_sinks: Arc>>> = Default::default(); - // Telemetry let telemetry = config.telemetry_endpoints.clone().and_then(|endpoints| { if endpoints.is_empty() { @@ -585,18 +574,14 @@ pub fn build( // Spawn informant task spawn_handle.spawn("informant", sc_informant::build( client.clone(), - network_status_sinks.clone(), + network_status_sinks.clone().0, transaction_pool.clone(), config.informant_output_format, )); task_manager.keep_alive((telemetry, config.base_path, rpc, rpc_handlers.clone())); - Ok(ServiceComponents { - task_manager, network, rpc_handlers, offchain_workers, - telemetry_on_connect_sinks: TelemetryOnConnectSinks(telemetry_connection_sinks), - network_status_sinks: NetworkStatusSinks::new(network_status_sinks), - }) + Ok(rpc_handlers) } async fn transaction_notifications( @@ -626,7 +611,7 @@ async fn telemetry_periodic_send( client: Arc, transaction_pool: Arc, mut metrics_service: MetricsService, - network_status_sinks: Arc, NetworkState)>> + network_status_sinks: NetworkStatusSinks, ) where TBl: BlockT, @@ -634,7 +619,7 @@ async fn telemetry_periodic_send( TExPool: MaintainedTransactionPool::Hash>, { let (state_tx, state_rx) = tracing_unbounded::<(NetworkStatus<_>, NetworkState)>("mpsc_netstat1"); - network_status_sinks.push(std::time::Duration::from_millis(5000), state_tx); + network_status_sinks.0.push(std::time::Duration::from_millis(5000), state_tx); state_rx.for_each(move |(net_status, _)| { let info = client.usage_info(); metrics_service.tick( @@ -647,11 +632,11 @@ async fn telemetry_periodic_send( } async fn telemetry_periodic_network_state( - network_status_sinks: Arc, NetworkState)>> + network_status_sinks: NetworkStatusSinks, ) { // Periodically send the network state to the telemetry. 
let (netstat_tx, netstat_rx) = tracing_unbounded::<(NetworkStatus<_>, NetworkState)>("mpsc_netstat2"); - network_status_sinks.push(std::time::Duration::from_secs(30), netstat_tx); + network_status_sinks.0.push(std::time::Duration::from_secs(30), netstat_tx); netstat_rx.for_each(move |(_, network_state)| { telemetry!( SUBSTRATE_INFO; @@ -665,7 +650,7 @@ async fn telemetry_periodic_network_state( fn build_telemetry( config: &mut Configuration, endpoints: sc_telemetry::TelemetryEndpoints, - telemetry_connection_sinks: Arc>>>, + telemetry_connection_sinks: TelemetryConnectionSinks, network: Arc::Hash>>, spawn_handle: SpawnTaskHandle, genesis_hash: ::Hash, @@ -703,7 +688,7 @@ fn build_telemetry( "network_id" => network_id.clone() ); - telemetry_connection_sinks.lock().retain(|sink| { + telemetry_connection_sinks.0.lock().retain(|sink| { sink.unbounded_send(()).is_ok() }); ready(()) @@ -805,24 +790,38 @@ fn gen_handler( )) } -fn build_network( - config: &Configuration, - client: Arc, - transaction_pool: Arc, - spawn_handle: SpawnTaskHandle, - on_demand: Option>>, - block_announce_validator_builder: Option { + /// The service configuration. + pub config: &'a Configuration, + /// A shared client returned by `new_full_parts`/`new_light_parts`. + pub client: Arc, + /// A shared transaction pool. + pub transaction_pool: Arc, + /// A handle for spawning tasks. + pub spawn_handle: SpawnTaskHandle, + /// An import queue. + pub import_queue: TImpQu, + /// An optional, shared data fetcher for light clients. + pub on_demand: Option>>, + /// A block annouce validator builder. + pub block_announce_validator_builder: Option) -> Box + Send> + Send >>, - finality_proof_request_builder: Option>, - finality_proof_provider: Option>>, - system_rpc_rx: TracingUnboundedReceiver>, - import_queue: TImpQu + /// An optional finality proof request builder. + pub finality_proof_request_builder: Option>, + /// An optional, shared finality proof request provider. + pub finality_proof_provider: Option>>, +} + +/// Build the network service, the network status sinks and an RPC sender. 
+pub fn build_network( + params: BuildNetworkParams ) -> Result< ( Arc::Hash>>, - Arc, NetworkState)>>, - Pin + Send>> + NetworkStatusSinks, + TracingUnboundedSender>, ), Error > @@ -834,6 +833,11 @@ fn build_network( TExPool: MaintainedTransactionPool::Hash> + 'static, TImpQu: ImportQueue + 'static, { + let BuildNetworkParams { + config, client, transaction_pool, spawn_handle, import_queue, on_demand, + block_announce_validator_builder, finality_proof_request_builder, finality_proof_provider, + } = params; + let transaction_pool_adapter = Arc::new(TransactionPoolAdapter { imports_external_transactions: !matches!(config.role, Role::Light), pool: transaction_pool, @@ -862,6 +866,7 @@ fn build_network( let network_params = sc_network::config::Params { role: config.role.clone(), executor: { + let spawn_handle = Clone::clone(&spawn_handle); Some(Box::new(move |fut| { spawn_handle.spawn("libp2p-node", fut); })) @@ -881,7 +886,9 @@ fn build_network( let has_bootnodes = !network_params.network_config.boot_nodes.is_empty(); let network_mut = sc_network::NetworkWorker::new(network_params)?; let network = network_mut.service().clone(); - let network_status_sinks = Arc::new(status_sinks::StatusSinks::new()); + let network_status_sinks = NetworkStatusSinks::new(Arc::new(status_sinks::StatusSinks::new())); + + let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc"); let future = build_network_future( config.role.clone(), @@ -891,7 +898,16 @@ fn build_network( system_rpc_rx, has_bootnodes, config.announce_block, - ).boxed(); + ); + + // The network worker is responsible for gathering all network messages and processing + // them. This is quite a heavy task, and at the time of the writing of this comment it + // frequently happens that this future takes several seconds or in some situations + // even more than a minute until it has processed its entire queue. This is clearly an + // issue, and ideally we would like to fix the network future to take as little time as + // possible, but we also take the extra harm-prevention measure to execute the networking + // future using `spawn_blocking`. + spawn_handle.spawn_blocking("network-worker", future); - Ok((network, network_status_sinks, future)) + Ok((network, network_status_sinks, system_rpc_tx)) } diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index ff7573f5fac..40826a70d45 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -52,8 +52,9 @@ use sp_utils::{status_sinks, mpsc::{tracing_unbounded, TracingUnboundedReceiver, pub use self::error::Error; pub use self::builder::{ - new_full_client, new_client, new_full_parts, new_light_parts, build, - ServiceParams, TFullClient, TLightClient, TFullBackend, TLightBackend, + new_full_client, new_client, new_full_parts, new_light_parts, + spawn_tasks, build_network, BuildNetworkParams, build_offchain_workers, + SpawnTasksParams, TFullClient, TLightClient, TFullBackend, TLightBackend, TLightBackendWithHash, TLightClientWithBackend, TFullCallExecutor, TLightCallExecutor, RpcExtensionBuilder, NoopRpcExtensionBuilder, }; @@ -79,7 +80,8 @@ pub use sc_tracing::TracingReceiver; pub use task_manager::SpawnTaskHandle; pub use task_manager::TaskManager; pub use sp_consensus::import_queue::ImportQueue; -use sc_client_api::{Backend, BlockchainEvents}; +use sc_client_api::BlockchainEvents; +pub use sc_keystore::KeyStorePtr as KeyStore; const DEFAULT_PROTOCOL_ID: &str = "sup"; @@ -117,6 +119,7 @@ impl RpcHandlers { /// Sinks to propagate network status updates. 
/// For each element, every time the `Interval` fires we push an element on the sender. +#[derive(Clone)] pub struct NetworkStatusSinks( Arc, NetworkState)>>, ); @@ -138,9 +141,10 @@ impl NetworkStatusSinks { } /// Sinks to propagate telemetry connection established events. -pub struct TelemetryOnConnectSinks(pub Arc>>>); +#[derive(Default, Clone)] +pub struct TelemetryConnectionSinks(Arc>>>); -impl TelemetryOnConnectSinks { +impl TelemetryConnectionSinks { /// Get event stream for telemetry connection established events. pub fn on_connect_stream(&self) -> TracingUnboundedReceiver<()> { let (sink, stream) =tracing_unbounded("mpsc_telemetry_on_connect"); @@ -149,23 +153,26 @@ impl TelemetryOnConnectSinks { } } -/// The individual components of the chain, built by the service builder. You are encouraged to -/// deconstruct this into its fields. -pub struct ServiceComponents, TCl> { +/// An imcomplete set of chain components, but enough to run the chain ops subcommands. +pub struct PartialComponents { + /// A shared client instance. + pub client: Arc, + /// A shared backend instance. + pub backend: Arc, /// The chain task manager. pub task_manager: TaskManager, - /// A shared network instance. - pub network: Arc::Hash>>, - /// RPC handlers that can perform RPC queries. - pub rpc_handlers: Arc, - /// Sinks to propagate network status updates. - pub network_status_sinks: NetworkStatusSinks, - /// Shared Telemetry connection sinks, - pub telemetry_on_connect_sinks: TelemetryOnConnectSinks, - /// A shared offchain workers instance. - pub offchain_workers: Option>>, + /// A shared keystore instance. + pub keystore: KeyStore, + /// A chain selection algorithm instance. + pub select_chain: SelectChain, + /// An import queue. + pub import_queue: ImportQueue, + /// A shared transaction pool. + pub transaction_pool: Arc, + /// A registry of all providers of `InherentData`. + pub inherent_data_providers: sp_inherents::InherentDataProviders, + /// Everything else that needs to be passed into the main build function. + pub other: Other, } /// Builds a never-ending future that continuously polls the network. @@ -179,7 +186,7 @@ async fn build_network_future< role: Role, mut network: sc_network::NetworkWorker, client: Arc, - status_sinks: Arc, NetworkState)>>, + status_sinks: NetworkStatusSinks, mut rpc_rx: TracingUnboundedReceiver>, should_have_peers: bool, announce_imported_blocks: bool, @@ -308,7 +315,7 @@ async fn build_network_future< // At a regular interval, we send the state of the network on what is called // the "status sinks". 
- ready_sink = status_sinks.next().fuse() => { + ready_sink = status_sinks.0.next().fuse() => { let status = NetworkStatus { sync_state: network.sync_state(), best_seen_block: network.best_seen_block(), @@ -549,7 +556,7 @@ mod tests { use sp_consensus::SelectChain; use sp_runtime::traits::BlindCheckable; use substrate_test_runtime_client::{prelude::*, runtime::{Extrinsic, Transfer}}; - use sc_transaction_pool::{BasicPool, FullChainApi}; + use sc_transaction_pool::BasicPool; #[test] fn should_not_propagate_transactions_that_are_marked_as_such() { @@ -559,7 +566,6 @@ mod tests { let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full( Default::default(), - Arc::new(FullChainApi::new(client.clone(), None)), None, spawner, client.clone(), diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index bb9936984f9..6255fd478b7 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -180,18 +180,6 @@ impl BasicPool ) } - /// Create new basic transaction pool for a light node with the provided api. - pub fn new_light( - options: sc_transaction_graph::Options, - pool_api: Arc, - prometheus: Option<&PrometheusRegistry>, - spawner: impl SpawnNamed, - ) -> Self { - Self::with_revalidation_type( - options, pool_api, prometheus, RevalidationType::Light, spawner, - ) - } - /// Create new basic transaction pool with provided api and custom /// revalidation type. pub fn with_revalidation_type( @@ -342,7 +330,28 @@ impl TransactionPool for BasicPool } } -impl BasicPool, Block> +impl LightPool +where + Block: BlockT, + Client: sp_blockchain::HeaderBackend + 'static, + Fetcher: sc_client_api::Fetcher + 'static, +{ + /// Create new basic transaction pool for a light node with the provided api. + pub fn new_light( + options: sc_transaction_graph::Options, + prometheus: Option<&PrometheusRegistry>, + spawner: impl SpawnNamed, + client: Arc, + fetcher: Arc, + ) -> Self { + let pool_api = Arc::new(LightChainApi::new(client, fetcher)); + Self::with_revalidation_type( + options, pool_api, prometheus, RevalidationType::Light, spawner, + ) + } +} + +impl FullPool where Block: BlockT, Client: sp_api::ProvideRuntimeApi @@ -355,11 +364,11 @@ where /// Create new basic transaction pool for a full node with the provided api. 
pub fn new_full( options: sc_transaction_graph::Options, - pool_api: Arc>, prometheus: Option<&PrometheusRegistry>, spawner: impl SpawnNamed, client: Arc, ) -> Arc { + let pool_api = Arc::new(FullChainApi::new(client.clone(), prometheus)); let pool = Arc::new(Self::with_revalidation_type( options, pool_api, prometheus, RevalidationType::Full, spawner )); diff --git a/utils/frame/rpc/system/src/lib.rs b/utils/frame/rpc/system/src/lib.rs index 99e38aaac03..2bb46369fea 100644 --- a/utils/frame/rpc/system/src/lib.rs +++ b/utils/frame/rpc/system/src/lib.rs @@ -289,7 +289,7 @@ mod tests { use futures::executor::block_on; use substrate_test_runtime_client::{runtime::Transfer, AccountKeyring}; - use sc_transaction_pool::{BasicPool, FullChainApi}; + use sc_transaction_pool::BasicPool; use sp_runtime::{ApplyExtrinsicResult, transaction_validity::{TransactionValidityError, InvalidTransaction}}; #[test] @@ -301,7 +301,6 @@ mod tests { let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full( Default::default(), - Arc::new(FullChainApi::new(client.clone(), None)), None, spawner, client.clone(), @@ -341,7 +340,6 @@ mod tests { let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full( Default::default(), - Arc::new(FullChainApi::new(client.clone(), None)), None, spawner, client.clone(), @@ -365,7 +363,6 @@ mod tests { let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full( Default::default(), - Arc::new(FullChainApi::new(client.clone(), None)), None, spawner, client.clone(), @@ -398,7 +395,6 @@ mod tests { let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full( Default::default(), - Arc::new(FullChainApi::new(client.clone(), None)), None, spawner, client.clone(), -- GitLab From 1ea74bd8ff7d2ef3c3ea9bc00e2f81da0863bec5 Mon Sep 17 00:00:00 2001 From: Dan Forbes Date: Tue, 28 Jul 2020 11:33:53 -0700 Subject: [PATCH 015/132] Remove unused node template deps (#6748) * Remove unused node template deps Backport changes made by @c410-f3r https://github.com/substrate-developer-hub/substrate-node-template/pull/66 * Enhancements to README * Revert change to serde per @thiolliere --- Cargo.lock | 5 ----- bin/node-template/README.md | 14 ++++++++++---- bin/node-template/node/Cargo.toml | 4 ---- bin/node-template/runtime/Cargo.toml | 2 -- 4 files changed, 10 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2ce99bfbbf8..069a4ea1a8b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3826,10 +3826,7 @@ dependencies = [ name = "node-template" version = "2.0.0-rc5" dependencies = [ - "futures 0.3.5", - "log", "node-template-runtime", - "parking_lot 0.10.2", "sc-basic-authorship", "sc-cli", "sc-client-api", @@ -3837,7 +3834,6 @@ dependencies = [ "sc-consensus-aura", "sc-executor", "sc-finality-grandpa", - "sc-network", "sc-service", "sc-transaction-pool", "sp-consensus", @@ -3873,7 +3869,6 @@ dependencies = [ "sp-consensus-aura", "sp-core", "sp-inherents", - "sp-io", "sp-offchain", "sp-runtime", "sp-session", diff --git a/bin/node-template/README.md b/bin/node-template/README.md index ad514617ee3..5623fedb534 100644 --- a/bin/node-template/README.md +++ b/bin/node-template/README.md @@ -108,7 +108,8 @@ Substrate-based blockchain nodes expose a number of capabilities: - Consensus: Blockchains must have a way to come to [consensus](https://substrate.dev/docs/en/knowledgebase/advanced/consensus) on the state of the network. 
Substrate makes it possible to supply custom consensus engines and also ships with - several consensus mechanisms that have been built on top of Web3 Foundation research. + several consensus mechanisms that have been built on top of + [Web3 Foundation research](https://research.web3.foundation/en/latest/polkadot/NPoS/index.html). - RPC Server: A remote procedure call (RPC) server is used to interact with Substrate nodes. There are several files in the `node` directory - take special note of the following: @@ -140,12 +141,17 @@ capabilities and configuration parameters that it exposes: ### Runtime -The Substrate project in this repository uses the -[FRAME](https://substrate.dev/docs/en/knowledgebase/runtime/frame) framework to construct a +In Substrate, the terms +"[runtime](https://substrate.dev/docs/en/knowledgebase/getting-started/glossary#runtime)" and +"[state transition function](https://substrate.dev/docs/en/knowledgebase/getting-started/glossary#stf-state-transition-function)" +are analogous - they refer to the core logic of the blockchain that is responsible for validating +blocks and executing the state changes they define. The Substrate project in this repository uses +the [FRAME](https://substrate.dev/docs/en/knowledgebase/runtime/frame) framework to construct a blockchain runtime. FRAME allows runtime developers to declare domain-specific logic in modules called "pallets". At the heart of FRAME is a helpful [macro language](https://substrate.dev/docs/en/knowledgebase/runtime/macros) that makes it easy to -create pallets and flexibly compose them to create blockchains that can address a variety of needs. +create pallets and flexibly compose them to create blockchains that can address +[a variety of needs](https://www.substrate.io/substrate-users/). 
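
As a rough sketch of what composing pallets means in practice (the pallet names and the exact list below are illustrative rather than a quote of this template's runtime), a FRAME runtime of this era pairs one `impl ...::Trait for Runtime` block per pallet with a single `construct_runtime!` invocation:

```rust
// Illustrative sketch only: the real runtime configures more associated types
// per pallet and includes more pallets than shown here.
impl template::Trait for Runtime {
	// Route the pallet's events into the runtime-wide `Event` enum.
	type Event = Event;
}

construct_runtime!(
	pub enum Runtime where
		Block = Block,
		NodeBlock = opaque::Block,
		UncheckedExtrinsic = UncheckedExtrinsic
	{
		System: frame_system::{Module, Call, Config, Storage, Event<T>},
		Balances: balances::{Module, Call, Storage, Config<T>, Event<T>},
		TemplateModule: template::{Module, Call, Storage, Event<T>},
	}
);
```
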
Review the [FRAME runtime implementation](./runtime/src/lib.rs) included in this template and note the following: diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index cd4007a8833..82c2d7ad43b 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -16,10 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] name = "node-template" [dependencies] -futures = "0.3.4" -log = "0.4.8" structopt = "0.3.8" -parking_lot = "0.10.0" sc-cli = { version = "0.8.0-rc5", path = "../../../client/cli", features = ["wasmtime"] } sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } @@ -28,7 +25,6 @@ sc-service = { version = "0.8.0-rc5", path = "../../../client/service", features sp-inherents = { version = "2.0.0-rc5", path = "../../../primitives/inherents" } sc-transaction-pool = { version = "2.0.0-rc5", path = "../../../client/transaction-pool" } sp-transaction-pool = { version = "2.0.0-rc5", path = "../../../primitives/transaction-pool" } -sc-network = { version = "0.8.0-rc5", path = "../../../client/network" } sc-consensus-aura = { version = "0.8.0-rc5", path = "../../../client/consensus/aura" } sp-consensus-aura = { version = "0.8.0-rc5", path = "../../../primitives/consensus/aura" } sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index 71b68dfc2fb..088a1324111 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -29,7 +29,6 @@ sp-block-builder = { path = "../../../primitives/block-builder", default-feature sp-consensus-aura = { version = "0.8.0-rc5", default-features = false, path = "../../../primitives/consensus/aura" } sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/core" } sp-inherents = { path = "../../../primitives/inherents", default-features = false, version = "2.0.0-rc5"} -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/io" } sp-offchain = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/offchain" } sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/runtime" } sp-session = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/session" } @@ -58,7 +57,6 @@ std = [ "sp-consensus-aura/std", "sp-core/std", "sp-inherents/std", - "sp-io/std", "sp-offchain/std", "sp-runtime/std", "sp-session/std", -- GitLab From b5e43059a1147329035a849406f1ad7ff420ff03 Mon Sep 17 00:00:00 2001 From: Joseph Date: Tue, 28 Jul 2020 21:14:19 +0200 Subject: [PATCH 016/132] Replace Process.toml with json (#6740) * Replace Process.toml with json * Trigger checks * Revert "Trigger checks" This reverts commit 9bdf9f135cecb92ca3859dfa211d396a48dd6a8d. * Trigger checks * Revert "Trigger checks" This reverts commit b0c6f29d6aefaf7ca8b137c7d2f958a5e0929d9e. 
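
The diff below replaces the TOML process metadata with a JSON array in which every entry carries a `project_name`, an `owner` and a `matrix_room_id`. The tooling that consumes this file is not part of the patch, so purely as a sketch (the struct name and file path are assumptions for illustration), such entries could be read with `serde`/`serde_json` like this:

```rust
// Sketch of deserializing Process.json entries; the actual consumer of this
// file lives outside this repository and may look nothing like this.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct ProcessEntry {
    project_name: String,
    owner: String,
    matrix_room_id: String,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let raw = std::fs::read_to_string("Process.json")?;
    let entries: Vec<ProcessEntry> = serde_json::from_str(&raw)?;
    for entry in &entries {
        println!("{} is owned by {}", entry.project_name, entry.owner);
    }
    Ok(())
}
```
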
--- Process.json | 24 ++++++++++++++++++++++++ Process.toml | 24 ------------------------ 2 files changed, 24 insertions(+), 24 deletions(-) create mode 100644 Process.json delete mode 100644 Process.toml diff --git a/Process.json b/Process.json new file mode 100644 index 00000000000..cd15e137df6 --- /dev/null +++ b/Process.json @@ -0,0 +1,24 @@ +[{ + "project_name": "Networking", + "owner": "tomaka", + "matrix_room_id": "!vUADSGcyXmxhKLeDsW:matrix.parity.io" +}, +{ "project_name": "Client", + "owner": "gnunicorn", + "matrix_room_id": "!aenJixaHcSKbJOWxYk:matrix.parity.io" +}, +{ + "project_name": "Runtime", + "owner": "gavofyork", + "matrix_room_id": "!yBKstWVBkwzUkPslsp:matrix.parity.io" +}, +{ + "project_name": "Consensus", + "owner": "andresilva", + "matrix_room_id": "!XdNWDTfVNFVixljKZU:matrix.parity.io" +}, +{ + "project_name": "Smart Contracts", + "owner": "pepyakin", + "matrix_room_id": "!yBKstWVBkwzUkPslsp:matrix.parity.io" +}] diff --git a/Process.toml b/Process.toml deleted file mode 100644 index ecaf5c7120f..00000000000 --- a/Process.toml +++ /dev/null @@ -1,24 +0,0 @@ -[Networking] -owner = "tomaka" -whitelist = [] -matrix_room_id = "!vUADSGcyXmxhKLeDsW:matrix.parity.io" - -[Client] -owner = "gnunicorn" -whitelist = [] -matrix_room_id = "!aenJixaHcSKbJOWxYk:matrix.parity.io" - -[Runtime] -owner = "gavofyork" -whitelist = [] -matrix_room_id = "!yBKstWVBkwzUkPslsp:matrix.parity.io" - -[Consensus] -owner = "andresilva" -whitelist = [] -matrix_room_id = "!XdNWDTfVNFVixljKZU:matrix.parity.io" - -[Smart Contracts] -owner = "pepyakin" -whitelist = [] -matrix_room_id = "!yBKstWVBkwzUkPslsp:matrix.parity.io" -- GitLab From 99274e39c715cb94a89a7164289a01031be15d38 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 28 Jul 2020 22:26:01 +0200 Subject: [PATCH 017/132] Update parity-scale-codec to prepare for breaking rustc release (#6746) This updates parity-scale-codec{-derive} to prepare for a rustc release that would otherwise break the derive implementation: https://github.com/rust-lang/rust/pull/73084 --- Cargo.lock | 8 ++++---- bin/node-template/pallets/template/Cargo.toml | 2 +- bin/node-template/runtime/Cargo.toml | 2 +- bin/node/cli/Cargo.toml | 2 +- bin/node/executor/Cargo.toml | 2 +- bin/node/inspect/Cargo.toml | 2 +- bin/node/primitives/Cargo.toml | 2 +- bin/node/runtime/Cargo.toml | 2 +- bin/node/testing/Cargo.toml | 2 +- bin/utils/subkey/Cargo.toml | 2 +- client/api/Cargo.toml | 2 +- client/authority-discovery/Cargo.toml | 2 +- client/basic-authorship/Cargo.toml | 2 +- client/block-builder/Cargo.toml | 2 +- client/cli/Cargo.toml | 2 +- client/consensus/aura/Cargo.toml | 2 +- client/consensus/babe/Cargo.toml | 2 +- client/consensus/epochs/Cargo.toml | 2 +- client/consensus/pow/Cargo.toml | 2 +- client/consensus/slots/Cargo.toml | 2 +- client/db/Cargo.toml | 2 +- client/executor/Cargo.toml | 2 +- client/executor/common/Cargo.toml | 2 +- client/executor/wasmi/Cargo.toml | 2 +- client/executor/wasmtime/Cargo.toml | 2 +- client/finality-grandpa/Cargo.toml | 2 +- client/light/Cargo.toml | 2 +- client/network/Cargo.toml | 2 +- client/offchain/Cargo.toml | 2 +- client/rpc-api/Cargo.toml | 2 +- client/rpc/Cargo.toml | 2 +- client/service/Cargo.toml | 2 +- client/service/test/Cargo.toml | 2 +- client/state-db/Cargo.toml | 2 +- client/transaction-pool/Cargo.toml | 2 +- client/transaction-pool/graph/Cargo.toml | 2 +- frame/assets/Cargo.toml | 2 +- frame/atomic-swap/Cargo.toml | 2 +- frame/aura/Cargo.toml | 2 +- frame/authority-discovery/Cargo.toml | 2 +- 
frame/authorship/Cargo.toml | 2 +- frame/babe/Cargo.toml | 2 +- frame/balances/Cargo.toml | 2 +- frame/benchmarking/Cargo.toml | 2 +- frame/collective/Cargo.toml | 2 +- frame/contracts/Cargo.toml | 2 +- frame/contracts/common/Cargo.toml | 2 +- frame/contracts/rpc/Cargo.toml | 2 +- frame/contracts/rpc/runtime-api/Cargo.toml | 2 +- frame/democracy/Cargo.toml | 2 +- frame/elections-phragmen/Cargo.toml | 2 +- frame/elections/Cargo.toml | 2 +- frame/evm/Cargo.toml | 2 +- frame/example-offchain-worker/Cargo.toml | 2 +- frame/example/Cargo.toml | 2 +- frame/executive/Cargo.toml | 2 +- frame/finality-tracker/Cargo.toml | 2 +- frame/generic-asset/Cargo.toml | 2 +- frame/grandpa/Cargo.toml | 2 +- frame/identity/Cargo.toml | 2 +- frame/im-online/Cargo.toml | 2 +- frame/indices/Cargo.toml | 2 +- frame/membership/Cargo.toml | 2 +- frame/metadata/Cargo.toml | 2 +- frame/multisig/Cargo.toml | 2 +- frame/nicks/Cargo.toml | 2 +- frame/offences/Cargo.toml | 2 +- frame/offences/benchmarking/Cargo.toml | 4 ++-- frame/proxy/Cargo.toml | 2 +- frame/randomness-collective-flip/Cargo.toml | 2 +- frame/recovery/Cargo.toml | 2 +- frame/scored-pool/Cargo.toml | 2 +- frame/session/Cargo.toml | 2 +- frame/session/benchmarking/Cargo.toml | 2 +- frame/society/Cargo.toml | 2 +- frame/staking/Cargo.toml | 2 +- 76 files changed, 80 insertions(+), 80 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 069a4ea1a8b..ef15e57e800 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5005,9 +5005,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "1.3.1" +version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a74f02beb35d47e0706155c9eac554b50c671e0d868fe8296bcdf44a9a4847bf" +checksum = "34d38aeaffc032ec69faa476b3caaca8d4dd7f3f798137ff30359e5c7869ceb6" dependencies = [ "arrayvec 0.5.1", "bitvec", @@ -5018,9 +5018,9 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a0ec292e92e8ec7c58e576adacc1e3f399c597c8f263c42f18420abe58e7245" +checksum = "cd20ff7e0399b274a5f5bb37b712fccb5b3a64b9128200d1c3cc40fe709cb073" dependencies = [ "proc-macro-crate", "proc-macro2", diff --git a/bin/node-template/pallets/template/Cargo.toml b/bin/node-template/pallets/template/Cargo.toml index a025a9b929e..6d8868386e3 100644 --- a/bin/node-template/pallets/template/Cargo.toml +++ b/bin/node-template/pallets/template/Cargo.toml @@ -12,7 +12,7 @@ description = "FRAME pallet template for defining custom runtime logic." 
targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } [dependencies.frame-support] default-features = false diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index 088a1324111..2bf31825428 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -11,7 +11,7 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } aura = { version = "2.0.0-rc5", default-features = false, package = "pallet-aura", path = "../../../frame/aura" } balances = { version = "2.0.0-rc5", default-features = false, package = "pallet-balances", path = "../../../frame/balances" } diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 71b4bfa69d8..03620976be8 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -34,7 +34,7 @@ crate-type = ["cdylib", "rlib"] [dependencies] # third-party dependencies -codec = { package = "parity-scale-codec", version = "1.3.1" } +codec = { package = "parity-scale-codec", version = "1.3.4" } serde = { version = "1.0.102", features = ["derive"] } futures = { version = "0.3.1", features = ["compat"] } hex-literal = "0.2.1" diff --git a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml index 3849dedb880..6c6920d62be 100644 --- a/bin/node/executor/Cargo.toml +++ b/bin/node/executor/Cargo.toml @@ -12,7 +12,7 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1" } +codec = { package = "parity-scale-codec", version = "1.3.4" } node-primitives = { version = "2.0.0-rc5", path = "../primitives" } node-runtime = { version = "2.0.0-rc5", path = "../runtime" } sc-executor = { version = "0.8.0-rc5", path = "../../../client/executor" } diff --git a/bin/node/inspect/Cargo.toml b/bin/node/inspect/Cargo.toml index 8e449e6840b..b7f828a5f1e 100644 --- a/bin/node/inspect/Cargo.toml +++ b/bin/node/inspect/Cargo.toml @@ -11,7 +11,7 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1" } +codec = { package = "parity-scale-codec", version = "1.3.4" } derive_more = "0.99" log = "0.4.8" sc-cli = { version = "0.8.0-rc5", path = "../../../client/cli" } diff --git a/bin/node/primitives/Cargo.toml b/bin/node/primitives/Cargo.toml index c61b5eda009..6ff8a05d614 100644 --- a/bin/node/primitives/Cargo.toml +++ b/bin/node/primitives/Cargo.toml @@ -11,7 +11,7 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } frame-system = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/system" } sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = 
"../../../primitives/application-crypto" } sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/core" } diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index c09887e3f2a..def3378643e 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # third-party dependencies -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } integer-sqrt = { version = "0.1.2" } serde = { version = "1.0.102", optional = true } static_assertions = "1.1.0" diff --git a/bin/node/testing/Cargo.toml b/bin/node/testing/Cargo.toml index 6f210c10c54..a61a344ccee 100644 --- a/bin/node/testing/Cargo.toml +++ b/bin/node/testing/Cargo.toml @@ -17,7 +17,7 @@ pallet-balances = { version = "2.0.0-rc5", path = "../../../frame/balances" } sc-service = { version = "0.8.0-rc5", features = ["test-helpers", "db"], path = "../../../client/service" } sc-client-db = { version = "0.8.0-rc5", path = "../../../client/db/", features = ["kvdb-rocksdb", "parity-db"] } sc-client-api = { version = "2.0.0-rc5", path = "../../../client/api/" } -codec = { package = "parity-scale-codec", version = "1.3.1" } +codec = { package = "parity-scale-codec", version = "1.3.4" } pallet-contracts = { version = "2.0.0-rc5", path = "../../../frame/contracts" } pallet-grandpa = { version = "2.0.0-rc5", path = "../../../frame/grandpa" } pallet-indices = { version = "2.0.0-rc5", path = "../../../frame/indices" } diff --git a/bin/utils/subkey/Cargo.toml b/bin/utils/subkey/Cargo.toml index 7578347a824..5338b360807 100644 --- a/bin/utils/subkey/Cargo.toml +++ b/bin/utils/subkey/Cargo.toml @@ -22,7 +22,7 @@ tiny-bip39 = "0.7" substrate-bip39 = "0.4.1" hex = "0.4.0" hex-literal = "0.2.1" -codec = { package = "parity-scale-codec", version = "1.3.1" } +codec = { package = "parity-scale-codec", version = "1.3.4" } frame-system = { version = "2.0.0-rc5", path = "../../../frame/system" } pallet-balances = { version = "2.0.0-rc5", path = "../../../frame/balances" } pallet-transaction-payment = { version = "2.0.0-rc5", path = "../../../frame/transaction-payment" } diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml index a789e4b7a8c..45601771a40 100644 --- a/client/api/Cargo.toml +++ b/client/api/Cargo.toml @@ -13,7 +13,7 @@ documentation = "https://docs.rs/sc-client-api" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } sp-consensus = { version = "0.8.0-rc5", path = "../../primitives/consensus/common" } derive_more = { version = "0.99.2" } sc-executor = { version = "0.8.0-rc5", path = "../executor" } diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index dd632198625..2866fc141a2 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -17,7 +17,7 @@ prost-build = "0.6.1" [dependencies] bytes = "0.5.0" -codec = { package = "parity-scale-codec", default-features = false, version = "1.3.1" } +codec = { package = "parity-scale-codec", default-features = false, version = "1.3.4" } derive_more = "0.99.2" futures = "0.3.4" futures-timer = "3.0.1" diff --git 
a/client/basic-authorship/Cargo.toml b/client/basic-authorship/Cargo.toml index a19e838e05c..6160a41fdef 100644 --- a/client/basic-authorship/Cargo.toml +++ b/client/basic-authorship/Cargo.toml @@ -12,7 +12,7 @@ description = "Basic implementation of block-authoring logic." targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1" } +codec = { package = "parity-scale-codec", version = "1.3.4" } futures = "0.3.4" futures-timer = "3.0.1" log = "0.4.8" diff --git a/client/block-builder/Cargo.toml b/client/block-builder/Cargo.toml index 6f8317413cb..ac1d0265fd9 100644 --- a/client/block-builder/Cargo.toml +++ b/client/block-builder/Cargo.toml @@ -22,7 +22,7 @@ sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } sp-block-builder = { version = "2.0.0-rc5", path = "../../primitives/block-builder" } sp-inherents = { version = "2.0.0-rc5", path = "../../primitives/inherents" } sc-client-api = { version = "2.0.0-rc5", path = "../api" } -codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } [dev-dependencies] substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index cde64ad6738..85a1eb0fe0a 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -16,7 +16,7 @@ derive_more = "0.99.2" env_logger = "0.7.0" log = "0.4.8" atty = "0.2.13" -regex = "1.3.1" +regex = "1.3.4" time = "0.1.42" ansi_term = "0.12.1" lazy_static = "1.4.0" diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index d8ed901ac47..6bf60335b7b 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -17,7 +17,7 @@ sp-consensus-aura = { version = "0.8.0-rc5", path = "../../../primitives/consens sp-block-builder = { version = "2.0.0-rc5", path = "../../../primitives/block-builder" } sc-block-builder = { version = "0.8.0-rc5", path = "../../../client/block-builder" } sc-client-api = { version = "2.0.0-rc5", path = "../../api" } -codec = { package = "parity-scale-codec", version = "1.3.1" } +codec = { package = "parity-scale-codec", version = "1.3.4" } sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } derive_more = "0.99.2" futures = "0.3.4" diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index 0c612f4146b..6ac8ca165eb 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -13,7 +13,7 @@ documentation = "https://docs.rs/sc-consensus-babe" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } sp-consensus-babe = { version = "0.8.0-rc5", path = "../../../primitives/consensus/babe" } sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } sp-application-crypto = { version = "2.0.0-rc5", path = "../../../primitives/application-crypto" } diff --git a/client/consensus/epochs/Cargo.toml b/client/consensus/epochs/Cargo.toml index 83d793bb2db..4a26611a758 100644 --- a/client/consensus/epochs/Cargo.toml +++ b/client/consensus/epochs/Cargo.toml @@ -12,7 +12,7 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", 
features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } parking_lot = "0.10.0" fork-tree = { version = "2.0.0-rc5", path = "../../../utils/fork-tree" } sp-runtime = { path = "../../../primitives/runtime" , version = "2.0.0-rc5"} diff --git a/client/consensus/pow/Cargo.toml b/client/consensus/pow/Cargo.toml index 0d4fae2d7ab..b72166f9ce9 100644 --- a/client/consensus/pow/Cargo.toml +++ b/client/consensus/pow/Cargo.toml @@ -12,7 +12,7 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index d18558043f5..9fe82d85053 100644 --- a/client/consensus/slots/Cargo.toml +++ b/client/consensus/slots/Cargo.toml @@ -13,7 +13,7 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1" } +codec = { package = "parity-scale-codec", version = "1.3.4" } sc-client-api = { version = "2.0.0-rc5", path = "../../api" } sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } sp-application-crypto = { version = "2.0.0-rc5", path = "../../../primitives/application-crypto" } diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 71fe65b3d18..c26f7121493 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -20,7 +20,7 @@ kvdb-memorydb = "0.7.0" linked-hash-map = "0.5.2" hash-db = "0.15.2" parity-util-mem = { version = "0.7.0", default-features = false, features = ["std"] } -codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } blake2-rfc = "0.2.18" sc-client-api = { version = "2.0.0-rc5", path = "../api" } diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index 0c0282b8d71..2a6844c31f3 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" -codec = { package = "parity-scale-codec", version = "1.3.1" } +codec = { package = "parity-scale-codec", version = "1.3.4" } sp-io = { version = "2.0.0-rc5", path = "../../primitives/io" } sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } sp-trie = { version = "2.0.0-rc5", path = "../../primitives/trie" } diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml index c343c17f4b8..2189d89b12d 100644 --- a/client/executor/common/Cargo.toml +++ b/client/executor/common/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] log = "0.4.8" derive_more = "0.99.2" parity-wasm = "0.41.0" -codec = { package = "parity-scale-codec", version = "1.3.1" } +codec = { package = "parity-scale-codec", version = "1.3.4" } wasmi = "0.6.2" sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } sp-allocator = { version = "2.0.0-rc5", path = "../../../primitives/allocator" } diff --git a/client/executor/wasmi/Cargo.toml 
b/client/executor/wasmi/Cargo.toml index 9588c12bc28..74456f06671 100644 --- a/client/executor/wasmi/Cargo.toml +++ b/client/executor/wasmi/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.8" wasmi = "0.6.2" -codec = { package = "parity-scale-codec", version = "1.3.1" } +codec = { package = "parity-scale-codec", version = "1.3.4" } sc-executor-common = { version = "0.8.0-rc5", path = "../common" } sp-wasm-interface = { version = "2.0.0-rc5", path = "../../../primitives/wasm-interface" } sp-runtime-interface = { version = "2.0.0-rc5", path = "../../../primitives/runtime-interface" } diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index b7891c5affc..0267cf6efad 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] log = "0.4.8" scoped-tls = "1.0" parity-wasm = "0.41.0" -codec = { package = "parity-scale-codec", version = "1.3.1" } +codec = { package = "parity-scale-codec", version = "1.3.4" } sc-executor-common = { version = "0.8.0-rc5", path = "../common" } sp-wasm-interface = { version = "2.0.0-rc5", path = "../../../primitives/wasm-interface" } sp-runtime-interface = { version = "2.0.0-rc5", path = "../../../primitives/runtime-interface" } diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index a8581a1b745..7b2e58b8be9 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -22,7 +22,7 @@ log = "0.4.8" parking_lot = "0.10.0" rand = "0.7.2" assert_matches = "1.3.0" -parity-scale-codec = { version = "1.3.1", features = ["derive"] } +parity-scale-codec = { version = "1.3.4", features = ["derive"] } sp-application-crypto = { version = "2.0.0-rc5", path = "../../primitives/application-crypto" } sp-arithmetic = { version = "2.0.0-rc5", path = "../../primitives/arithmetic" } sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } diff --git a/client/light/Cargo.toml b/client/light/Cargo.toml index 2c1aaf37a40..53e84ae3186 100644 --- a/client/light/Cargo.toml +++ b/client/light/Cargo.toml @@ -20,7 +20,7 @@ sp-core = { version = "2.0.0-rc2", path = "../../primitives/core" } sp-state-machine = { version = "0.8.0-rc2", path = "../../primitives/state-machine" } sc-client-api = { version = "2.0.0-rc2", path = "../api" } sp-api = { version = "2.0.0-rc2", path = "../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.3.1" } +codec = { package = "parity-scale-codec", version = "1.3.4" } sc-executor = { version = "0.8.0-rc2", path = "../executor" } [features] diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 600e156ca91..11346fdd3ff 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -19,7 +19,7 @@ prost-build = "0.6.1" bitflags = "1.2.0" bs58 = "0.3.1" bytes = "0.5.0" -codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } derive_more = "0.99.2" either = "1.5.3" erased-serde = "0.3.9" diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index a58278ee668..ef2b00daab4 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -22,7 +22,7 @@ log = "0.4.8" threadpool = "1.7" num_cpus = "1.10" sp-offchain = { version = "2.0.0-rc5", path = "../../primitives/offchain" } -codec = { package = "parity-scale-codec", version = "1.3.1", 
features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } parking_lot = "0.10.0" sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } rand = "0.7.2" diff --git a/client/rpc-api/Cargo.toml b/client/rpc-api/Cargo.toml index 13469e09104..7701befbf71 100644 --- a/client/rpc-api/Cargo.toml +++ b/client/rpc-api/Cargo.toml @@ -12,7 +12,7 @@ description = "Substrate RPC interfaces." targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1" } +codec = { package = "parity-scale-codec", version = "1.3.4" } derive_more = "0.99.2" futures = { version = "0.3.1", features = ["compat"] } jsonrpc-core = "14.2.0" diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index 1eb3b71be49..9c91fa3bc07 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] sc-rpc-api = { version = "0.8.0-rc5", path = "../rpc-api" } sc-client-api = { version = "2.0.0-rc5", path = "../api" } sp-api = { version = "2.0.0-rc5", path = "../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.3.1" } +codec = { package = "parity-scale-codec", version = "1.3.4" } futures = { version = "0.3.1", features = ["compat"] } jsonrpc-pubsub = "14.2.0" log = "0.4.8" diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index c5ccd442282..32f6532e7e0 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -60,7 +60,7 @@ sc-light = { version = "2.0.0-rc5", path = "../light" } sc-client-api = { version = "2.0.0-rc5", path = "../api" } sp-api = { version = "2.0.0-rc5", path = "../../primitives/api" } sc-client-db = { version = "0.8.0-rc5", default-features = false, path = "../db" } -codec = { package = "parity-scale-codec", version = "1.3.1" } +codec = { package = "parity-scale-codec", version = "1.3.4" } sc-executor = { version = "0.8.0-rc5", path = "../executor" } sc-transaction-pool = { version = "2.0.0-rc5", path = "../transaction-pool" } sp-transaction-pool = { version = "2.0.0-rc5", path = "../../primitives/transaction-pool" } diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index 6948579e623..d8f069eadfd 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -41,4 +41,4 @@ sc-client-api = { version = "2.0.0-rc5", path = "../../api" } sc-block-builder = { version = "0.8.0-rc5", path = "../../block-builder" } sc-executor = { version = "0.8.0-rc5", path = "../../executor" } sp-panic-handler = { version = "2.0.0-rc5", path = "../../../primitives/panic-handler" } -parity-scale-codec = "1.3.1" +parity-scale-codec = "1.3.4" diff --git a/client/state-db/Cargo.toml b/client/state-db/Cargo.toml index 3a1436e677d..685f68f0835 100644 --- a/client/state-db/Cargo.toml +++ b/client/state-db/Cargo.toml @@ -16,7 +16,7 @@ parking_lot = "0.10.0" log = "0.4.8" sc-client-api = { version = "2.0.0-rc5", path = "../api" } sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } parity-util-mem-derive = "0.1.0" diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml index 6ae3f5a8abb..56ea881d7a0 100644 --- a/client/transaction-pool/Cargo.toml +++ 
b/client/transaction-pool/Cargo.toml @@ -12,7 +12,7 @@ description = "Substrate transaction pool implementation." targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1" } +codec = { package = "parity-scale-codec", version = "1.3.4" } derive_more = "0.99.2" futures = { version = "0.3.1", features = ["compat"] } futures-diagnose = "1.0" diff --git a/client/transaction-pool/graph/Cargo.toml b/client/transaction-pool/graph/Cargo.toml index 3b47997a2c2..8719a9c8fed 100644 --- a/client/transaction-pool/graph/Cargo.toml +++ b/client/transaction-pool/graph/Cargo.toml @@ -29,7 +29,7 @@ retain_mut = "0.1.1" [dev-dependencies] assert_matches = "1.3.0" -codec = { package = "parity-scale-codec", version = "1.3.1" } +codec = { package = "parity-scale-codec", version = "1.3.4" } substrate-test-runtime = { version = "2.0.0-rc5", path = "../../../test-utils/runtime" } criterion = "0.3" diff --git a/frame/assets/Cargo.toml b/frame/assets/Cargo.toml index 9a2d395e020..47bf0cecf21 100644 --- a/frame/assets/Cargo.toml +++ b/frame/assets/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } # Needed for various traits. In our case, `OnFinalize`. sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } # Needed for type-safe access to storage DB. diff --git a/frame/atomic-swap/Cargo.toml b/frame/atomic-swap/Cargo.toml index 6009327ab39..704d22ba780 100644 --- a/frame/atomic-swap/Cargo.toml +++ b/frame/atomic-swap/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } diff --git a/frame/aura/Cargo.toml b/frame/aura/Cargo.toml index e31ebc45a50..cc9b1bf6f66 100644 --- a/frame/aura/Cargo.toml +++ b/frame/aura/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/application-crypto" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/inherents" } sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } diff --git a/frame/authority-discovery/Cargo.toml b/frame/authority-discovery/Cargo.toml index 9743badc422..bfd7017d06f 100644 --- a/frame/authority-discovery/Cargo.toml +++ b/frame/authority-discovery/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-authority-discovery = { version = "2.0.0-rc5", default-features = false, path = 
"../../primitives/authority-discovery" } sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/application-crypto" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } pallet-session = { version = "2.0.0-rc5", features = ["historical" ], path = "../session", default-features = false } diff --git a/frame/authorship/Cargo.toml b/frame/authorship/Cargo.toml index f4dd71ef6ad..9ddd24888ca 100644 --- a/frame/authorship/Cargo.toml +++ b/frame/authorship/Cargo.toml @@ -12,7 +12,7 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/inherents" } sp-authorship = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/authorship" } sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } diff --git a/frame/babe/Cargo.toml b/frame/babe/Cargo.toml index 41a6496abd3..e2577e8daf1 100644 --- a/frame/babe/Cargo.toml +++ b/frame/babe/Cargo.toml @@ -12,7 +12,7 @@ description = "Consensus extension module for BABE consensus. Collects on-chain targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } diff --git a/frame/balances/Cargo.toml b/frame/balances/Cargo.toml index df13f50802c..e6c8eec65de 100644 --- a/frame/balances/Cargo.toml +++ b/frame/balances/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml index b66fb864001..37dcd85b598 100644 --- a/frame/benchmarking/Cargo.toml +++ b/frame/benchmarking/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] linregress = "0.1" paste = "0.1" -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +codec = { package = "parity-scale-codec", version = 
"1.3.4", default-features = false } sp-api = { version = "2.0.0-rc5", path = "../../primitives/api", default-features = false } sp-runtime-interface = { version = "2.0.0-rc5", path = "../../primitives/runtime-interface", default-features = false } sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime", default-features = false } diff --git a/frame/collective/Cargo.toml b/frame/collective/Cargo.toml index b5ab0bf3bcb..b1db6fe6b97 100644 --- a/frame/collective/Cargo.toml +++ b/frame/collective/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index a57f0e33e32..f3ac96d68e9 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } pwasm-utils = { version = "0.12.0", default-features = false } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } parity-wasm = { version = "0.41.0", default-features = false } wasmi-validation = { version = "0.3.0", default-features = false } sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } diff --git a/frame/contracts/common/Cargo.toml b/frame/contracts/common/Cargo.toml index 8788daf477b..a8f5e407600 100644 --- a/frame/contracts/common/Cargo.toml +++ b/frame/contracts/common/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # This crate should not rely on any of the frame primitives. -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/std" } sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/runtime" } diff --git a/frame/contracts/rpc/Cargo.toml b/frame/contracts/rpc/Cargo.toml index 8a2f64f5827..c6b8fc8ac10 100644 --- a/frame/contracts/rpc/Cargo.toml +++ b/frame/contracts/rpc/Cargo.toml @@ -12,7 +12,7 @@ description = "Node-specific RPC methods for interaction with contracts." 
targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1" } +codec = { package = "parity-scale-codec", version = "1.3.4" } jsonrpc-core = "14.2.0" jsonrpc-core-client = "14.2.0" jsonrpc-derive = "14.2.1" diff --git a/frame/contracts/rpc/runtime-api/Cargo.toml b/frame/contracts/rpc/runtime-api/Cargo.toml index aec4005cda9..a9266b986c6 100644 --- a/frame/contracts/rpc/runtime-api/Cargo.toml +++ b/frame/contracts/rpc/runtime-api/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-api = { version = "2.0.0-rc5", default-features = false, path = "../../../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../../../primitives/std" } sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../../../primitives/runtime" } pallet-contracts-primitives = { version = "2.0.0-rc5", default-features = false, path = "../../common" } diff --git a/frame/democracy/Cargo.toml b/frame/democracy/Cargo.toml index bc373764f16..3d67b4d0ec5 100644 --- a/frame/democracy/Cargo.toml +++ b/frame/democracy/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } diff --git a/frame/elections-phragmen/Cargo.toml b/frame/elections-phragmen/Cargo.toml index 32433feb4f1..83ac253a8ba 100644 --- a/frame/elections-phragmen/Cargo.toml +++ b/frame/elections-phragmen/Cargo.toml @@ -12,7 +12,7 @@ description = "FRAME pallet based on seq-Phragmén election method." 
targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true } sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } sp-npos-elections = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/npos-elections" } diff --git a/frame/elections/Cargo.toml b/frame/elections/Cargo.toml index 253a4a436db..8226512a626 100644 --- a/frame/elections/Cargo.toml +++ b/frame/elections/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } diff --git a/frame/evm/Cargo.toml b/frame/evm/Cargo.toml index 05f44f6ec02..a5ec28ddf9c 100644 --- a/frame/evm/Cargo.toml +++ b/frame/evm/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } pallet-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../timestamp" } diff --git a/frame/example-offchain-worker/Cargo.toml b/frame/example-offchain-worker/Cargo.toml index 555370c8d95..5f11bd54a45 100644 --- a/frame/example-offchain-worker/Cargo.toml +++ b/frame/example-offchain-worker/Cargo.toml @@ -12,7 +12,7 @@ description = "FRAME example pallet for offchain worker" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } serde = { version = "1.0.101", optional = true } diff --git a/frame/example/Cargo.toml b/frame/example/Cargo.toml index 5a23f0fb3e2..2f7af90d76d 100644 --- a/frame/example/Cargo.toml +++ b/frame/example/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } pallet-balances = { version = "2.0.0-rc5", default-features = false, path = 
"../balances" } diff --git a/frame/executive/Cargo.toml b/frame/executive/Cargo.toml index fdc49628a42..ea123bd6e7f 100644 --- a/frame/executive/Cargo.toml +++ b/frame/executive/Cargo.toml @@ -12,7 +12,7 @@ description = "FRAME executives engine" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } serde = { version = "1.0.101", optional = true } diff --git a/frame/finality-tracker/Cargo.toml b/frame/finality-tracker/Cargo.toml index 91544215f14..9b54717e4db 100644 --- a/frame/finality-tracker/Cargo.toml +++ b/frame/finality-tracker/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", default-features = false, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/inherents" } sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } diff --git a/frame/generic-asset/Cargo.toml b/frame/generic-asset/Cargo.toml index 4c1d474e578..e1e59030627 100644 --- a/frame/generic-asset/Cargo.toml +++ b/frame/generic-asset/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } diff --git a/frame/grandpa/Cargo.toml b/frame/grandpa/Cargo.toml index 3267c56de9f..004255b9e1e 100644 --- a/frame/grandpa/Cargo.toml +++ b/frame/grandpa/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/application-crypto" } sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } sp-finality-grandpa = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/finality-grandpa" } diff --git a/frame/identity/Cargo.toml b/frame/identity/Cargo.toml index 04337a28fa7..99e147b3141 100644 --- a/frame/identity/Cargo.toml +++ b/frame/identity/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } 
-codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } diff --git a/frame/im-online/Cargo.toml b/frame/im-online/Cargo.toml index 9dfc41a802c..75fc4e2454c 100644 --- a/frame/im-online/Cargo.toml +++ b/frame/im-online/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/application-crypto" } pallet-authorship = { version = "2.0.0-rc5", default-features = false, path = "../authorship" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } diff --git a/frame/indices/Cargo.toml b/frame/indices/Cargo.toml index 32cbc55fa0d..85ab36bc039 100644 --- a/frame/indices/Cargo.toml +++ b/frame/indices/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } sp-keyring = { version = "2.0.0-rc5", optional = true, path = "../../primitives/keyring" } sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } diff --git a/frame/membership/Cargo.toml b/frame/membership/Cargo.toml index 2b730e0b390..fd138a97c7e 100644 --- a/frame/membership/Cargo.toml +++ b/frame/membership/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } diff --git a/frame/metadata/Cargo.toml b/frame/metadata/Cargo.toml index 6d253ab5482..be508ef2c04 100644 --- a/frame/metadata/Cargo.toml +++ b/frame/metadata/Cargo.toml @@ -12,7 +12,7 @@ description = "Decodable variant of the RuntimeMetadata." 
targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true, features = ["derive"] } sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } diff --git a/frame/multisig/Cargo.toml b/frame/multisig/Cargo.toml index cbd4047658c..aae3646644d 100644 --- a/frame/multisig/Cargo.toml +++ b/frame/multisig/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } diff --git a/frame/nicks/Cargo.toml b/frame/nicks/Cargo.toml index 44ade197aa8..e63ed2c600d 100644 --- a/frame/nicks/Cargo.toml +++ b/frame/nicks/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } diff --git a/frame/offences/Cargo.toml b/frame/offences/Cargo.toml index deb4d057fc0..db0c847e9a1 100644 --- a/frame/offences/Cargo.toml +++ b/frame/offences/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] pallet-balances = { version = "2.0.0-rc5", default-features = false, path = "../balances" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } diff --git a/frame/offences/benchmarking/Cargo.toml b/frame/offences/benchmarking/Cargo.toml index df29034ee71..12d4882e60b 100644 --- a/frame/offences/benchmarking/Cargo.toml +++ b/frame/offences/benchmarking/Cargo.toml @@ -12,7 +12,7 @@ description = "FRAME offences pallet benchmarking" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../../benchmarking" } frame-support = { version = "2.0.0-rc5", default-features = false, path = 
"../../support" } frame-system = { version = "2.0.0-rc5", default-features = false, path = "../../system" } @@ -28,7 +28,6 @@ sp-staking = { version = "2.0.0-rc5", default-features = false, path = "../../.. sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/std" } [dev-dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } pallet-staking-reward-curve = { version = "2.0.0-rc5", path = "../../staking/reward-curve" } pallet-timestamp = { version = "2.0.0-rc5", path = "../../timestamp" } serde = { version = "1.0.101" } @@ -51,4 +50,5 @@ std = [ "sp-runtime/std", "sp-staking/std", "sp-std/std", + "codec/std", ] diff --git a/frame/proxy/Cargo.toml b/frame/proxy/Cargo.toml index f6059cd0bfd..155a1395420 100644 --- a/frame/proxy/Cargo.toml +++ b/frame/proxy/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } diff --git a/frame/randomness-collective-flip/Cargo.toml b/frame/randomness-collective-flip/Cargo.toml index 06ab6496e7c..28a16dc6411 100644 --- a/frame/randomness-collective-flip/Cargo.toml +++ b/frame/randomness-collective-flip/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] safe-mix = { version = "1.0", default-features = false } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } diff --git a/frame/recovery/Cargo.toml b/frame/recovery/Cargo.toml index 525df050e48..6302a817171 100644 --- a/frame/recovery/Cargo.toml +++ b/frame/recovery/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } diff --git a/frame/scored-pool/Cargo.toml b/frame/scored-pool/Cargo.toml index bacf2bdcffa..1c25b8abfdf 100644 --- a/frame/scored-pool/Cargo.toml +++ b/frame/scored-pool/Cargo.toml @@ -12,7 +12,7 @@ description = "FRAME pallet for scored pools" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } serde = 
{ version = "1.0.101", optional = true } sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } diff --git a/frame/session/Cargo.toml b/frame/session/Cargo.toml index 11efa857ca4..b4150fb8e78 100644 --- a/frame/session/Cargo.toml +++ b/frame/session/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } diff --git a/frame/session/benchmarking/Cargo.toml b/frame/session/benchmarking/Cargo.toml index 356bfad6b98..e784ff16e85 100644 --- a/frame/session/benchmarking/Cargo.toml +++ b/frame/session/benchmarking/Cargo.toml @@ -24,7 +24,7 @@ rand = { version = "0.7.2", default-features = false } [dev-dependencies] serde = { version = "1.0.101" } -codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } pallet-staking-reward-curve = { version = "2.0.0-rc5", path = "../../staking/reward-curve" } sp-io ={ version = "2.0.0-rc5", path = "../../../primitives/io" } diff --git a/frame/society/Cargo.toml b/frame/society/Cargo.toml index f7975890bc6..229191c3ccb 100644 --- a/frame/society/Cargo.toml +++ b/frame/society/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml index e0f963df847..f0bc0c0ac7c 100644 --- a/frame/staking/Cargo.toml +++ b/frame/staking/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] static_assertions = "1.1.0" serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } sp-npos-elections = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/npos-elections" } sp-io ={ version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -- GitLab From 4e832cdb00af0f70ee776231eaa2ba6cdf092fc0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Wed, 29 Jul 2020 12:32:27 +0200 Subject: 
[PATCH 018/132] seal: Add benchmarks for dispatchables (#6715) * seal: Fix syntax that confuses rust-analyzer * seal: Add benchmarks for Dispatchables These are only the benchmarks for the dispatchables of the pallet. Those are not listed in the Schedule because we do not want to pull the Schedule from storage before dispatching. This OK because those costs are not related to actual contract execution. Those costs (instruction costs, ext_* costs) will be benchmarked seperatly and entered into the default Schedule. * seal: Add a maximum code size * Fix comments from review * Removed SEED constant --- Cargo.lock | 3 +- bin/node/runtime/Cargo.toml | 1 + bin/node/runtime/src/lib.rs | 1 + frame/contracts/Cargo.toml | 29 +- frame/contracts/fixtures/benchmarks/dummy.wat | 4 + frame/contracts/src/benchmarking.rs | 271 ++++++++++++++++++ frame/contracts/src/lib.rs | 13 +- frame/contracts/src/tests.rs | 8 +- frame/contracts/src/wasm/mod.rs | 3 +- frame/contracts/src/wasm/prepare.rs | 5 +- 10 files changed, 315 insertions(+), 23 deletions(-) create mode 100644 frame/contracts/fixtures/benchmarks/dummy.wat create mode 100644 frame/contracts/src/benchmarking.rs diff --git a/Cargo.lock b/Cargo.lock index ef15e57e800..0fe8e4919ef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4262,6 +4262,7 @@ version = "2.0.0-rc5" dependencies = [ "assert_matches", "bitflags", + "frame-benchmarking", "frame-support", "frame-system", "hex-literal", @@ -4279,8 +4280,8 @@ dependencies = [ "sp-runtime", "sp-sandbox", "sp-std", - "wabt", "wasmi-validation", + "wat", ] [[package]] diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index def3378643e..35ed7400459 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -152,6 +152,7 @@ runtime-benchmarks = [ "pallet-babe/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-collective/runtime-benchmarks", + "pallet-contracts/runtime-benchmarks", "pallet-democracy/runtime-benchmarks", "pallet-elections-phragmen/runtime-benchmarks", "pallet-grandpa/runtime-benchmarks", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index ad748c45740..9dae66a1275 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1162,6 +1162,7 @@ impl_runtime_apis! 
{ add_benchmark!(params, batches, pallet_babe, Babe); add_benchmark!(params, batches, pallet_balances, Balances); add_benchmark!(params, batches, pallet_collective, Council); + add_benchmark!(params, batches, pallet_contracts, Contracts); add_benchmark!(params, batches, pallet_democracy, Democracy); add_benchmark!(params, batches, pallet_elections_phragmen, Elections); add_benchmark!(params, batches, pallet_grandpa, Grandpa); diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index f3ac96d68e9..74655b9528f 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -12,29 +12,31 @@ description = "FRAME pallet for WASM contracts" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true, features = ["derive"] } -pwasm-utils = { version = "0.12.0", default-features = false } +bitflags = "1.0" codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } +frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +pallet-contracts-primitives = { version = "2.0.0-rc5", default-features = false, path = "common" } parity-wasm = { version = "0.41.0", default-features = false } -wasmi-validation = { version = "0.3.0", default-features = false } +pwasm-utils = { version = "0.12.0", default-features = false } +serde = { version = "1.0.101", optional = true, features = ["derive"] } sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } sp-sandbox = { version = "0.8.0-rc5", default-features = false, path = "../../primitives/sandbox" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -pallet-contracts-primitives = { version = "2.0.0-rc5", default-features = false, path = "common" } -bitflags = "1.0" +wasmi-validation = { version = "0.3.0", default-features = false } +wat = { version = "1.0", optional = true, default-features = false } [dev-dependencies] -wabt = "0.9.2" assert_matches = "1.3.0" hex-literal = "0.2.1" -pretty_assertions = "0.6.1" pallet-balances = { version = "2.0.0-rc5", path = "../balances" } pallet-timestamp = { version = "2.0.0-rc5", path = "../timestamp" } pallet-randomness-collective-flip = { version = "2.0.0-rc5", path = "../randomness-collective-flip" } +pretty_assertions = "0.6.1" +wat = "1.0" [features] default = ["std"] @@ -53,3 +55,12 @@ std = [ "wasmi-validation/std", "pallet-contracts-primitives/std", ] +runtime-benchmarks = [ + "frame-benchmarking", + "wat", + # We are linking the wat crate which uses std and therefore brings with it the + # std panic handler. Therefore we need to disable out own panic handlers. Mind that + # we still override the std memory allocator. 
+ "sp-io/disable_panic_handler", + "sp-io/disable_oom", +] diff --git a/frame/contracts/fixtures/benchmarks/dummy.wat b/frame/contracts/fixtures/benchmarks/dummy.wat new file mode 100644 index 00000000000..b878d26ef91 --- /dev/null +++ b/frame/contracts/fixtures/benchmarks/dummy.wat @@ -0,0 +1,4 @@ +(module + (func (export "call")) + (func (export "deploy")) +) diff --git a/frame/contracts/src/benchmarking.rs b/frame/contracts/src/benchmarking.rs new file mode 100644 index 00000000000..29f992643ef --- /dev/null +++ b/frame/contracts/src/benchmarking.rs @@ -0,0 +1,271 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarks for the contracts pallet + +#![cfg(feature = "runtime-benchmarks")] + +use crate::*; +use crate::Module as Contracts; + +use frame_benchmarking::{benchmarks, account}; +use frame_system::{Module as System, RawOrigin}; +use parity_wasm::elements::FuncBody; +use sp_runtime::traits::Hash; + +macro_rules! load_module { + ($name:expr) => {{ + let code = include_bytes!(concat!("../fixtures/benchmarks/", $name, ".wat")); + compile_module::(code) + }}; +} + +fn compile_module(code: &[u8]) -> (Vec, ::Output) { + let code = sp_std::str::from_utf8(code).expect("Invalid utf8 in wat file."); + let binary = wat::parse_str(code).expect("Failed to compile wat file."); + let hash = T::Hashing::hash(&binary); + (binary, hash) +} + +fn funding() -> BalanceOf { + T::Currency::minimum_balance() * 10_000.into() +} + +fn create_funded_user(string: &'static str, n: u32) -> T::AccountId { + let user = account(string, n, 0); + T::Currency::make_free_balance_be(&user, funding::()); + user +} + +fn contract_with_call_body(body: FuncBody) -> (Vec, ::Output) { + use parity_wasm::elements::{ + Instructions, Instruction::End, + }; + let contract = parity_wasm::builder::ModuleBuilder::new() + // deploy function (idx 0) + .function() + .signature().with_params(vec![]).with_return_type(None).build() + .body().with_instructions(Instructions::new(vec![End])).build() + .build() + // call function (idx 1) + .function() + .signature().with_params(vec![]).with_return_type(None).build() + .with_body(body) + .build() + .export().field("deploy").internal().func(0).build() + .export().field("call").internal().func(1).build() + .build(); + let bytes = contract.to_bytes().unwrap(); + let hash = T::Hashing::hash(&bytes); + (bytes, hash) +} + +fn expanded_contract(target_bytes: u32) -> (Vec, ::Output) { + use parity_wasm::elements::{ + Instruction::{self, If, I32Const, Return, End}, + BlockType, Instructions, + }; + // Base size of a contract is 47 bytes and each expansion adds 6 bytes. + // We do one expansion less to account for the code section and function body + // size fields inside the binary wasm module representation which are leb128 encoded + // and therefore grow in size when the contract grows. 
We are not allowed to overshoot + // because of the maximum code size that is enforced by `put_code`. + let expansions = (target_bytes.saturating_sub(47) / 6).saturating_sub(1) as usize; + const EXPANSION: [Instruction; 4] = [ + I32Const(0), + If(BlockType::NoResult), + Return, + End, + ]; + let instructions = Instructions::new( + EXPANSION + .iter() + .cycle() + .take(EXPANSION.len() * expansions) + .cloned() + .chain(sp_std::iter::once(End)) + .collect() + ); + contract_with_call_body::(FuncBody::new(Vec::new(), instructions)) +} + +fn advance_block(num: ::BlockNumber) { + let now = System::::block_number(); + System::::set_block_number(now + num); +} + +benchmarks! { + _ { + } + + // This extrinsic is pretty much constant as it is only a simple setter. + update_schedule { + let schedule = Schedule { + version: 1, + .. Default::default() + }; + }: _(RawOrigin::Root, schedule) + + // This constructs a contract that is maximal expensive to instrument. + // It creates a maximum number of metering blocks per byte. + put_code { + let n in 0 .. Contracts::::current_schedule().max_code_size; + let caller = create_funded_user::("caller", 0); + let (binary, hash) = expanded_contract::(n); + }: _(RawOrigin::Signed(caller), binary) + + // Instantiate uses a dummy contract constructor to measure the overhead of the instantiate. + // The size of the data has no influence on the costs of this extrinsic as long as the contract + // won't call `ext_input` in its constructor to copy the data to contract memory. + // The dummy contract used here does not do this. The costs for the data copy is billed as + // part of `ext_input`. + instantiate { + let data = vec![0u8; 128]; + let endowment = Config::::subsistence_threshold_uncached(); + let caller = create_funded_user::("caller", 0); + let (binary, hash) = load_module!("dummy"); + Contracts::::put_code(RawOrigin::Signed(caller.clone()).into(), binary.to_vec()) + .unwrap(); + + }: _( + RawOrigin::Signed(caller.clone()), + endowment, + Weight::max_value(), + hash, + data + ) + verify { + assert_eq!( + funding::() - endowment, + T::Currency::free_balance(&caller), + ) + } + + // We just call a dummy contract to measure to overhead of the call extrinsic. + // As for instantiate the size of the data does not influence the costs. + call { + let data = vec![0u8; 128]; + let endowment = Config::::subsistence_threshold_uncached(); + let value = T::Currency::minimum_balance() * 100.into(); + let caller = create_funded_user::("caller", 0); + let (binary, hash) = load_module!("dummy"); + let addr = T::DetermineContractAddress::contract_address_for(&hash, &[], &caller); + Contracts::::put_code(RawOrigin::Signed(caller.clone()).into(), binary.to_vec()) + .unwrap(); + Contracts::::instantiate( + RawOrigin::Signed(caller.clone()).into(), + endowment, + Weight::max_value(), + hash, + vec![], + ).unwrap(); + }: _( + RawOrigin::Signed(caller.clone()), + T::Lookup::unlookup(addr), + value, + Weight::max_value(), + data + ) + verify { + assert_eq!( + funding::() - endowment - value, + T::Currency::free_balance(&caller), + ) + } + + // We benchmark the costs for sucessfully evicting an empty contract. + // The actual costs are depending on how many storage items the evicted contract + // does have. However, those costs are not to be payed by the sender but + // will be distributed over multiple blocks using a scheduler. Otherwise there is + // no incentive to remove large contracts when the removal is more expensive than + // the reward for removing them. 
+ claim_surcharge { + let endowment = Config::::subsistence_threshold_uncached(); + let value = T::Currency::minimum_balance() * 100.into(); + let caller = create_funded_user::("caller", 0); + let (binary, hash) = load_module!("dummy"); + let addr = T::DetermineContractAddress::contract_address_for(&hash, &[], &caller); + Contracts::::put_code(RawOrigin::Signed(caller.clone()).into(), binary.to_vec()) + .unwrap(); + Contracts::::instantiate( + RawOrigin::Signed(caller.clone()).into(), + endowment, + Weight::max_value(), + hash, + vec![], + ).unwrap(); + + // instantiate should leave us with an alive contract + ContractInfoOf::::get(addr.clone()).unwrap().get_alive().unwrap(); + + // generate some rent + advance_block::(::SignedClaimHandicap::get() + 1.into()); + + }: _(RawOrigin::Signed(caller.clone()), addr.clone(), None) + verify { + // the claim surcharge should have evicted the contract + ContractInfoOf::::get(addr.clone()).unwrap().get_tombstone().unwrap(); + + // the caller should get the reward for being a good snitch + assert_eq!( + funding::() - endowment + ::SurchargeReward::get(), + T::Currency::free_balance(&caller), + ); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::tests::{ExtBuilder, Test}; + use frame_support::assert_ok; + + #[test] + fn update_schedule() { + ExtBuilder::default().build().execute_with(|| { + assert_ok!(test_benchmark_update_schedule::()); + }); + } + + #[test] + fn put_code() { + ExtBuilder::default().build().execute_with(|| { + assert_ok!(test_benchmark_put_code::()); + }); + } + + #[test] + fn instantiate() { + ExtBuilder::default().build().execute_with(|| { + assert_ok!(test_benchmark_instantiate::()); + }); + } + + #[test] + fn call() { + ExtBuilder::default().build().execute_with(|| { + assert_ok!(test_benchmark_call::()); + }); + } + + #[test] + fn claim_surcharge() { + ExtBuilder::default().build().execute_with(|| { + assert_ok!(test_benchmark_claim_surcharge::()); + }); + } +} diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 003853102d6..6d0b481dd0d 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -85,6 +85,7 @@ mod storage; mod exec; mod wasm; mod rent; +mod benchmarking; #[cfg(test)] mod tests; @@ -107,7 +108,7 @@ use sp_runtime::{ RuntimeDebug, }; use frame_support::{ - decl_module, decl_event, decl_storage, decl_error, + decl_module, decl_event, decl_storage, decl_error, ensure, parameter_types, storage::child::ChildInfo, dispatch::{DispatchResult, DispatchResultWithPostInfo}, traits::{OnUnbalanced, Currency, Get, Time, Randomness}, @@ -420,6 +421,8 @@ decl_error! { /// for a tombstone to be created. Use `ext_terminate` to remove a contract without /// leaving a tombstone behind. InsufficientBalance, + /// The code supplied to `put_code` exceeds the limit specified in the current schedule. + CodeTooLarge, } } @@ -495,6 +498,7 @@ decl_module! { ) -> DispatchResult { ensure_signed(origin)?; let schedule = >::current_schedule(); + ensure!(code.len() as u32 <= schedule.max_code_size, Error::::CodeTooLarge); let result = wasm::save_code::(code, &schedule); if let Ok(code_hash) = result { Self::deposit_event(RawEvent::CodeStored(code_hash)); @@ -619,7 +623,7 @@ impl Module { address: T::AccountId, key: [u8; 32], ) -> sp_std::result::Result>, ContractAccessError> { - let contract_info = >::get(&address) + let contract_info = ContractInfoOf::::get(&address) .ok_or(ContractAccessError::DoesntExist)? 
.get_alive() .ok_or(ContractAccessError::IsTombstone)?; @@ -826,6 +830,10 @@ pub struct Schedule { /// The maximum length of a subject used for PRNG generation. pub max_subject_len: u32, + + /// The maximum length of a contract code in bytes. This limit applies to the uninstrumented + // and pristine form of the code as supplied to `put_code`. + pub max_code_size: u32, } // 500 (2 instructions per nano second on 2GHZ) * 1000x slowdown through wasmi @@ -857,6 +865,7 @@ impl Default for Schedule { max_table_size: 16 * 1024, enable_println: false, max_subject_len: 32, + max_code_size: 512 * 1024, } } } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 0d2a2f7a314..a2d85bb3135 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -269,16 +269,12 @@ impl ExtBuilder { /// The fixture files are located under the `fixtures/` directory. fn compile_module( fixture_name: &str, -) -> Result<(Vec, ::Output), wabt::Error> +) -> wat::Result<(Vec, ::Output)> where T: frame_system::Trait, { - use std::fs; - let fixture_path = ["fixtures/", fixture_name, ".wat"].concat(); - let module_wat_source = - fs::read_to_string(&fixture_path).expect(&format!("Unable to find {} fixture", fixture_name)); - let wasm_binary = wabt::wat2wasm(module_wat_source)?; + let wasm_binary = wat::parse_file(fixture_path)?; let code_hash = T::Hashing::hash(&wasm_binary); Ok((wasm_binary, code_hash)) } diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 500c0f4dcc5..68dbae896b0 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -157,7 +157,6 @@ mod tests { use crate::tests::{Test, Call}; use crate::wasm::prepare::prepare_contract; use crate::{CodeHash, BalanceOf}; - use wabt; use hex_literal::hex; use assert_matches::assert_matches; use sp_runtime::DispatchError; @@ -459,7 +458,7 @@ mod tests { ) -> ExecResult { use crate::exec::Vm; - let wasm = wabt::wat2wasm(wat).unwrap(); + let wasm = wat::parse_str(wat).unwrap(); let schedule = crate::Schedule::default(); let prefab_module = prepare_contract::(&wasm, &schedule).unwrap(); diff --git a/frame/contracts/src/wasm/prepare.rs b/frame/contracts/src/wasm/prepare.rs index 03f33f2dc62..2ffbe3bbdf6 100644 --- a/frame/contracts/src/wasm/prepare.rs +++ b/frame/contracts/src/wasm/prepare.rs @@ -391,7 +391,6 @@ mod tests { use super::*; use crate::exec::Ext; use std::fmt; - use wabt; use assert_matches::assert_matches; impl fmt::Debug for PrefabWasmModule { @@ -417,7 +416,7 @@ mod tests { ($name:ident, $wat:expr, $($expected:tt)*) => { #[test] fn $name() { - let wasm = wabt::Wat2Wasm::new().validate(false).convert($wat).unwrap(); + let wasm = wat::parse_str($wat).unwrap(); let schedule = Schedule::default(); let r = prepare_contract::(wasm.as_ref(), &schedule); assert_matches!(r, $($expected)*); @@ -694,7 +693,7 @@ mod tests { #[test] fn ext_println_debug_enabled() { - let wasm = wabt::Wat2Wasm::new().validate(false).convert( + let wasm = wat::parse_str( r#" (module (import "env" "ext_println" (func $ext_println (param i32 i32))) -- GitLab From aaf1aa85575cfc5d90c415f58c0c20aff05c1ca4 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 29 Jul 2020 13:18:40 +0200 Subject: [PATCH 019/132] client/network: Adjust wording (#6755) Rename `NetworkWorker::from_worker` to `NetworkWorker::from_service` as it is a channel from the `NetworkService` to the `NetworkWorker`. 
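To make the renamed channel endpoints easier to keep apart, here is a minimal, self-contained sketch of the naming convention this commit settles on. It deliberately uses a plain `std::sync::mpsc` channel instead of Substrate's `tracing_unbounded`, and the one-variant `ServiceToWorkerMsg` enum is only a stand-in for the real message type, so treat it as an illustration of the idea rather than the actual implementation:

```rust
use std::sync::mpsc;

// Stand-in for the real `ServiceToWorkerMsg` in `client/network/src/service.rs`.
enum ServiceToWorkerMsg {
    AnnounceBlock,
}

struct NetworkService {
    // The service pushes requests *to* the worker, hence `to_worker`.
    to_worker: mpsc::Sender<ServiceToWorkerMsg>,
}

struct NetworkWorker {
    // The worker drains messages coming *from* the service; the old name
    // `from_worker` described the wrong end of the channel.
    from_service: mpsc::Receiver<ServiceToWorkerMsg>,
}

fn main() {
    let (to_worker, from_service) = mpsc::channel();
    let service = NetworkService { to_worker };
    let worker = NetworkWorker { from_service };

    service.to_worker.send(ServiceToWorkerMsg::AnnounceBlock).unwrap();
    assert!(matches!(
        worker.from_service.recv().unwrap(),
        ServiceToWorkerMsg::AnnounceBlock
    ));
}
```

With each endpoint named after the peer it talks to, the worker-side poll in the actual patch (`this.from_service.poll_next_unpin(cx)`) reads naturally as "the next message the service sent us".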
--- client/network/src/service.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 7d4135de6b9..ea045013e12 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -125,7 +125,7 @@ impl NetworkWorker { ¶ms.network_config.transport, )?; - let (to_worker, from_worker) = tracing_unbounded("mpsc_network_worker"); + let (to_worker, from_service) = tracing_unbounded("mpsc_network_worker"); if let Some(path) = params.network_config.net_config_path { fs::create_dir_all(&path)?; @@ -361,7 +361,7 @@ impl NetworkWorker { network_service: swarm, service, import_queue: params.import_queue, - from_worker, + from_service, light_client_rqs: params.on_demand.and_then(|od| od.extract_receiver()), event_streams: out_events::OutChannels::new(params.metrics_registry.as_ref())?, metrics, @@ -843,7 +843,7 @@ enum ServiceToWorkerMsg { /// Main network worker. Must be polled in order for the network to advance. /// /// You are encouraged to poll this in a separate background thread or task. -#[must_use = "The NetworkWorker must be polled in order for the network to work"] +#[must_use = "The NetworkWorker must be polled in order for the network to advance"] pub struct NetworkWorker { /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. external_addresses: Arc>>, @@ -855,10 +855,10 @@ pub struct NetworkWorker { service: Arc>, /// The *actual* network. network_service: Swarm, - /// The import queue that was passed as initialization. + /// The import queue that was passed at initialization. import_queue: Box>, - /// Messages from the `NetworkService` and that must be processed. - from_worker: TracingUnboundedReceiver>, + /// Messages from the [`NetworkService`] that must be processed. + from_service: TracingUnboundedReceiver>, /// Receiver for queries from the light client that must be processed. light_client_rqs: Option>>, /// Senders for events that happen on the network. @@ -1137,7 +1137,7 @@ impl Future for NetworkWorker { loop { // Process the next message coming from the `NetworkService`. 
- let msg = match this.from_worker.poll_next_unpin(cx) { + let msg = match this.from_service.poll_next_unpin(cx) { Poll::Ready(Some(msg)) => msg, Poll::Ready(None) => return Poll::Ready(()), Poll::Pending => break, -- GitLab From 826feeb835ae082bdaf589e81f766ded975c7af9 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 29 Jul 2020 13:23:19 +0200 Subject: [PATCH 020/132] =?UTF-8?q?Add=20a=20back-pressure-friendly=20alte?= =?UTF-8?q?rnative=20to=20NetworkService::write=5Fnotifications=20?= =?UTF-8?q?=F0=9F=8E=89=20(#6692)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add NetworkService::send_notifications * Doc * Doc * API adjustment * Address concerns * Make it compile * Start implementation * Progress in the implementation * Change implementation strategy again * More work before weekend * Finish changes * Minor doc fix * Revert some minor changes * Apply suggestions from code review * GroupError -> NotifsHandlerError * Apply suggestions from code review Co-authored-by: Roman Borschel * state_transition_waker -> close_waker * Apply suggestions from code review Co-authored-by: Roman Borschel * Finish renames in service.rs * More renames * More review suggestsions applied * More review addressing * Final change * 512 -> 2048 Co-authored-by: Roman Borschel --- client/network/src/behaviour.rs | 95 ++++- client/network/src/protocol.rs | 80 ++-- client/network/src/protocol/generic_proto.rs | 2 +- .../src/protocol/generic_proto/behaviour.rs | 174 +++++--- .../src/protocol/generic_proto/handler.rs | 5 +- .../protocol/generic_proto/handler/group.rs | 330 +++++++++++---- .../generic_proto/handler/notif_out.rs | 102 +++-- .../src/protocol/generic_proto/tests.rs | 9 +- .../protocol/generic_proto/upgrade/legacy.rs | 2 +- .../generic_proto/upgrade/notifications.rs | 141 +------ client/network/src/service.rs | 393 +++++++++++++++--- client/network/src/service/tests.rs | 51 +++ 12 files changed, 954 insertions(+), 430 deletions(-) diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 5967613b98e..9a466388f4f 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -17,10 +17,11 @@ use crate::{ config::{ProtocolId, Role}, block_requests, light_client_handler, finality_requests, peer_info, discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut}, - protocol::{message::{self, Roles}, CustomMessageOutcome, Protocol}, - Event, ObservedRole, DhtEvent, ExHashT, + protocol::{message::{self, Roles}, CustomMessageOutcome, NotificationsSink, Protocol}, + ObservedRole, DhtEvent, ExHashT, }; +use bytes::Bytes; use codec::Encode as _; use libp2p::NetworkBehaviour; use libp2p::core::{Multiaddr, PeerId, PublicKey}; @@ -98,11 +99,53 @@ pub enum BehaviourOut { request_duration: Duration, }, - /// Any event represented by the [`Event`] enum. + /// Opened a substream with the given node with the given notifications protocol. /// - /// > **Note**: The [`Event`] enum contains the events that are available through the public - /// > API of the library. - Event(Event), + /// The protocol is always one of the notification protocols that have been registered. + NotificationStreamOpened { + /// Node we opened the substream with. + remote: PeerId, + /// The concerned protocol. Each protocol uses a different substream. + engine_id: ConsensusEngineId, + /// Object that permits sending notifications to the peer. + notifications_sink: NotificationsSink, + /// Role of the remote. 
+ role: ObservedRole, + }, + + /// The [`NotificationsSink`] object used to send notifications with the given peer must be + /// replaced with a new one. + /// + /// This event is typically emitted when a transport-level connection is closed and we fall + /// back to a secondary connection. + NotificationStreamReplaced { + /// Id of the peer we are connected to. + remote: PeerId, + /// The concerned protocol. Each protocol uses a different substream. + engine_id: ConsensusEngineId, + /// Replacement for the previous [`NotificationsSink`]. + notifications_sink: NotificationsSink, + }, + + /// Closed a substream with the given node. Always matches a corresponding previous + /// `NotificationStreamOpened` message. + NotificationStreamClosed { + /// Node we closed the substream with. + remote: PeerId, + /// The concerned protocol. Each protocol uses a different substream. + engine_id: ConsensusEngineId, + }, + + /// Received one or more messages from the given node using the given protocol. + NotificationsReceived { + /// Node we received the message from. + remote: PeerId, + /// Concerned protocol and associated message. + messages: Vec<(ConsensusEngineId, Bytes)>, + }, + + /// Event generated by a DHT. + Dht(DhtEvent), } impl Behaviour { @@ -165,8 +208,6 @@ impl Behaviour { /// Registers a new notifications protocol. /// - /// After that, you can call `write_notifications`. - /// /// Please call `event_stream` before registering a protocol, otherwise you may miss events /// about the protocol that you have registered. /// @@ -182,14 +223,14 @@ impl Behaviour { let handshake_message = Roles::from(&self.role).encode(); let list = self.substrate.register_notifications_protocol(engine_id, protocol_name, handshake_message); - for (remote, roles) in list { + for (remote, roles, notifications_sink) in list { let role = reported_roles_to_observed_role(&self.role, remote, roles); - let ev = Event::NotificationStreamOpened { + self.events.push_back(BehaviourOut::NotificationStreamOpened { remote: remote.clone(), engine_id, role, - }; - self.events.push_back(BehaviourOut::Event(ev)); + notifications_sink: notifications_sink.clone(), + }); } } @@ -278,26 +319,34 @@ Behaviour { CustomMessageOutcome::FinalityProofRequest { target, block_hash, request } => { self.finality_proof_requests.send_request(&target, block_hash, request); }, - CustomMessageOutcome::NotificationStreamOpened { remote, protocols, roles } => { + CustomMessageOutcome::NotificationStreamOpened { remote, protocols, roles, notifications_sink } => { let role = reported_roles_to_observed_role(&self.role, &remote, roles); for engine_id in protocols { - self.events.push_back(BehaviourOut::Event(Event::NotificationStreamOpened { + self.events.push_back(BehaviourOut::NotificationStreamOpened { remote: remote.clone(), engine_id, role: role.clone(), - })); + notifications_sink: notifications_sink.clone(), + }); } }, + CustomMessageOutcome::NotificationStreamReplaced { remote, protocols, notifications_sink } => + for engine_id in protocols { + self.events.push_back(BehaviourOut::NotificationStreamReplaced { + remote: remote.clone(), + engine_id, + notifications_sink: notifications_sink.clone(), + }); + }, CustomMessageOutcome::NotificationStreamClosed { remote, protocols } => for engine_id in protocols { - self.events.push_back(BehaviourOut::Event(Event::NotificationStreamClosed { + self.events.push_back(BehaviourOut::NotificationStreamClosed { remote: remote.clone(), engine_id, - })); + }); }, CustomMessageOutcome::NotificationsReceived { remote, 
messages } => { - let ev = Event::NotificationsReceived { remote, messages }; - self.events.push_back(BehaviourOut::Event(ev)); + self.events.push_back(BehaviourOut::NotificationsReceived { remote, messages }); }, CustomMessageOutcome::PeerNewBest(peer_id, number) => { self.light_client_handler.update_best_block(&peer_id, number); @@ -393,16 +442,16 @@ impl NetworkBehaviourEventProcess self.substrate.add_discovered_nodes(iter::once(peer_id)); } DiscoveryOut::ValueFound(results) => { - self.events.push_back(BehaviourOut::Event(Event::Dht(DhtEvent::ValueFound(results)))); + self.events.push_back(BehaviourOut::Dht(DhtEvent::ValueFound(results))); } DiscoveryOut::ValueNotFound(key) => { - self.events.push_back(BehaviourOut::Event(Event::Dht(DhtEvent::ValueNotFound(key)))); + self.events.push_back(BehaviourOut::Dht(DhtEvent::ValueNotFound(key))); } DiscoveryOut::ValuePut(key) => { - self.events.push_back(BehaviourOut::Event(Event::Dht(DhtEvent::ValuePut(key)))); + self.events.push_back(BehaviourOut::Dht(DhtEvent::ValuePut(key))); } DiscoveryOut::ValuePutFailed(key) => { - self.events.push_back(BehaviourOut::Event(Event::Dht(DhtEvent::ValuePutFailed(key)))); + self.events.push_back(BehaviourOut::Dht(DhtEvent::ValuePutFailed(key))); } DiscoveryOut::RandomKademliaStarted(protocols) => { for protocol in protocols { diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index d606a1be989..d3a729cc8d5 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -47,8 +47,8 @@ use sp_runtime::traits::{ }; use sp_arithmetic::traits::SaturatedConversion; use message::{BlockAnnounce, Message}; -use message::generic::{Message as GenericMessage, ConsensusMessage, Roles}; -use prometheus_endpoint::{Registry, Gauge, Counter, GaugeVec, HistogramVec, PrometheusError, Opts, register, U64}; +use message::generic::{Message as GenericMessage, Roles}; +use prometheus_endpoint::{Registry, Gauge, Counter, GaugeVec, PrometheusError, Opts, register, U64}; use sync::{ChainSync, SyncState}; use std::borrow::Cow; use std::collections::{BTreeMap, HashMap, HashSet, VecDeque, hash_map::Entry}; @@ -67,7 +67,7 @@ pub mod message; pub mod event; pub mod sync; -pub use generic_proto::LegacyConnectionKillError; +pub use generic_proto::{NotificationsSink, Ready, NotifsHandlerError, LegacyConnectionKillError}; const REQUEST_TIMEOUT_SEC: u64 = 40; /// Interval at which we perform time based maintenance @@ -388,7 +388,6 @@ impl Protocol { block_announce_validator: Box + Send>, metrics_registry: Option<&Registry>, boot_node_ids: Arc>, - queue_size_report: Option, ) -> error::Result<(Protocol, sc_peerset::PeersetHandle)> { let info = chain.info(); let sync = ChainSync::new( @@ -417,7 +416,6 @@ impl Protocol { versions, build_status_message(&config, &chain), peerset, - queue_size_report, ); let mut legacy_equiv_by_name = HashMap::new(); @@ -948,7 +946,12 @@ impl Protocol { } /// Called on receipt of a status message via the legacy protocol on the first connection between two peers. 
- pub fn on_peer_connected(&mut self, who: PeerId, status: message::Status) -> CustomMessageOutcome { + pub fn on_peer_connected( + &mut self, + who: PeerId, + status: message::Status, + notifications_sink: NotificationsSink, + ) -> CustomMessageOutcome { trace!(target: "sync", "New peer {} {:?}", who, status); let _protocol_version = { if self.context_data.peers.contains_key(&who) { @@ -1060,32 +1063,7 @@ impl Protocol { remote: who, protocols: self.protocol_name_by_engine.keys().cloned().collect(), roles: info.roles, - } - } - - /// Send a notification to the given peer we're connected to. - /// - /// Doesn't do anything if we don't have a notifications substream for that protocol with that - /// peer. - pub fn write_notification( - &mut self, - target: PeerId, - engine_id: ConsensusEngineId, - message: impl Into>, - ) { - if let Some(protocol_name) = self.protocol_name_by_engine.get(&engine_id) { - let message = message.into(); - let fallback = GenericMessage::<(), (), (), ()>::Consensus(ConsensusMessage { - engine_id, - data: message.clone(), - }).encode(); - self.behaviour.write_notification(&target, protocol_name.clone(), message, fallback); - } else { - error!( - target: "sub-libp2p", - "Sending a notification with a protocol that wasn't registered: {:?}", - engine_id - ); + notifications_sink, } } @@ -1099,7 +1077,7 @@ impl Protocol { engine_id: ConsensusEngineId, protocol_name: impl Into>, handshake_message: Vec, - ) -> impl ExactSizeIterator + 'a { + ) -> impl Iterator + 'a { let protocol_name = protocol_name.into(); if self.protocol_name_by_engine.insert(engine_id, protocol_name.clone()).is_some() { error!(target: "sub-libp2p", "Notifications protocol already registered: {:?}", protocol_name); @@ -1108,8 +1086,15 @@ impl Protocol { self.legacy_equiv_by_name.insert(protocol_name, Fallback::Consensus(engine_id)); } - self.context_data.peers.iter() - .map(|(peer_id, peer)| (peer_id, peer.info.roles)) + let behaviour = &self.behaviour; + self.context_data.peers.iter().filter_map(move |(peer_id, peer)| { + if let Some(notifications_sink) = behaviour.notifications_sink(peer_id) { + Some((peer_id, peer.info.roles, notifications_sink)) + } else { + log::error!("State mismatch: no notifications sink for opened peer {:?}", peer_id); + None + } + }) } /// Called when peer sends us new transactions @@ -1863,7 +1848,18 @@ pub enum CustomMessageOutcome { JustificationImport(Origin, B::Hash, NumberFor, Justification), FinalityProofImport(Origin, B::Hash, NumberFor, Vec), /// Notification protocols have been opened with a remote. - NotificationStreamOpened { remote: PeerId, protocols: Vec, roles: Roles }, + NotificationStreamOpened { + remote: PeerId, + protocols: Vec, + roles: Roles, + notifications_sink: NotificationsSink + }, + /// The [`NotificationsSink`] of some notification protocols need an update. + NotificationStreamReplaced { + remote: PeerId, + protocols: Vec, + notifications_sink: NotificationsSink, + }, /// Notification protocols have been closed with a remote. NotificationStreamClosed { remote: PeerId, protocols: Vec }, /// Messages have been received on one or more notifications protocols. @@ -2028,9 +2024,10 @@ impl NetworkBehaviour for Protocol { }; let outcome = match event { - GenericProtoOut::CustomProtocolOpen { peer_id, received_handshake, .. } => { + GenericProtoOut::CustomProtocolOpen { peer_id, received_handshake, notifications_sink, .. 
} => { match as Decode>::decode(&mut &received_handshake[..]) { - Ok(GenericMessage::Status(handshake)) => self.on_peer_connected(peer_id, handshake), + Ok(GenericMessage::Status(handshake)) => + self.on_peer_connected(peer_id, handshake, notifications_sink), Ok(msg) => { debug!( target: "sync", @@ -2054,6 +2051,13 @@ impl NetworkBehaviour for Protocol { } } } + GenericProtoOut::CustomProtocolReplaced { peer_id, notifications_sink, .. } => { + CustomMessageOutcome::NotificationStreamReplaced { + remote: peer_id, + protocols: self.protocol_name_by_engine.keys().cloned().collect(), + notifications_sink, + } + }, GenericProtoOut::CustomProtocolClosed { peer_id, .. } => { self.on_peer_disconnected(peer_id) }, diff --git a/client/network/src/protocol/generic_proto.rs b/client/network/src/protocol/generic_proto.rs index cf8434d8bce..3133471b0d2 100644 --- a/client/network/src/protocol/generic_proto.rs +++ b/client/network/src/protocol/generic_proto.rs @@ -21,7 +21,7 @@ //! network, then performs the Substrate protocol handling on top. pub use self::behaviour::{GenericProto, GenericProtoOut}; -pub use self::handler::LegacyConnectionKillError; +pub use self::handler::{NotifsHandlerError, NotificationsSink, Ready, LegacyConnectionKillError}; mod behaviour; mod handler; diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index 215eb739338..f965980640a 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -15,8 +15,10 @@ // along with Substrate. If not, see . use crate::config::ProtocolId; -use crate::protocol::generic_proto::handler::{NotifsHandlerProto, NotifsHandlerOut, NotifsHandlerIn}; -use crate::protocol::generic_proto::upgrade::RegisteredProtocol; +use crate::protocol::generic_proto::{ + handler::{NotificationsSink, NotifsHandlerProto, NotifsHandlerOut, NotifsHandlerIn}, + upgrade::RegisteredProtocol +}; use bytes::BytesMut; use fnv::FnvHashMap; @@ -31,7 +33,6 @@ use libp2p::swarm::{ }; use log::{debug, error, trace, warn}; use parking_lot::RwLock; -use prometheus_endpoint::HistogramVec; use rand::distributions::{Distribution as _, Uniform}; use smallvec::SmallVec; use std::task::{Context, Poll}; @@ -149,9 +150,6 @@ pub struct GenericProto { /// Events to produce from `poll()`. events: VecDeque>, - - /// If `Some`, report the message queue sizes on this `Histogram`. - queue_size_report: Option, } /// Identifier for a delay firing. @@ -189,7 +187,7 @@ enum PeerState { /// We may still have ongoing traffic with that peer, but it should cease shortly. Disabled { /// The connections that are currently open for custom protocol traffic. - open: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, + open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>, /// If `Some`, any dial attempts to this peer are delayed until the given `Instant`. banned_until: Option, }, @@ -199,7 +197,7 @@ enum PeerState { /// but should get disconnected in a few seconds. DisabledPendingEnable { /// The connections that are currently open for custom protocol traffic. - open: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, + open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>, /// When to enable this remote. References an entry in `delays`. timer: DelayId, /// When the `timer` will trigger. @@ -210,7 +208,7 @@ enum PeerState { /// enabled state. 
Enabled { /// The connections that are currently open for custom protocol traffic. - open: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, + open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>, }, /// We received an incoming connection from this peer and forwarded that @@ -227,15 +225,15 @@ impl PeerState { self.get_open().is_some() } - /// Returns the connection ID of the first established connection + /// Returns the [`NotificationsSink`] of the first established connection /// that is open for custom protocol traffic. - fn get_open(&self) -> Option { + fn get_open(&self) -> Option<&NotificationsSink> { match self { PeerState::Disabled { open, .. } | PeerState::DisabledPendingEnable { open, .. } | PeerState::Enabled { open, .. } => if !open.is_empty() { - Some(open[0]) + Some(&open[0].1) } else { None } @@ -284,9 +282,24 @@ pub enum GenericProtoOut { /// Handshake that was sent to us. /// This is normally a "Status" message, but this is out of the concern of this code. received_handshake: Vec, + /// Object that permits sending notifications to the peer. + notifications_sink: NotificationsSink, + }, + + /// The [`NotificationsSink`] object used to send notifications with the given peer must be + /// replaced with a new one. + /// + /// This event is typically emitted when a transport-level connection is closed and we fall + /// back to a secondary connection. + CustomProtocolReplaced { + /// Id of the peer we are connected to. + peer_id: PeerId, + /// Replacement for the previous [`NotificationsSink`]. + notifications_sink: NotificationsSink, }, - /// Closed a custom protocol with the remote. + /// Closed a custom protocol with the remote. The existing [`NotificationsSink`] should + /// be dropped. CustomProtocolClosed { /// Id of the peer we were connected to. peer_id: PeerId, @@ -317,16 +330,12 @@ pub enum GenericProtoOut { impl GenericProto { /// Creates a `CustomProtos`. - /// - /// The `queue_size_report` is an optional Prometheus metric that can report the size of the - /// messages queue. If passed, it must have one label for the protocol name. pub fn new( local_peer_id: PeerId, protocol: impl Into, versions: &[u8], handshake_message: Vec, peerset: sc_peerset::Peerset, - queue_size_report: Option, ) -> Self { let legacy_handshake_message = Arc::new(RwLock::new(handshake_message)); let legacy_protocol = RegisteredProtocol::new(protocol, versions, legacy_handshake_message); @@ -342,7 +351,6 @@ impl GenericProto { incoming: SmallVec::new(), next_incoming_index: sc_peerset::IncomingIndex(0), events: VecDeque::new(), - queue_size_report, } } @@ -394,6 +402,15 @@ impl GenericProto { self.peers.get(peer_id).map(|p| p.is_open()).unwrap_or(false) } + /// Returns the [`NotificationsSink`] that sends notifications to the given peer, or `None` + /// if the custom protocols aren't opened with this peer. + /// + /// If [`GenericProto::is_open`] returns `true` for this `PeerId`, then this method is + /// guaranteed to return `Some`. + pub fn notifications_sink(&self, peer_id: &PeerId) -> Option<&NotificationsSink> { + self.peers.get(peer_id).and_then(|p| p.get_open()) + } + /// Disconnects the given peer if we are connected to it. 
pub fn disconnect_peer(&mut self, peer_id: &PeerId) { debug!(target: "sub-libp2p", "External API => Disconnect {:?}", peer_id); @@ -538,14 +555,14 @@ impl GenericProto { message: impl Into>, encoded_fallback_message: Vec, ) { - let conn = match self.peers.get(target).and_then(|p| p.get_open()) { + let notifs_sink = match self.peers.get(target).and_then(|p| p.get_open()) { None => { debug!(target: "sub-libp2p", "Tried to sent notification to {:?} without an open channel.", target); return }, - Some(conn) => conn + Some(sink) => sink }; trace!( @@ -555,16 +572,11 @@ impl GenericProto { str::from_utf8(&protocol_name) ); trace!(target: "sub-libp2p", "Handler({:?}) <= Packet", target); - - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: target.clone(), - handler: NotifyHandler::One(conn), - event: NotifsHandlerIn::SendNotification { - message: message.into(), - encoded_fallback_message, - protocol_name, - }, - }); + notifs_sink.send_sync_notification( + &protocol_name, + encoded_fallback_message, + message + ); } /// Sends a message to a peer. @@ -574,25 +586,19 @@ impl GenericProto { /// Also note that even we have a valid open substream, it may in fact be already closed /// without us knowing, in which case the packet will not be received. pub fn send_packet(&mut self, target: &PeerId, message: Vec) { - let conn = match self.peers.get(target).and_then(|p| p.get_open()) { + let notifs_sink = match self.peers.get(target).and_then(|p| p.get_open()) { None => { debug!(target: "sub-libp2p", "Tried to sent packet to {:?} without an open channel.", target); return } - Some(conn) => conn + Some(sink) => sink }; trace!(target: "sub-libp2p", "External API => Packet for {:?}", target); trace!(target: "sub-libp2p", "Handler({:?}) <= Packet", target); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: target.clone(), - handler: NotifyHandler::One(conn), - event: NotifsHandlerIn::SendLegacy { - message, - } - }); + notifs_sink.send_legacy(message); } /// Returns the state of the peerset manager, for debugging purposes. @@ -873,7 +879,6 @@ impl NetworkBehaviour for GenericProto { NotifsHandlerProto::new( self.legacy_protocol.clone(), self.notif_protocols.clone(), - self.queue_size_report.clone() ) } @@ -985,15 +990,26 @@ impl NetworkBehaviour for GenericProto { // i.e. there is no connection that is open for custom protocols, // in which case `CustomProtocolClosed` was already emitted. 
let closed = open.is_empty(); - open.retain(|c| c != conn); - if open.is_empty() && !closed { - debug!(target: "sub-libp2p", "External API <= Closed({})", peer_id); - let event = GenericProtoOut::CustomProtocolClosed { - peer_id: peer_id.clone(), - reason: "Disconnected by libp2p".into(), - }; + let sink_closed = open.get(0).map_or(false, |(c, _)| c == conn); + open.retain(|(c, _)| c != conn); + if !closed { + if let Some((_, sink)) = open.get(0) { + if sink_closed { + let event = GenericProtoOut::CustomProtocolReplaced { + peer_id: peer_id.clone(), + notifications_sink: sink.clone(), + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + } else { + debug!(target: "sub-libp2p", "External API <= Closed({})", peer_id); + let event = GenericProtoOut::CustomProtocolClosed { + peer_id: peer_id.clone(), + reason: "Disconnected by libp2p".into(), + }; - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } } } _ => {} @@ -1140,9 +1156,11 @@ impl NetworkBehaviour for GenericProto { return }; - let last = match mem::replace(entry.get_mut(), PeerState::Poisoned) { + let (last, new_notifications_sink) = match mem::replace(entry.get_mut(), PeerState::Poisoned) { PeerState::Enabled { mut open } => { - if let Some(pos) = open.iter().position(|c| c == &connection) { + let pos = open.iter().position(|(c, _)| c == &connection); + let sink_closed = pos == Some(0); + if let Some(pos) = pos { open.remove(pos); } else { debug_assert!(false); @@ -1167,16 +1185,24 @@ impl NetworkBehaviour for GenericProto { }); let last = open.is_empty(); + let new_notifications_sink = open.iter().next().and_then(|(_, sink)| + if sink_closed { + Some(sink.clone()) + } else { + None + }); *entry.into_mut() = PeerState::Disabled { open, banned_until: None }; - last + (last, new_notifications_sink) }, PeerState::Disabled { mut open, banned_until } => { - if let Some(pos) = open.iter().position(|c| c == &connection) { + let pos = open.iter().position(|(c, _)| c == &connection); + let sink_closed = pos == Some(0); + if let Some(pos) = pos { open.remove(pos); } else { debug_assert!(false); @@ -1188,18 +1214,28 @@ impl NetworkBehaviour for GenericProto { } let last = open.is_empty(); + let new_notifications_sink = open.iter().next().and_then(|(_, sink)| + if sink_closed { + Some(sink.clone()) + } else { + None + }); + *entry.into_mut() = PeerState::Disabled { open, banned_until }; - last + + (last, new_notifications_sink) }, PeerState::DisabledPendingEnable { mut open, timer, timer_deadline } => { - if let Some(pos) = open.iter().position(|c| c == &connection) { + let pos = open.iter().position(|(c, _)| c == &connection); + let sink_closed = pos == Some(0); + if let Some(pos) = pos { open.remove(pos); } else { debug_assert!(false); @@ -1211,12 +1247,20 @@ impl NetworkBehaviour for GenericProto { } let last = open.is_empty(); + let new_notifications_sink = open.iter().next().and_then(|(_, sink)| + if sink_closed { + Some(sink.clone()) + } else { + None + }); + *entry.into_mut() = PeerState::DisabledPendingEnable { open, timer, timer_deadline }; - last + + (last, new_notifications_sink) }, state => { error!(target: "sub-libp2p", @@ -1233,12 +1277,20 @@ impl NetworkBehaviour for GenericProto { peer_id: source, }; self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } else { + if let Some(new_notifications_sink) = new_notifications_sink { + let event = GenericProtoOut::CustomProtocolReplaced { + peer_id: 
source, + notifications_sink: new_notifications_sink, + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } debug!(target: "sub-libp2p", "Secondary connection closed custom protocol."); } } - NotifsHandlerOut::Open { endpoint, received_handshake } => { + NotifsHandlerOut::Open { endpoint, received_handshake, notifications_sink } => { debug!(target: "sub-libp2p", "Handler({:?}) => Endpoint {:?} open for custom protocols.", source, endpoint); @@ -1248,8 +1300,8 @@ impl NetworkBehaviour for GenericProto { Some(PeerState::DisabledPendingEnable { ref mut open, .. }) | Some(PeerState::Disabled { ref mut open, .. }) => { let first = open.is_empty(); - if !open.iter().any(|c| *c == connection) { - open.push(connection); + if !open.iter().any(|(c, _)| *c == connection) { + open.push((connection, notifications_sink.clone())); } else { error!( target: "sub-libp2p", @@ -1269,7 +1321,11 @@ impl NetworkBehaviour for GenericProto { if first { debug!(target: "sub-libp2p", "External API <= Open({:?})", source); - let event = GenericProtoOut::CustomProtocolOpen { peer_id: source, received_handshake }; + let event = GenericProtoOut::CustomProtocolOpen { + peer_id: source, + received_handshake, + notifications_sink + }; self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); } else { diff --git a/client/network/src/protocol/generic_proto/handler.rs b/client/network/src/protocol/generic_proto/handler.rs index 3b4469a8725..5845130a7db 100644 --- a/client/network/src/protocol/generic_proto/handler.rs +++ b/client/network/src/protocol/generic_proto/handler.rs @@ -15,7 +15,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -pub use self::group::{NotifsHandlerProto, NotifsHandler, NotifsHandlerIn, NotifsHandlerOut}; + +pub use self::group::{ + NotificationsSink, NotifsHandlerError, Ready, NotifsHandlerProto, NotifsHandler, NotifsHandlerIn, NotifsHandlerOut +}; pub use self::legacy::ConnectionKillError as LegacyConnectionKillError; mod group; diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs index 3403f7dd823..2826f7a19c8 100644 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -63,11 +63,21 @@ use libp2p::swarm::{ SubstreamProtocol, NegotiatedSubstream, }; +use futures::{ + channel::mpsc, + lock::{Mutex as FuturesMutex, MutexGuard as FuturesMutexGuard}, + prelude::* +}; use log::{debug, error}; -use parking_lot::RwLock; -use prometheus_endpoint::HistogramVec; +use parking_lot::{Mutex, RwLock}; use std::{borrow::Cow, error, io, str, sync::Arc, task::{Context, Poll}}; +/// Number of pending notifications in asynchronous contexts. +/// See [`NotificationsSink::reserve_notification`] for context. +const ASYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 8; +/// Number of pending notifications in synchronous contexts. +const SYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 2048; + /// Implements the `IntoProtocolsHandler` trait of libp2p. /// /// Every time a connection with a remote starts, an instance of this struct is created and @@ -107,6 +117,18 @@ pub struct NotifsHandler { /// we push the corresponding index here and process them when the handler /// gets enabled/disabled. pending_in: Vec, + + /// If `Some`, contains the two `Receiver`s connected to the [`NotificationsSink`] that has + /// been sent out. 
The notifications to send out can be pulled from this receivers. + /// We use two different channels in order to have two different channel sizes, but from the + /// receiving point of view, the two channels are the same. + /// The receivers are fused in case the user drops the [`NotificationsSink`] entirely. + notifications_sink_rx: Option< + stream::Select< + stream::Fuse>, + stream::Fuse> + > + >, } #[derive(Debug, Clone, PartialEq, Eq)] @@ -140,6 +162,7 @@ impl IntoProtocolsHandler for NotifsHandlerProto { legacy: self.legacy.into_handler(remote_peer_id, connected_point), enabled: EnabledState::Initial, pending_in: Vec::new(), + notifications_sink_rx: None, } } } @@ -152,32 +175,6 @@ pub enum NotifsHandlerIn { /// The node should stop using custom protocols. Disable, - - /// Sends a message through the custom protocol substream. - /// - /// > **Note**: This must **not** be a `ConsensusMessage`, `Transactions`, or - /// > `BlockAnnounce` message. - SendLegacy { - /// The message to send. - message: Vec, - }, - - /// Sends a notifications message. - SendNotification { - /// Name of the protocol for the message. - /// - /// Must match one of the registered protocols. For backwards-compatibility reasons, if - /// the remote doesn't support this protocol, we use the legacy substream. - protocol_name: Cow<'static, [u8]>, - - /// Message to send on the legacy substream if the protocol isn't available. - /// - /// This corresponds to what you would have sent with `SendLegacy`. - encoded_fallback_message: Vec, - - /// The message to send. - message: Vec, - }, } /// Event that can be emitted by a `NotifsHandler`. @@ -190,6 +187,8 @@ pub enum NotifsHandlerOut { /// Handshake that was sent to us. /// This is normally a "Status" message, but this out of the concern of this code. received_handshake: Vec, + /// How notifications can be sent to this node. + notifications_sink: NotificationsSink, }, /// The connection is closed for custom protocols. @@ -227,19 +226,160 @@ pub enum NotifsHandlerOut { }, } +/// Sink connected directly to the node background task. Allows sending notifications to the peer. +/// +/// Can be cloned in order to obtain multiple references to the same peer. +#[derive(Debug, Clone)] +pub struct NotificationsSink { + inner: Arc, +} + +#[derive(Debug)] +struct NotificationsSinkInner { + /// Sender to use in asynchronous contexts. Uses an asynchronous mutex. + async_channel: FuturesMutex>, + /// Sender to use in synchronous contexts. Uses a synchronous mutex. + /// This channel has a large capacity and is meant to be used in contexts where + /// back-pressure cannot be properly exerted. + /// It will be removed in a future version. + sync_channel: Mutex>, +} + +/// Message emitted through the [`NotificationsSink`] and processed by the background task +/// dedicated to the peer. +#[derive(Debug)] +enum NotificationsSinkMessage { + /// Message emitted by [`NotificationsSink::send_legacy`]. + Legacy { + message: Vec, + }, + + /// Message emitted by [`NotificationsSink::reserve_notification`] and + /// [`NotificationsSink::write_notification_now`]. + Notification { + protocol_name: Vec, + encoded_fallback_message: Vec, + message: Vec, + }, + + /// Must close the connection. + ForceClose, +} + +impl NotificationsSink { + /// Sends a message to the peer using the legacy substream. + /// + /// If too many messages are already buffered, the message is silently discarded and the + /// connection to the peer will be closed shortly after. 
+ /// + /// This method will be removed in a future version. + pub fn send_legacy<'a>(&'a self, message: impl Into>) { + let mut lock = self.inner.sync_channel.lock(); + let result = lock.try_send(NotificationsSinkMessage::Legacy { + message: message.into() + }); + + if result.is_err() { + // Cloning the `mpsc::Sender` guarantees the allocation of an extra spot in the + // buffer, and therefore that `try_send` will succeed. + let _result2 = lock.clone().try_send(NotificationsSinkMessage::ForceClose); + debug_assert!(_result2.map(|()| true).unwrap_or_else(|err| err.is_disconnected())); + } + } + + /// Sends a notification to the peer. + /// + /// If too many messages are already buffered, the notification is silently discarded and the + /// connection to the peer will be closed shortly after. + /// + /// The protocol name is expected to be checked ahead of calling this method. It is a logic + /// error to send a notification using an unknown protocol. + /// + /// This method will be removed in a future version. + pub fn send_sync_notification<'a>( + &'a self, + protocol_name: &[u8], + encoded_fallback_message: impl Into>, + message: impl Into> + ) { + let mut lock = self.inner.sync_channel.lock(); + let result = lock.try_send(NotificationsSinkMessage::Notification { + protocol_name: protocol_name.to_owned(), + encoded_fallback_message: encoded_fallback_message.into(), + message: message.into() + }); + + if result.is_err() { + // Cloning the `mpsc::Sender` guarantees the allocation of an extra spot in the + // buffer, and therefore that `try_send` will succeed. + let _result2 = lock.clone().try_send(NotificationsSinkMessage::ForceClose); + debug_assert!(_result2.map(|()| true).unwrap_or_else(|err| err.is_disconnected())); + } + } + + /// Wait until the remote is ready to accept a notification. + /// + /// Returns an error in the case where the connection is closed. + /// + /// The protocol name is expected to be checked ahead of calling this method. It is a logic + /// error to send a notification using an unknown protocol. + pub async fn reserve_notification<'a>(&'a self, protocol_name: &[u8]) -> Result, ()> { + let mut lock = self.inner.async_channel.lock().await; + + let poll_ready = future::poll_fn(|cx| lock.poll_ready(cx)).await; + if poll_ready.is_ok() { + Ok(Ready { protocol_name: protocol_name.to_owned(), lock }) + } else { + Err(()) + } + } +} + +/// Notification slot is reserved and the notification can actually be sent. +#[must_use] +#[derive(Debug)] +pub struct Ready<'a> { + /// Guarded channel. The channel inside is guaranteed to not be full. + lock: FuturesMutexGuard<'a, mpsc::Sender>, + /// Name of the protocol. Should match one of the protocols passed at initialization. + protocol_name: Vec, +} + +impl<'a> Ready<'a> { + /// Consumes this slots reservation and actually queues the notification. + /// + /// Returns an error if the substream has been closed. + pub fn send( + mut self, + encoded_fallback_message: impl Into>, + notification: impl Into> + ) -> Result<(), ()> { + self.lock.start_send(NotificationsSinkMessage::Notification { + protocol_name: self.protocol_name, + encoded_fallback_message: encoded_fallback_message.into(), + message: notification.into(), + }).map_err(|_| ()) + } +} + +/// Error specific to the collection of protocols. +#[derive(Debug, derive_more::Display, derive_more::Error)] +pub enum NotifsHandlerError { + /// Channel of synchronous notifications is full. + SyncNotificationsClogged, + /// Error in legacy protocol. 
+ Legacy(::Error), +} + impl NotifsHandlerProto { /// Builds a new handler. /// /// `list` is a list of notification protocols names, and the message to send as part of the /// handshake. At the moment, the message is always the same whether we open a substream /// ourselves or respond to handshake from the remote. - /// - /// The `queue_size_report` is an optional Prometheus metric that can report the size of the - /// messages queue. If passed, it must have one label for the protocol name. pub fn new( legacy: RegisteredProtocol, list: impl Into, Arc>>)>>, - queue_size_report: Option ) -> Self { let list = list.into(); @@ -247,16 +387,7 @@ impl NotifsHandlerProto { .clone() .into_iter() .map(|(proto_name, initial_message)| { - let queue_size_report = queue_size_report.as_ref().and_then(|qs| { - if let Ok(utf8) = str::from_utf8(&proto_name) { - Some(qs.with_label_values(&[utf8])) - } else { - log::warn!("Ignoring Prometheus metric because {:?} isn't UTF-8", proto_name); - None - } - }); - - (NotifsOutHandlerProto::new(proto_name, queue_size_report), initial_message) + (NotifsOutHandlerProto::new(proto_name), initial_message) }).collect(); let in_handlers = list.clone() @@ -275,13 +406,7 @@ impl NotifsHandlerProto { impl ProtocolsHandler for NotifsHandler { type InEvent = NotifsHandlerIn; type OutEvent = NotifsHandlerOut; - type Error = EitherError< - EitherError< - ::Error, - ::Error, - >, - ::Error, - >; + type Error = NotifsHandlerError; type InboundProtocol = SelectUpgrade, RegisteredProtocol>; type OutboundProtocol = EitherUpgrade; // Index within the `out_handlers`; None for legacy @@ -363,24 +488,6 @@ impl ProtocolsHandler for NotifsHandler { self.in_handlers[num].0.inject_event(NotifsInHandlerIn::Refuse); } }, - NotifsHandlerIn::SendLegacy { message } => - self.legacy.inject_event(LegacyProtoHandlerIn::SendCustomMessage { message }), - NotifsHandlerIn::SendNotification { message, encoded_fallback_message, protocol_name } => { - for (handler, _) in &mut self.out_handlers { - if handler.protocol_name() != &protocol_name[..] { - continue; - } - - if handler.is_open() { - handler.inject_event(NotifsOutHandlerIn::Send(message)); - return; - } - } - - self.legacy.inject_event(LegacyProtoHandlerIn::SendCustomMessage { - message: encoded_fallback_message, - }); - }, } } @@ -461,6 +568,60 @@ impl ProtocolsHandler for NotifsHandler { ) -> Poll< ProtocolsHandlerEvent > { + if let Some(notifications_sink_rx) = &mut self.notifications_sink_rx { + 'poll_notifs_sink: loop { + // Before we poll the notifications sink receiver, check that all the notification + // channels are ready to send a message. 
+ // TODO: it is planned that in the future we switch to one `NotificationsSink` per + // protocol, in which case each sink should wait only for its corresponding handler + // to be ready, and not all handlers + // see https://github.com/paritytech/substrate/issues/5670 + for (out_handler, _) in &mut self.out_handlers { + match out_handler.poll_ready(cx) { + Poll::Ready(_) => {}, + Poll::Pending => break 'poll_notifs_sink, + } + } + + let message = match notifications_sink_rx.poll_next_unpin(cx) { + Poll::Ready(Some(msg)) => msg, + Poll::Ready(None) | Poll::Pending => break, + }; + + match message { + NotificationsSinkMessage::Legacy { message } => { + self.legacy.inject_event(LegacyProtoHandlerIn::SendCustomMessage { + message + }); + } + NotificationsSinkMessage::Notification { + protocol_name, + encoded_fallback_message, + message + } => { + for (handler, _) in &mut self.out_handlers { + if handler.protocol_name() != &protocol_name[..] { + continue; + } + + if handler.is_open() { + handler.send_or_discard(message); + } + + continue 'poll_notifs_sink; + } + + self.legacy.inject_event(LegacyProtoHandlerIn::SendCustomMessage { + message: encoded_fallback_message, + }); + } + NotificationsSinkMessage::ForceClose => { + return Poll::Ready(ProtocolsHandlerEvent::Close(NotifsHandlerError::SyncNotificationsClogged)); + } + } + } + } + if let Poll::Ready(ev) = self.legacy.poll(cx) { return match ev { ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: () } => @@ -468,14 +629,37 @@ impl ProtocolsHandler for NotifsHandler { protocol: protocol.map_upgrade(EitherUpgrade::B), info: None, }), - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolOpen { endpoint, received_handshake, .. }) => + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolOpen { + endpoint, + received_handshake, + .. + }) => { + let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE); + let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); + let notifications_sink = NotificationsSink { + inner: Arc::new(NotificationsSinkInner { + async_channel: FuturesMutex::new(async_tx), + sync_channel: Mutex::new(sync_tx), + }), + }; + + debug_assert!(self.notifications_sink_rx.is_none()); + self.notifications_sink_rx = Some(stream::select(async_rx.fuse(), sync_rx.fuse())); + Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::Open { endpoint, received_handshake } - )), - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolClosed { endpoint, reason }) => + NotifsHandlerOut::Open { endpoint, received_handshake, notifications_sink } + )) + }, + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolClosed { endpoint, reason }) => { + // We consciously drop the receivers despite notifications being potentially + // still buffered up. 
+ debug_assert!(self.notifications_sink_rx.is_some()); + self.notifications_sink_rx = None; + Poll::Ready(ProtocolsHandlerEvent::Custom( NotifsHandlerOut::Closed { endpoint, reason } - )), + )) + }, ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomMessage { message }) => Poll::Ready(ProtocolsHandlerEvent::Custom( NotifsHandlerOut::CustomMessage { message } @@ -485,7 +669,7 @@ impl ProtocolsHandler for NotifsHandler { NotifsHandlerOut::ProtocolError { is_severe, error } )), ProtocolsHandlerEvent::Close(err) => - Poll::Ready(ProtocolsHandlerEvent::Close(EitherError::B(err))), + Poll::Ready(ProtocolsHandlerEvent::Close(NotifsHandlerError::Legacy(err))), } } diff --git a/client/network/src/protocol/generic_proto/handler/notif_out.rs b/client/network/src/protocol/generic_proto/handler/notif_out.rs index 6b97ad67e34..14de382c1bb 100644 --- a/client/network/src/protocol/generic_proto/handler/notif_out.rs +++ b/client/network/src/protocol/generic_proto/handler/notif_out.rs @@ -34,8 +34,10 @@ use libp2p::swarm::{ NegotiatedSubstream, }; use log::{debug, warn, error}; -use prometheus_endpoint::Histogram; -use std::{borrow::Cow, collections::VecDeque, fmt, mem, pin::Pin, task::{Context, Poll}, time::Duration}; +use std::{ + borrow::Cow, collections::VecDeque, fmt, mem, pin::Pin, task::{Context, Poll, Waker}, + time::Duration +}; use wasm_timer::Instant; /// Maximum duration to open a substream and receive the handshake message. After that, we @@ -56,17 +58,14 @@ const INITIAL_KEEPALIVE_TIME: Duration = Duration::from_secs(5); pub struct NotifsOutHandlerProto { /// Name of the protocol to negotiate. protocol_name: Cow<'static, [u8]>, - /// Optional Prometheus histogram to report message queue size variations. - queue_size_report: Option, } impl NotifsOutHandlerProto { /// Builds a new [`NotifsOutHandlerProto`]. Will use the given protocol name for the /// notifications substream. - pub fn new(protocol_name: impl Into>, queue_size_report: Option) -> Self { + pub fn new(protocol_name: impl Into>) -> Self { NotifsOutHandlerProto { protocol_name: protocol_name.into(), - queue_size_report, } } } @@ -78,14 +77,12 @@ impl IntoProtocolsHandler for NotifsOutHandlerProto { DeniedUpgrade } - fn into_handler(self, peer_id: &PeerId, _: &ConnectedPoint) -> Self::Handler { + fn into_handler(self, _: &PeerId, _: &ConnectedPoint) -> Self::Handler { NotifsOutHandler { protocol_name: self.protocol_name, when_connection_open: Instant::now(), - queue_size_report: self.queue_size_report, state: State::Disabled, events_queue: VecDeque::new(), - peer_id: peer_id.clone(), } } } @@ -108,17 +105,11 @@ pub struct NotifsOutHandler { /// When the connection with the remote has been successfully established. when_connection_open: Instant, - /// Optional prometheus histogram to report message queue sizes variations. - queue_size_report: Option, - /// Queue of events to send to the outside. /// /// This queue must only ever be modified to insert elements at the back, or remove the first /// element. events_queue: VecDeque>, - - /// Who we are connected to. - peer_id: PeerId, } /// Our relationship with the node we're connected to. @@ -153,6 +144,11 @@ enum State { Open { /// Substream that is currently open. substream: NotificationsOutSubstream, + /// Waker for the last task that got `Poll::Pending` from `poll_ready`, to notify + /// when the open substream closes due to being disabled or encountering an + /// error, i.e. 
to notify the task as soon as the substream becomes unavailable, + /// without waiting for an underlying I/O task wakeup. + close_waker: Option, /// The initial message that we sent. Necessary if we need to re-open a substream. initial_message: Vec, }, @@ -173,11 +169,6 @@ pub enum NotifsOutHandlerIn { /// Disables the notifications substream for this node. This is the default state. Disable, - - /// Sends a message on the notifications substream. Ignored if the substream isn't open. - /// - /// It is only valid to send this if the notifications substream has been enabled. - Send(Vec), } /// Event that can be emitted by a `NotifsOutHandler`. @@ -216,6 +207,41 @@ impl NotifsOutHandler { pub fn protocol_name(&self) -> &[u8] { &self.protocol_name } + + /// Polls whether the outbound substream is ready to send a notification. + /// + /// - Returns `Poll::Pending` if the substream is open but not ready to send a notification. + /// - Returns `Poll::Ready(true)` if the substream is ready to send a notification. + /// - Returns `Poll::Ready(false)` if the substream is closed. + /// + pub fn poll_ready(&mut self, cx: &mut Context) -> Poll { + if let State::Open { substream, close_waker, .. } = &mut self.state { + match substream.poll_ready_unpin(cx) { + Poll::Ready(Ok(())) => Poll::Ready(true), + Poll::Ready(Err(_)) => Poll::Ready(false), + Poll::Pending => { + *close_waker = Some(cx.waker().clone()); + Poll::Pending + } + } + } else { + Poll::Ready(false) + } + } + + /// Sends out a notification. + /// + /// If the substream is closed, or not ready to send out a notification yet, then the + /// notification is silently discarded. + /// + /// You are encouraged to call [`NotifsOutHandler::poll_ready`] beforehand to determine + /// whether this will succeed. If `Poll::Ready(true)` is returned, then this method will send + /// out a notification. + pub fn send_or_discard(&mut self, notification: Vec) { + if let State::Open { substream, .. } = &mut self.state { + let _ = substream.start_send_unpin(notification); + } + } } impl ProtocolsHandler for NotifsOutHandler { @@ -247,7 +273,7 @@ impl ProtocolsHandler for NotifsOutHandler { State::Opening { initial_message } => { let ev = NotifsOutHandlerOut::Open { handshake: handshake_msg }; self.events_queue.push_back(ProtocolsHandlerEvent::Custom(ev)); - self.state = State::Open { substream, initial_message }; + self.state = State::Open { substream, initial_message, close_waker: None }; }, // If the handler was disabled while we were negotiating the protocol, immediately // close it. @@ -310,31 +336,15 @@ impl ProtocolsHandler for NotifsOutHandler { } State::Opening { .. } => self.state = State::DisabledOpening, State::Refused => self.state = State::Disabled, - State::Open { substream, .. } => self.state = State::DisabledOpen(substream), + State::Open { substream, close_waker, .. } => { + if let Some(close_waker) = close_waker { + close_waker.wake(); + } + self.state = State::DisabledOpen(substream) + }, State::Poisoned => error!("☎️ Notifications handler in a poisoned state"), } } - - NotifsOutHandlerIn::Send(msg) => - if let State::Open { substream, .. } = &mut self.state { - if substream.push_message(msg).is_err() { - warn!( - target: "sub-libp2p", - "📞 Notifications queue with peer {} is full, dropped message (protocol: {:?})", - self.peer_id, - self.protocol_name, - ); - } - if let Some(metric) = &self.queue_size_report { - metric.observe(substream.queue_len() as f64); - } - } else { - // This is an API misuse. 
- warn!( - target: "sub-libp2p", - "📞 Tried to send a notification on a disabled handler" - ); - }, } } @@ -375,10 +385,14 @@ impl ProtocolsHandler for NotifsOutHandler { } match &mut self.state { - State::Open { substream, initial_message } => + State::Open { substream, initial_message, close_waker } => match Sink::poll_flush(Pin::new(substream), cx) { Poll::Pending | Poll::Ready(Ok(())) => {}, Poll::Ready(Err(_)) => { + if let Some(close_waker) = close_waker.take() { + close_waker.wake(); + } + // We try to re-open a substream. let initial_message = mem::replace(initial_message, Vec::new()); self.state = State::Opening { initial_message: initial_message.clone() }; diff --git a/client/network/src/protocol/generic_proto/tests.rs b/client/network/src/protocol/generic_proto/tests.rs index f932a3a0891..cf9f72b89ba 100644 --- a/client/network/src/protocol/generic_proto/tests.rs +++ b/client/network/src/protocol/generic_proto/tests.rs @@ -83,7 +83,7 @@ fn build_nodes() -> (Swarm, Swarm) { }); let behaviour = CustomProtoWithAddr { - inner: GenericProto::new(local_peer_id, &b"test"[..], &[1], vec![], peerset, None), + inner: GenericProto::new(local_peer_id, &b"test"[..], &[1], vec![], peerset), addrs: addrs .iter() .enumerate() @@ -221,9 +221,10 @@ fn two_nodes_transfer_lots_of_packets() { // We spawn two nodes, then make the first one send lots of packets to the second one. The test // ends when the second one has received all of them. - // Note that if we go too high, we will reach the limit to the number of simultaneous - // substreams allowed by the multiplexer. - const NUM_PACKETS: u32 = 5000; + // This test consists in transferring this given number of packets. Considering that (by + // design) the connection gets closed if one of the remotes can't follow the pace, this number + // should not exceed the size of the buffer of pending notifications. + const NUM_PACKETS: u32 = 512; let (mut service1, mut service2) = build_nodes(); diff --git a/client/network/src/protocol/generic_proto/upgrade/legacy.rs b/client/network/src/protocol/generic_proto/upgrade/legacy.rs index ce2d1934c0f..dd02d7e2664 100644 --- a/client/network/src/protocol/generic_proto/upgrade/legacy.rs +++ b/client/network/src/protocol/generic_proto/upgrade/legacy.rs @@ -174,7 +174,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin { } // Indicating that the remote is clogged if that's the case. - if self.send_queue.len() >= 2048 { + if self.send_queue.len() >= 1536 { if !self.clogged_fuse { // Note: this fuse is important not just for preventing us from flooding the logs; // if you remove the fuse, then we will always return early from this function and diff --git a/client/network/src/protocol/generic_proto/upgrade/notifications.rs b/client/network/src/protocol/generic_proto/upgrade/notifications.rs index efcd0a4c8fb..f1f41d5bccf 100644 --- a/client/network/src/protocol/generic_proto/upgrade/notifications.rs +++ b/client/network/src/protocol/generic_proto/upgrade/notifications.rs @@ -34,17 +34,15 @@ /// use bytes::BytesMut; -use futures::{prelude::*, ready}; +use futures::prelude::*; use futures_codec::Framed; use libp2p::core::{UpgradeInfo, InboundUpgrade, OutboundUpgrade, upgrade}; use log::error; -use std::{borrow::Cow, collections::VecDeque, convert::TryFrom as _, io, iter, mem, pin::Pin, task::{Context, Poll}}; +use std::{borrow::Cow, io, iter, mem, pin::Pin, task::{Context, Poll}}; use unsigned_varint::codec::UviBytes; /// Maximum allowed size of the two handshake messages, in bytes. 
const MAX_HANDSHAKE_SIZE: usize = 1024; -/// Maximum number of buffered messages before we refuse to accept more. -const MAX_PENDING_MESSAGES: usize = 512; /// Upgrade that accepts a substream, sends back a status message, then becomes a unidirectional /// stream of messages. @@ -93,10 +91,6 @@ pub struct NotificationsOutSubstream { /// Substream where to send messages. #[pin] socket: Framed>>>, - /// Queue of messages waiting to be sent. - messages_queue: VecDeque>, - /// If true, we need to flush `socket`. - need_flush: bool, } impl NotificationsIn { @@ -272,80 +266,38 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, Ok((handshake, NotificationsOutSubstream { socket: Framed::new(socket, UviBytes::default()), - messages_queue: VecDeque::with_capacity(MAX_PENDING_MESSAGES), - need_flush: false, })) }) } } -impl NotificationsOutSubstream { - /// Returns the number of items in the queue, capped to `u32::max_value()`. - pub fn queue_len(&self) -> u32 { - u32::try_from(self.messages_queue.len()).unwrap_or(u32::max_value()) - } - - /// Push a message to the queue of messages. - /// - /// This has the same effect as the `Sink::start_send` implementation. - pub fn push_message(&mut self, item: Vec) -> Result<(), NotificationsOutError> { - if self.messages_queue.len() >= MAX_PENDING_MESSAGES { - return Err(NotificationsOutError::Clogged); - } - - self.messages_queue.push_back(item); - Ok(()) - } -} - impl Sink> for NotificationsOutSubstream where TSubstream: AsyncRead + AsyncWrite + Unpin, { type Error = NotificationsOutError; - fn poll_ready(self: Pin<&mut Self>, _: &mut Context) -> Poll> { - Poll::Ready(Ok(())) + fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let mut this = self.project(); + Sink::poll_ready(this.socket.as_mut(), cx) + .map_err(NotificationsOutError::Io) } - fn start_send(mut self: Pin<&mut Self>, item: Vec) -> Result<(), Self::Error> { - self.push_message(item) + fn start_send(self: Pin<&mut Self>, item: Vec) -> Result<(), Self::Error> { + let mut this = self.project(); + Sink::start_send(this.socket.as_mut(), io::Cursor::new(item)) + .map_err(NotificationsOutError::Io) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { let mut this = self.project(); - - while !this.messages_queue.is_empty() { - match Sink::poll_ready(this.socket.as_mut(), cx) { - Poll::Ready(Err(err)) => return Poll::Ready(Err(From::from(err))), - Poll::Ready(Ok(())) => { - let msg = this.messages_queue.pop_front() - .expect("checked for !is_empty above; qed"); - Sink::start_send(this.socket.as_mut(), io::Cursor::new(msg))?; - *this.need_flush = true; - }, - Poll::Pending => return Poll::Pending, - } - } - - if *this.need_flush { - match Sink::poll_flush(this.socket.as_mut(), cx) { - Poll::Ready(Err(err)) => return Poll::Ready(Err(From::from(err))), - Poll::Ready(Ok(())) => *this.need_flush = false, - Poll::Pending => return Poll::Pending, - } - } - - Poll::Ready(Ok(())) + Sink::poll_flush(this.socket.as_mut(), cx) + .map_err(NotificationsOutError::Io) } - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - ready!(Sink::poll_flush(self.as_mut(), cx))?; - let this = self.project(); - match Sink::poll_close(this.socket, cx) { - Poll::Ready(Ok(())) => Poll::Ready(Ok(())), - Poll::Ready(Err(err)) => Poll::Ready(Err(From::from(err))), - Poll::Pending => Poll::Pending, - } + fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let mut this = self.project(); + Sink::poll_close(this.socket.as_mut(), cx) + 
.map_err(NotificationsOutError::Io) } } @@ -386,13 +338,6 @@ impl From for NotificationsHandshakeError { pub enum NotificationsOutError { /// I/O error on the substream. Io(io::Error), - - /// Remote doesn't process our messages quickly enough. - /// - /// > **Note**: This is not necessarily the remote's fault, and could also be caused by the - /// > local node sending data too quickly. Properly doing back-pressure, however, - /// > would require a deep refactoring effort in Substrate as a whole. - Clogged, } #[cfg(test)] @@ -402,7 +347,6 @@ mod tests { use async_std::net::{TcpListener, TcpStream}; use futures::{prelude::*, channel::oneshot}; use libp2p::core::upgrade; - use std::pin::Pin; #[test] fn basic_works() { @@ -582,57 +526,4 @@ mod tests { async_std::task::block_on(client); } - - #[test] - fn buffer_is_full_closes_connection() { - const PROTO_NAME: &'static [u8] = b"/test/proto/1"; - let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); - - let client = async_std::task::spawn(async move { - let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); - let (handshake, mut substream) = upgrade::apply_outbound( - socket, - NotificationsOut::new(PROTO_NAME, vec![]), - upgrade::Version::V1 - ).await.unwrap(); - - assert!(handshake.is_empty()); - - // Push an item and flush so that the test works. - substream.send(b"hello world".to_vec()).await.unwrap(); - - for _ in 0..32768 { - // Push an item on the sink without flushing until an error happens because the - // buffer is full. - let message = b"hello world!".to_vec(); - if future::poll_fn(|cx| Sink::poll_ready(Pin::new(&mut substream), cx)).await.is_err() { - return Ok(()); - } - if Sink::start_send(Pin::new(&mut substream), message).is_err() { - return Ok(()); - } - } - - Err(()) - }); - - async_std::task::block_on(async move { - let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); - listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); - - let (socket, _) = listener.accept().await.unwrap(); - let (initial_message, mut substream) = upgrade::apply_inbound( - socket, - NotificationsIn::new(PROTO_NAME) - ).await.unwrap(); - - assert!(initial_message.is_empty()); - substream.send_handshake(vec![]); - - // Process one message so that the handshake and all works. 
- let _ = substream.next().await.unwrap().unwrap(); - - client.await.unwrap(); - }); - } } diff --git a/client/network/src/service.rs b/client/network/src/service.rs index ea045013e12..c11a620c567 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -38,7 +38,7 @@ use crate::{ }, on_demand_layer::AlwaysBadChecker, light_client_handler, block_requests, finality_requests, - protocol::{self, event::Event, LegacyConnectionKillError, sync::SyncState, PeerInfo, Protocol}, + protocol::{self, event::Event, NotifsHandlerError, LegacyConnectionKillError, NotificationsSink, Ready, sync::SyncState, PeerInfo, Protocol}, transport, ReputationChange, }; use futures::prelude::*; @@ -50,7 +50,8 @@ use libp2p::swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent, protocols_handle use log::{error, info, trace, warn}; use parking_lot::Mutex; use prometheus_endpoint::{ - register, Counter, CounterVec, Gauge, GaugeVec, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry, U64, + register, Counter, CounterVec, Gauge, GaugeVec, Histogram, HistogramOpts, HistogramVec, Opts, + PrometheusError, Registry, U64, }; use sc_peerset::PeersetHandle; use sp_consensus::import_queue::{BlockImportError, BlockImportResult, ImportQueue, Link}; @@ -61,7 +62,7 @@ use sp_runtime::{ use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use std::{ borrow::{Borrow, Cow}, - collections::HashSet, + collections::{HashMap, HashSet}, fs, marker::PhantomData, num:: NonZeroUsize, @@ -95,6 +96,14 @@ pub struct NetworkService { peerset: PeersetHandle, /// Channel that sends messages to the actual worker. to_worker: TracingUnboundedSender>, + /// For each peer and protocol combination, an object that allows sending notifications to + /// that peer. Updated by the [`NetworkWorker`]. + peers_notifications_sinks: Arc>>, + /// For each legacy gossiping engine ID, the corresponding new protocol name. + protocol_name_by_engine: Mutex>>, + /// Field extracted from the [`Metrics`] struct and necessary to report the + /// notifications-related metrics. + notifications_sizes_metric: Option, /// Marker to pin the `H` generic. Serves no purpose except to not break backwards /// compatibility. _marker: PhantomData, @@ -242,7 +251,6 @@ impl NetworkWorker { params.block_announce_validator, params.metrics_registry.as_ref(), boot_node_ids.clone(), - metrics.as_ref().map(|m| m.notifications_queues_size.clone()), )?; // Build the swarm. @@ -342,6 +350,10 @@ impl NetworkWorker { } let external_addresses = Arc::new(Mutex::new(Vec::new())); + let peers_notifications_sinks = Arc::new(Mutex::new(HashMap::new())); + let protocol_name_by_engine = Mutex::new({ + params.network_config.notifications_protocols.iter().cloned().collect() + }); let service = Arc::new(NetworkService { bandwidth, @@ -351,6 +363,10 @@ impl NetworkWorker { peerset: peerset_handle, local_peer_id, to_worker, + peers_notifications_sinks: peers_notifications_sinks.clone(), + protocol_name_by_engine, + notifications_sizes_metric: + metrics.as_ref().map(|metrics| metrics.notifications_sizes.clone()), _marker: PhantomData, }); @@ -364,6 +380,7 @@ impl NetworkWorker { from_service, light_client_rqs: params.on_demand.and_then(|od| od.extract_receiver()), event_streams: out_events::OutChannels::new(params.metrics_registry.as_ref())?, + peers_notifications_sinks, metrics, boot_node_ids, }) @@ -542,8 +559,16 @@ impl NetworkService { &self.local_peer_id } - /// Writes a message on an open notifications channel. 
Has no effect if the notifications - /// channel with this protocol name is closed. + /// Appends a notification to the buffer of pending outgoing notifications with the given peer. + /// Has no effect if the notifications channel with this protocol name is not open. + /// + /// If the buffer of pending outgoing notifications with that peer is full, the notification + /// is silently dropped and the connection to the remote will start being shut down. This + /// happens if you call this method at a higher rate than the rate at which the peer processes + /// these notifications, or if the available network bandwidth is too low. + /// + /// For this reason, this method is considered soft-deprecated. You are encouraged to use + /// [`NetworkService::notification_sender`] instead. /// /// > **Note**: The reason why this is a no-op in the situation where we have no channel is /// > that we don't guarantee message delivery anyway. Networking issues can cause @@ -551,14 +576,145 @@ impl NetworkService { /// > between the remote voluntarily closing a substream or a network error /// > preventing the message from being delivered. /// - /// The protocol must have been registered with `register_notifications_protocol`. + /// The protocol must have been registered with `register_notifications_protocol` or + /// `NetworkConfiguration::notifications_protocols`. /// pub fn write_notification(&self, target: PeerId, engine_id: ConsensusEngineId, message: Vec) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::WriteNotification { - target, + // We clone the `NotificationsSink` in order to be able to unlock the network-wide + // `peers_notifications_sinks` mutex as soon as possible. + let sink = { + let peers_notifications_sinks = self.peers_notifications_sinks.lock(); + if let Some(sink) = peers_notifications_sinks.get(&(target, engine_id)) { + sink.clone() + } else { + // Notification silently discarded, as documented. + return; + } + }; + + // Used later for the metrics report. + let message_len = message.len(); + + // Determine the wire protocol name corresponding to this `engine_id`. + let protocol_name = self.protocol_name_by_engine.lock().get(&engine_id).cloned(); + if let Some(protocol_name) = protocol_name { + // For backwards-compatibility reason, we have to duplicate the message and pass it + // in the situation where the remote still uses the legacy substream. + let fallback = codec::Encode::encode(&{ + protocol::message::generic::Message::<(), (), (), ()>::Consensus({ + protocol::message::generic::ConsensusMessage { + engine_id, + data: message.clone(), + } + }) + }); + + sink.send_sync_notification(&protocol_name, fallback, message); + } else { + return; + } + + if let Some(notifications_sizes_metric) = self.notifications_sizes_metric.as_ref() { + notifications_sizes_metric + .with_label_values(&["out", &maybe_utf8_bytes_to_string(&engine_id)]) + .observe(message_len as f64); + } + } + + /// Obtains a [`NotificationSender`] for a connected peer, if it exists. + /// + /// A `NotificationSender` is scoped to a particular connection to the peer that holds + /// a receiver. With a `NotificationSender` at hand, sending a notification is done in two steps: + /// + /// 1. [`NotificationSender::ready`] is used to wait for the sender to become ready + /// for another notification, yielding a [`NotificationSenderReady`] token. + /// 2. [`NotificationSenderReady::send`] enqueues the notification for sending. 
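Put together, the two steps read as follows. This is only a sketch: `network`, `peer` and `ENGINE_ID` are assumed to be in scope (a `NetworkService` handle, a connected `PeerId`, and the engine ID of an already-registered notifications protocol), and error handling is elided.

```rust
// Obtain a sender scoped to this peer/protocol pair; fails if no substream is open.
let sender = network.notification_sender(peer, ENGINE_ID)?;
// Step 1: wait until the outgoing buffer towards that peer has room.
let ready = sender.ready().await?;
// Step 2: enqueue the notification into the reserved slot; this call itself never waits.
ready.send(b"hello".to_vec())?;
```

All waiting happens in `ready().await`; `send` only fills the slot that was reserved.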
This operation + /// can only fail if the underlying notification substream or connection has suddenly closed. + /// + /// An error is returned either by `notification_sender`, by [`NotificationSender::wait`], + /// or by [`NotificationSenderReady::send`] if there exists no open notifications substream + /// with that combination of peer and protocol, or if the remote has asked to close the + /// notifications substream. If that happens, it is guaranteed that an + /// [`Event::NotificationStreamClosed`] has been generated on the stream returned by + /// [`NetworkService::event_stream`]. + /// + /// If the remote requests to close the notifications substream, all notifications successfully + /// enqueued using [`NotificationSenderReady::send`] will finish being sent out before the + /// substream actually gets closed, but attempting to enqueue more notifications will now + /// return an error. It is however possible for the entire connection to be abruptly closed, + /// in which case enqueued notifications will be lost. + /// + /// The protocol must have been registered with `register_notifications_protocol` or + /// `NetworkConfiguration::notifications_protocols`. + /// + /// # Usage + /// + /// This method returns a struct that allows waiting until there is space available in the + /// buffer of messages towards the given peer. If the peer processes notifications at a slower + /// rate than we send them, this buffer will quickly fill up. + /// + /// As such, you should never do something like this: + /// + /// ```ignore + /// // Do NOT do this + /// for peer in peers { + /// if let Ok(n) = network.notification_sender(peer, ...) { + /// if let Ok(s) = n.ready().await { + /// let _ = s.send(...); + /// } + /// } + /// } + /// ``` + /// + /// Doing so would slow down all peers to the rate of the slowest one. A malicious or + /// malfunctioning peer could intentionally process notifications at a very slow rate. + /// + /// Instead, you are encouraged to maintain your own buffer of notifications on top of the one + /// maintained by `sc-network`, and use `notification_sender` to progressively send out + /// elements from your buffer. If this additional buffer is full (which will happen at some + /// point if the peer is too slow to process notifications), appropriate measures can be taken, + /// such as removing non-critical notifications from the buffer or disconnecting the peer + /// using [`NetworkService::disconnect_peer`]. + /// + /// + /// Notifications Per-peer buffer + /// broadcast +-------> of notifications +--> `notification_sender` +--> Internet + /// ^ (not covered by + /// | sc-network) + /// + + /// Notifications should be dropped + /// if buffer is full + /// + pub fn notification_sender( + &self, + target: PeerId, + engine_id: ConsensusEngineId, + ) -> Result { + // We clone the `NotificationsSink` in order to be able to unlock the network-wide + // `peers_notifications_sinks` mutex as soon as possible. + let sink = { + let peers_notifications_sinks = self.peers_notifications_sinks.lock(); + if let Some(sink) = peers_notifications_sinks.get(&(target, engine_id)) { + sink.clone() + } else { + return Err(NotificationSenderError::Closed); + } + }; + + // Determine the wire protocol name corresponding to this `engine_id`. 
+ let protocol_name = match self.protocol_name_by_engine.lock().get(&engine_id).cloned() { + Some(p) => p, + None => return Err(NotificationSenderError::BadProtocol), + }; + + Ok(NotificationSender { + sink, + protocol_name, engine_id, - message, - }); + notification_size_metric: self.notifications_sizes_metric.as_ref().map(|histogram| { + histogram.with_label_values(&["out", &maybe_utf8_bytes_to_string(&engine_id)]) + }), + }) } /// Returns a stream containing the events that happen on the network. @@ -595,9 +751,11 @@ impl NetworkService { engine_id: ConsensusEngineId, protocol_name: impl Into>, ) { + let protocol_name = protocol_name.into(); + self.protocol_name_by_engine.lock().insert(engine_id, protocol_name.clone()); let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::RegisterNotifProtocol { engine_id, - protocol_name: protocol_name.into(), + protocol_name, }); } @@ -813,6 +971,87 @@ impl NetworkStateInfo for NetworkService } } +/// A `NotificationSender` allows for sending notifications to a peer with a chosen protocol. +#[must_use] +pub struct NotificationSender { + sink: NotificationsSink, + + /// Name of the protocol on the wire. + protocol_name: Cow<'static, [u8]>, + + /// Engine ID used for the fallback message. + engine_id: ConsensusEngineId, + + /// Field extracted from the [`Metrics`] struct and necessary to report the + /// notifications-related metrics. + notification_size_metric: Option, +} + +impl NotificationSender { + /// Returns a future that resolves when the `NotificationSender` is ready to send a notification. + pub async fn ready<'a>(&'a self) -> Result, NotificationSenderError> { + Ok(NotificationSenderReady { + ready: match self.sink.reserve_notification(&self.protocol_name).await { + Ok(r) => r, + Err(()) => return Err(NotificationSenderError::Closed), + }, + engine_id: self.engine_id, + notification_size_metric: self.notification_size_metric.clone(), + }) + } +} + +/// Reserved slot in the notifications buffer, ready to accept data. +#[must_use] +pub struct NotificationSenderReady<'a> { + ready: Ready<'a>, + + /// Engine ID used for the fallback message. + engine_id: ConsensusEngineId, + + /// Field extracted from the [`Metrics`] struct and necessary to report the + /// notifications-related metrics. + notification_size_metric: Option, +} + +impl<'a> NotificationSenderReady<'a> { + /// Consumes this slots reservation and actually queues the notification. + pub fn send(self, notification: impl Into>) -> Result<(), NotificationSenderError> { + let notification = notification.into(); + + if let Some(notification_size_metric) = &self.notification_size_metric { + notification_size_metric.observe(notification.len() as f64); + } + + // For backwards-compatibility reason, we have to duplicate the message and pass it + // in the situation where the remote still uses the legacy substream. + let fallback = codec::Encode::encode(&{ + protocol::message::generic::Message::<(), (), (), ()>::Consensus({ + protocol::message::generic::ConsensusMessage { + engine_id: self.engine_id, + data: notification.clone(), + } + }) + }); + + self.ready.send(fallback, notification) + .map_err(|()| NotificationSenderError::Closed) + } +} + +/// Error returned by [`NetworkService::send_notification`]. +#[derive(Debug, derive_more::Display, derive_more::Error)] +pub enum NotificationSenderError { + /// The notification receiver has been closed, usually because the underlying connection closed. + /// + /// Some of the notifications most recently sent may not have been received. 
However, + /// the peer may still be connected and a new `NotificationSender` for the same + /// protocol obtained from [`NetworkService::notification_sender`]. + Closed, + /// Protocol name hasn't been registered. + BadProtocol, +} + /// Messages sent from the `NetworkService` to the `NetworkWorker`. /// /// Each entry corresponds to a method of `NetworkService`. @@ -826,11 +1065,6 @@ enum ServiceToWorkerMsg { AddKnownAddress(PeerId, Multiaddr), SyncFork(Vec, B::Hash, NumberFor), EventStream(out_events::Sender), - WriteNotification { - message: Vec, - engine_id: ConsensusEngineId, - target: PeerId, - }, RegisterNotifProtocol { engine_id: ConsensusEngineId, protocol_name: Cow<'static, [u8]>, @@ -867,6 +1101,9 @@ pub struct NetworkWorker { metrics: Option, /// The `PeerId`'s of all boot nodes. boot_node_ids: Arc>, + /// For each peer and protocol combination, an object that allows sending notifications to + /// that peer. Shared with the [`NetworkService`]. + peers_notifications_sinks: Arc>>, } struct Metrics { @@ -889,7 +1126,6 @@ struct Metrics { listeners_local_addresses: Gauge, listeners_errors_total: Counter, network_per_sec_bytes: GaugeVec, - notifications_queues_size: HistogramVec, notifications_sizes: HistogramVec, notifications_streams_closed_total: CounterVec, notifications_streams_opened_total: CounterVec, @@ -1002,16 +1238,6 @@ impl Metrics { ), &["direction"] )?, registry)?, - notifications_queues_size: register(HistogramVec::new( - HistogramOpts { - common_opts: Opts::new( - "sub_libp2p_notifications_queues_size", - "Total size of all the notification queues" - ), - buckets: vec![0.0, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 511.0, 512.0], - }, - &["protocol"] - )?, registry)?, notifications_sizes: register(HistogramVec::new( HistogramOpts { common_opts: Opts::new( @@ -1088,27 +1314,6 @@ impl Metrics { )?, registry)?, }) } - - fn update_with_network_event(&self, event: &Event) { - match event { - Event::NotificationStreamOpened { engine_id, .. } => { - self.notifications_streams_opened_total - .with_label_values(&[&maybe_utf8_bytes_to_string(engine_id)]).inc(); - }, - Event::NotificationStreamClosed { engine_id, .. } => { - self.notifications_streams_closed_total - .with_label_values(&[&maybe_utf8_bytes_to_string(engine_id)]).inc(); - }, - Event::NotificationsReceived { messages, .. 
} => { - for (engine_id, message) in messages { - self.notifications_sizes - .with_label_values(&["in", &maybe_utf8_bytes_to_string(engine_id)]) - .observe(message.len() as f64); - } - }, - _ => {} - } - } } impl Future for NetworkWorker { @@ -1162,14 +1367,6 @@ impl Future for NetworkWorker { this.network_service.user_protocol_mut().set_sync_fork_request(peer_ids, &hash, number), ServiceToWorkerMsg::EventStream(sender) => this.event_streams.push(sender), - ServiceToWorkerMsg::WriteNotification { message, engine_id, target } => { - if let Some(metrics) = this.metrics.as_ref() { - metrics.notifications_sizes - .with_label_values(&["out", &maybe_utf8_bytes_to_string(&engine_id)]) - .observe(message.len() as f64); - } - this.network_service.user_protocol_mut().write_notification(target, engine_id, message) - }, ServiceToWorkerMsg::RegisterNotifProtocol { engine_id, protocol_name } => { this.network_service .register_notifications_protocol(engine_id, protocol_name); @@ -1237,11 +1434,82 @@ impl Future for NetworkWorker { .inc(); } }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::Event(ev))) => { + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamOpened { remote, engine_id, notifications_sink, role })) => { if let Some(metrics) = this.metrics.as_ref() { - metrics.update_with_network_event(&ev); + metrics.notifications_streams_opened_total + .with_label_values(&[&maybe_utf8_bytes_to_string(&engine_id)]).inc(); + } + { + let mut peers_notifications_sinks = this.peers_notifications_sinks.lock(); + peers_notifications_sinks.insert((remote.clone(), engine_id), notifications_sink); } - this.event_streams.send(ev); + this.event_streams.send(Event::NotificationStreamOpened { + remote, + engine_id, + role, + }); + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamReplaced { remote, engine_id, notifications_sink })) => { + let mut peers_notifications_sinks = this.peers_notifications_sinks.lock(); + if let Some(s) = peers_notifications_sinks.get_mut(&(remote, engine_id)) { + *s = notifications_sink; + } else { + log::error!( + target: "sub-libp2p", + "NotificationStreamReplaced for non-existing substream" + ); + } + + // TODO: Notifications might have been lost as a result of the previous + // connection being dropped, and as a result it would be preferable to notify + // the users of this fact by simulating the substream being closed then + // reopened. + // The code below doesn't compile because `role` is unknown. Propagating the + // handshake of the secondary connections is quite an invasive change and + // would conflict with https://github.com/paritytech/substrate/issues/6403. + // Considering that dropping notifications is generally regarded as + // acceptable, this bug is at the moment intentionally left there and is + // intended to be fixed at the same time as + // https://github.com/paritytech/substrate/issues/6403. 
+ /*this.event_streams.send(Event::NotificationStreamClosed { + remote, + engine_id, + }); + this.event_streams.send(Event::NotificationStreamOpened { + remote, + engine_id, + role, + });*/ + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamClosed { remote, engine_id })) => { + if let Some(metrics) = this.metrics.as_ref() { + metrics.notifications_streams_closed_total + .with_label_values(&[&maybe_utf8_bytes_to_string(&engine_id[..])]).inc(); + } + this.event_streams.send(Event::NotificationStreamClosed { + remote: remote.clone(), + engine_id, + }); + { + let mut peers_notifications_sinks = this.peers_notifications_sinks.lock(); + peers_notifications_sinks.remove(&(remote.clone(), engine_id)); + } + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationsReceived { remote, messages })) => { + if let Some(metrics) = this.metrics.as_ref() { + for (engine_id, message) in &messages { + metrics.notifications_sizes + .with_label_values(&["in", &maybe_utf8_bytes_to_string(engine_id)]) + .observe(message.len() as f64); + } + } + this.event_streams.send(Event::NotificationsReceived { + remote, + messages, + }); + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::Dht(ev))) => { + this.event_streams.send(Event::Dht(ev)); }, Poll::Ready(SwarmEvent::ConnectionEstablished { peer_id, endpoint, num_established }) => { trace!(target: "sub-libp2p", "Libp2p => Connected({:?})", peer_id); @@ -1272,7 +1540,10 @@ impl Future for NetworkWorker { EitherError::A(PingFailure::Timeout)))))))) => "ping-timeout", ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( EitherError::A(EitherError::A(EitherError::A( - EitherError::B(LegacyConnectionKillError)))))))) => "force-closed", + NotifsHandlerError::Legacy(LegacyConnectionKillError)))))))) => "force-closed", + ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( + EitherError::A(EitherError::A(EitherError::A( + NotifsHandlerError::SyncNotificationsClogged))))))) => "sync-notifications-clogged", ConnectionError::Handler(NodeHandlerWrapperError::Handler(_)) => "protocol-error", ConnectionError::Handler(NodeHandlerWrapperError::KeepAliveTimeout) => "keep-alive-timeout", }; diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index 0b02153d3d8..f0982e30d99 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -345,6 +345,57 @@ fn lots_of_incoming_peers_works() { }); } +#[test] +fn notifications_back_pressure() { + // Node 1 floods node 2 with notifications. Random sleeps are done on node 2 to simulate the + // node being busy. We make sure that all notifications are received. + + const TOTAL_NOTIFS: usize = 10_000; + + let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto(); + let node2_id = node2.local_peer_id(); + + let receiver = async_std::task::spawn(async move { + let mut received_notifications = 0; + + while received_notifications < TOTAL_NOTIFS { + match events_stream2.next().await.unwrap() { + Event::NotificationStreamClosed { .. } => panic!(), + Event::NotificationsReceived { messages, .. 
} => { + for message in messages { + assert_eq!(message.0, ENGINE_ID); + assert_eq!(message.1, format!("hello #{}", received_notifications)); + received_notifications += 1; + } + } + _ => {} + }; + + if rand::random::() < 2 { + async_std::task::sleep(Duration::from_millis(rand::random::() % 750)).await; + } + } + }); + + async_std::task::block_on(async move { + // Wait for the `NotificationStreamOpened`. + loop { + match events_stream1.next().await.unwrap() { + Event::NotificationStreamOpened { .. } => break, + _ => {} + }; + } + + // Sending! + for num in 0..TOTAL_NOTIFS { + let notif = node1.notification_sender(node2_id.clone(), ENGINE_ID).unwrap(); + notif.ready().await.unwrap().send(format!("hello #{}", num)).unwrap(); + } + + receiver.await; + }); +} + #[test] #[should_panic(expected = "don't match the transport")] fn ensure_listen_addresses_consistent_with_transport_memory() { -- GitLab From 7bb8d82af8da1bcac6f20cfaa0051150a9499f89 Mon Sep 17 00:00:00 2001 From: Gavin Wood Date: Wed, 29 Jul 2020 14:00:51 +0200 Subject: [PATCH 021/132] Cleanup our sort usage (#6754) --- client/authority-discovery/src/addr_cache.rs | 4 ++-- client/network/src/protocol/generic_proto/upgrade/legacy.rs | 2 +- frame/collective/src/lib.rs | 2 +- frame/contracts/src/wasm/runtime.rs | 2 +- frame/staking/src/lib.rs | 6 +++--- frame/support/src/traits.rs | 2 +- primitives/api/proc-macro/src/decl_runtime_apis.rs | 2 +- primitives/arithmetic/src/lib.rs | 4 ++-- primitives/npos-elections/src/lib.rs | 2 +- primitives/npos-elections/src/mock.rs | 2 +- primitives/state-machine/src/changes_trie/build.rs | 2 +- primitives/trie/src/lib.rs | 2 +- 12 files changed, 16 insertions(+), 16 deletions(-) diff --git a/client/authority-discovery/src/addr_cache.rs b/client/authority-discovery/src/addr_cache.rs index 0a27c1c4436..f108afce0a9 100644 --- a/client/authority-discovery/src/addr_cache.rs +++ b/client/authority-discovery/src/addr_cache.rs @@ -64,7 +64,7 @@ where return; } - addresses.sort_unstable_by(|a, b| a.as_ref().cmp(b.as_ref())); + addresses.sort_by(|a, b| a.as_ref().cmp(b.as_ref())); self.cache.insert(id, addresses); } @@ -94,7 +94,7 @@ where .collect::>(); addresses.dedup(); - addresses.sort_unstable_by(|a, b| a.as_ref().cmp(b.as_ref())); + addresses.sort_by(|a, b| a.as_ref().cmp(b.as_ref())); addresses .choose_multiple(&mut rng, MAX_NUM_AUTHORITY_CONN) diff --git a/client/network/src/protocol/generic_proto/upgrade/legacy.rs b/client/network/src/protocol/generic_proto/upgrade/legacy.rs index dd02d7e2664..f56ab2450d4 100644 --- a/client/network/src/protocol/generic_proto/upgrade/legacy.rs +++ b/client/network/src/protocol/generic_proto/upgrade/legacy.rs @@ -57,7 +57,7 @@ impl RegisteredProtocol { id: protocol, supported_versions: { let mut tmp = versions.to_vec(); - tmp.sort_unstable_by(|a, b| b.cmp(&a)); + tmp.sort_by(|a, b| b.cmp(&a)); tmp }, handshake_message, diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index cebf35dd2b9..1edd8c75b90 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -893,7 +893,7 @@ impl, I: Instance> ChangeMembers for Module { } // remove accounts from all current voting in motions. 
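(A standalone aside on this commit's change, not part of the hunk above.) The `sort_unstable*` calls being removed and the `sort*` calls replacing them differ in one observable way: the stable variants keep elements that compare equal in their original relative order, while the unstable variants (typically faster and in-place) give no such guarantee. For example:

```rust
fn main() {
    // Stable sort: the two entries with key 1 keep their input order.
    let mut stable = vec![(1u8, "first"), (0, "zero"), (1, "second")];
    stable.sort_by(|a, b| a.0.cmp(&b.0));
    assert_eq!(stable, vec![(0, "zero"), (1, "first"), (1, "second")]);

    // Unstable sort: only the ordering of keys is guaranteed; equal keys may swap.
    let mut unstable = vec![(1u8, "first"), (0, "zero"), (1, "second")];
    unstable.sort_unstable_by(|a, b| a.0.cmp(&b.0));
    assert_eq!(unstable[0], (0, "zero"));
}
```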
let mut outgoing = outgoing.to_vec(); - outgoing.sort_unstable(); + outgoing.sort(); for h in Self::proposals().into_iter() { >::mutate(h, |v| if let Some(mut votes) = v.take() { diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 0f07f2f4278..ab93076f57b 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -1211,7 +1211,7 @@ where /// the order of items is not preserved. fn has_duplicates>(items: &mut Vec) -> bool { // Sort the vector - items.sort_unstable_by(|a, b| { + items.sort_by(|a, b| { Ord::cmp(a.as_ref(), b.as_ref()) }); // And then find any two consecutive equal elements. diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index dd4ad5fc7e5..cd820051b15 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -1238,12 +1238,12 @@ decl_storage! { decl_event!( pub enum Event where Balance = BalanceOf, ::AccountId { /// The era payout has been set; the first balance is the validator-payout; the second is - /// the remainder from the maximum amount of reward. + /// the remainder from the maximum amount of reward. /// [era_index, validator_payout, remainder] EraPayout(EraIndex, Balance, Balance), /// The staker has been rewarded by this amount. [stash, amount] Reward(AccountId, Balance), - /// One validator (and its nominators) has been slashed by the given amount. + /// One validator (and its nominators) has been slashed by the given amount. /// [validator, amount] Slash(AccountId, Balance), /// An old slashing report from a prior era was discarded because it could @@ -2889,7 +2889,7 @@ impl Module { let mut exposure_clipped = exposure; let clipped_max_len = T::MaxNominatorRewardedPerValidator::get() as usize; if exposure_clipped.others.len() > clipped_max_len { - exposure_clipped.others.sort_unstable_by(|a, b| a.value.cmp(&b.value).reverse()); + exposure_clipped.others.sort_by(|a, b| a.value.cmp(&b.value).reverse()); exposure_clipped.others.truncate(clipped_max_len); } >::insert(¤t_era, &stash, exposure_clipped); diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index cdb361336d8..752725ab466 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -1238,7 +1238,7 @@ pub trait ChangeMembers { /// /// This resets any previous value of prime. fn change_members(incoming: &[AccountId], outgoing: &[AccountId], mut new: Vec) { - new.sort_unstable(); + new.sort(); Self::change_members_sorted(incoming, outgoing, &new[..]); } diff --git a/primitives/api/proc-macro/src/decl_runtime_apis.rs b/primitives/api/proc-macro/src/decl_runtime_apis.rs index 8d9eeebef67..8294c8bfbd6 100644 --- a/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -384,7 +384,7 @@ fn generate_call_api_at_calls(decl: &ItemTrait) -> Result { renames.push((version, prefix_function_with_trait(&trait_name, &old_name))); } - renames.sort_unstable_by(|l, r| r.cmp(l)); + renames.sort_by(|l, r| r.cmp(l)); let (versions, old_names) = renames.into_iter().fold( (Vec::new(), Vec::new()), |(mut versions, mut old_names), (version, old_name)| { diff --git a/primitives/arithmetic/src/lib.rs b/primitives/arithmetic/src/lib.rs index 5c0d2baa51d..e54c6c833d1 100644 --- a/primitives/arithmetic/src/lib.rs +++ b/primitives/arithmetic/src/lib.rs @@ -180,7 +180,7 @@ pub fn normalize(input: &[T], targeted_sum: T) -> Result, &'static str // sort output once based on diff. 
This will require more data transfer and saving original // index, but we sort only twice instead: once now and once at the very end. let mut output_with_idx = input.iter().cloned().enumerate().collect::>(); - output_with_idx.sort_unstable_by_key(|x| x.1); + output_with_idx.sort_by_key(|x| x.1); if needs_bump { // must increase the values a bit. Bump from the min element. Index of minimum is now zero @@ -262,7 +262,7 @@ pub fn normalize(input: &[T], targeted_sum: T) -> Result, &'static str ); // sort again based on the original index. - output_with_idx.sort_unstable_by_key(|x| x.0); + output_with_idx.sort_by_key(|x| x.0); Ok(output_with_idx.into_iter().map(|(_, t)| t).collect()) } diff --git a/primitives/npos-elections/src/lib.rs b/primitives/npos-elections/src/lib.rs index 9ac058f8c3e..2b767d7c79b 100644 --- a/primitives/npos-elections/src/lib.rs +++ b/primitives/npos-elections/src/lib.rs @@ -749,7 +749,7 @@ fn do_balancing( e.1 = 0; }); - elected_edges.sort_unstable_by_key(|e| + elected_edges.sort_by_key(|e| if let Some(e) = support_map.get(&e.0) { e.total } else { Zero::zero() } ); diff --git a/primitives/npos-elections/src/mock.rs b/primitives/npos-elections/src/mock.rs index b9c2396b08b..8898c2d8b40 100644 --- a/primitives/npos-elections/src/mock.rs +++ b/primitives/npos-elections/src/mock.rs @@ -264,7 +264,7 @@ pub(crate) fn do_equalize_float( e.1 = 0.0; }); - elected_edges.sort_unstable_by(|x, y| + elected_edges.sort_by(|x, y| support_map.get(&x.0) .and_then(|x| support_map.get(&y.0).and_then(|y| x.total.partial_cmp(&y.total))) .unwrap_or(sp_std::cmp::Ordering::Equal) diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index bf910e2c4f7..675904578be 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -174,7 +174,7 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( extrinsics.extend( v.extrinsics().cloned() ); - extrinsics.sort_unstable(); + extrinsics.sort(); }, } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 7d1879a4f9f..c8f37a820d2 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -566,7 +566,7 @@ mod tests { count: 1000, }; let mut d = st.make(); - d.sort_unstable_by(|&(ref a, _), &(ref b, _)| a.cmp(b)); + d.sort_by(|&(ref a, _), &(ref b, _)| a.cmp(b)); let dr = d.iter().map(|v| (&v.0[..], &v.1[..])).collect(); check_equivalent::(&dr); check_iteration::(&dr); -- GitLab From 2afc3630a8baefb8c6e267a011ab4e4ccaced291 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Wed, 29 Jul 2020 14:30:10 +0200 Subject: [PATCH 022/132] Allow `PostDispatchInfo` to disable fees (#6749) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * initial mock * add test * remove unneeded clone * Update frame/support/src/weights.rs Co-authored-by: Alexander Theißen * fix compile * Update frame/support/src/weights.rs Co-authored-by: Alexander Popiak * Update frame/sudo/src/lib.rs Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * Apply suggestions from code review Co-authored-by: Bastian Köcher Co-authored-by: Alexander Theißen Co-authored-by: Alexander Popiak Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> Co-authored-by: Bastian Köcher --- frame/contracts/src/gas.rs | 1 + frame/contracts/src/tests.rs | 1 + frame/sudo/src/lib.rs | 25 ++++++++-- frame/support/src/weights.rs | 48 ++++++++++++++++++- 
frame/system/src/extensions/check_weight.rs | 10 +++- frame/transaction-payment/src/lib.rs | 53 +++++++++++++++++++-- 6 files changed, 127 insertions(+), 11 deletions(-) diff --git a/frame/contracts/src/gas.rs b/frame/contracts/src/gas.rs index 0ae1952de09..d6c7f82753e 100644 --- a/frame/contracts/src/gas.rs +++ b/frame/contracts/src/gas.rs @@ -194,6 +194,7 @@ impl GasMeter { { let post_info = PostDispatchInfo { actual_weight: Some(self.gas_spent()), + pays_fee: Default::default(), }; result diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index a2d85bb3135..5b2d7feb3d1 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -291,6 +291,7 @@ fn returns_base_call_cost() { Ok( PostDispatchInfo { actual_weight: Some(67500000), + pays_fee: Default::default(), } ) ); diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index 0f614a46467..113fa0dccc6 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -93,7 +93,11 @@ use sp_runtime::{DispatchResult, traits::StaticLookup}; use frame_support::{ Parameter, decl_module, decl_event, decl_storage, decl_error, ensure, }; -use frame_support::{weights::{Weight, GetDispatchInfo}, traits::UnfilteredDispatchable}; +use frame_support::{ + weights::{Weight, GetDispatchInfo, Pays}, + traits::UnfilteredDispatchable, + dispatch::DispatchResultWithPostInfo, +}; use frame_system::ensure_signed; #[cfg(test)] @@ -127,13 +131,15 @@ decl_module! { /// - Weight of derivative `call` execution + 10,000. /// # #[weight = (call.get_dispatch_info().weight + 10_000, call.get_dispatch_info().class)] - fn sudo(origin, call: Box<::Call>) { + fn sudo(origin, call: Box<::Call>) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; ensure!(sender == Self::key(), Error::::RequireSudo); let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); Self::deposit_event(RawEvent::Sudid(res.map(|_| ()).map_err(|e| e.error))); + // Sudo user does not pay a fee. + Ok(Pays::No.into()) } /// Authenticates the sudo key and dispatches a function call with `Root` origin. @@ -147,13 +153,15 @@ decl_module! { /// - The weight of this call is defined by the caller. /// # #[weight = (*_weight, call.get_dispatch_info().class)] - fn sudo_unchecked_weight(origin, call: Box<::Call>, _weight: Weight) { + fn sudo_unchecked_weight(origin, call: Box<::Call>, _weight: Weight) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; ensure!(sender == Self::key(), Error::::RequireSudo); let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); Self::deposit_event(RawEvent::Sudid(res.map(|_| ()).map_err(|e| e.error))); + // Sudo user does not pay a fee. + Ok(Pays::No.into()) } /// Authenticates the current sudo key and sets the given AccountId (`new`) as the new sudo key. @@ -166,7 +174,7 @@ decl_module! { /// - One DB change. /// # #[weight = 0] - fn set_key(origin, new: ::Source) { + fn set_key(origin, new: ::Source) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; ensure!(sender == Self::key(), Error::::RequireSudo); @@ -174,6 +182,8 @@ decl_module! { Self::deposit_event(RawEvent::KeyChanged(Self::key())); >::put(new); + // Sudo user does not pay a fee. 
+ Ok(Pays::No.into()) } /// Authenticates the sudo key and dispatches a function call with `Signed` origin from @@ -188,7 +198,10 @@ decl_module! { /// - Weight of derivative `call` execution + 10,000. /// # #[weight = (call.get_dispatch_info().weight + 10_000, call.get_dispatch_info().class)] - fn sudo_as(origin, who: ::Source, call: Box<::Call>) { + fn sudo_as(origin, + who: ::Source, + call: Box<::Call> + ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; ensure!(sender == Self::key(), Error::::RequireSudo); @@ -204,6 +217,8 @@ decl_module! { }; Self::deposit_event(RawEvent::SudoAsDone(res)); + // Sudo user does not pay a fee. + Ok(Pays::No.into()) } } } diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index 595e84333bd..db1e25ca7ab 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -263,10 +263,13 @@ pub trait GetDispatchInfo { } /// Weight information that is only available post dispatch. +/// NOTE: This can only be used to reduce the weight or fee, not increase it. #[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode)] pub struct PostDispatchInfo { /// Actual weight consumed by a call or `None` which stands for the worst case static weight. pub actual_weight: Option, + /// Whether this transaction should pay fees when all is said and done. + pub pays_fee: Pays, } impl PostDispatchInfo { @@ -283,6 +286,20 @@ impl PostDispatchInfo { info.weight } } + + /// Determine if user should actually pay fees at the end of the dispatch. + pub fn pays_fee(&self, info: &DispatchInfo) -> Pays { + // If they originally were not paying fees, or the post dispatch info + // says they should not pay fees, then they don't pay fees. + // This is because the pre dispatch information must contain the + // worst case for weight and fees paid. + if info.pays_fee == Pays::No || self.pays_fee == Pays::No { + Pays::No + } else { + // Otherwise they pay. + Pays::Yes + } + } } /// Extract the actual weight from a dispatch result if any or fall back to the default weight. 
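The rule implemented by `pays_fee` above can be summarised as: the pre-dispatch info is the worst case, and a fee is only charged if both the pre- and post-dispatch info say `Pays::Yes`. A small standalone sketch (function and variable names are local to the example):

```rust
use frame_support::weights::{DispatchInfo, Pays, PostDispatchInfo};

fn fee_resolution() {
    let paying = DispatchInfo { pays_fee: Pays::Yes, ..Default::default() };
    let free = DispatchInfo { pays_fee: Pays::No, ..Default::default() };

    // A call that succeeded and returned e.g. `Ok(Pays::No.into())` waives the fee,
    // even though the worst case declared up front was `Pays::Yes`.
    let refunded = PostDispatchInfo { actual_weight: None, pays_fee: Pays::No };
    assert!(refunded.pays_fee(&paying) == Pays::No);

    // A call declared fee-less up front cannot be charged after the fact.
    let charged = PostDispatchInfo { actual_weight: None, pays_fee: Pays::Yes };
    assert!(charged.pays_fee(&free) == Pays::No);

    // Only when both sides say `Pays::Yes` is the fee actually paid.
    assert!(charged.pays_fee(&paying) == Pays::Yes);
}
```

This is what the sudo changes above rely on: each dispatchable now returns `Ok(Pays::No.into())` so that the sudo key is not charged a fee when the call succeeds.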
@@ -293,10 +310,30 @@ pub fn extract_actual_weight(result: &DispatchResultWithPostInfo, info: &Dispatc }.calc_actual_weight(info) } +impl From<(Option, Pays)> for PostDispatchInfo { + fn from(post_weight_info: (Option, Pays)) -> Self { + let (actual_weight, pays_fee) = post_weight_info; + Self { + actual_weight, + pays_fee, + } + } +} + +impl From for PostDispatchInfo { + fn from(pays_fee: Pays) -> Self { + Self { + actual_weight: None, + pays_fee, + } + } +} + impl From> for PostDispatchInfo { fn from(actual_weight: Option) -> Self { Self { actual_weight, + pays_fee: Default::default(), } } } @@ -305,6 +342,7 @@ impl From<()> for PostDispatchInfo { fn from(_: ()) -> Self { Self { actual_weight: None, + pays_fee: Default::default(), } } } @@ -315,6 +353,11 @@ impl sp_runtime::traits::Printable for PostDispatchInfo { match self.actual_weight { Some(weight) => weight.print(), None => "max-weight".print(), + }; + "pays_fee=".print(); + match self.pays_fee { + Pays::Yes => "Yes".print(), + Pays::No => "No".print(), } } } @@ -338,7 +381,10 @@ impl WithPostDispatchInfo for T where { fn with_weight(self, actual_weight: Weight) -> DispatchErrorWithPostInfo { DispatchErrorWithPostInfo { - post_info: PostDispatchInfo { actual_weight: Some(actual_weight) }, + post_info: PostDispatchInfo { + actual_weight: Some(actual_weight), + pays_fee: Default::default(), + }, error: self.into(), } } diff --git a/frame/system/src/extensions/check_weight.rs b/frame/system/src/extensions/check_weight.rs index d52138b1e3b..1395aa87efb 100644 --- a/frame/system/src/extensions/check_weight.rs +++ b/frame/system/src/extensions/check_weight.rs @@ -575,7 +575,10 @@ mod tests { new_test_ext().execute_with(|| { // This is half of the max block weight let info = DispatchInfo { weight: 512, ..Default::default() }; - let post_info = PostDispatchInfo { actual_weight: Some(128), }; + let post_info = PostDispatchInfo { + actual_weight: Some(128), + pays_fee: Default::default(), + }; let len = 0_usize; // We allow 75% for normal transaction, so we put 25% - extrinsic base weight @@ -601,7 +604,10 @@ mod tests { fn signed_ext_check_weight_actual_weight_higher_than_max_is_capped() { new_test_ext().execute_with(|| { let info = DispatchInfo { weight: 512, ..Default::default() }; - let post_info = PostDispatchInfo { actual_weight: Some(700), }; + let post_info = PostDispatchInfo { + actual_weight: Some(700), + pays_fee: Default::default(), + }; let len = 0_usize; BlockWeight::mutate(|current_weight| { diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 9c624df8ca3..244b4280ade 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -371,7 +371,7 @@ impl Module where ) -> BalanceOf where T::Call: Dispatchable, { - Self::compute_fee_raw(len, post_info.calc_actual_weight(info), tip, info.pays_fee) + Self::compute_fee_raw(len, post_info.calc_actual_weight(info), tip, post_info.pays_fee(info)) } fn compute_fee_raw( @@ -762,11 +762,24 @@ mod tests { } fn post_info_from_weight(w: Weight) -> PostDispatchInfo { - PostDispatchInfo { actual_weight: Some(w), } + PostDispatchInfo { + actual_weight: Some(w), + pays_fee: Default::default(), + } + } + + fn post_info_from_pays(p: Pays) -> PostDispatchInfo { + PostDispatchInfo { + actual_weight: None, + pays_fee: p, + } } fn default_post_info() -> PostDispatchInfo { - PostDispatchInfo { actual_weight: None, } + PostDispatchInfo { + actual_weight: None, + pays_fee: Default::default(), + } } #[test] @@ -1211,4 +1224,38 @@ 
mod tests { assert_eq!(refund_based_fee, actual_fee); }); } + + #[test] + fn post_info_can_change_pays_fee() { + ExtBuilder::default() + .balance_factor(10) + .base_weight(7) + .build() + .execute_with(|| + { + let info = info_from_weight(100); + let post_info = post_info_from_pays(Pays::No); + let prev_balance = Balances::free_balance(2); + let len = 10; + let tip = 5; + + NextFeeMultiplier::put(Multiplier::saturating_from_rational(5, 4)); + + let pre = ChargeTransactionPayment::::from(tip) + .pre_dispatch(&2, CALL, &info, len) + .unwrap(); + + ChargeTransactionPayment:: + ::post_dispatch(pre, &info, &post_info, len, &Ok(())) + .unwrap(); + + let refund_based_fee = prev_balance - Balances::free_balance(2); + let actual_fee = Module:: + ::compute_actual_fee(len as u32, &info, &post_info, tip); + + // Only 5 tip is paid + assert_eq!(actual_fee, 5); + assert_eq!(refund_based_fee, actual_fee); + }); + } } -- GitLab From 075182eec6facd4a491ec8933613bfba0582549b Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Wed, 29 Jul 2020 14:33:54 +0200 Subject: [PATCH 023/132] benchmarks! macro: factorize instance usage. (#6750) * factorize benchmark! * fix types * fix types --- frame/benchmarking/src/lib.rs | 499 ++++++-------------------------- frame/benchmarking/src/utils.rs | 17 +- 2 files changed, 97 insertions(+), 419 deletions(-) diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index f306cb51391..bd0aabdaa3f 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -184,7 +184,7 @@ macro_rules! benchmarks { $( $rest:tt )* ) => { $crate::benchmarks_iter!( - NO_INSTANCE + { } { $( $( $where_ty: $where_bound ),* )? } { $( { $common , $common_from , $common_to , $common_instancer } )* } ( ) @@ -206,7 +206,7 @@ macro_rules! benchmarks_instance { $( $rest:tt )* ) => { $crate::benchmarks_iter!( - INSTANCE + { I } { $( $( $where_ty: $where_bound ),* )? } { $( { $common , $common_from , $common_to , $common_instancer } )* } ( ) @@ -220,16 +220,16 @@ macro_rules! benchmarks_instance { macro_rules! benchmarks_iter { // mutation arm: ( - $instance:ident + { $( $instance:ident )? } { $( $where_clause:tt )* } { $( $common:tt )* } - ( $( $names:ident )* ) + ( $( $names:tt )* ) // This contains $( $( { $instance } )? $name:ident )* $name:ident { $( $code:tt )* }: _ ( $origin:expr $( , $arg:expr )* ) verify $postcode:block $( $rest:tt )* ) => { $crate::benchmarks_iter! { - $instance + { $( $instance)? } { $( $where_clause )* } { $( $common )* } ( $( $names )* ) @@ -238,49 +238,27 @@ macro_rules! benchmarks_iter { $( $rest )* } }; - // no instance mutation arm: - ( - NO_INSTANCE - { $( $where_clause:tt )* } - { $( $common:tt )* } - ( $( $names:ident )* ) - $name:ident { $( $code:tt )* }: $dispatch:ident ( $origin:expr $( , $arg:expr )* ) - verify $postcode:block - $( $rest:tt )* - ) => { - $crate::benchmarks_iter! { - NO_INSTANCE - { $( $where_clause )* } - { $( $common )* } - ( $( $names )* ) - $name { $( $code )* }: { - < - Call as $crate::frame_support::traits::UnfilteredDispatchable - >::dispatch_bypass_filter(Call::::$dispatch($($arg),*), $origin.into())?; - } - verify $postcode - $( $rest )* - } - }; - // instance mutation arm: + // mutation arm: ( - INSTANCE + { $( $instance:ident )? } { $( $where_clause:tt )* } { $( $common:tt )* } - ( $( $names:ident )* ) + ( $( $names:tt )* ) $name:ident { $( $code:tt )* }: $dispatch:ident ( $origin:expr $( , $arg:expr )* ) verify $postcode:block $( $rest:tt )* ) => { $crate::benchmarks_iter! 
{ - INSTANCE + { $( $instance)? } { $( $where_clause )* } { $( $common )* } ( $( $names )* ) $name { $( $code )* }: { < - Call as $crate::frame_support::traits::UnfilteredDispatchable - >::dispatch_bypass_filter(Call::::$dispatch($($arg),*), $origin.into())?; + Call as $crate::frame_support::traits::UnfilteredDispatchable + >::dispatch_bypass_filter( + Call::::$dispatch($($arg),*), $origin.into() + )?; } verify $postcode $( $rest )* @@ -288,16 +266,16 @@ macro_rules! benchmarks_iter { }; // iteration arm: ( - $instance:ident + { $( $instance:ident )? } { $( $where_clause:tt )* } { $( $common:tt )* } - ( $( $names:ident )* ) + ( $( $names:tt )* ) $name:ident { $( $code:tt )* }: $eval:block verify $postcode:block $( $rest:tt )* ) => { $crate::benchmark_backend! { - $instance + { $( $instance)? } $name { $( $where_clause )* } { $( $common )* } @@ -308,32 +286,41 @@ macro_rules! benchmarks_iter { } #[cfg(test)] - $crate::impl_benchmark_test!( { $( $where_clause )* } $instance $name ); + $crate::impl_benchmark_test!( + { $( $where_clause )* } + { $( $instance)? } + $name + ); $crate::benchmarks_iter!( - $instance + { $( $instance)? } { $( $where_clause )* } { $( $common )* } - ( $( $names )* $name ) + ( $( $names )* { $( $instance )? } $name ) $( $rest )* ); }; // iteration-exit arm - ( $instance:ident { $( $where_clause:tt )* } { $( $common:tt )* } ( $( $names:ident )* ) ) => { - $crate::selected_benchmark!( { $( $where_clause)* } $instance $( $names ),* ); - $crate::impl_benchmark!( { $( $where_clause )* } $instance $( $names ),* ); + ( + { $( $instance:ident )? } + { $( $where_clause:tt )* } + { $( $common:tt )* } + ( $( $names:tt )* ) + ) => { + $crate::selected_benchmark!( { $( $where_clause)* } { $( $instance)? } $( $names )* ); + $crate::impl_benchmark!( { $( $where_clause )* } { $( $instance)? } $( $names )* ); }; // add verify block to _() format ( - $instance:ident + { $( $instance:ident )? } { $( $where_clause:tt )* } { $( $common:tt )* } - ( $( $names:ident )* ) + ( $( $names:tt )* ) $name:ident { $( $code:tt )* }: _ ( $origin:expr $( , $arg:expr )* ) $( $rest:tt )* ) => { $crate::benchmarks_iter! { - $instance + { $( $instance)? } { $( $where_clause )* } { $( $common )* } ( $( $names )* ) @@ -344,15 +331,15 @@ macro_rules! benchmarks_iter { }; // add verify block to name() format ( - $instance:ident + { $( $instance:ident )? } { $( $where_clause:tt )* } { $( $common:tt )* } - ( $( $names:ident )* ) + ( $( $names:tt )* ) $name:ident { $( $code:tt )* }: $dispatch:ident ( $origin:expr $( , $arg:expr )* ) $( $rest:tt )* ) => { $crate::benchmarks_iter! { - $instance + { $( $instance)? } { $( $where_clause )* } { $( $common )* } ( $( $names )* ) @@ -363,15 +350,15 @@ macro_rules! benchmarks_iter { }; // add verify block to {} format ( - $instance:ident + { $( $instance:ident )? } { $( $where_clause:tt )* } { $( $common:tt )* } - ( $( $names:ident )* ) + ( $( $names:tt )* ) $name:ident { $( $code:tt )* }: $eval:block $( $rest:tt )* ) => { $crate::benchmarks_iter!( - $instance + { $( $instance)? } { $( $where_clause )* } { $( $common )* } ( $( $names )* ) @@ -386,7 +373,7 @@ macro_rules! benchmarks_iter { #[doc(hidden)] macro_rules! benchmark_backend { // parsing arms - ($instance:ident $name:ident { + ( { $( $instance:ident )? } $name:ident { $( $where_clause:tt )* } { $( $common:tt )* @@ -397,13 +384,13 @@ macro_rules! benchmark_backend { $( $rest:tt )* } $postcode:block) => { $crate::benchmark_backend! { - $instance $name { $( $where_clause )* } { $( $common )* } { + { $( $instance)? 
} $name { $( $where_clause )* } { $( $common )* } { $( PRE { $( $pre_parsed )* } )* PRE { $pre_id , $pre_ty , $pre_ex } } { $eval } { $( $rest )* } $postcode } }; - ($instance:ident $name:ident { + ( { $( $instance:ident )? } $name:ident { $( $where_clause:tt )* } { $( $common:tt )* @@ -414,14 +401,14 @@ macro_rules! benchmark_backend { $( $rest:tt )* } $postcode:block) => { $crate::benchmark_backend! { - $instance $name { $( $where_clause )* } { $( $common )* } { + { $( $instance)? } $name { $( $where_clause )* } { $( $common )* } { $( $parsed )* PARAM { $param , $param_from , $param_to , $param_instancer } } { $eval } { $( $rest )* } $postcode } }; // mutation arm to look after defaulting to a common param - ($instance:ident $name:ident { + ( { $( $instance:ident )? } $name:ident { $( $where_clause:tt )* } { $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* @@ -432,7 +419,7 @@ macro_rules! benchmark_backend { $( $rest:tt )* } $postcode:block) => { $crate::benchmark_backend! { - $instance $name { $( $where_clause )* } { + { $( $instance)? } $name { $( $where_clause )* } { $( { $common , $common_from , $common_to , $common_instancer } )* } { $( $parsed )* @@ -446,7 +433,7 @@ macro_rules! benchmark_backend { } }; // mutation arm to look after defaulting only the range to common param - ($instance:ident $name:ident { + ( { $( $instance:ident )? } $name:ident { $( $where_clause:tt )* } { $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* @@ -457,7 +444,7 @@ macro_rules! benchmark_backend { $( $rest:tt )* } $postcode:block) => { $crate::benchmark_backend! { - $instance $name { $( $where_clause )* } { + { $( $instance)? } $name { $( $where_clause )* } { $( { $common , $common_from , $common_to , $common_instancer } )* } { $( $parsed )* @@ -471,7 +458,7 @@ macro_rules! benchmark_backend { } }; // mutation arm to look after a single tt for param_from. - ($instance:ident $name:ident { + ( { $( $instance:ident )? } $name:ident { $( $where_clause:tt )* } { $( $common:tt )* @@ -482,14 +469,15 @@ macro_rules! benchmark_backend { $( $rest:tt )* } $postcode:block) => { $crate::benchmark_backend! { - $instance $name { $( $where_clause )* } { $( $common )* } { $( $parsed )* } { $eval } { + { $( $instance)? } + $name { $( $where_clause )* } { $( $common )* } { $( $parsed )* } { $eval } { let $param in ( $param_from ) .. $param_to => $param_instancer; $( $rest )* } $postcode } }; // mutation arm to look after the default tail of `=> ()` - ($instance:ident $name:ident { + ( { $( $instance:ident )? } $name:ident { $( $where_clause:tt )* } { $( $common:tt )* @@ -500,14 +488,15 @@ macro_rules! benchmark_backend { $( $rest:tt )* } $postcode:block) => { $crate::benchmark_backend! { - $instance $name { $( $where_clause )* } { $( $common )* } { $( $parsed )* } { $eval } { + { $( $instance)? } + $name { $( $where_clause )* } { $( $common )* } { $( $parsed )* } { $eval } { let $param in $param_from .. $param_to => (); $( $rest )* } $postcode } }; // mutation arm to look after `let _ =` - ($instance:ident $name:ident { + ( { $( $instance:ident )? } $name:ident { $( $where_clause:tt )* } { $( $common:tt )* @@ -518,14 +507,15 @@ macro_rules! benchmark_backend { $( $rest:tt )* } $postcode:block) => { $crate::benchmark_backend! { - $instance $name { $( $where_clause )* } { $( $common )* } { $( $parsed )* } { $eval } { + { $( $instance)? 
} + $name { $( $where_clause )* } { $( $common )* } { $( $parsed )* } { $eval } { let $pre_id : _ = $pre_ex; $( $rest )* } $postcode } }; - // no instance actioning arm - (NO_INSTANCE $name:ident { + // actioning arm + ( { $( $instance:ident )? } $name:ident { $( $where_clause:tt )* } { $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* @@ -536,7 +526,8 @@ macro_rules! benchmark_backend { #[allow(non_camel_case_types)] struct $name; #[allow(unused_variables)] - impl $crate::BenchmarkingSetup for $name + impl, I: Instance)? > + $crate::BenchmarkingSetup for $name where $( $where_clause )* { fn components(&self) -> Vec<($crate::BenchmarkParameter, u32, u32)> { @@ -590,72 +581,6 @@ macro_rules! benchmark_backend { } } }; - // instance actioning arm - (INSTANCE $name:ident { - $( $where_clause:tt )* - } { - $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* - } { - $( PRE { $pre_id:tt , $pre_ty:ty , $pre_ex:expr } )* - $( PARAM { $param:ident , $param_from:expr , $param_to:expr , $param_instancer:expr } )* - } { $eval:block } { $( $post:tt )* } $postcode:block) => { - #[allow(non_camel_case_types)] - struct $name; - #[allow(unused_variables)] - impl, I: Instance> $crate::BenchmarkingSetupInstance for $name - where $( $where_clause )* - { - fn components(&self) -> Vec<($crate::BenchmarkParameter, u32, u32)> { - vec! [ - $( - ($crate::BenchmarkParameter::$param, $param_from, $param_to) - ),* - ] - } - - fn instance(&self, components: &[($crate::BenchmarkParameter, u32)]) - -> Result Result<(), &'static str>>, &'static str> - { - $( - let $common = $common_from; - )* - $( - // Prepare instance - let $param = components.iter() - .find(|&c| c.0 == $crate::BenchmarkParameter::$param) - .unwrap().1; - )* - $( - let $pre_id : $pre_ty = $pre_ex; - )* - $( $param_instancer ; )* - $( $post )* - - Ok(Box::new(move || -> Result<(), &'static str> { $eval; Ok(()) })) - } - - fn verify(&self, components: &[($crate::BenchmarkParameter, u32)]) - -> Result Result<(), &'static str>>, &'static str> - { - $( - let $common = $common_from; - )* - $( - // Prepare instance - let $param = components.iter() - .find(|&c| c.0 == $crate::BenchmarkParameter::$param) - .unwrap().1; - )* - $( - let $pre_id : $pre_ty = $pre_ex; - )* - $( $param_instancer ; )* - $( $post )* - - Ok(Box::new(move || -> Result<(), &'static str> { $eval; $postcode; Ok(()) })) - } - } - } } // Creates a `SelectedBenchmark` enum implementing `BenchmarkingSetup`. @@ -670,14 +595,15 @@ macro_rules! benchmark_backend { // struct SetBalance; // impl BenchmarkingSetup for SetBalance { ... } // -// selected_benchmark!(Transfer, SetBalance); +// selected_benchmark!({} Transfer {} SetBalance); // ``` #[macro_export] #[doc(hidden)] macro_rules! selected_benchmark { ( { $( $where_clause:tt )* } - NO_INSTANCE $( $bench:ident ),* + { $( $instance:ident )? } + $( { $( $bench_inst:ident )? } $bench:ident )* ) => { // The list of available benchmarks for this pallet. #[allow(non_camel_case_types)] @@ -686,12 +612,17 @@ macro_rules! selected_benchmark { } // Allow us to select a benchmark from the list of available benchmarks. - impl $crate::BenchmarkingSetup for SelectedBenchmark + impl, I: Instance )? 
> + $crate::BenchmarkingSetup for SelectedBenchmark where $( $where_clause )* { fn components(&self) -> Vec<($crate::BenchmarkParameter, u32, u32)> { match self { - $( Self::$bench => <$bench as $crate::BenchmarkingSetup>::components(&$bench), )* + $( + Self::$bench => < + $bench as $crate::BenchmarkingSetup + >::components(&$bench), + )* } } @@ -699,7 +630,11 @@ macro_rules! selected_benchmark { -> Result Result<(), &'static str>>, &'static str> { match self { - $( Self::$bench => <$bench as $crate::BenchmarkingSetup>::instance(&$bench, components), )* + $( + Self::$bench => < + $bench as $crate::BenchmarkingSetup + >::instance(&$bench, components), + )* } } @@ -707,48 +642,15 @@ macro_rules! selected_benchmark { -> Result Result<(), &'static str>>, &'static str> { match self { - $( Self::$bench => <$bench as $crate::BenchmarkingSetup>::verify(&$bench, components), )* + $( + Self::$bench => < + $bench as $crate::BenchmarkingSetup + >::verify(&$bench, components), + )* } } } }; - ( - { $( $where_clause:tt )* } - INSTANCE $( $bench:ident ),* - ) => { - // The list of available benchmarks for this pallet. - #[allow(non_camel_case_types)] - enum SelectedBenchmark { - $( $bench, )* - } - - // Allow us to select a benchmark from the list of available benchmarks. - impl, I: Instance> $crate::BenchmarkingSetupInstance for SelectedBenchmark - where $( $where_clause )* - { - fn components(&self) -> Vec<($crate::BenchmarkParameter, u32, u32)> { - match self { - $( Self::$bench => <$bench as $crate::BenchmarkingSetupInstance>::components(&$bench), )* - } - } - - fn instance(&self, components: &[($crate::BenchmarkParameter, u32)]) - -> Result Result<(), &'static str>>, &'static str> - { - match self { - $( Self::$bench => <$bench as $crate::BenchmarkingSetupInstance>::instance(&$bench, components), )* - } - } - - fn verify(&self, components: &[($crate::BenchmarkParameter, u32)]) - -> Result Result<(), &'static str>>, &'static str> - { - match self { - $( Self::$bench => <$bench as $crate::BenchmarkingSetupInstance>::verify(&$bench, components), )* - } - } - } - } } #[macro_export] @@ -756,9 +658,11 @@ macro_rules! selected_benchmark { macro_rules! impl_benchmark { ( { $( $where_clause:tt )* } - NO_INSTANCE $( $name:ident ),* + { $( $instance:ident )? } + $( { $( $name_inst:ident )? } $name:ident )* ) => { - impl $crate::Benchmarking<$crate::BenchmarkResults> for Module + impl, I: Instance)? > + $crate::Benchmarking<$crate::BenchmarkResults> for Module where T: frame_system::Trait, $( $where_clause )* { fn benchmarks() -> Vec<&'static [u8]> { @@ -788,7 +692,9 @@ macro_rules! impl_benchmark { $crate::benchmarking::commit_db(); $crate::benchmarking::wipe_db(); - let components = >::components(&selected_benchmark); + let components = < + SelectedBenchmark as $crate::BenchmarkingSetup + >::components(&selected_benchmark); let mut results: Vec<$crate::BenchmarkResults> = Vec::new(); // Default number of steps for a component. @@ -804,7 +710,7 @@ macro_rules! impl_benchmark { // Set up the externalities environment for the setup we want to // benchmark. let closure_to_benchmark = < - SelectedBenchmark as $crate::BenchmarkingSetup + SelectedBenchmark as $crate::BenchmarkingSetup >::instance(&selected_benchmark, &c)?; // Set the block number to at least 1 so events are deposited. @@ -909,167 +815,6 @@ macro_rules! 
impl_benchmark { } } }; - ( - { $( $where_clause:tt )* } - INSTANCE $( $name:ident ),* - ) => { - impl, I: Instance> $crate::Benchmarking<$crate::BenchmarkResults> - for Module - where T: frame_system::Trait, $( $where_clause )* - { - fn benchmarks() -> Vec<&'static [u8]> { - vec![ $( stringify!($name).as_ref() ),* ] - } - - fn run_benchmark( - extrinsic: &[u8], - lowest_range_values: &[u32], - highest_range_values: &[u32], - steps: &[u32], - repeat: u32, - whitelist: &[Vec] - ) -> Result, &'static str> { - // Map the input to the selected benchmark. - let extrinsic = sp_std::str::from_utf8(extrinsic) - .map_err(|_| "`extrinsic` is not a valid utf8 string!")?; - let selected_benchmark = match extrinsic { - $( stringify!($name) => SelectedBenchmark::$name, )* - _ => return Err("Could not find extrinsic."), - }; - - // Add whitelist to DB - $crate::benchmarking::set_whitelist(whitelist.to_vec()); - - // Warm up the DB - $crate::benchmarking::commit_db(); - $crate::benchmarking::wipe_db(); - - let components = < - SelectedBenchmark as $crate::BenchmarkingSetupInstance - >::components(&selected_benchmark); - let mut results: Vec<$crate::BenchmarkResults> = Vec::new(); - - // Default number of steps for a component. - let mut prev_steps = 10; - - let repeat_benchmark = | - repeat: u32, - c: Vec<($crate::BenchmarkParameter, u32)>, - results: &mut Vec<$crate::BenchmarkResults>, - | -> Result<(), &'static str> { - // Run the benchmark `repeat` times. - for _ in 0..repeat { - // Set up the externalities environment for the setup we want to - // benchmark. - let closure_to_benchmark = < - SelectedBenchmark as $crate::BenchmarkingSetupInstance - >::instance(&selected_benchmark, &c)?; - - // Set the block number to at least 1 so events are deposited. - if $crate::Zero::is_zero(&frame_system::Module::::block_number()) { - frame_system::Module::::set_block_number(1.into()); - } - - // Commit the externalities to the database, flushing the DB cache. - // This will enable worst case scenario for reading from the database. - $crate::benchmarking::commit_db(); - - // Reset the read/write counter so we don't count operations in the setup process. - $crate::benchmarking::reset_read_write_count(); - - // Time the extrinsic logic. - frame_support::debug::trace!( - target: "benchmark", - "Start Benchmark: {:?}", - c, - ); - - let start_extrinsic = $crate::benchmarking::current_time(); - closure_to_benchmark()?; - let finish_extrinsic = $crate::benchmarking::current_time(); - let elapsed_extrinsic = finish_extrinsic - start_extrinsic; - // Commit the changes to get proper write count - $crate::benchmarking::commit_db(); - frame_support::debug::trace!( - target: "benchmark", - "End Benchmark: {} ns", - elapsed_extrinsic, - ); - let read_write_count = $crate::benchmarking::read_write_count(); - frame_support::debug::trace!( - target: "benchmark", - "Read/Write Count {:?}", - read_write_count, - ); - - // Time the storage root recalculation. - let start_storage_root = $crate::benchmarking::current_time(); - $crate::storage_root(); - let finish_storage_root = $crate::benchmarking::current_time(); - let elapsed_storage_root = finish_storage_root - start_storage_root; - - results.push($crate::BenchmarkResults { - components: c.clone(), - extrinsic_time: elapsed_extrinsic, - storage_root_time: elapsed_storage_root, - reads: read_write_count.0, - repeat_reads: read_write_count.1, - writes: read_write_count.2, - repeat_writes: read_write_count.3, - }); - - // Wipe the DB back to the genesis state. 
- $crate::benchmarking::wipe_db(); - } - - Ok(()) - }; - - if components.is_empty() { - repeat_benchmark(repeat, Default::default(), &mut results)?; - } else { - // Select the component we will be benchmarking. Each component will be benchmarked. - for (idx, (name, low, high)) in components.iter().enumerate() { - // Get the number of steps for this component. - let steps = steps.get(idx).cloned().unwrap_or(prev_steps); - prev_steps = steps; - - // Skip this loop if steps is zero - if steps == 0 { continue } - - let lowest = lowest_range_values.get(idx).cloned().unwrap_or(*low); - let highest = highest_range_values.get(idx).cloned().unwrap_or(*high); - - let diff = highest - lowest; - - // Create up to `STEPS` steps for that component between high and low. - let step_size = (diff / steps).max(1); - let num_of_steps = diff / step_size + 1; - - for s in 0..num_of_steps { - // This is the value we will be testing for component `name` - let component_value = lowest + step_size * s; - - // Select the max value for all the other components. - let c: Vec<($crate::BenchmarkParameter, u32)> = components.iter() - .enumerate() - .map(|(idx, (n, _, h))| - if n == name { - (*n, component_value) - } else { - (*n, *highest_range_values.get(idx).unwrap_or(h)) - } - ) - .collect(); - - repeat_benchmark(repeat, c, &mut results)?; - } - } - } - return Ok(results); - } - } - } } // This creates a unit test for one benchmark of the main benchmark macro. @@ -1080,16 +825,16 @@ macro_rules! impl_benchmark { macro_rules! impl_benchmark_test { ( { $( $where_clause:tt )* } - NO_INSTANCE + { $( $instance:ident )? } $name:ident ) => { $crate::paste::item! { - fn [] () -> Result<(), &'static str> + fn [] () -> Result<(), &'static str> where T: frame_system::Trait, $( $where_clause )* { let selected_benchmark = SelectedBenchmark::$name; let components = < - SelectedBenchmark as $crate::BenchmarkingSetup + SelectedBenchmark as $crate::BenchmarkingSetup >::components(&selected_benchmark); let execute_benchmark = | @@ -1097,7 +842,7 @@ macro_rules! impl_benchmark_test { | -> Result<(), &'static str> { // Set up the verification state let closure_to_verify = < - SelectedBenchmark as $crate::BenchmarkingSetup + SelectedBenchmark as $crate::BenchmarkingSetup >::verify(&selected_benchmark, &c)?; // Set the block number to at least 1 so events are deposited. @@ -1140,64 +885,6 @@ macro_rules! impl_benchmark_test { } } }; - ( - { $( $where_clause:tt )* } - INSTANCE - $name:ident - ) => { - $crate::paste::item! { - fn [] () -> Result<(), &'static str> - where T: frame_system::Trait, $( $where_clause )* - { - let selected_benchmark = SelectedBenchmark::$name; - let components = < - SelectedBenchmark as $crate::BenchmarkingSetupInstance - >::components(&selected_benchmark); - - let execute_benchmark = | - c: Vec<($crate::BenchmarkParameter, u32)> - | -> Result<(), &'static str> { - // Set up the verification state - let closure_to_verify = < - SelectedBenchmark as $crate::BenchmarkingSetupInstance - >::verify(&selected_benchmark, &c)?; - - // Set the block number to at least 1 so events are deposited. 
- if $crate::Zero::is_zero(&frame_system::Module::::block_number()) { - frame_system::Module::::set_block_number(1.into()); - } - - // Run verification - closure_to_verify()?; - - // Reset the state - $crate::benchmarking::wipe_db(); - - Ok(()) - }; - - for (_, (name, low, high)) in components.iter().enumerate() { - // Test only the low and high value, assuming values in the middle won't break - for component_value in vec![low, high] { - // Select the max value for all the other components. - let c: Vec<($crate::BenchmarkParameter, u32)> = components.iter() - .enumerate() - .map(|(_, (n, _, h))| - if n == name { - (*n, *component_value) - } else { - (*n, *h) - } - ) - .collect(); - - execute_benchmark(c)?; - } - } - Ok(()) - } - } - }; } diff --git a/frame/benchmarking/src/utils.rs b/frame/benchmarking/src/utils.rs index 7f9d9121100..e5d40c74432 100644 --- a/frame/benchmarking/src/utils.rs +++ b/frame/benchmarking/src/utils.rs @@ -134,19 +134,10 @@ pub trait Benchmarking { } /// The required setup for creating a benchmark. -pub trait BenchmarkingSetup { - /// Return the components and their ranges which should be tested in this benchmark. - fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)>; - - /// Set up the storage, and prepare a closure to run the benchmark. - fn instance(&self, components: &[(BenchmarkParameter, u32)]) -> Result Result<(), &'static str>>, &'static str>; - - /// Set up the storage, and prepare a closure to test and verify the benchmark - fn verify(&self, components: &[(BenchmarkParameter, u32)]) -> Result Result<(), &'static str>>, &'static str>; -} - -/// The required setup for creating a benchmark. -pub trait BenchmarkingSetupInstance { +/// +/// Instance generic parameter is optional and can be used in order to capture unused generics for +/// instantiable pallets. +pub trait BenchmarkingSetup { /// Return the components and their ranges which should be tested in this benchmark. fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)>; -- GitLab From 075b2d78e10ed0fdadd4df30fac4f8fcad1fe4b9 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 29 Jul 2020 14:44:15 +0200 Subject: [PATCH 024/132] client/network: Add peers to DHT only if protocols match (#6549) * client/network/src/discovery: Adjust to Kademlia API changes * client/network: Add peers to DHT only if protocols match With https://github.com/libp2p/rust-libp2p/pull/1628 rust-libp2p allows manually controlling which peers are inserted into the routing table. Instead of adding each peer to the routing table automatically, insert them only if they support the local nodes protocol id (e.g. `dot`) retrieved via the `identify` behaviour. For now this works around https://github.com/libp2p/rust-libp2p/issues/1611. In the future one might add more requirements. For example one might try to exclude light-clients. 
* Cargo.toml: Remove crates.io patch for libp2p * client/network/src/behaviour: Adjust to PeerInfo name change * client/network/src/discovery: Rework Kademlia event matching * client/network/discovery: Add trace on adding peer to DHT * client/network/discovery: Retrieve protocol name from kad behaviour * client/network/discovery: Fix formatting * client/network: Change DiscoveryBehaviour::add_self_reported signature * client/network: Document manual insertion strategy * client/network/discovery: Remove TODO for ignoring DHT address Co-authored-by: Pierre Krieger --- client/network/src/behaviour.rs | 29 ++-- client/network/src/discovery.rs | 237 +++++++++++++++++++++++++------- 2 files changed, 212 insertions(+), 54 deletions(-) diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 9a466388f4f..2afcd274138 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -25,6 +25,7 @@ use bytes::Bytes; use codec::Encode as _; use libp2p::NetworkBehaviour; use libp2p::core::{Multiaddr, PeerId, PublicKey}; +use libp2p::identify::IdentifyInfo; use libp2p::kad::record; use libp2p::swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess, PollParameters}; use log::debug; @@ -413,16 +414,28 @@ impl NetworkBehaviourEventProcess NetworkBehaviourEventProcess for Behaviour { fn inject_event(&mut self, event: peer_info::PeerInfoEvent) { - let peer_info::PeerInfoEvent::Identified { peer_id, mut info } = event; - if info.listen_addrs.len() > 30 { - debug!(target: "sub-libp2p", "Node {:?} has reported more than 30 addresses; \ - it is identified by {:?} and {:?}", peer_id, info.protocol_version, - info.agent_version + let peer_info::PeerInfoEvent::Identified { + peer_id, + info: IdentifyInfo { + protocol_version, + agent_version, + mut listen_addrs, + protocols, + .. + }, + } = event; + + if listen_addrs.len() > 30 { + debug!( + target: "sub-libp2p", + "Node {:?} has reported more than 30 addresses; it is identified by {:?} and {:?}", + peer_id, protocol_version, agent_version ); - info.listen_addrs.truncate(30); + listen_addrs.truncate(30); } - for addr in &info.listen_addrs { - self.discovery.add_self_reported_address(&peer_id, addr.clone()); + + for addr in listen_addrs { + self.discovery.add_self_reported_address(&peer_id, protocols.iter(), addr); } self.substrate.add_discovered_nodes(iter::once(peer_id)); } diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index d08f9d44a15..8216d6b2cbe 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -52,7 +52,7 @@ use ip_network::IpNetwork; use libp2p::core::{connection::{ConnectionId, ListenerId}, ConnectedPoint, Multiaddr, PeerId, PublicKey}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler}; use libp2p::swarm::protocols_handler::multi::MultiHandler; -use libp2p::kad::{Kademlia, KademliaConfig, KademliaEvent, QueryResult, Quorum, Record}; +use libp2p::kad::{Kademlia, KademliaBucketInserts, KademliaConfig, KademliaEvent, QueryResult, Quorum, Record}; use libp2p::kad::GetClosestPeersError; use libp2p::kad::handler::KademliaHandler; use libp2p::kad::QueryId; @@ -137,17 +137,9 @@ impl DiscoveryConfig { } /// Add discovery via Kademlia for the given protocol. - pub fn add_protocol(&mut self, p: ProtocolId) -> &mut Self { - // NB: If this protocol name derivation is changed, check if - // `DiscoveryBehaviour::new_handler` is still correct. 
- let proto_name = { - let mut v = vec![b'/']; - v.extend_from_slice(p.as_bytes()); - v.extend_from_slice(b"/kad"); - v - }; - - self.add_kademlia(p, proto_name); + pub fn add_protocol(&mut self, id: ProtocolId) -> &mut Self { + let name = protocol_name_from_protocol_id(&id); + self.add_kademlia(id, name); self } @@ -159,6 +151,10 @@ impl DiscoveryConfig { let mut config = KademliaConfig::default(); config.set_protocol_name(proto_name); + // By default Kademlia attempts to insert all peers into its routing table once a dialing + // attempt succeeds. In order to control which peer is added, disable the auto-insertion and + // instead add peers manually. + config.set_kbucket_inserts(KademliaBucketInserts::Manual); let store = MemoryStore::new(self.local_peer_id.clone()); let mut kad = Kademlia::with_config(self.local_peer_id.clone(), store, config); @@ -259,17 +255,43 @@ impl DiscoveryBehaviour { } } - /// Call this method when a node reports an address for itself. + /// Add a self-reported address of a remote peer to the k-buckets of the supported + /// DHTs (`supported_protocols`). /// - /// **Note**: It is important that you call this method, otherwise the discovery mechanism will - /// not properly work. - pub fn add_self_reported_address(&mut self, peer_id: &PeerId, addr: Multiaddr) { - if self.allow_non_globals_in_dht || self.can_add_to_dht(&addr) { - for k in self.kademlias.values_mut() { - k.add_address(peer_id, addr.clone()); + /// **Note**: It is important that you call this method. The discovery mechanism will not + /// automatically add connecting peers to the Kademlia k-buckets. + pub fn add_self_reported_address( + &mut self, + peer_id: &PeerId, + supported_protocols: impl Iterator>, + addr: Multiaddr + ) { + if !self.allow_non_globals_in_dht && !self.can_add_to_dht(&addr) { + log::trace!(target: "sub-libp2p", "Ignoring self-reported non-global address {} from {}.", addr, peer_id); + return + } + + let mut added = false; + for protocol in supported_protocols { + for kademlia in self.kademlias.values_mut() { + if protocol.as_ref() == kademlia.protocol_name() { + log::trace!( + target: "sub-libp2p", + "Adding self-reported address {} from {} to Kademlia DHT {}.", + addr, peer_id, String::from_utf8_lossy(kademlia.protocol_name()), + ); + kademlia.add_address(peer_id, addr.clone()); + added = true; + } } - } else { - log::trace!(target: "sub-libp2p", "Ignoring self-reported address {} from {}", addr, peer_id); + } + + if !added { + log::trace!( + target: "sub-libp2p", + "Ignoring self-reported address {} from {} as remote node is not part of any \ + Kademlia DHTs supported by the local node.", addr, peer_id, + ); } } @@ -340,17 +362,21 @@ impl DiscoveryBehaviour { } /// Event generated by the `DiscoveryBehaviour`. +#[derive(Debug)] pub enum DiscoveryOut { - /// The address of a peer has been added to the Kademlia routing table. - /// - /// Can be called multiple times with the same identity. + /// A connection to a peer has been established but the peer has not been + /// added to the routing table because [`KademliaBucketInserts::Manual`] is + /// configured. If the peer is to be included in the routing table, it must + /// be explicitly added via + /// [`DiscoveryBehaviour::add_self_reported_address`]. Discovered(PeerId), /// A peer connected to this node for whom no listen address is known. /// /// In order for the peer to be added to the Kademlia routing table, a known - /// listen address must be added via [`DiscoveryBehaviour::add_self_reported_address`], - /// e.g. 
obtained through the `identify` protocol. + /// listen address must be added via + /// [`DiscoveryBehaviour::add_self_reported_address`], e.g. obtained through + /// the `identify` protocol. UnroutablePeer(PeerId), /// The DHT yielded results for the record request, grouped in (key, value) pairs. @@ -569,8 +595,12 @@ impl NetworkBehaviour for DiscoveryBehaviour { let ev = DiscoveryOut::UnroutablePeer(peer); return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); } - KademliaEvent::RoutablePeer { .. } | KademliaEvent::PendingRoutablePeer { .. } => { - // We are not interested in these events at the moment. + KademliaEvent::RoutablePeer { peer, .. } => { + let ev = DiscoveryOut::Discovered(peer); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + } + KademliaEvent::PendingRoutablePeer { .. } => { + // We are not interested in this event at the moment. } KademliaEvent::QueryResult { result: QueryResult::GetClosestPeers(res), .. } => { match res { @@ -689,25 +719,36 @@ impl NetworkBehaviour for DiscoveryBehaviour { } } +// NB: If this protocol name derivation is changed, check if +// `DiscoveryBehaviour::new_handler` is still correct. +fn protocol_name_from_protocol_id(id: &ProtocolId) -> Vec { + let mut v = vec![b'/']; + v.extend_from_slice(id.as_bytes()); + v.extend_from_slice(b"/kad"); + v +} + #[cfg(test)] mod tests { use crate::config::ProtocolId; use futures::prelude::*; use libp2p::identity::Keypair; - use libp2p::Multiaddr; + use libp2p::{Multiaddr, PeerId}; use libp2p::core::upgrade; use libp2p::core::transport::{Transport, MemoryTransport}; use libp2p::core::upgrade::{InboundUpgradeExt, OutboundUpgradeExt}; use libp2p::swarm::Swarm; use std::{collections::HashSet, task::Poll}; - use super::{DiscoveryConfig, DiscoveryOut}; + use super::{DiscoveryConfig, DiscoveryOut, protocol_name_from_protocol_id}; #[test] fn discovery_working() { - let mut user_defined = Vec::new(); + let mut first_swarm_peer_id_and_addr = None; + let protocol_id = ProtocolId::from(b"dot".as_ref()); - // Build swarms whose behaviour is `DiscoveryBehaviour`. - let mut swarms = (0..25).map(|_| { + // Build swarms whose behaviour is `DiscoveryBehaviour`, each aware of + // the first swarm via `with_user_defined`. + let mut swarms = (0..25).map(|i| { let keypair = Keypair::generate_ed25519(); let keypair2 = keypair.clone(); @@ -730,14 +771,12 @@ mod tests { }); let behaviour = { - let protocol_id: &[u8] = b"/test/kad/1.0.0"; - let mut config = DiscoveryConfig::new(keypair.public()); - config.with_user_defined(user_defined.clone()) + config.with_user_defined(first_swarm_peer_id_and_addr.clone()) .allow_private_ipv4(true) .allow_non_globals_in_dht(true) .discovery_limit(50) - .add_protocol(ProtocolId::from(protocol_id)); + .add_protocol(protocol_id.clone()); config.finish() }; @@ -745,8 +784,8 @@ mod tests { let mut swarm = Swarm::new(transport, behaviour, keypair.public().into_peer_id()); let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); - if user_defined.is_empty() { - user_defined.push((keypair.public().into_peer_id(), listen_addr.clone())); + if i == 0 { + first_swarm_peer_id_and_addr = Some((keypair.public().into_peer_id(), listen_addr.clone())) } Swarm::listen_on(&mut swarm, listen_addr.clone()).unwrap(); @@ -755,7 +794,10 @@ mod tests { // Build a `Vec>` with the list of nodes remaining to be discovered. 
let mut to_discover = (0..swarms.len()).map(|n| { - (0..swarms.len()).filter(|p| *p != n) + (0..swarms.len()) + // Skip the first swarm as all other swarms already know it. + .skip(1) + .filter(|p| *p != n) .map(|p| Swarm::local_peer_id(&swarms[p].0).clone()) .collect::>() }).collect::>(); @@ -766,7 +808,7 @@ mod tests { match swarms[swarm_n].0.poll_next_unpin(cx) { Poll::Ready(Some(e)) => { match e { - DiscoveryOut::UnroutablePeer(other) => { + DiscoveryOut::UnroutablePeer(other) | DiscoveryOut::Discovered(other) => { // Call `add_self_reported_address` to simulate identify happening. let addr = swarms.iter().find_map(|(s, a)| if s.local_peer_id == other { @@ -775,12 +817,16 @@ mod tests { None }) .unwrap(); - swarms[swarm_n].0.add_self_reported_address(&other, addr); - }, - DiscoveryOut::Discovered(other) => { + swarms[swarm_n].0.add_self_reported_address( + &other, + [protocol_name_from_protocol_id(&protocol_id)].iter(), + addr, + ); + to_discover[swarm_n].remove(&other); - } - _ => {} + }, + DiscoveryOut::RandomKademliaStarted(_) => {}, + e => {panic!("Unexpected event: {:?}", e)}, } continue 'polling } @@ -799,4 +845,103 @@ mod tests { futures::executor::block_on(fut); } + + #[test] + fn discovery_ignores_peers_with_unknown_protocols() { + let supported_protocol_id = ProtocolId::from(b"a".as_ref()); + let unsupported_protocol_id = ProtocolId::from(b"b".as_ref()); + + let mut discovery = { + let keypair = Keypair::generate_ed25519(); + let mut config = DiscoveryConfig::new(keypair.public()); + config.allow_private_ipv4(true) + .allow_non_globals_in_dht(true) + .discovery_limit(50) + .add_protocol(supported_protocol_id.clone()); + config.finish() + }; + + let remote_peer_id = PeerId::random(); + let remote_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); + + // Add remote peer with unsupported protocol. + discovery.add_self_reported_address( + &remote_peer_id, + [protocol_name_from_protocol_id(&unsupported_protocol_id)].iter(), + remote_addr.clone(), + ); + + for kademlia in discovery.kademlias.values_mut() { + assert!( + kademlia.kbucket(remote_peer_id.clone()) + .expect("Remote peer id not to be equal to local peer id.") + .is_empty(), + "Expect peer with unsupported protocol not to be added." + ); + } + + // Add remote peer with supported protocol. + discovery.add_self_reported_address( + &remote_peer_id, + [protocol_name_from_protocol_id(&supported_protocol_id)].iter(), + remote_addr.clone(), + ); + + for kademlia in discovery.kademlias.values_mut() { + assert_eq!( + 1, + kademlia.kbucket(remote_peer_id.clone()) + .expect("Remote peer id not to be equal to local peer id.") + .num_entries(), + "Expect peer with supported protocol to be added." + ); + } + } + + #[test] + fn discovery_adds_peer_to_kademlia_of_same_protocol_only() { + let protocol_a = ProtocolId::from(b"a".as_ref()); + let protocol_b = ProtocolId::from(b"b".as_ref()); + + let mut discovery = { + let keypair = Keypair::generate_ed25519(); + let mut config = DiscoveryConfig::new(keypair.public()); + config.allow_private_ipv4(true) + .allow_non_globals_in_dht(true) + .discovery_limit(50) + .add_protocol(protocol_a.clone()) + .add_protocol(protocol_b.clone()); + config.finish() + }; + + let remote_peer_id = PeerId::random(); + let remote_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); + + // Add remote peer with `protocol_a` only. 
+ discovery.add_self_reported_address( + &remote_peer_id, + [protocol_name_from_protocol_id(&protocol_a)].iter(), + remote_addr.clone(), + ); + + assert_eq!( + 1, + discovery.kademlias.get_mut(&protocol_a) + .expect("Kademlia instance to exist.") + .kbucket(remote_peer_id.clone()) + .expect("Remote peer id not to be equal to local peer id.") + .num_entries(), + "Expected remote peer to be added to `protocol_a` Kademlia instance.", + + ); + + assert!( + discovery.kademlias.get_mut(&protocol_b) + .expect("Kademlia instance to exist.") + .kbucket(remote_peer_id.clone()) + .expect("Remote peer id not to be equal to local peer id.") + .is_empty(), + "Expected remote peer not to be added to `protocol_b` Kademlia instance.", + ); + } } -- GitLab From a5d6c905b663f88d8d7ac2ac5616a4ba4ffcf450 Mon Sep 17 00:00:00 2001 From: Ashley Date: Thu, 30 Jul 2020 09:41:08 +0200 Subject: [PATCH 025/132] Add `memory-tracker` feature to `sp-trie` to fix wasm panic (#6745) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add memory tracker feature to sp-trie to fix wasm panic * Apply suggestions from code review Co-authored-by: Bastian Köcher Co-authored-by: Bastian Köcher --- Cargo.lock | 7 ++++--- bin/node/cli/Cargo.toml | 1 + primitives/trie/Cargo.toml | 1 + primitives/trie/src/lib.rs | 17 ++++++++++++++--- 4 files changed, 20 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0fe8e4919ef..93aa1d393fb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3294,9 +3294,9 @@ dependencies = [ [[package]] name = "memory-db" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0777fbb396f666701d939e9b3876c18ada6b3581257d88631f2590bc366d8ebe" +checksum = "36f36ddb0b2cdc25d38babba472108798e3477f02be5165f038c5e393e50c57a" dependencies = [ "hash-db", "hashbrown 0.8.0", @@ -3640,6 +3640,7 @@ dependencies = [ "sp-runtime", "sp-timestamp", "sp-transaction-pool", + "sp-trie", "structopt", "substrate-browser-utils", "substrate-build-script-utils", @@ -9345,7 +9346,7 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3bfd5b7557925ce778ff9b9ef90e3ade34c524b5ff10e239c69a42d546d2af56" dependencies = [ - "rand 0.7.3", + "rand 0.3.23", ] [[package]] diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 03620976be8..4fbb48513b3 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -109,6 +109,7 @@ browser-utils = { package = "substrate-browser-utils", path = "../../../utils/br node-executor = { version = "2.0.0-rc5", path = "../executor", features = [ "wasmtime" ] } sc-cli = { version = "0.8.0-rc5", optional = true, path = "../../../client/cli", features = [ "wasmtime" ] } sc-service = { version = "0.8.0-rc5", default-features = false, path = "../../../client/service", features = [ "wasmtime" ] } +sp-trie = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/trie", features = ["memory-tracker"] } [dev-dependencies] sc-keystore = { version = "2.0.0-rc5", path = "../../../client/keystore" } diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index c296acaa50f..8dd386e0951 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -43,3 +43,4 @@ std = [ "trie-root/std", "sp-core/std", ] +memory-tracker = [] diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index c8f37a820d2..5ab06cecca6 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ 
-78,6 +78,11 @@ impl TrieConfiguration for Layout { } } +#[cfg(not(feature = "memory-tracker"))] +type MemTracker = memory_db::NoopTracker; +#[cfg(feature = "memory-tracker")] +type MemTracker = memory_db::MemCounter; + /// TrieDB error over `TrieConfiguration` trait. pub type TrieError = trie_db::TrieError, CError>; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. @@ -88,13 +93,19 @@ pub type HashDB<'a, H> = dyn hash_db::HashDB + 'a; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. /// This uses a `KeyFunction` for prefixing keys internally (avoiding /// key conflict for non random keys). -pub type PrefixedMemoryDB = memory_db::MemoryDB, trie_db::DBValue>; +pub type PrefixedMemoryDB = memory_db::MemoryDB< + H, memory_db::PrefixedKey, trie_db::DBValue, MemTracker +>; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. /// This uses a noops `KeyFunction` (key addressing must be hashed or using /// an encoding scheme that avoid key conflict). -pub type MemoryDB = memory_db::MemoryDB, trie_db::DBValue>; +pub type MemoryDB = memory_db::MemoryDB< + H, memory_db::HashKey, trie_db::DBValue, MemTracker, +>; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. -pub type GenericMemoryDB = memory_db::MemoryDB; +pub type GenericMemoryDB = memory_db::MemoryDB< + H, KF, trie_db::DBValue, MemTracker +>; /// Persistent trie database read-access interface for the a given hasher. pub type TrieDB<'a, L> = trie_db::TrieDB<'a, L>; -- GitLab From 293bcd6059f1e4e3dd95aa64989dadef4234222d Mon Sep 17 00:00:00 2001 From: Aten Date: Thu, 30 Jul 2020 16:41:32 +0800 Subject: [PATCH 026/132] support custom ss58addressformat in from_ss58check_with_version (#5526) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * support custom ss58addressformat in from_ss58check_with_version * fix str parse 1. if can parse with u8, use u8 into. 2. if u8 can't parse, convert to str then parse * add a test * typo * add error description in test * fix the `TryFrom` for `Ss58AddressFormat` change check logic in TryFrom to replace modified code in `from_ss58check_with_version` * use Ss58AddressFormat::default() replace DEFAULT_VERSION * Apply suggestions from code review * Update primitives/core/src/crypto.rs * Update primitives/core/src/crypto.rs * Update primitives/core/src/crypto.rs * Update primitives/core/src/crypto.rs Co-authored-by: Bastian Köcher --- primitives/core/src/crypto.rs | 13 +++++++++++-- primitives/core/src/ecdsa.rs | 18 +++++++++++++++++- 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index c34115ac8c0..0cdbebde9f2 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -366,7 +366,16 @@ macro_rules! ss58_address_format { fn try_from(x: u8) -> Result { match x { $($number => Ok(Ss58AddressFormat::$identifier)),*, - _ => Err(()), + _ => { + #[cfg(feature = "std")] + match Ss58AddressFormat::default() { + Ss58AddressFormat::Custom(n) if n == x => Ok(Ss58AddressFormat::Custom(x)), + _ => Err(()), + } + + #[cfg(not(feature = "std"))] + Err(()) + }, } } } @@ -377,7 +386,7 @@ macro_rules! 
ss58_address_format { fn try_from(x: &'a str) -> Result { match x { $($name => Ok(Ss58AddressFormat::$identifier)),*, - a => a.parse::().map(Ss58AddressFormat::Custom).map_err(|_| ()), + a => a.parse::().map_err(|_| ()).and_then(TryFrom::try_from), } } } diff --git a/primitives/core/src/ecdsa.rs b/primitives/core/src/ecdsa.rs index 29fa9a9c5c5..da6b7614c7f 100644 --- a/primitives/core/src/ecdsa.rs +++ b/primitives/core/src/ecdsa.rs @@ -552,7 +552,7 @@ impl CryptoType for Pair { mod test { use super::*; use hex_literal::hex; - use crate::crypto::DEV_PHRASE; + use crate::crypto::{DEV_PHRASE, set_default_ss58_version}; use serde_json; #[test] @@ -676,6 +676,22 @@ mod test { assert_eq!(cmp, public); } + #[test] + fn ss58check_custom_format_works() { + use crate::crypto::Ss58AddressFormat; + // temp save default format version + let default_format = Ss58AddressFormat::default(); + // set current ss58 version is custom "200" `Ss58AddressFormat::Custom(200)` + set_default_ss58_version(Ss58AddressFormat::Custom(200)); + // custom addr encoded by version 200 + let addr = "2X64kMNEWAW5KLZMSKcGKEc96MyuaRsRUku7vomuYxKgqjVCRj"; + Public::from_ss58check(&addr).unwrap(); + set_default_ss58_version(default_format); + // set current ss58 version to default version + let addr = "KWAfgC2aRG5UVD6CpbPQXCx4YZZUhvWqqAJE6qcYc9Rtr6g5C"; + Public::from_ss58check(&addr).unwrap(); + } + #[test] fn signature_serialization_works() { let pair = Pair::from_seed(b"12345678901234567890123456789012"); -- GitLab From 19c1d9028d8d6eabef41693433b56e14da025247 Mon Sep 17 00:00:00 2001 From: Ashley Date: Thu, 30 Jul 2020 11:02:12 +0200 Subject: [PATCH 027/132] Add a `DefaultQueue` type alias to remove the need to use `sp_api::TransactionFor` (#6761) * Add DefaultQueue * Add DefaultImportQueue to the top level of sp-consensus --- Cargo.lock | 1 + bin/node-template/node/src/service.rs | 2 +- bin/node/cli/src/service.rs | 2 +- client/consensus/aura/src/lib.rs | 7 ++----- client/consensus/babe/src/lib.rs | 7 ++----- primitives/consensus/common/Cargo.toml | 1 + primitives/consensus/common/src/import_queue.rs | 5 +++++ primitives/consensus/common/src/lib.rs | 1 + 8 files changed, 14 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 93aa1d393fb..574e4f48025 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7750,6 +7750,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.10.2", "serde", + "sp-api", "sp-core", "sp-inherents", "sp-runtime", diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 599560355ec..0de17103b05 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -24,7 +24,7 @@ type FullSelectChain = sc_consensus::LongestChain; pub fn new_partial(config: &Configuration) -> Result, + sp_consensus::DefaultImportQueue, sc_transaction_pool::FullPool, ( sc_finality_grandpa::GrandpaBlockImport, diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 322e9bf1d8a..a47869ed832 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -46,7 +46,7 @@ type LightClient = sc_service::TLightClient; pub fn new_partial(config: &Configuration) -> Result, + sp_consensus::DefaultImportQueue, sc_transaction_pool::FullPool, ( impl Fn(node_rpc::DenyUnsafe) -> node_rpc::IoHandler, diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 244b7e929ef..4e6cb49f112 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -47,7 +47,7 @@ use 
sp_consensus::{ BlockOrigin, Error as ConsensusError, SelectChain, SlotData, BlockCheckParams, ImportResult }; use sp_consensus::import_queue::{ - Verifier, BasicQueue, BoxJustificationImport, BoxFinalityProofImport, + Verifier, BasicQueue, DefaultImportQueue, BoxJustificationImport, BoxFinalityProofImport, }; use sc_client_api::{backend::AuxStore, BlockOf}; use sp_blockchain::{ @@ -713,9 +713,6 @@ fn authorities(client: &C, at: &BlockId) -> Result, Consensus .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet.into()) } -/// The Aura import queue type. -pub type AuraImportQueue = BasicQueue>; - /// Register the aura inherent data provider, if not registered already. fn register_aura_inherent_data_provider( inherent_data_providers: &InherentDataProviders, @@ -824,7 +821,7 @@ pub fn import_queue( inherent_data_providers: InherentDataProviders, spawner: &S, registry: Option<&Registry>, -) -> Result, sp_consensus::Error> where +) -> Result, sp_consensus::Error> where B: BlockT, C::Api: BlockBuilderApi + AuraApi> + ApiExt, C: 'static + ProvideRuntimeApi + BlockOf + ProvideCache + Send + Sync + AuxStore + HeaderBackend, diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index f09e9b063c2..e6f880fe345 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -100,7 +100,7 @@ use sp_consensus::{ }; use sp_consensus_babe::inherents::BabeInherentData; use sp_timestamp::{TimestampInherentData, InherentType as TimestampInherent}; -use sp_consensus::import_queue::{Verifier, BasicQueue, CacheKeyId}; +use sp_consensus::import_queue::{Verifier, BasicQueue, DefaultImportQueue, CacheKeyId}; use sc_client_api::{ backend::AuxStore, BlockchainEvents, ProvideUncles, @@ -967,9 +967,6 @@ where } } -/// The BABE import queue type. -pub type BabeImportQueue = BasicQueue>; - /// Register the babe inherent data provider, if not registered already. fn register_babe_inherent_data_provider( inherent_data_providers: &InherentDataProviders, @@ -1368,7 +1365,7 @@ pub fn import_queue( inherent_data_providers: InherentDataProviders, spawner: &impl sp_core::traits::SpawnNamed, registry: Option<&Registry>, -) -> ClientResult> where +) -> ClientResult> where Inner: BlockImport> + Send + Sync + 'static, Client: ProvideRuntimeApi + ProvideCache + Send + Sync + AuxStore + 'static, diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index 583fb088060..67d5603fdc5 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -27,6 +27,7 @@ sp-version = { version = "2.0.0-rc5", path = "../../version" } sp-runtime = { version = "2.0.0-rc5", path = "../../runtime" } sp-utils = { version = "2.0.0-rc5", path = "../../utils" } sp-trie = { version = "2.0.0-rc5", path = "../../trie" } +sp-api = { version = "2.0.0-rc5", path = "../../api" } codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } parking_lot = "0.10.0" serde = { version = "1.0", features = ["derive"] } diff --git a/primitives/consensus/common/src/import_queue.rs b/primitives/consensus/common/src/import_queue.rs index 94228a26638..9d25786441a 100644 --- a/primitives/consensus/common/src/import_queue.rs +++ b/primitives/consensus/common/src/import_queue.rs @@ -40,6 +40,11 @@ use crate::{ }; pub use basic_queue::BasicQueue; +/// A commonly-used Import Queue type. +/// +/// This defines the transaction type of the `BasicQueue` to be the transaction type for a client. 
+pub type DefaultImportQueue = BasicQueue>; + mod basic_queue; pub mod buffered_link; diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index da23172783c..04b65a723e4 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -56,6 +56,7 @@ pub use block_import::{ }; pub use select_chain::SelectChain; pub use sp_state_machine::Backend as StateBackend; +pub use import_queue::DefaultImportQueue; /// Block status. #[derive(Debug, PartialEq, Eq)] -- GitLab From 5c34fe49f8fccb1f0e0f983bb7f8cbb8850b574b Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 30 Jul 2020 14:58:30 +0200 Subject: [PATCH 028/132] Ignore flaky test (#6767) --- client/network/src/service/tests.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index f0982e30d99..0bfe507599c 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -345,7 +345,9 @@ fn lots_of_incoming_peers_works() { }); } +// TODO: this test is at the moment ignored because of https://github.com/paritytech/substrate/issues/6766 #[test] +#[ignore] fn notifications_back_pressure() { // Node 1 floods node 2 with notifications. Random sleeps are done on node 2 to simulate the // node being busy. We make sure that all notifications are received. -- GitLab From 910f065326ceb36410abfe98bcacec0607177277 Mon Sep 17 00:00:00 2001 From: Garrett MacDonald <1791145+garrettian@users.noreply.github.com> Date: Thu, 30 Jul 2020 21:23:25 +0800 Subject: [PATCH 029/132] =?UTF-8?q?Add=20"=E2=9C=85=20Successfully=20mined?= =?UTF-8?q?=20block"=20log=20message=20(#6764)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- client/consensus/pow/src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index 8c15528795c..42d1bc05019 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -648,6 +648,8 @@ fn mine_loop( } }; + log::info!("✅ Successfully mined block: {}", best_hash); + let (hash, seal) = { let seal = DigestItem::Seal(POW_ENGINE_ID, seal); let mut header = header.clone(); -- GitLab From 9e779ab7158afc0d58cedbd71be1e5806f3deaee Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Thu, 30 Jul 2020 15:52:16 +0200 Subject: [PATCH 030/132] pallet-evm: add builtin support for the four basic Ethereum precompiles (#6743) * pallet-evm: add builtin support for the four basic Ethereum precompiles * linear_cost -> ensure_linear_cost to directly return OutOfGas error --- Cargo.lock | 12 +++++ frame/evm/Cargo.toml | 2 + frame/evm/src/lib.rs | 2 +- frame/evm/src/precompiles.rs | 99 +++++++++++++++++++++++++++++++++++- 4 files changed, 113 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 574e4f48025..710ce8d7b30 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4391,6 +4391,7 @@ dependencies = [ "pallet-timestamp", "parity-scale-codec", "primitive-types", + "ripemd160", "rlp", "serde", "sha3", @@ -5954,6 +5955,17 @@ dependencies = [ "winapi 0.3.8", ] +[[package]] +name = "ripemd160" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2eca4ecc81b7f313189bf73ce724400a07da2a6dac19588b03c8bd76a2dcc251" +dependencies = [ + "block-buffer 0.9.0", + "digest 0.9.0", + "opaque-debug 0.3.0", +] + [[package]] name = "rle-decode-fast" version = "1.0.1" diff --git a/frame/evm/Cargo.toml 
b/frame/evm/Cargo.toml index a5ec28ddf9c..43ecc6f3688 100644 --- a/frame/evm/Cargo.toml +++ b/frame/evm/Cargo.toml @@ -27,6 +27,7 @@ rlp = { version = "0.4", default-features = false } evm = { version = "0.17", default-features = false } sha3 = { version = "0.8", default-features = false } impl-trait-for-tuples = "0.1" +ripemd160 = { version = "0.9", default-features = false } [features] default = ["std"] @@ -45,4 +46,5 @@ std = [ "primitive-types/std", "evm/std", "pallet-timestamp/std", + "ripemd160/std", ] diff --git a/frame/evm/src/lib.rs b/frame/evm/src/lib.rs index 0cbeac6fe2d..0dcc4526c7f 100644 --- a/frame/evm/src/lib.rs +++ b/frame/evm/src/lib.rs @@ -21,8 +21,8 @@ #![cfg_attr(not(feature = "std"), no_std)] mod backend; -mod precompiles; mod tests; +pub mod precompiles; pub use crate::precompiles::{Precompile, Precompiles}; pub use crate::backend::{Account, Log, Vicinity, Backend}; diff --git a/frame/evm/src/precompiles.rs b/frame/evm/src/precompiles.rs index a6a10d45a20..987724285d7 100644 --- a/frame/evm/src/precompiles.rs +++ b/frame/evm/src/precompiles.rs @@ -15,9 +15,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -use sp_std::vec::Vec; +//! Builtin precompiles. + +use sp_std::{cmp::min, vec::Vec}; use sp_core::H160; use evm::{ExitError, ExitSucceed}; +use ripemd160::Digest; use impl_trait_for_tuples::impl_for_tuples; /// Custom precompiles to be used by EVM engine. @@ -67,3 +70,97 @@ impl Precompiles for Tuple { None } } + +/// Linear gas cost +fn ensure_linear_cost( + target_gas: Option, + len: usize, + base: usize, + word: usize +) -> Result { + let cost = base.checked_add( + word.checked_mul(len.saturating_add(31) / 32).ok_or(ExitError::OutOfGas)? + ).ok_or(ExitError::OutOfGas)?; + + if let Some(target_gas) = target_gas { + if cost > target_gas { + return Err(ExitError::OutOfGas) + } + } + + Ok(cost) +} + +/// The identity precompile. +pub struct Identity; + +impl Precompile for Identity { + fn execute( + input: &[u8], + target_gas: Option, + ) -> core::result::Result<(ExitSucceed, Vec, usize), ExitError> { + let cost = ensure_linear_cost(target_gas, input.len(), 15, 3)?; + + Ok((ExitSucceed::Returned, input.to_vec(), cost)) + } +} + +/// The ecrecover precompile. +pub struct ECRecover; + +impl Precompile for ECRecover { + fn execute( + i: &[u8], + target_gas: Option, + ) -> core::result::Result<(ExitSucceed, Vec, usize), ExitError> { + let cost = ensure_linear_cost(target_gas, i.len(), 3000, 0)?; + + let mut input = [0u8; 128]; + input[..min(i.len(), 128)].copy_from_slice(&i[..min(i.len(), 128)]); + + let mut msg = [0u8; 32]; + let mut sig = [0u8; 65]; + + msg[0..32].copy_from_slice(&input[0..32]); + sig[0..32].copy_from_slice(&input[64..96]); + sig[32..64].copy_from_slice(&input[96..128]); + sig[64] = input[63]; + + let pubkey = sp_io::crypto::secp256k1_ecdsa_recover(&sig, &msg) + .map_err(|_| ExitError::Other("Public key recover failed"))?; + let mut address = sp_io::hashing::keccak_256(&pubkey); + address[0..12].copy_from_slice(&[0u8; 12]); + + Ok((ExitSucceed::Returned, address.to_vec(), cost)) + } +} + +/// The ripemd precompile. 
+pub struct Ripemd160; + +impl Precompile for Ripemd160 { + fn execute( + input: &[u8], + target_gas: Option, + ) -> core::result::Result<(ExitSucceed, Vec, usize), ExitError> { + let cost = ensure_linear_cost(target_gas, input.len(), 600, 120)?; + + let ret = ripemd160::Ripemd160::digest(input).to_vec(); + Ok((ExitSucceed::Returned, ret, cost)) + } +} + +/// The sha256 precompile. +pub struct Sha256; + +impl Precompile for Sha256 { + fn execute( + input: &[u8], + target_gas: Option, + ) -> core::result::Result<(ExitSucceed, Vec, usize), ExitError> { + let cost = ensure_linear_cost(target_gas, input.len(), 60, 12)?; + + let ret = sp_io::hashing::sha2_256(input); + Ok((ExitSucceed::Returned, ret.to_vec(), cost)) + } +} -- GitLab From ffe4db94aeb2d6c7aa58363a601b3efb0570bdeb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 30 Jul 2020 16:19:34 +0200 Subject: [PATCH 031/132] Rename task name to stick to the default naming scheme (#6768) --- client/rpc/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/rpc/src/lib.rs b/client/rpc/src/lib.rs index 4b2bd200a84..22dccbaa10a 100644 --- a/client/rpc/src/lib.rs +++ b/client/rpc/src/lib.rs @@ -57,7 +57,7 @@ impl Executor + Send>> for SubscriptionTas &self, future: Box + Send>, ) -> Result<(), ExecuteError + Send>>> { - self.0.spawn("substrate_rpc_subscription", future.compat().map(drop).boxed()); + self.0.spawn("substrate-rpc-subscription", future.compat().map(drop).boxed()); Ok(()) } } -- GitLab From bff302d5aa3a4ce94664954e4e8527309290cf8c Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Thu, 30 Jul 2020 17:07:27 +0200 Subject: [PATCH 032/132] BABE slot and epoch event notifications (#6563) * BabeWorker -> BabeSlotWorker * SlotWorker::notify_slot: similar to claim_slot, but called no matter authoring * Wrap the future with a new struct BabeWorker * Add type definition slot_notification_sinks * Function slot_notification_streams for the receiver side * Get a handle of slot_notification_sinks in BabeSlotWorker * Implement notify_slot * Switch to use bounded mpsc * Do not drop the sink when channel is full Only skip sending the message and emit a warning, because it is recoverable. 
* Fix future type bounds * Add must_use and sink type alias --- Cargo.lock | 2 + client/consensus/babe/Cargo.toml | 2 + client/consensus/babe/src/lib.rs | 87 +++++++++++++++++++++++++++---- client/consensus/slots/src/lib.rs | 11 ++++ 4 files changed, 92 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 710ce8d7b30..525ea78d9d4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6389,6 +6389,7 @@ dependencies = [ "pdqselect", "rand 0.7.3", "rand_chacha 0.2.2", + "retain_mut", "sc-block-builder", "sc-client-api", "sc-consensus-epochs", @@ -6415,6 +6416,7 @@ dependencies = [ "sp-keyring", "sp-runtime", "sp-timestamp", + "sp-utils", "sp-version", "substrate-prometheus-endpoint", "substrate-test-runtime-client", diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index 6ac8ca165eb..1b6b705139c 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -37,6 +37,7 @@ sp-consensus-vrf = { version = "0.8.0-rc5", path = "../../../primitives/consensu sc-consensus-uncles = { version = "0.8.0-rc5", path = "../uncles" } sc-consensus-slots = { version = "0.8.0-rc5", path = "../slots" } sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } +sp-utils = { version = "2.0.0-rc5", path = "../../../primitives/utils" } fork-tree = { version = "2.0.0-rc5", path = "../../../utils/fork-tree" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc5"} futures = "0.3.4" @@ -48,6 +49,7 @@ rand = "0.7.2" merlin = "2.0" pdqselect = "0.1.0" derive_more = "0.99.2" +retain_mut = "0.1.1" [dev-dependencies] sp-keyring = { version = "2.0.0-rc5", path = "../../../primitives/keyring" } diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index e6f880fe345..951d1467b49 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -106,6 +106,8 @@ use sc_client_api::{ BlockchainEvents, ProvideUncles, }; use sp_block_builder::BlockBuilder as BlockBuilderApi; +use futures::channel::mpsc::{channel, Sender, Receiver}; +use retain_mut::RetainMut; use futures::prelude::*; use log::{debug, info, log, trace, warn}; @@ -370,7 +372,7 @@ pub fn start_babe(BabeParams { babe_link, can_author_with, }: BabeParams) -> Result< - impl futures::Future, + BabeWorker, sp_consensus::Error, > where B: BlockT, @@ -378,16 +380,18 @@ pub fn start_babe(BabeParams { + HeaderBackend + HeaderMetadata + Send + Sync + 'static, C::Api: BabeApi, SC: SelectChain + 'static, - E: Environment + Send + Sync, + E: Environment + Send + Sync + 'static, E::Proposer: Proposer>, I: BlockImport> + Send + Sync + 'static, Error: std::error::Error + Send + From + From + 'static, - SO: SyncOracle + Send + Sync + Clone, - CAW: CanAuthorWith + Send, + SO: SyncOracle + Send + Sync + Clone + 'static, + CAW: CanAuthorWith + Send + 'static, { let config = babe_link.config; - let worker = BabeWorker { + let slot_notification_sinks = Arc::new(Mutex::new(Vec::new())); + + let worker = BabeSlotWorker { client: client.clone(), block_import: Arc::new(Mutex::new(block_import)), env, @@ -395,6 +399,7 @@ pub fn start_babe(BabeParams { force_authoring, keystore, epoch_changes: babe_link.epoch_changes.clone(), + slot_notification_sinks: slot_notification_sinks.clone(), config: config.clone(), }; @@ -406,7 +411,7 @@ pub fn start_babe(BabeParams { )?; info!(target: "babe", "👶 Starting BABE Authorship worker"); - Ok(sc_consensus_slots::start_slot_worker( + let inner = 
sc_consensus_slots::start_slot_worker( config.0, select_chain, worker, @@ -414,10 +419,49 @@ pub fn start_babe(BabeParams { inherent_data_providers, babe_link.time_source, can_author_with, - )) + ); + Ok(BabeWorker { + inner: Box::pin(inner), + slot_notification_sinks, + }) +} + +/// Worker for Babe which implements `Future`. This must be polled. +#[must_use] +pub struct BabeWorker { + inner: Pin + Send + 'static>>, + slot_notification_sinks: Arc, Epoch>)>>>>, +} + +impl BabeWorker { + /// Return an event stream of notifications for when new slot happens, and the corresponding + /// epoch descriptor. + pub fn slot_notification_stream( + &self + ) -> Receiver<(u64, ViableEpochDescriptor, Epoch>)> { + const CHANNEL_BUFFER_SIZE: usize = 1024; + + let (sink, stream) = channel(CHANNEL_BUFFER_SIZE); + self.slot_notification_sinks.lock().push(sink); + stream + } +} + +impl futures::Future for BabeWorker { + type Output = (); + + fn poll( + mut self: Pin<&mut Self>, + cx: &mut futures::task::Context + ) -> futures::task::Poll { + self.inner.as_mut().poll(cx) + } } -struct BabeWorker { +/// Slot notification sinks. +type SlotNotificationSinks = Arc::Hash, NumberFor, Epoch>)>>>>; + +struct BabeSlotWorker { client: Arc, block_import: Arc>, env: E, @@ -425,10 +469,11 @@ struct BabeWorker { force_authoring: bool, keystore: KeyStorePtr, epoch_changes: SharedEpochChanges, + slot_notification_sinks: SlotNotificationSinks, config: Config, } -impl sc_consensus_slots::SimpleSlotWorker for BabeWorker where +impl sc_consensus_slots::SimpleSlotWorker for BabeSlotWorker where B: BlockT, C: ProvideRuntimeApi + ProvideCache + @@ -502,6 +547,28 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeWork s } + fn notify_slot( + &self, + _parent_header: &B::Header, + slot_number: SlotNumber, + epoch_descriptor: &ViableEpochDescriptor, Epoch>, + ) { + self.slot_notification_sinks.lock() + .retain_mut(|sink| { + match sink.try_send((slot_number, epoch_descriptor.clone())) { + Ok(()) => true, + Err(e) => { + if e.is_full() { + warn!(target: "babe", "Trying to notify a slot but the channel is full"); + true + } else { + false + } + }, + } + }); + } + fn pre_digest_data( &self, _slot_number: u64, @@ -599,7 +666,7 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeWork } } -impl SlotWorker for BabeWorker where +impl SlotWorker for BabeSlotWorker where B: BlockT, C: ProvideRuntimeApi + ProvideCache + diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index 7687d3114b3..7d346ffe395 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -104,6 +104,15 @@ pub trait SimpleSlotWorker { epoch_data: &Self::EpochData, ) -> Option; + /// Notifies the given slot. Similar to `claim_slot`, but will be called no matter whether we + /// need to author blocks or not. + fn notify_slot( + &self, + _header: &B::Header, + _slot_number: u64, + _epoch_data: &Self::EpochData, + ) { } + /// Return the pre digest data to include in a block authored with the given claim. 
fn pre_digest_data( &self, @@ -191,6 +200,8 @@ pub trait SimpleSlotWorker { } }; + self.notify_slot(&chain_head, slot_number, &epoch_data); + let authorities_len = self.authorities_len(&epoch_data); if !self.force_authoring() && -- GitLab From 87063c3c00da34213379330bae3174aa0da7ad0f Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Thu, 30 Jul 2020 17:08:23 +0200 Subject: [PATCH 033/132] Update Balances Pallet to use `WeightInfo` (#6610) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update balance benchmarks * Update weight functions * Remove user component * make componentless * Add support for `#[extra]` tag on benchmarks * Update balances completely * Apply suggestions from code review Co-authored-by: Alexander Theißen * Fix some tests * Maybe fix to test. Need approval from @tomusdrw this is okay * Make test better * keep weights conservative * Update macro for merge master * Add headers * Apply suggestions from code review Co-authored-by: Alexander Popiak Co-authored-by: Alexander Theißen Co-authored-by: Alexander Popiak --- bin/node/executor/tests/basic.rs | 18 ++- bin/node/executor/tests/submit_transaction.rs | 18 ++- bin/node/runtime/src/lib.rs | 8 +- bin/node/runtime/src/weights/mod.rs | 18 +++ .../runtime/src/weights/pallet_balances.rs | 47 +++++++ frame/balances/src/benchmarking.rs | 117 +++++++++++------- frame/balances/src/lib.rs | 30 ++--- frame/benchmarking/src/lib.rs | 65 ++++++++-- frame/benchmarking/src/utils.rs | 7 +- utils/frame/benchmarking-cli/src/command.rs | 1 + utils/frame/benchmarking-cli/src/lib.rs | 4 + 11 files changed, 245 insertions(+), 88 deletions(-) create mode 100644 bin/node/runtime/src/weights/mod.rs create mode 100644 bin/node/runtime/src/weights/pallet_balances.rs diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index f6dc1c3e7ea..0d69b970016 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -312,6 +312,9 @@ fn full_native_block_import_works() { let mut alice_last_known_balance: Balance = Default::default(); let mut fees = t.execute_with(|| transfer_fee(&xt())); + let transfer_weight = default_transfer_call().get_dispatch_info().weight; + let timestamp_weight = pallet_timestamp::Call::set::(Default::default()).get_dispatch_info().weight; + executor_call:: _>( &mut t, "Core_execute_block", @@ -327,9 +330,8 @@ fn full_native_block_import_works() { let events = vec![ EventRecord { phase: Phase::ApplyExtrinsic(0), - // timestamp set call with weight 8_000_000 + 2 read + 1 write event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( - DispatchInfo { weight: 8_000_000 + 2 * 25_000_000 + 1 * 100_000_000, class: DispatchClass::Mandatory, ..Default::default() } + DispatchInfo { weight: timestamp_weight, class: DispatchClass::Mandatory, ..Default::default() } )), topics: vec![], }, @@ -349,9 +351,8 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - // Balance Transfer 70_000_000 + 1 Read + 1 Write event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( - DispatchInfo { weight: 70_000_000 + 25_000_000 + 100_000_000, ..Default::default() } + DispatchInfo { weight: transfer_weight, ..Default::default() } )), topics: vec![], }, @@ -381,9 +382,8 @@ fn full_native_block_import_works() { let events = vec![ EventRecord { phase: Phase::ApplyExtrinsic(0), - // timestamp set call with weight 8_000_000 + 2 read + 1 write event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( 
- DispatchInfo { weight: 8_000_000 + 2 * 25_000_000 + 1 * 100_000_000, class: DispatchClass::Mandatory, ..Default::default() } + DispatchInfo { weight: timestamp_weight, class: DispatchClass::Mandatory, ..Default::default() } )), topics: vec![], }, @@ -405,9 +405,8 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - // Balance Transfer 70_000_000 + 1 Read + 1 Write event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( - DispatchInfo { weight: 70_000_000 + 25_000_000 + 100_000_000, ..Default::default() } + DispatchInfo { weight: transfer_weight, ..Default::default() } )), topics: vec![], }, @@ -429,9 +428,8 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(2), - // Balance Transfer 70_000_000 + 1 Read + 1 Write event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( - DispatchInfo { weight: 70_000_000 + 25_000_000 + 100_000_000, ..Default::default() } + DispatchInfo { weight: transfer_weight, ..Default::default() } )), topics: vec![], }, diff --git a/bin/node/executor/tests/submit_transaction.rs b/bin/node/executor/tests/submit_transaction.rs index dd599a996a4..64c2deedac7 100644 --- a/bin/node/executor/tests/submit_transaction.rs +++ b/bin/node/executor/tests/submit_transaction.rs @@ -192,7 +192,7 @@ fn should_submit_signed_twice_from_all_accounts() { fn submitted_transaction_should_be_valid() { use codec::Encode; use frame_support::storage::StorageMap; - use sp_runtime::transaction_validity::{ValidTransaction, TransactionSource}; + use sp_runtime::transaction_validity::{TransactionSource, TransactionTag}; use sp_runtime::traits::StaticLookup; let mut t = new_test_ext(compact_code_unwrap(), false); @@ -228,14 +228,12 @@ fn submitted_transaction_should_be_valid() { >::insert(&address, account); // check validity - let res = Executive::validate_transaction(source, extrinsic); - - assert_eq!(res.unwrap(), ValidTransaction { - priority: 1_410_710_000_000, - requires: vec![], - provides: vec![(address, 0).encode()], - longevity: 2048, - propagate: true, - }); + let res = Executive::validate_transaction(source, extrinsic).unwrap(); + + // We ignore res.priority since this number can change based on updates to weights and such. + assert_eq!(res.requires, Vec::::new()); + assert_eq!(res.provides, vec![(address, 0).encode()]); + assert_eq!(res.longevity, 2048); + assert_eq!(res.propagate, true); }); } diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 9dae66a1275..373a01b8ea2 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -84,6 +84,9 @@ use impls::{CurrencyToVoteHandler, Author}; pub mod constants; use constants::{time::*, currency::*}; +/// Weights for pallets used in the runtime. +mod weights; + // Make the WASM binary available. #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); @@ -322,7 +325,7 @@ impl pallet_balances::Trait for Runtime { type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = frame_system::Module; - type WeightInfo = (); + type WeightInfo = weights::pallet_balances::WeightInfo; } parameter_types! { @@ -1126,6 +1129,7 @@ impl_runtime_apis! { highest_range_values: Vec, steps: Vec, repeat: u32, + extra: bool, ) -> Result, sp_runtime::RuntimeString> { use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark}; // Trying to add benchmarks directly to the Session Pallet caused cyclic dependency issues. @@ -1157,7 +1161,7 @@ impl_runtime_apis! 
{ ]; let mut batches = Vec::::new(); - let params = (&pallet, &benchmark, &lowest_range_values, &highest_range_values, &steps, repeat, &whitelist); + let params = (&pallet, &benchmark, &lowest_range_values, &highest_range_values, &steps, repeat, &whitelist, extra); add_benchmark!(params, batches, pallet_babe, Babe); add_benchmark!(params, batches, pallet_balances, Balances); diff --git a/bin/node/runtime/src/weights/mod.rs b/bin/node/runtime/src/weights/mod.rs new file mode 100644 index 00000000000..70e10d5342f --- /dev/null +++ b/bin/node/runtime/src/weights/mod.rs @@ -0,0 +1,18 @@ +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! A list of the different weight modules for our runtime. + +pub mod pallet_balances; diff --git a/bin/node/runtime/src/weights/pallet_balances.rs b/bin/node/runtime/src/weights/pallet_balances.rs new file mode 100644 index 00000000000..21a90a97e63 --- /dev/null +++ b/bin/node/runtime/src/weights/pallet_balances.rs @@ -0,0 +1,47 @@ +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Weights for the Balances Pallet + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl pallet_balances::WeightInfo for WeightInfo { + fn transfer() -> Weight { + (65949000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn transfer_keep_alive() -> Weight { + (46665000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn set_balance_creating() -> Weight { + (27086000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn set_balance_killing() -> Weight { + (33424000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn force_transfer() -> Weight { + (65343000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } +} diff --git a/frame/balances/src/benchmarking.rs b/frame/balances/src/benchmarking.rs index a5f8e6fe36c..73547fe814a 100644 --- a/frame/balances/src/benchmarking.rs +++ b/frame/balances/src/benchmarking.rs @@ -28,46 +28,40 @@ use sp_runtime::traits::Bounded; use crate::Module as Balances; const SEED: u32 = 0; -const MAX_EXISTENTIAL_DEPOSIT: u32 = 1000; -const MAX_USER_INDEX: u32 = 1000; +// existential deposit multiplier +const ED_MULTIPLIER: u32 = 10; + benchmarks! { - _ { - let e in 2 .. MAX_EXISTENTIAL_DEPOSIT => (); - let u in 1 .. MAX_USER_INDEX => (); - } + _ { } // Benchmark `transfer` extrinsic with the worst possible conditions: // * Transfer will kill the sender account. // * Transfer will create the recipient account. transfer { - let u in ...; - let e in ...; - let existential_deposit = T::ExistentialDeposit::get(); - let caller = account("caller", u, SEED); + let caller = account("caller", 0, SEED); // Give some multiple of the existential deposit + creation fee + transfer fee - let balance = existential_deposit.saturating_mul(e.into()); + let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); let _ = as Currency<_>>::make_free_balance_be(&caller, balance); // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, and reap this user. - let recipient: T::AccountId = account("recipient", u, SEED); + let recipient: T::AccountId = account("recipient", 0, SEED); let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); - let transfer_amount = existential_deposit.saturating_mul((e - 1).into()) + 1.into(); - }: _(RawOrigin::Signed(caller), recipient_lookup, transfer_amount) + let transfer_amount = existential_deposit.saturating_mul((ED_MULTIPLIER - 1).into()) + 1.into(); + }: transfer(RawOrigin::Signed(caller.clone()), recipient_lookup, transfer_amount) verify { + assert_eq!(Balances::::free_balance(&caller), Zero::zero()); assert_eq!(Balances::::free_balance(&recipient), transfer_amount); } // Benchmark `transfer` with the best possible condition: // * Both accounts exist and will continue to exist. 
+ #[extra] transfer_best_case { - let u in ...; - let e in ...; - - let caller = account("caller", u, SEED); - let recipient: T::AccountId = account("recipient", u, SEED); + let caller = account("caller", 0, SEED); + let recipient: T::AccountId = account("recipient", 0, SEED); let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); // Give the sender account max funds for transfer (their account will never reasonably be killed). @@ -76,52 +70,80 @@ benchmarks! { // Give the recipient account existential deposit (thus their account already exists). let existential_deposit = T::ExistentialDeposit::get(); let _ = as Currency<_>>::make_free_balance_be(&recipient, existential_deposit); - let transfer_amount = existential_deposit.saturating_mul(e.into()); - }: transfer(RawOrigin::Signed(caller), recipient_lookup, transfer_amount) + let transfer_amount = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); + }: transfer(RawOrigin::Signed(caller.clone()), recipient_lookup, transfer_amount) + verify { + assert!(!Balances::::free_balance(&caller).is_zero()); + assert!(!Balances::::free_balance(&recipient).is_zero()); + } // Benchmark `transfer_keep_alive` with the worst possible condition: // * The recipient account is created. transfer_keep_alive { - let u in ...; - let e in ...; - - let caller = account("caller", u, SEED); - let recipient = account("recipient", u, SEED); - let recipient_lookup: ::Source = T::Lookup::unlookup(recipient); + let caller = account("caller", 0, SEED); + let recipient: T::AccountId = account("recipient", 0, SEED); + let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); // Give the sender account max funds, thus a transfer will not kill account. let _ = as Currency<_>>::make_free_balance_be(&caller, T::Balance::max_value()); let existential_deposit = T::ExistentialDeposit::get(); - let transfer_amount = existential_deposit.saturating_mul(e.into()); - }: _(RawOrigin::Signed(caller), recipient_lookup, transfer_amount) + let transfer_amount = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); + }: _(RawOrigin::Signed(caller.clone()), recipient_lookup, transfer_amount) + verify { + assert!(!Balances::::free_balance(&caller).is_zero()); + assert_eq!(Balances::::free_balance(&recipient), transfer_amount); + } // Benchmark `set_balance` coming from ROOT account. This always creates an account. - set_balance { - let u in ...; - let e in ...; - - let user: T::AccountId = account("user", u, SEED); + set_balance_creating { + let user: T::AccountId = account("user", 0, SEED); let user_lookup: ::Source = T::Lookup::unlookup(user.clone()); // Give the user some initial balance. let existential_deposit = T::ExistentialDeposit::get(); - let balance_amount = existential_deposit.saturating_mul(e.into()); + let balance_amount = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); let _ = as Currency<_>>::make_free_balance_be(&user, balance_amount); - }: _(RawOrigin::Root, user_lookup, balance_amount, balance_amount) + }: set_balance(RawOrigin::Root, user_lookup, balance_amount, balance_amount) + verify { + assert_eq!(Balances::::free_balance(&user), balance_amount); + assert_eq!(Balances::::reserved_balance(&user), balance_amount); + } // Benchmark `set_balance` coming from ROOT account. This always kills an account. 
set_balance_killing { - let u in ...; - let e in ...; - - let user: T::AccountId = account("user", u, SEED); + let user: T::AccountId = account("user", 0, SEED); let user_lookup: ::Source = T::Lookup::unlookup(user.clone()); // Give the user some initial balance. let existential_deposit = T::ExistentialDeposit::get(); - let balance_amount = existential_deposit.saturating_mul(e.into()); + let balance_amount = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); let _ = as Currency<_>>::make_free_balance_be(&user, balance_amount); - }: set_balance(RawOrigin::Root, user_lookup, 0.into(), 0.into()) + }: set_balance(RawOrigin::Root, user_lookup, Zero::zero(), Zero::zero()) + verify { + assert!(Balances::::free_balance(&user).is_zero()); + } + + // Benchmark `force_transfer` extrinsic with the worst possible conditions: + // * Transfer will kill the sender account. + // * Transfer will create the recipient account. + force_transfer { + let existential_deposit = T::ExistentialDeposit::get(); + let source: T::AccountId = account("source", 0, SEED); + let source_lookup: ::Source = T::Lookup::unlookup(source.clone()); + + // Give some multiple of the existential deposit + creation fee + transfer fee + let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); + let _ = as Currency<_>>::make_free_balance_be(&source, balance); + + // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, and reap this user. + let recipient: T::AccountId = account("recipient", 0, SEED); + let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); + let transfer_amount = existential_deposit.saturating_mul((ED_MULTIPLIER - 1).into()) + 1.into(); + }: force_transfer(RawOrigin::Root, source_lookup, recipient_lookup, transfer_amount) + verify { + assert_eq!(Balances::::free_balance(&source), Zero::zero()); + assert_eq!(Balances::::free_balance(&recipient), transfer_amount); + } } #[cfg(test)] @@ -152,9 +174,9 @@ mod tests { } #[test] - fn transfer_set_balance() { + fn transfer_set_balance_creating() { ExtBuilder::default().build().execute_with(|| { - assert_ok!(test_benchmark_set_balance::()); + assert_ok!(test_benchmark_set_balance_creating::()); }); } @@ -164,4 +186,11 @@ mod tests { assert_ok!(test_benchmark_set_balance_killing::()); }); } + + #[test] + fn force_transfer() { + ExtBuilder::default().build().execute_with(|| { + assert_ok!(test_benchmark_force_transfer::()); + }); + } } diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index 0bd57e3828c..cc9d9d179fa 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -180,19 +180,19 @@ use frame_system::{self as system, ensure_signed, ensure_root}; pub use self::imbalances::{PositiveImbalance, NegativeImbalance}; pub trait WeightInfo { - fn transfer(u: u32, e: u32, ) -> Weight; - fn transfer_best_case(u: u32, e: u32, ) -> Weight; - fn transfer_keep_alive(u: u32, e: u32, ) -> Weight; - fn set_balance(u: u32, e: u32, ) -> Weight; - fn set_balance_killing(u: u32, e: u32, ) -> Weight; + fn transfer() -> Weight; + fn transfer_keep_alive() -> Weight; + fn set_balance_creating() -> Weight; + fn set_balance_killing() -> Weight; + fn force_transfer() -> Weight; } impl WeightInfo for () { - fn transfer(_u: u32, _e: u32, ) -> Weight { 1_000_000_000 } - fn transfer_best_case(_u: u32, _e: u32, ) -> Weight { 1_000_000_000 } - fn transfer_keep_alive(_u: u32, _e: u32, ) -> Weight { 1_000_000_000 } - fn set_balance(_u: u32, _e: u32, ) -> Weight { 1_000_000_000 } - fn 
set_balance_killing(_u: u32, _e: u32, ) -> Weight { 1_000_000_000 } + fn transfer() -> Weight { 1_000_000_000 } + fn transfer_keep_alive() -> Weight { 1_000_000_000 } + fn set_balance_creating() -> Weight { 1_000_000_000 } + fn set_balance_killing() -> Weight { 1_000_000_000 } + fn force_transfer() -> Weight { 1_000_000_000 } } pub trait Subtrait: frame_system::Trait { @@ -462,7 +462,7 @@ decl_module! { /// - DB Weight: 1 Read and 1 Write to destination account /// - Origin account is already in memory, so no DB operations for them. /// # - #[weight = T::DbWeight::get().reads_writes(1, 1) + 70_000_000] + #[weight = T::WeightInfo::transfer()] pub fn transfer( origin, dest: ::Source, @@ -491,7 +491,9 @@ decl_module! { /// - Killing: 35.11 µs /// - DB Weight: 1 Read, 1 Write to `who` /// # - #[weight = T::DbWeight::get().reads_writes(1, 1) + 35_000_000] + #[weight = T::WeightInfo::set_balance_creating() // Creates a new account. + .max(T::WeightInfo::set_balance_killing()) // Kills an existing account. + ] fn set_balance( origin, who: ::Source, @@ -533,7 +535,7 @@ decl_module! { /// - Same as transfer, but additional read and write because the source account is /// not assumed to be in the overlay. /// # - #[weight = T::DbWeight::get().reads_writes(2, 2) + 70_000_000] + #[weight = T::WeightInfo::force_transfer()] pub fn force_transfer( origin, source: ::Source, @@ -557,7 +559,7 @@ decl_module! { /// - Base Weight: 51.4 µs /// - DB Weight: 1 Read and 1 Write to dest (sender is in overlay already) /// # - #[weight = T::DbWeight::get().reads_writes(1, 1) + 50_000_000] + #[weight = T::WeightInfo::transfer_keep_alive()] pub fn transfer_keep_alive( origin, dest: ::Source, diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index bd0aabdaa3f..7ef274f25b1 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -188,6 +188,7 @@ macro_rules! benchmarks { { $( $( $where_ty: $where_bound ),* )? } { $( { $common , $common_from , $common_to , $common_instancer } )* } ( ) + ( ) $( $rest )* ); } @@ -210,6 +211,7 @@ macro_rules! benchmarks_instance { { $( $( $where_ty: $where_bound ),* )? } { $( { $common , $common_from , $common_to , $common_instancer } )* } ( ) + ( ) $( $rest )* ); } @@ -218,12 +220,34 @@ macro_rules! benchmarks_instance { #[macro_export] #[doc(hidden)] macro_rules! benchmarks_iter { + // detect and extract extra tag: + ( + { $( $instance:ident )? } + { $( $where_clause:tt )* } + { $( $common:tt )* } + ( $( $names:tt )* ) + ( $( $names_extra:tt )* ) + #[extra] + $name:ident + $( $rest:tt )* + ) => { + $crate::benchmarks_iter! { + { $( $instance)? } + { $( $where_clause )* } + { $( $common )* } + ( $( $names )* ) + ( $( $names_extra )* $name ) + $name + $( $rest )* + } + }; // mutation arm: ( { $( $instance:ident )? } { $( $where_clause:tt )* } { $( $common:tt )* } ( $( $names:tt )* ) // This contains $( $( { $instance } )? $name:ident )* + ( $( $names_extra:tt )* ) $name:ident { $( $code:tt )* }: _ ( $origin:expr $( , $arg:expr )* ) verify $postcode:block $( $rest:tt )* @@ -233,6 +257,7 @@ macro_rules! benchmarks_iter { { $( $where_clause )* } { $( $common )* } ( $( $names )* ) + ( $( $names_extra )* ) $name { $( $code )* }: $name ( $origin $( , $arg )* ) verify $postcode $( $rest )* @@ -244,6 +269,7 @@ macro_rules! 
benchmarks_iter { { $( $where_clause:tt )* } { $( $common:tt )* } ( $( $names:tt )* ) + ( $( $names_extra:tt )* ) $name:ident { $( $code:tt )* }: $dispatch:ident ( $origin:expr $( , $arg:expr )* ) verify $postcode:block $( $rest:tt )* @@ -253,6 +279,7 @@ macro_rules! benchmarks_iter { { $( $where_clause )* } { $( $common )* } ( $( $names )* ) + ( $( $names_extra )* ) $name { $( $code )* }: { < Call as $crate::frame_support::traits::UnfilteredDispatchable @@ -270,6 +297,7 @@ macro_rules! benchmarks_iter { { $( $where_clause:tt )* } { $( $common:tt )* } ( $( $names:tt )* ) + ( $( $names_extra:tt )* ) $name:ident { $( $code:tt )* }: $eval:block verify $postcode:block $( $rest:tt )* @@ -297,6 +325,7 @@ macro_rules! benchmarks_iter { { $( $where_clause )* } { $( $common )* } ( $( $names )* { $( $instance )? } $name ) + ( $( $names_extra )* ) $( $rest )* ); }; @@ -306,9 +335,19 @@ macro_rules! benchmarks_iter { { $( $where_clause:tt )* } { $( $common:tt )* } ( $( $names:tt )* ) + ( $( $names_extra:tt )* ) ) => { - $crate::selected_benchmark!( { $( $where_clause)* } { $( $instance)? } $( $names )* ); - $crate::impl_benchmark!( { $( $where_clause )* } { $( $instance)? } $( $names )* ); + $crate::selected_benchmark!( + { $( $where_clause)* } + { $( $instance)? } + $( $names )* + ); + $crate::impl_benchmark!( + { $( $where_clause )* } + { $( $instance)? } + ( $( $names )* ) + ( $( $names_extra ),* ) + ); }; // add verify block to _() format ( @@ -316,6 +355,7 @@ macro_rules! benchmarks_iter { { $( $where_clause:tt )* } { $( $common:tt )* } ( $( $names:tt )* ) + ( $( $names_extra:tt )* ) $name:ident { $( $code:tt )* }: _ ( $origin:expr $( , $arg:expr )* ) $( $rest:tt )* ) => { @@ -324,6 +364,7 @@ macro_rules! benchmarks_iter { { $( $where_clause )* } { $( $common )* } ( $( $names )* ) + ( $( $names_extra )* ) $name { $( $code )* }: _ ( $origin $( , $arg )* ) verify { } $( $rest )* @@ -335,6 +376,7 @@ macro_rules! benchmarks_iter { { $( $where_clause:tt )* } { $( $common:tt )* } ( $( $names:tt )* ) + ( $( $names_extra:tt )* ) $name:ident { $( $code:tt )* }: $dispatch:ident ( $origin:expr $( , $arg:expr )* ) $( $rest:tt )* ) => { @@ -343,6 +385,7 @@ macro_rules! benchmarks_iter { { $( $where_clause )* } { $( $common )* } ( $( $names )* ) + ( $( $names_extra )* ) $name { $( $code )* }: $dispatch ( $origin $( , $arg )* ) verify { } $( $rest )* @@ -354,6 +397,7 @@ macro_rules! benchmarks_iter { { $( $where_clause:tt )* } { $( $common:tt )* } ( $( $names:tt )* ) + ( $( $names_extra:tt )* ) $name:ident { $( $code:tt )* }: $eval:block $( $rest:tt )* ) => { @@ -362,6 +406,7 @@ macro_rules! benchmarks_iter { { $( $where_clause )* } { $( $common )* } ( $( $names )* ) + ( $( $names_extra )* ) $name { $( $code )* }: $eval verify { } $( $rest )* @@ -659,14 +704,20 @@ macro_rules! impl_benchmark { ( { $( $where_clause:tt )* } { $( $instance:ident )? } - $( { $( $name_inst:ident )? } $name:ident )* + ( $( { $( $name_inst:ident )? } $name:ident )* ) + ( $( $name_extra:ident ),* ) ) => { impl, I: Instance)? > $crate::Benchmarking<$crate::BenchmarkResults> for Module where T: frame_system::Trait, $( $where_clause )* { - fn benchmarks() -> Vec<&'static [u8]> { - vec![ $( stringify!($name).as_ref() ),* ] + fn benchmarks(extra: bool) -> Vec<&'static [u8]> { + let mut all = vec![ $( stringify!($name).as_ref() ),* ]; + if !extra { + let extra = [ $( stringify!($name_extra).as_ref() ),* ]; + all.retain(|x| !extra.contains(x)); + } + all } fn run_benchmark( @@ -931,10 +982,10 @@ macro_rules! impl_benchmark_test { macro_rules! 
add_benchmark { ( $params:ident, $batches:ident, $name:ident, $( $location:tt )* ) => ( let name_string = stringify!($name).as_bytes(); - let (pallet, benchmark, lowest_range_values, highest_range_values, steps, repeat, whitelist) = $params; + let (pallet, benchmark, lowest_range_values, highest_range_values, steps, repeat, whitelist, extra) = $params; if &pallet[..] == &name_string[..] || &pallet[..] == &b"*"[..] { if &pallet[..] == &b"*"[..] || &benchmark[..] == &b"*"[..] { - for benchmark in $( $location )*::benchmarks().into_iter() { + for benchmark in $( $location )*::benchmarks(extra).into_iter() { $batches.push($crate::BenchmarkBatch { results: $( $location )*::run_benchmark( benchmark, diff --git a/frame/benchmarking/src/utils.rs b/frame/benchmarking/src/utils.rs index e5d40c74432..7ed9a862a04 100644 --- a/frame/benchmarking/src/utils.rs +++ b/frame/benchmarking/src/utils.rs @@ -66,6 +66,7 @@ sp_api::decl_runtime_apis! { highest_range_values: Vec, steps: Vec, repeat: u32, + extra: bool, ) -> Result, RuntimeString>; } } @@ -112,7 +113,11 @@ pub trait Benchmarking { pub trait Benchmarking { /// Get the benchmarks available for this pallet. Generally there is one benchmark per /// extrinsic, so these are sometimes just called "extrinsics". - fn benchmarks() -> Vec<&'static [u8]>; + /// + /// Parameters + /// - `extra`: Also return benchmarks marked "extra" which would otherwise not be + /// needed for weight calculation. + fn benchmarks(extra: bool) -> Vec<&'static [u8]>; /// Run the benchmarks for this pallet. /// diff --git a/utils/frame/benchmarking-cli/src/command.rs b/utils/frame/benchmarking-cli/src/command.rs index 7df23f8dbfc..553b68c453f 100644 --- a/utils/frame/benchmarking-cli/src/command.rs +++ b/utils/frame/benchmarking-cli/src/command.rs @@ -75,6 +75,7 @@ impl BenchmarkCmd { self.highest_range_values.clone(), self.steps.clone(), self.repeat, + self.extra, ).encode(), extensions, &sp_state_machine::backend::BackendRuntimeCode::new(&state).runtime_code()?, diff --git a/utils/frame/benchmarking-cli/src/lib.rs b/utils/frame/benchmarking-cli/src/lib.rs index 8a53c9fd8b1..c2a228fc86a 100644 --- a/utils/frame/benchmarking-cli/src/lib.rs +++ b/utils/frame/benchmarking-cli/src/lib.rs @@ -72,6 +72,10 @@ pub struct BenchmarkCmd { #[structopt(long)] pub heap_pages: Option, + /// Display and run extra benchmarks that would otherwise not be needed for weight construction. 
+ #[structopt(long)] + pub extra: bool, + #[allow(missing_docs)] #[structopt(flatten)] pub shared_params: sc_cli::SharedParams, -- GitLab From c51455b9ce6901fb1624bcfee22724f7a6f69c6e Mon Sep 17 00:00:00 2001 From: Cecile Tonglet Date: Thu, 30 Jul 2020 17:59:07 +0200 Subject: [PATCH 034/132] Fix graceful shutdown skipped if future ends with error (#6769) * Initial commit Forked at: 5c34fe49f8fccb1f0e0f983bb7f8cbb8850b574b Parent branch: origin/master * Fix graceful shutdown skipped if future ends with error * apply suggestion --- client/cli/src/runner.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/client/cli/src/runner.rs b/client/cli/src/runner.rs index 219613e6bdd..bdbf55eb832 100644 --- a/client/cli/src/runner.rs +++ b/client/cli/src/runner.rs @@ -229,10 +229,9 @@ impl Runner { ) -> Result<()> { self.print_node_infos(); let mut task_manager = initialise(self.config)?; - self.tokio_runtime.block_on(main(task_manager.future().fuse())) - .map_err(|e| e.to_string())?; + let res = self.tokio_runtime.block_on(main(task_manager.future().fuse())); self.tokio_runtime.block_on(task_manager.clean_shutdown()); - Ok(()) + res.map_err(|e| e.to_string().into()) } /// A helper function that runs a command with the configuration of this node -- GitLab From 1ec0ba8633597c49d617760045293d2906724435 Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Fri, 31 Jul 2020 10:29:39 +0200 Subject: [PATCH 035/132] Fix link (#6775) --- frame/staking/reward-curve/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/staking/reward-curve/src/lib.rs b/frame/staking/reward-curve/src/lib.rs index 9b55b346d5f..275669fe26b 100644 --- a/frame/staking/reward-curve/src/lib.rs +++ b/frame/staking/reward-curve/src/lib.rs @@ -29,7 +29,7 @@ use syn::parse::{Parse, ParseStream}; /// Accepts a number of expressions to create a instance of PiecewiseLinear which represents the /// NPoS curve (as detailed -/// [here](http://research.web3.foundation/en/latest/polkadot/Token%20Economics/#inflation-model)) +/// [here](https://research.web3.foundation/en/latest/polkadot/Token%20Economics.html#inflation-model)) /// for those parameters. Parameters are: /// - `min_inflation`: the minimal amount to be rewarded between validators, expressed as a fraction /// of total issuance. Known as `I_0` in the literature. 
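[Editor's note] For context on the reward-curve documentation corrected above: the `PiecewiseLinear` NPoS curve is normally instantiated from a runtime through this macro. The invocation below is a sketch only — the parameter names follow the macro's documented interface, and the numeric values (expressed in millionths) are illustrative defaults similar to those used in the node runtime, not something introduced by this patch.

```rust
use sp_runtime::curve::PiecewiseLinear;

pallet_staking_reward_curve::build! {
    const REWARD_CURVE: PiecewiseLinear<'static> = curve!(
        min_inflation: 0_025_000,  // `I_0`: 2.5% inflation paid out even with no stake
        max_inflation: 0_100_000,  // inflation cap of 10% at the ideal staking rate
        ideal_stake: 0_500_000,    // 50% of issuance staked is considered ideal
        falloff: 0_050_000,        // how quickly rewards decay past the ideal staking rate
        max_piece_count: 40,       // number of linear pieces approximating the curve
        test_precision: 0_005_000, // accepted error margin when the curve is tested
    );
}
```

The resulting `REWARD_CURVE` constant is then typically wired into the staking pallet's configuration (for example via a `RewardCurve` parameter type referencing `&REWARD_CURVE`).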
-- GitLab From 9ae3a1ce15dfaaf4187152d90f39d7be66b51bbf Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Fri, 31 Jul 2020 14:32:13 +0200 Subject: [PATCH 036/132] Allow blacklisting blocks from being finalized again after block revert (#6301) * Allow blacklisting blocks from being finalized again after block revert * Use BlockRules for storing unfinalized and add have_state_at in revert * Move finalization_check in finalize_block upward * Directly mark finalization blacklist as badblocks * Remove obselete comment --- Cargo.lock | 1 + client/api/src/backend.rs | 18 +++-- client/api/src/in_mem.rs | 11 ++-- client/db/Cargo.toml | 1 + client/db/src/lib.rs | 69 ++++++++++++++------ client/light/src/backend.rs | 9 ++- client/service/src/chain_ops/revert_chain.rs | 4 +- client/service/src/client/block_rules.rs | 9 ++- client/service/src/client/client.rs | 33 ++++++---- 9 files changed, 105 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 525ea78d9d4..b86507cf072 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6309,6 +6309,7 @@ dependencies = [ "sc-client-api", "sc-executor", "sc-state-db", + "sp-arithmetic", "sp-blockchain", "sp-consensus", "sp-core", diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 9482a6118d7..efc5ca4ee8c 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -19,7 +19,7 @@ //! Substrate Client data backend use std::sync::Arc; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use sp_core::ChangesTrieConfigurationRange; use sp_core::offchain::{OffchainStorage,storage::OffchainOverlayedChanges}; use sp_runtime::{generic::BlockId, Justification, Storage}; @@ -418,7 +418,10 @@ pub trait Backend: AuxStore + Send + Sync { ) -> sp_blockchain::Result<()>; /// Commit block insertion. - fn commit_operation(&self, transaction: Self::BlockImportOperation) -> sp_blockchain::Result<()>; + fn commit_operation( + &self, + transaction: Self::BlockImportOperation, + ) -> sp_blockchain::Result<()>; /// Finalize block with given Id. /// @@ -449,16 +452,17 @@ pub trait Backend: AuxStore + Send + Sync { /// Returns state backend with post-state of given block. fn state_at(&self, block: BlockId) -> sp_blockchain::Result; - /// Attempts to revert the chain by `n` blocks. If `revert_finalized` is set - /// it will attempt to revert past any finalized block, this is unsafe and - /// can potentially leave the node in an inconsistent state. + /// Attempts to revert the chain by `n` blocks. If `revert_finalized` is set it will attempt to + /// revert past any finalized block, this is unsafe and can potentially leave the node in an + /// inconsistent state. /// - /// Returns the number of blocks that were successfully reverted. + /// Returns the number of blocks that were successfully reverted and the list of finalized + /// blocks that has been reverted. fn revert( &self, n: NumberFor, revert_finalized: bool, - ) -> sp_blockchain::Result>; + ) -> sp_blockchain::Result<(NumberFor, HashSet)>; /// Insert auxiliary data into key-value store. fn insert_aux< diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 7d27326678f..306c3c2b2f1 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -18,7 +18,7 @@ //! 
In memory client backend -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::ptr; use std::sync::Arc; use parking_lot::RwLock; @@ -646,7 +646,10 @@ impl backend::Backend for Backend where Block::Hash Ok(()) } - fn commit_operation(&self, operation: Self::BlockImportOperation) -> sp_blockchain::Result<()> { + fn commit_operation( + &self, + operation: Self::BlockImportOperation, + ) -> sp_blockchain::Result<()> { if !operation.finalized_blocks.is_empty() { for (block, justification) in operation.finalized_blocks { self.blockchain.finalize_header(block, justification)?; @@ -722,8 +725,8 @@ impl backend::Backend for Backend where Block::Hash &self, _n: NumberFor, _revert_finalized: bool, - ) -> sp_blockchain::Result> { - Ok(Zero::zero()) + ) -> sp_blockchain::Result<(NumberFor, HashSet)> { + Ok((Zero::zero(), HashSet::new())) } fn get_import_lock(&self) -> &RwLock<()> { diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index c26f7121493..50e14fcaae6 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -24,6 +24,7 @@ codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive blake2-rfc = "0.2.18" sc-client-api = { version = "2.0.0-rc5", path = "../api" } +sp-arithmetic = { version = "2.0.0-rc5", path = "../../primitives/arithmetic" } sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 086db73728f..d854c80bf35 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -50,8 +50,7 @@ mod subdb; use std::sync::Arc; use std::path::{Path, PathBuf}; use std::io; -use std::collections::HashMap; - +use std::collections::{HashMap, HashSet}; use sc_client_api::{ UsageInfo, MemoryInfo, IoInfo, MemorySize, @@ -70,6 +69,7 @@ use parking_lot::RwLock; use sp_core::ChangesTrieConfiguration; use sp_core::offchain::storage::{OffchainOverlayedChange, OffchainOverlayedChanges}; use sp_core::storage::{well_known_keys, ChildInfo}; +use sp_arithmetic::traits::Saturating; use sp_runtime::{generic::BlockId, Justification, Storage}; use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, NumberFor, Zero, One, SaturatedConversion, HashFor, @@ -962,6 +962,7 @@ impl Backend { // TODO: ensure best chain contains this block. 
let number = *header.number(); self.ensure_sequential_finalization(header, last_finalized)?; + self.note_finalized( transaction, false, @@ -1015,9 +1016,10 @@ impl Backend { Ok(()) } - fn try_commit_operation(&self, mut operation: BlockImportOperation) - -> ClientResult<()> - { + fn try_commit_operation( + &self, + mut operation: BlockImportOperation, + ) -> ClientResult<()> { let mut transaction = Transaction::new(); let mut finalization_displaced_leaves = None; @@ -1404,7 +1406,10 @@ impl sc_client_api::backend::Backend for Backend { Ok(()) } - fn commit_operation(&self, operation: Self::BlockImportOperation) -> ClientResult<()> { + fn commit_operation( + &self, + operation: Self::BlockImportOperation, + ) -> ClientResult<()> { let usage = operation.old_state.usage_info(); self.state_usage.merge_sm(usage); @@ -1420,9 +1425,11 @@ impl sc_client_api::backend::Backend for Backend { } } - fn finalize_block(&self, block: BlockId, justification: Option) - -> ClientResult<()> - { + fn finalize_block( + &self, + block: BlockId, + justification: Option, + ) -> ClientResult<()> { let mut transaction = Transaction::new(); let hash = self.blockchain.expect_block_hash_from_id(&block)?; let header = self.blockchain.expect_header(block)?; @@ -1488,7 +1495,13 @@ impl sc_client_api::backend::Backend for Backend { }) } - fn revert(&self, n: NumberFor, revert_finalized: bool) -> ClientResult> { + fn revert( + &self, + n: NumberFor, + revert_finalized: bool, + ) -> ClientResult<(NumberFor, HashSet)> { + let mut reverted_finalized = HashSet::new(); + let mut best_number = self.blockchain.info().best_number; let mut best_hash = self.blockchain.info().best_hash; @@ -1507,18 +1520,28 @@ impl sc_client_api::backend::Backend for Backend { return Ok(c.saturated_into::>()) } let mut transaction = Transaction::new(); + let removed_number = best_number; + let removed = self.blockchain.header(BlockId::Number(best_number))?.ok_or_else( + || sp_blockchain::Error::UnknownBlock( + format!("Error reverting to {}. Block hash not found.", best_number)))?; + let removed_hash = removed.hash(); + + let prev_number = best_number.saturating_sub(One::one()); + let prev_hash = self.blockchain.hash(prev_number)?.ok_or_else( + || sp_blockchain::Error::UnknownBlock( + format!("Error reverting to {}. Block hash not found.", best_number)) + )?; + + if !self.have_state_at(&prev_hash, prev_number) { + return Ok(c.saturated_into::>()) + } + match self.storage.state_db.revert_one() { Some(commit) => { apply_state_commit(&mut transaction, commit); - let removed_number = best_number; - let removed = self.blockchain.header(BlockId::Number(best_number))?.ok_or_else( - || sp_blockchain::Error::UnknownBlock( - format!("Error reverting to {}. Block hash not found.", best_number)))?; - best_number -= One::one(); // prev block - best_hash = self.blockchain.hash(best_number)?.ok_or_else( - || sp_blockchain::Error::UnknownBlock( - format!("Error reverting to {}. 
Block hash not found.", best_number)))?; + best_number = prev_number; + best_hash = prev_hash; let update_finalized = best_number < finalized; @@ -1531,7 +1554,12 @@ impl sc_client_api::backend::Backend for Backend { ), )?; if update_finalized { - transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, key.clone()); + transaction.set_from_vec( + columns::META, + meta_keys::FINALIZED_BLOCK, + key.clone() + ); + reverted_finalized.insert(removed_hash); } transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, key); transaction.remove(columns::KEY_LOOKUP, removed.hash().as_ref()); @@ -1562,7 +1590,7 @@ impl sc_client_api::backend::Backend for Backend { revert_leaves()?; - Ok(reverted) + Ok((reverted, reverted_finalized)) } fn blockchain(&self) -> &BlockchainDb { @@ -1986,7 +2014,6 @@ pub(crate) mod tests { backend.commit_operation(op).unwrap(); - assert!(backend.storage.db.get( columns::STATE, &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index 2cf994d3f59..be7953e528b 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -19,7 +19,7 @@ //! Light client backend. Only stores headers and justifications of blocks. //! Everything else is requested from full nodes on demand. -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::sync::Arc; use parking_lot::RwLock; @@ -146,7 +146,10 @@ impl ClientBackend for Backend> Ok(()) } - fn commit_operation(&self, mut operation: Self::BlockImportOperation) -> ClientResult<()> { + fn commit_operation( + &self, + mut operation: Self::BlockImportOperation, + ) -> ClientResult<()> { if !operation.finalized_blocks.is_empty() { for block in operation.finalized_blocks { self.blockchain.storage().finalize_header(block)?; @@ -231,7 +234,7 @@ impl ClientBackend for Backend> &self, _n: NumberFor, _revert_finalized: bool, - ) -> ClientResult> { + ) -> ClientResult<(NumberFor, HashSet)> { Err(ClientError::NotAvailableOnLightClient) } diff --git a/client/service/src/chain_ops/revert_chain.rs b/client/service/src/chain_ops/revert_chain.rs index 129aea04086..eaee2c03f9b 100644 --- a/client/service/src/chain_ops/revert_chain.rs +++ b/client/service/src/chain_ops/revert_chain.rs @@ -34,10 +34,10 @@ where let reverted = backend.revert(blocks, false)?; let info = client.usage_info().chain; - if reverted.is_zero() { + if reverted.0.is_zero() { info!("There aren't any non-finalized blocks to revert."); } else { - info!("Reverted {} blocks. Best: #{} ({})", reverted, info.best_number, info.best_hash); + info!("Reverted {} blocks. Best: #{} ({})", reverted.0, info.best_number, info.best_hash); } Ok(()) } diff --git a/client/service/src/client/block_rules.rs b/client/service/src/client/block_rules.rs index e862379a564..be84614c2a5 100644 --- a/client/service/src/client/block_rules.rs +++ b/client/service/src/client/block_rules.rs @@ -30,7 +30,7 @@ use sc_client_api::{ForkBlocks, BadBlocks}; pub enum LookupResult { /// Specification rules do not contain any special rules about this block NotSpecial, - /// The bock is known to be bad and should not be imported + /// The block is known to be bad and should not be imported KnownBad, /// There is a specified canonical block hash for the given height Expected(B::Hash) @@ -57,6 +57,11 @@ impl BlockRules { } } + /// Mark a new block as bad. + pub fn mark_bad(&mut self, hash: B::Hash) { + self.bad.insert(hash); + } + /// Check if there's any rule affecting the given block. 
pub fn lookup(&self, number: NumberFor, hash: &B::Hash) -> LookupResult { if let Some(hash_for_height) = self.forks.get(&number) { @@ -66,7 +71,7 @@ impl BlockRules { } if self.bad.contains(hash) { - return LookupResult::KnownBad; + return LookupResult::KnownBad } LookupResult::NotSpecial diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index b152415a4a8..d0859f4ee03 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -1054,20 +1054,31 @@ impl Client where /// reverted past the last finalized block. Returns the number of blocks /// that were successfully reverted. pub fn revert(&self, n: NumberFor) -> sp_blockchain::Result> { - Ok(self.backend.revert(n, false)?) + let (number, _) = self.backend.revert(n, false)?; + Ok(number) } - /// Attempts to revert the chain by `n` blocks disregarding finality. This - /// method will revert any finalized blocks as requested and can potentially - /// leave the node in an inconsistent state. Other modules in the system that - /// persist data and that rely on finality (e.g. consensus parts) will be - /// unaffected by the revert. Use this method with caution and making sure - /// that no other data needs to be reverted for consistency aside from the - /// block data. + /// Attempts to revert the chain by `n` blocks disregarding finality. This method will revert + /// any finalized blocks as requested and can potentially leave the node in an inconsistent + /// state. Other modules in the system that persist data and that rely on finality + /// (e.g. consensus parts) will be unaffected by the revert. Use this method with caution and + /// making sure that no other data needs to be reverted for consistency aside from the block + /// data. If `blacklist` is set to true, will also blacklist reverted blocks from finalizing + /// again. The blacklist is reset upon client restart. /// /// Returns the number of blocks that were successfully reverted. - pub fn unsafe_revert(&self, n: NumberFor) -> sp_blockchain::Result> { - Ok(self.backend.revert(n, true)?) + pub fn unsafe_revert( + &mut self, + n: NumberFor, + blacklist: bool, + ) -> sp_blockchain::Result> { + let (number, reverted) = self.backend.revert(n, true)?; + if blacklist { + for b in reverted { + self.block_rules.mark_bad(b); + } + } + Ok(number) } /// Get blockchain info. @@ -1921,7 +1932,7 @@ impl BlockBackend for Client fn block_hash(&self, number: NumberFor) -> sp_blockchain::Result> { self.backend.blockchain().hash(number) - } + } } impl backend::AuxStore for Client -- GitLab From 64543a310e65164b6e0185612d61fe0c9d085b92 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 31 Jul 2020 14:58:38 +0200 Subject: [PATCH 037/132] Order delta before calculating the storage root (#6780) We need to order the delta before calculating the storage root, because the order is important if the storage root is calculated using a storage proof. The problem is arises when the delta is different than at the time the storage root was recorded, because we may require a different node that is not part of the proof and so, the storage root can not be calculated. The problem is solved by always order the delta to use the same order when calculating the storage root while recording the stroage proof and when calculating the storage root using the storage proof. To prevent this bug in future again, a regression test is added. 
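[Editor's note] Before the diff itself, a minimal sketch of the idea behind this fix: make the delta's iteration order deterministic (sorted by key) before it is applied to the trie, so that computing the storage root from a recorded proof touches the same nodes regardless of the order the caller supplied. The helper name below is hypothetical; the real change lives in `delta_trie_root` as shown in the diff that follows.

```rust
/// Hypothetical helper: normalize a storage delta so the trie is always
/// updated in the same (key-sorted) order. This mirrors the
/// `delta.sort_by(|l, r| l.0.borrow().cmp(r.0.borrow()))` call added by this patch.
fn normalize_delta(
    mut delta: Vec<(Vec<u8>, Option<Vec<u8>>)>,
) -> Vec<(Vec<u8>, Option<Vec<u8>>)> {
    delta.sort_by(|l, r| l.0.cmp(&r.0));
    delta
}

#[test]
fn order_does_not_matter_after_normalization() {
    // Two deltas with identical content but different supplied order
    // become identical once normalized, so the same trie nodes are visited.
    let a = vec![(b"b".to_vec(), Some(b"2".to_vec())), (b"a".to_vec(), None)];
    let b = vec![(b"a".to_vec(), None), (b"b".to_vec(), Some(b"2".to_vec()))];
    assert_eq!(normalize_delta(a), normalize_delta(b));
}
```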
Fixes: https://github.com/paritytech/cumulus/issues/146 --- primitives/trie/src/lib.rs | 54 ++++++++++++++----- primitives/trie/test-res/invalid-delta-order | Bin 0 -> 3476 bytes primitives/trie/test-res/proof | Bin 0 -> 4680 bytes primitives/trie/test-res/storage_root | 1 + primitives/trie/test-res/valid-delta-order | Bin 0 -> 3476 bytes 5 files changed, 41 insertions(+), 14 deletions(-) create mode 100644 primitives/trie/test-res/invalid-delta-order create mode 100644 primitives/trie/test-res/proof create mode 100644 primitives/trie/test-res/storage_root create mode 100644 primitives/trie/test-res/valid-delta-order diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 5ab06cecca6..73a4a8029b2 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -186,6 +186,9 @@ pub fn delta_trie_root( { let mut trie = TrieDBMut::::from_existing(&mut *db, &mut root)?; + let mut delta = delta.into_iter().collect::>(); + delta.sort_by(|l, r| l.0.borrow().cmp(r.0.borrow())); + for (key, change) in delta { match change.borrow() { Some(val) => trie.insert(key.borrow(), val.borrow())?, @@ -259,19 +262,12 @@ pub fn child_delta_trie_root( // root is fetched from DB, not writable by runtime, so it's always valid. root.as_mut().copy_from_slice(root_data.as_ref()); - { - let mut db = KeySpacedDBMut::new(&mut *db, keyspace); - let mut trie = TrieDBMut::::from_existing(&mut db, &mut root)?; - - for (key, change) in delta { - match change.borrow() { - Some(val) => trie.insert(key.borrow(), val.borrow())?, - None => trie.remove(key.borrow())?, - }; - } - } - - Ok(root) + let mut db = KeySpacedDBMut::new(&mut *db, keyspace); + delta_trie_root::( + &mut db, + root, + delta, + ) } /// Call `f` for all keys in a child trie. @@ -468,7 +464,7 @@ mod trie_constants { #[cfg(test)] mod tests { use super::*; - use codec::{Encode, Compact}; + use codec::{Encode, Decode, Compact}; use sp_core::Blake2Hasher; use hash_db::{HashDB, Hasher}; use trie_db::{DBValue, TrieMut, Trie, NodeCodec as NodeCodecT}; @@ -856,4 +852,34 @@ mod tests { ).is_err() ); } + + #[test] + fn generate_storage_root_with_proof_works_independently_from_the_delta_order() { + let proof = StorageProof::decode(&mut &include_bytes!("../test-res/proof")[..]).unwrap(); + let storage_root = sp_core::H256::decode( + &mut &include_bytes!("../test-res/storage_root")[..], + ).unwrap(); + // Delta order that is "invalid" so that it would require a different proof. 
+ let invalid_delta = Vec::<(Vec, Option>)>::decode( + &mut &include_bytes!("../test-res/invalid-delta-order")[..], + ).unwrap(); + // Delta order that is "valid" + let valid_delta = Vec::<(Vec, Option>)>::decode( + &mut &include_bytes!("../test-res/valid-delta-order")[..], + ).unwrap(); + + let proof_db = proof.into_memory_db::(); + let first_storage_root = delta_trie_root::( + &mut proof_db.clone(), + storage_root, + valid_delta, + ).unwrap(); + let second_storage_root = delta_trie_root::( + &mut proof_db.clone(), + storage_root, + invalid_delta, + ).unwrap(); + + assert_eq!(first_storage_root, second_storage_root); + } } diff --git a/primitives/trie/test-res/invalid-delta-order b/primitives/trie/test-res/invalid-delta-order new file mode 100644 index 0000000000000000000000000000000000000000..e46f280dc29bb2cbc3a28faeabe51efc747ba4af GIT binary patch literal 3476 zcmai1XHXN^7AAm-)PR6AS(GBurAP^#rT0Z6NDaLOLP9S~K&n9yk>2ZuDlHhQU|1kj zl_o_>XoB<-K)^h=%+7m{AAA1XbIyGCd^2~>o%z0pwE`9LMjK#>g;1Ru+Enx5;}UI3 zjO#~M7a8{_S7;eywQ{zqrw(Q)Mefj$k&z`RIQT*RJpB;1{t!<$M~JVRx0kK6pO?d* z2@C>*z;ALCWXS&~kghx$ke=Yv2ppQ(ON`1*1E}`%ydeKE{GUJ~ zffI526*R_;p{Ks7>EY?P=)D*Q9&;^8HAvC^0DhPZxr=kcjaW?he1%Wb2dmxaZv8;j z9iQoVeC^`(S)_xV(`8!fjEjz()!)r!AA8#MzLqlq04aVDswy}I`oi735N-~(ZeET~ zftN?;|8#}r(0*tnYGH4J()zp=z|;KN4V)`hgVhfYlc0=g*P0)A@sBj{m&5A1nX@ceI5%ArKH3CxpV~ z%V=n6xumNxd#e0X?}3Ck@5_iMYyY+SHexI$*?!Gr+uf|`IpJ(0v+R}a#ycW4d0v&nt5J;uJV2$s4{?^#5a z6GdQeKNUrF6PL?a-^MK-v^2{0Zr+aLPZJdr{*G!bw4Ho8WIs#UaJQd12$$T%#l(wm zrs@^FkPhuImXmGx7)^`004Gf_4n5gHhI|t*RQ5kWhyC2%2Z@I-uKi4z&g_R@VBgrppi_B%SJmi zKen-x2~W(^qzl(3J2vZ&vKx_l(OTuQmJR?F+qj<|?su=tsXRy(2(Q0`7Z+dkXbhcF zO550ENn5+h!y%)EEg3f^a%oI@3acuIMzwY+KZx&}IL}G9_~fRiH^O4nl_f}}k!ux)si}7XtvNF_A6q)UWa{zq_DEMnowpRQv)etcwvJO1{q~?;B0F!< z@Y;<~HUMz#iU6&V&xL9-@eN+g7K}Sy(za1{$lRF-2$!4IwjItuUi4?&M;eDIAO8|3m(bXrE+&`||Qz(Q! 
zW{@W;hVv?fvSc%_+Z%SE<4wbQvU{6$gGM%Gz#Y9E&wcws-`)Mn-w5(H6mNd)Tx_!z3S6 z&38$Jt|re#=jwY6w?ADq_qxcJMzJxNoEQVfjnS46e(qokgN z&Ctayv^{X0jgH!!qt=GTajCa!Dk-67fr~Ve7R9U~e#`aN2FLieX2irKy&{&oxX{{z zw~nC)wKep~@!-Uj*Lm_rZDyq0OoPpy8EKsMQ+|iU>c`zVt+y#{TTwZt4mZ%&ceiG1 zV7l5XOlb-oKF0RqE2$+@+k_&N)Ig#Yv)h|Cv$ppZjykA8?TdZ$>ZYNX*Pz<1@Xsr^ zXcp5Y)py=c6X?6B()HviSUnH*qB8>*5cKmk39A6}uJX9E4Az8g!*=sPB+anNipf21 zm!N@g##Q;zObST7d*64(e1Z@~GOCyOu3$)o(fNc1&h?D+r3F+-KZg0RJzkIzw~C|{ z0&N^{Hfr4vws_95CY<_K7yCZ+%@KLKT+(M2TQgH(olI<6yoI3V$WDXiwq4sj*{mek z$HSFrLtpAQ10pAwZ)O|@fkpV-MJIsO*ke}iUY|mv%9e|%Vm{WdqwT4)HvH)&PQ@Ym z^C4dxev1%=vd2!YD^A}w^^!2xTV7r80yHiz6VJ->ToJ`t>NaSQ!<5ihrS5guxq>G= z2**u`b=KxNgRr4|bSniS!iLd|^3yst14VT)3A%!Vzvu3K%i!ImSk$#ZvbXZ7W9+-I1TJ}a$uK^0 zk`nHkNU9OeRd+LBKljU*is!#)=g45Y&4QLQdnr=B2D6&3GKJjp=ssC7^$mTIrYg95 zN}OrjVRLh9uH>l_F;|Ba1PV*PxKaxacR|}2G~4bGi8qc;n;Jv}sz((8?%+)|^0LoG z^%g4M)<3u}N~U5KZD%I2uPgOg;>JP<9jl&$8ip%*zK3}N&Sh2E{CL$(O#gE~{fP!5 zeM>!K?R{HQI*RJNbdi1+`;aaE@#Mbe>~7z6tP)4EY32n^XP@?F_9%HRxU{4*@~@Ga zXin-(v{`Tm*D`z*2FaxdM^CBYyBZTy`>4bqB{QK+Y@61c*r{6(@H6vxY4LoAv0Vir zr}QU}&gbFD7@?gum^yBYGRIjf0QqDwiVlMT(~C$|G}SuHfny_KiFATZ|o+5wG7(LUkaJoZT)LSxK)t>R=-l zx#S`LSLx3J8L46lnTCw8kL$1itR_&U-&2w~6hKoqGoP2ogB#D^cszY`T5EY+Jan3)&PLh~hn0Fr>0ycbp2L8K~X1*b$BOorX|zr(=2GRm2}P_xQZ z*R18*LaDB*s(-_nBQ1*olD?EX<}lu$2-!LV6&CMxzWPTXqphQNfY<+u zY5?#L(Rs|l-Fa`g4 X{$X-{*oOa#KRSO|AL_rmKdS!#%te!7 literal 0 HcmV?d00001 diff --git a/primitives/trie/test-res/proof b/primitives/trie/test-res/proof new file mode 100644 index 0000000000000000000000000000000000000000..278bea0bfbbc7fb16364eddbded70c884dd7ab71 GIT binary patch literal 4680 zcma)9WmMFSvt|JSSxRCB1eAuQq@=r3ctJvP>0FR*el)m*tJ1lYwAA8~N_QyTAt5c& zBJSV!z4xB;-uvm!r)TCoGiT16IdkUGP9n<3SHnj;eStWNx;(!_axYF5iv^U+3yET? zXX}o8(}ljar*9z^a61u|H@RLPNT5v5-Le3dvcC?h7U^bIj6U;!NfS*8Rw{0yu{&?k z(XqQKxYC^kES{;j&A*hQt{tVgAsa84l_F6!PGNZc^9Gz+Lj0ti5O^lKv9MSSk)K-EQc)1#3%WCT>~{L#1$KF(A8O7j~Pe8>OEZFO~%NF;Z3&X>j$a_ z^uYj{Z;pmqk-yjYTbovLHNY$D>8b)~Qi`zJewC=8k~|h*i(8f>zgJ;EW{w+ynW?aN zC;L!>@ZiVz7wKZjR#bZwU}KmA=VTXW>CB^f7B69|$jy~21DWLrgr-XDWEx*b0zRFe zkzi@lC#kr(eh8b=;6x{jSTT5Vm8#8=MCg{_Vnk=1$;yw%TbzfHN zUYe3N1@pFg-vUun#&;PjYzr6of*9$T1u!-huTo5Re6M-Od_@`f)yn|v@g%lYDsM%f zw#!`ldF@9Msqx3g5)~b(t;kU2kW&$$tezlizvcr2>B29|IfT4G_n`IfN-2&xmE6E* zgzFwxfK?TZj6}kcL3DRZno?juH~Um%HFrr3Kkcobu!dMnIuIYlO3RkO#lUq+bJZ_V z>eme!t?QQ*bP+@0e8X}*QvebLn!>{kjt1IB6wCRPS?vzlLj0PXx@mUk3|MwfqSbP& z?vCsZ#H(i46)f$cxE=Yg3@NnzVczb1KK9-|e4a2bn5`4c8BPNNaH4@a=D5xbt%ni` zc2fjDh|E-vPoYFqC2;%+hwMb!IomPd4ldswOy{iHVv7ou!BOPIhU56MiZX;azVlJf zyF*409VpWF6Aob7JDqt$!-%M=N^yHWDJN2sF-@Xurno*`2^v>*2z%+(pMotP_ogiQNOY1r2-CcHsCI9J7w}SFyM1BoYX|- z>tXU41RTsBX`0G<6CcwBP(f=jY7Mn3vXr$oU(!8LX6DUZwXz((g?o>0TYz^wJz><2 z1!--w^kyz35zjV=DCGE8rB=lm_rD5O`lwc~F3IEkAB*<$^02k{_O`dPvh(nV`@_8K ztlafruJXv2vU=GPCS$wP*W4TZu#5n?&3ha8Exf`7+HaV0=gflFjZrNgH zp4e8|7)m*OJ>L(S%Op^$IKsuB*P;RayUBl=_;=v{i`nr2-V9)Ji}D4T_4ABgaWlrd z%jH9H!EtQUHC&y@H>S#ca~luO{{(5JsVufGaOb|Nhojr^JjesqYeV;wzq5 zHh5WwG9SHMP<=%y5C};A#lc|r_Iu>81}?l;g**=;cBG8Qh`~OCQpX3?BbEU)bRWq- z*qA0CHEq>cM%zR1DtQjf<`}z-5xh?|H!Jqa3+Co*2lH|EfLl5E!fpQnf3LK=HyaL4 z0Zv^0pP|rkhvSe!|9TUsrWZfCk%dg03h<(rIPeRe=0M)t)__8MJ)sF)Dxd&O-sJC2 zu2{ydG9}HM*&k&#Xq(1D&(tF_w?dqp=ncB|Yd%EJ#bznwWBmGQOQTr-0dqWi8XrH; zZ$vA~YWDT0%@TeC=MZ&ZH?oO=l6A0|sGsjvB&6f1xLCuN7!vfYQOeA{(dWlN@J|7h ztlw!_sB-%NECFg?lAj#@**Ejz;!Ete3OD7xO*<*MQ;OS03%~mpZR67k^&jpJXH3lQ3vZCk-ja_6PIO1L5oxQ~wNKsz@3t6~%N_a|M=ONCb_$iKqj0`U zuh3wGfA#>p)eI9yz00Zy9M42M?zOw;$2ESU9K6U=Aq`iqigCNrFI-Bn=j4mUQ_nL zJzUe}5x-gr=;*ACt1aRc_WH#h>vY`iVsjF3pD^< 
zAnO8Y!YC_GRkQ(bfxrvSPELXaH9&szqiz&sotE##(6a`73z$hwJB+RTzNG&JkylQ7 zfw=Dxw)kO5u4{CW7pWYg;XaV8lcS02e$m9*Y)M*eO0Qhpm149bsM<{*%}UO--; z6UK_mV^}|DZN5>?g*6=)t7Ryrd4<9Kd2A*>ufO0)@m8h`UkAHLL!i0lp6YO2N;Jro z*~N4vdeFxZu99eZs$b3-J6J#(b_|v9x$Ij?&0bGQwcuSR&LJfGt5X=7 z^(d8=j*zP$f`-@ImW!gIPsrHR>4p>tyK+aKY#vJF#21x?C1S;abh30GNAmPK7XkZv z-9-yRJB5P}EQ{KDO1R}mPaMOan1c@674no?+!x6%h@hx%XpU22p@dVuV;+vd26*}1 z-3J202CuooMe`fQPwULP8)uJQ6_5r}0~w`-gL=Xn2Hqab)SVpY+uP0oC~|08whsj@ zyCj#mkPc^+457)U-nG%`C`-@}?aS_O`W`rbAZpan-{Jmb$|rHNDzNHlo}4z_W!wgZ ze2}(_xw#SjjIO_dI~-I}^2_K=x3J8&^C|m0z87)~Vmqq(4BVF)fr9*;ljwxLUzr2V zZwq-*kT=ma%~?#~6d|vLUZ$omk7qjPqrS5B-BvsAU2k>ZODmL~?-X_C48j>p?yzOSf$eA9Lqouh9{Rc66? zxK!h*p|(YtCfV`gsg1x^YVrKhMj=9YIN6-q`9qsQ+k~l|I$}ia_J>Jz(`f8_soKNv z>w1VD7p-$b8Be!qoo)rCZr@J72^G*`I7rU9x0PFSaiV z)>;kzX8SIJ&z&xAc2|0VSs(t`M00a{4GwBt-@Ldf&2#cD%2Ko}ld_$M?#uM-dj67h z^9t-{+K6*ddvpV%MZD0c2BC#WsjF;j%4Z#GehNbZ46}C##bD&#i$MV7M5bN3a>f8>E>mifWxrhY;{rP) zJ=t)vGTnr{sk+@x8VCMAa=(46Nb1eb5ykWv1a4;b+IZI`KXH5L=jdHz$5bNPOVY6W0|FPv(?GP1=aa=Qv)wY^`tb&#ppl* zw{*F%-c;`A?q`>Ep;Yv`)#5DXeWeyn{8aEp$F|#ulFnA1&siRaLupku`=I;*dML?P zYo_6Xtfl^5t)P`YDPDD6I`5F9P4FT6G;$z`&iS{xdGdHRc$MRNDXKrSPX@l`*phya zeFxhFzN*txqv<{+%y=O=f-5pIc}?u~U1?_i2O>F8#sHd$X;WR{zjh9!Sf-vXDO&A# zYF)9BQ}WRDQ&Kn*4Lxr2RO~*)&v8)o2cEA-lcLc^WW2%^O|`ZwMsX2eFO$o8;;Y!9 z(_`fQolh%qALW%hm!O`r&=n{8X!^!=my)nG4Dh~x)w^OpoXwm#Qr@{PAR17N;X1Ns z{}Jt2mQAEozo>|3-?-aWnCZz@bkL+**62Y?gf#I9lBrPTp#k^?D%uXmtofF*>Fp$W zM}PC95tUC`5&TlIl5tmY?<0UgXp7%1#f9p5uIu&FOoy*A?_e*Yu8BMaM)~#kJGOLT zC;J4K#$PnvL=F;vCTLY?OI4{O`5L+F8rpT1chNZotY1= z@f^VzWlt`A+G~<`r>l+;IA>_}X-*QC4oZl5kQeHHVlp^zp%lQ zhaXRIMKded9$LbCfRWbdFoPZMq39jcX-N1#T`@jQS+VM%=N9|RPY6YOS5a(~WJw5K zM;xR}BaR&Pdz=0;;2u#digHvJV_b&;VN@vOhTMdxLqOoV#nrsLN8M9wmA}3ls|(pQ z(}SESzG_l!3a~`S1PS+NjAZ5|(15@BZ%94%DxTnzJoZkN&I)=HsXZIY?8idRv^Dtv zxmL4HS=X%U)Pk=lFRw-Qls+ws9G3nycgl8pC>eHmBZVqD`ScA?z%jH;7=NPjgXyf~ zzVeAITvvs{?$3t_Y`0HgQT-hx&w59yohw(A=b{8&9gYvs*gO=u4)NKWDaQl8F#tNh zLIhew<}_9yi-|L?8cXt(A<3OTN@De_)r08GAOP?wb2Z@_{sx#ZgCvKp7`iX^9{ntA zPTM-esP?h)5M=_=DoJ5bZc1#Jb#c1&#V*NjSJxWOo~#3LWHz_;2ag8e4*QTHVr45} z17g);mNdk(YU~=O^O2@^l1OhC%rLPB%rVn6MR8hIh0|nz`3u|g13E|adh8s7|Hf$l z+Ns|g3E(IF6x~Im@K!}!cCF+dw3>B--cxK;kl;QQGQK8hmg8*HS5z$L zC|e`6CXr#kL^Iy&pR5$Stw;SWBo%=ntZ=57EFpIs?c1VZF%({dvr8oZL!b=$2mPJ!za$pU-;w_+L;T;|Kb{0$UWJ72 zp3&pA5cL|8RFkpGyGbTi#2L38IorJ^V#aRP{qM!~DDeJd0HyOU$ev1Ty!}uw6^l8N z-@{k9uRT-#D?m+>T<(GGwGTk)Z6=L_r~!s+OE~vfNIr{8bid5rdaP6(ub3Ky7r+VN zzkX<8l!|Y=&;?St&1ZbiZuf95*T8r^Mq_ykjE3m|E1MsOBz-O>qkfMRofzU)KA^vb yYgqA(s(;A~q$hH=15Q*ugw?8oPG<89c_3;8`+0P>DR-FH&-xKw5*Z|1p#K0WpTDL6 literal 0 HcmV?d00001 diff --git a/primitives/trie/test-res/storage_root b/primitives/trie/test-res/storage_root new file mode 100644 index 00000000000..758626802e1 --- /dev/null +++ b/primitives/trie/test-res/storage_root @@ -0,0 +1 @@ +=[42%JP hKw)R 0ԭu \ No newline at end of file diff --git a/primitives/trie/test-res/valid-delta-order b/primitives/trie/test-res/valid-delta-order new file mode 100644 index 0000000000000000000000000000000000000000..4df6e62ec133b05cab2b53f00c8a4286eef70865 GIT binary patch literal 3476 zcma)2vS3DfsoM45s+#SM5On+p-KyeDi{_B zRi#Oh5}F{r1Q0Oi?%nQh@Az;xpJwL$XWqg;1Ru+Enx5WAf6j zx~w+^#%7+TS^c_CtH>@9(v?R8(i40dfkQKUiBY*}0M&k;7vyArlS?7UF>V{yD=toL zQIKdL96t$|rTr6U3jRa;Js$9<4Uq&L%V?@@@8*PcVuq%8d-8V@6j zIKggnzZ_&Nsh!?TP;l^r`g!^xZ2cjgZjKONH*YUnXFo58KNJ`Q4uK*7(qv=>WNZ8s zWPo4gDE^NMwyLKNW++AO(EMoZ)n6CD;NA{S2!xZPt)q87_3oI`%oKV)1WnQ>KDh}91!1c(>Z1O)1d zB<{L;kZ!$0Yv`R#?xXE7_Kiib)NOjtBC?z)0(<+ZD5{&dT*mq~Zt7v}|C_|Nk@5KYH#ht8 
zBr&9lxt9zZae}K8=9V|52=tQ|48uY1aZlT*3^X0vN6$jmTfpTiTmFxu)WTo7hRN3f z-L7+}HGnA(IcZj!A##PkW>o}@WMW)4+M)Tejh#$*VxA^lxHj3bS$~w>h}4VLDwnl% z0I1l;{q%6ZdtFZDL8?G_{T;lx_^L-^=#*00#wJVJ+EpG787*wdxG|ASW71PtRXH@O zwM+RyeBZ=*PP)Y>H$A;M<~BgRRX`=}GZU+iBiE@cK`M=0t2j(ey$fi~nW_2M((xrz zkC(Scx+?0trGTB??s2tsoSNvj2kjEsd5ebEZiKP{fNNJg@Q0|_N>0hA^RW`^6TIZx zdO9onDiqQRGl)QFaL>bLuZeC!zXS13Z>nYdq^w0F`1W@ubeQ!JMa0{-I(5#%(&-u` zMG)^aUHlNQa8;CL z^a44*ZBofkrMVta#c(O(=sZ& zVCFmS6Pe8(u5ir+n*-DGyD|6zx_|5l$@?Dn%%)~9CZ$@7EM7GXavoA9J_L6&WLnm# z^-p@2DJn*&R8R?p*E^KU6h`gQ^XahntNpJx;hLk`=9@b+1|}Nh<=YQj!XH=xw%XP5 zG+LlD^h7G5!Y>$s18T|m1JNB}0epj3`So>9@d2}!g5fgxjdBNdR=CE=9S^l+Gf-bf zSrOhuieQHDzRlbvfVsHn>Jv)tA6M=v6ha>}$P*RAc@;ugvYFTI4Li{BreQtVy-mA8 zBbzedj@}OFgE8NP<*J}6<2)4u_TyLrP&L@V-O38gF<}&72K54zmY#!Ca8gQNj>eqw zM154aWY@Gzx$Yfj1lbSJQ~4*6ES zicw$WofWmlcTP4yvc;}Ux*yz1Gw`7bO)D4+9rYuBll|=BDT2G`zIzeD6zw!+$KII3 zIOfS}5E}OGoSQoa4kFBGi{Q3BY+9pXk`JopyCgzaljovy^}UAMpRSsFUF1un*cePs zjEGZrZLp;H`}q~M*Vm)%^h>;Ic)Cn1K<(mBhU*Xy8M54cm$9F@Epdq<3ox3ca49V{ zMSrQz!WH4U5IN~H6t6P%K>6q^qT2XTQqRI>=;9XI9=Og%M{UkgYeVC>)Y~-8Akm81 z?M<6m+j|Q~9n_%q#lCrU(@@N7Q0-Ru=apMDi|LZ;JMX6n^j%cxdh!&ko`-tTnSl!k z`uUoKRe*U{dE8kBYr?i+yLljzX4quKohd_n`~dPe%v0xF~*!+h8tFUW{nMbZj^HV!x&wQdMoJm**wPJOG3eINSfh`e1c z=`)M1nW?Z&CN?eJLQr#Lr$KYuuI-*|Rub&v;mWk3FZG)NkrT`}GY*5mB7E+m6ToWh zF)MekPoYs|%f(bNAM4lA_S9J${`3;3;t>7$kS`9uMuY<0iy9YxA5z*ib&Yl>!lA!)QkNX&sw^ zqPmy_UBSWMa`(Pv@a|G9>RKRL1|Ip2T!{>T#4&p`Ut|Zo-z5}p;>w<<(1Uj%xwj@H zGeKt3wI`g{5CysRf6- zpluAAZTE=88%L*24I%>7qly4`@TMAh+2^8q3zcu{AKVuuQ!$ISGn3fYmHI4kVVui6hxG^8%-{PkS?al)M&PTGAQ$*GNq?Cv_&;EVzSf89oYw0i<)GY}3nR&dlc)r8fu7Z$L`jbcJ^YCPh&`ujn9k)f9*pt}T5lFmHi~j|XsCAU< zacZ3DycGQ_#OLWL6-<0c%=Bx=iV(%4OA9YhA@zfu#wyL$IYSz&%MXq~MM1R7eeM5DU&8o$lOdQiZZtM~feEwfZ(czi)8m>JbzW zAI+H;2HgWsk-uk*p6ORO-JvXMun`ucP@mJw3ULxD71_ip>ULN{b~Xw*T+&%+#X3)J z87Ixzc5vL2{l#ChknOs<>=5m&q*onvun~(~@{s?l^k;#LR5682Lq^!gbyxsa6R6Vf zDajlPpsAag&&%V%jqz2U?>^R%v~T7BxB{2-ZY+!6iHZ)E>dhF;%!_BC`4T_?Nx)0q ziz@6OQWdj;Qz8u}!*2WEVc=dFF4nmhQ9>j|fyi6|s5Hyxgs?OZO{%wW{Z50jj_McKj$dt_)N#+YZtH2A|33U$dE|jMBIJ_jd5e>sc&j}csee6FNT4~ zTuV|7QnWvSA0|^(a0>K=yLlnp9BkdZ9GwEmE@jRC=?crC{m@9%!rla>^?56Rr~4Tw z8}VZ?xwLq+ceYC?XFQ5fKo`>)otrXUCP See7x1`&!Nf0HpZQ^gjVW Date: Fri, 31 Jul 2020 16:42:53 +0200 Subject: [PATCH 038/132] Don't close inbound notifications substreams immediately (#6781) * Don't close inbound notifications substreams immediately * Fix not closing in return to node A closing --- client/network/src/lib.rs | 5 +- .../generic_proto/handler/notif_in.rs | 6 +-- .../generic_proto/upgrade/notifications.rs | 51 +++++++++++++++---- 3 files changed, 46 insertions(+), 16 deletions(-) diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index b8e5d7582b9..fc5cab321d1 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -197,13 +197,14 @@ //! handshake message can be of length 0, in which case the sender has to send a single `0`. //! - The receiver then either immediately closes the substream, or answers with its own //! LEB128-prefixed protocol-specific handshake response. The message can be of length 0, in which -//! case a single `0` has to be sent back. The receiver is then encouraged to close its sending -//! side. +//! case a single `0` has to be sent back. //! - Once the handshake has completed, the notifications protocol is unidirectional. Only the //! node which initiated the substream can push notifications. If the remote wants to send //! notifications as well, it has to open its own undirectional substream. //! 
- Each notification must be prefixed with an LEB128-encoded length. The encoding of the //! messages is specific to each protocol. +//! - Either party can signal that it doesn't want a notifications substream anymore by closing +//! its writing side. The other party should respond by closing its own writing side soon after. //! //! The API of `sc-network` allows one to register user-defined notification protocols. //! `sc-network` automatically tries to open a substream towards each node for which the legacy diff --git a/client/network/src/protocol/generic_proto/handler/notif_in.rs b/client/network/src/protocol/generic_proto/handler/notif_in.rs index be78fb970e9..ddd78566fcd 100644 --- a/client/network/src/protocol/generic_proto/handler/notif_in.rs +++ b/client/network/src/protocol/generic_proto/handler/notif_in.rs @@ -163,11 +163,9 @@ impl ProtocolsHandler for NotifsInHandler { } // Note that we drop the existing substream, which will send an equivalent to a TCP "RST" - // to the remote and force-close the substream. It might seem like an unclean way to get + // to the remote and force-close the substream. It might seem like an unclean way to get // rid of a substream. However, keep in mind that it is invalid for the remote to open - // multiple such substreams, and therefore sending a "RST" is the correct thing to do. - // Also note that we have already closed our writing side during the initial handshake, - // and we can't close "more" than that anyway. + // multiple such substreams, and therefore sending a "RST" is not an incorrect thing to do. self.substream = Some(proto); self.events_queue.push_back(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::OpenRequest(msg))); diff --git a/client/network/src/protocol/generic_proto/upgrade/notifications.rs b/client/network/src/protocol/generic_proto/upgrade/notifications.rs index f1f41d5bccf..80fd7761f80 100644 --- a/client/network/src/protocol/generic_proto/upgrade/notifications.rs +++ b/client/network/src/protocol/generic_proto/upgrade/notifications.rs @@ -22,12 +22,13 @@ /// higher-level logic. This message is prefixed with a variable-length integer message length. /// This message can be empty, in which case `0` is sent. /// - If node B accepts the substream, it sends back a message with the same properties. -/// Afterwards, the sending side of B is closed. /// - If instead B refuses the connection (which typically happens because no empty slot is /// available), then it immediately closes the substream without sending back anything. /// - Node A can then send notifications to B, prefixed with a variable-length integer indicating /// the length of the message. -/// - Node A closes its writing side if it doesn't want the notifications substream anymore. +/// - Either node A or node B can signal that it doesn't want this notifications substream anymore +/// by closing its writing side. The other party should respond by also closing their own +/// writing side soon after. /// /// Notification substreams are unidirectional. If A opens a substream with B, then B is /// encouraged but not required to open a substream to A as well. @@ -80,9 +81,13 @@ enum NotificationsInSubstreamHandshake { /// User gave us the handshake message. Trying to push it in the socket. PendingSend(Vec), /// Handshake message was pushed in the socket. Still need to flush. - Close, - /// Handshake message successfully sent. + Flush, + /// Handshake message successfully sent and flushed. Sent, + /// Remote has closed their writing side. 
We close our own writing side in return. + ClosingInResponseToRemote, + /// Both our side and the remote have closed their writing side. + BothSidesClosed, } /// A substream for outgoing notification messages. @@ -177,8 +182,6 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin, // This `Stream` implementation first tries to send back the handshake if necessary. loop { match mem::replace(this.handshake, NotificationsInSubstreamHandshake::Sent) { - NotificationsInSubstreamHandshake::Sent => - return Stream::poll_next(this.socket.as_mut(), cx), NotificationsInSubstreamHandshake::NotSent => { *this.handshake = NotificationsInSubstreamHandshake::NotSent; return Poll::Pending @@ -186,7 +189,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin, NotificationsInSubstreamHandshake::PendingSend(msg) => match Sink::poll_ready(this.socket.as_mut(), cx) { Poll::Ready(_) => { - *this.handshake = NotificationsInSubstreamHandshake::Close; + *this.handshake = NotificationsInSubstreamHandshake::Flush; match Sink::start_send(this.socket.as_mut(), io::Cursor::new(msg)) { Ok(()) => {}, Err(err) => return Poll::Ready(Some(Err(err))), @@ -197,15 +200,43 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin, return Poll::Pending } }, - NotificationsInSubstreamHandshake::Close => - match Sink::poll_close(this.socket.as_mut(), cx)? { + NotificationsInSubstreamHandshake::Flush => + match Sink::poll_flush(this.socket.as_mut(), cx)? { Poll::Ready(()) => *this.handshake = NotificationsInSubstreamHandshake::Sent, Poll::Pending => { - *this.handshake = NotificationsInSubstreamHandshake::Close; + *this.handshake = NotificationsInSubstreamHandshake::Flush; return Poll::Pending } }, + + NotificationsInSubstreamHandshake::Sent => { + match Stream::poll_next(this.socket.as_mut(), cx) { + Poll::Ready(None) => *this.handshake = + NotificationsInSubstreamHandshake::ClosingInResponseToRemote, + Poll::Ready(Some(msg)) => { + *this.handshake = NotificationsInSubstreamHandshake::Sent; + return Poll::Ready(Some(msg)) + }, + Poll::Pending => { + *this.handshake = NotificationsInSubstreamHandshake::Sent; + return Poll::Pending + }, + } + }, + + NotificationsInSubstreamHandshake::ClosingInResponseToRemote => + match Sink::poll_close(this.socket.as_mut(), cx)? { + Poll::Ready(()) => + *this.handshake = NotificationsInSubstreamHandshake::BothSidesClosed, + Poll::Pending => { + *this.handshake = NotificationsInSubstreamHandshake::ClosingInResponseToRemote; + return Poll::Pending + } + }, + + NotificationsInSubstreamHandshake::BothSidesClosed => + return Poll::Ready(None), } } } -- GitLab From 8281317727440ccdd8c95378ccfa3c365ef7bd1c Mon Sep 17 00:00:00 2001 From: Max Inden Date: Mon, 3 Aug 2020 10:30:06 +0200 Subject: [PATCH 039/132] client/network: Expose DHT query duration to Prometheus (#6784) Expose duration of DHT put and get request as a Prometheus histogram. --- client/network/src/behaviour.rs | 21 ++++++++++---------- client/network/src/discovery.rs | 34 ++++++++++++++++++++------------- client/network/src/service.rs | 28 +++++++++++++++++++++++++-- 3 files changed, 58 insertions(+), 25 deletions(-) diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 2afcd274138..a43c61054d9 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -145,8 +145,9 @@ pub enum BehaviourOut { messages: Vec<(ConsensusEngineId, Bytes)>, }, - /// Event generated by a DHT. 
- Dht(DhtEvent), + /// Events generated by a DHT as a response to get_value or put_value requests as well as the + /// request duration. + Dht(DhtEvent, Duration), } impl Behaviour { @@ -454,17 +455,17 @@ impl NetworkBehaviourEventProcess DiscoveryOut::Discovered(peer_id) => { self.substrate.add_discovered_nodes(iter::once(peer_id)); } - DiscoveryOut::ValueFound(results) => { - self.events.push_back(BehaviourOut::Dht(DhtEvent::ValueFound(results))); + DiscoveryOut::ValueFound(results, duration) => { + self.events.push_back(BehaviourOut::Dht(DhtEvent::ValueFound(results), duration)); } - DiscoveryOut::ValueNotFound(key) => { - self.events.push_back(BehaviourOut::Dht(DhtEvent::ValueNotFound(key))); + DiscoveryOut::ValueNotFound(key, duration) => { + self.events.push_back(BehaviourOut::Dht(DhtEvent::ValueNotFound(key), duration)); } - DiscoveryOut::ValuePut(key) => { - self.events.push_back(BehaviourOut::Dht(DhtEvent::ValuePut(key))); + DiscoveryOut::ValuePut(key, duration) => { + self.events.push_back(BehaviourOut::Dht(DhtEvent::ValuePut(key), duration)); } - DiscoveryOut::ValuePutFailed(key) => { - self.events.push_back(BehaviourOut::Dht(DhtEvent::ValuePutFailed(key))); + DiscoveryOut::ValuePutFailed(key, duration) => { + self.events.push_back(BehaviourOut::Dht(DhtEvent::ValuePutFailed(key), duration)); } DiscoveryOut::RandomKademliaStarted(protocols) => { for protocol in protocols { diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index 8216d6b2cbe..7cb977e8e1a 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -312,7 +312,7 @@ impl DiscoveryBehaviour { for k in self.kademlias.values_mut() { if let Err(e) = k.put_record(Record::new(key.clone(), value.clone()), Quorum::All) { warn!(target: "sub-libp2p", "Libp2p => Failed to put record: {:?}", e); - self.pending_events.push_back(DiscoveryOut::ValuePutFailed(key.clone())); + self.pending_events.push_back(DiscoveryOut::ValuePutFailed(key.clone(), Duration::from_secs(0))); } } } @@ -379,17 +379,25 @@ pub enum DiscoveryOut { /// the `identify` protocol. UnroutablePeer(PeerId), - /// The DHT yielded results for the record request, grouped in (key, value) pairs. - ValueFound(Vec<(record::Key, Vec)>), + /// The DHT yielded results for the record request. + /// + /// Returning the result grouped in (key, value) pairs as well as the request duration.. + ValueFound(Vec<(record::Key, Vec)>, Duration), /// The record requested was not found in the DHT. - ValueNotFound(record::Key), + /// + /// Returning the corresponding key as well as the request duration. + ValueNotFound(record::Key, Duration), /// The record with a given key was successfully inserted into the DHT. - ValuePut(record::Key), + /// + /// Returning the corresponding key as well as the request duration. + ValuePut(record::Key, Duration), /// Inserting a value into the DHT failed. - ValuePutFailed(record::Key), + /// + /// Returning the corresponding key as well as the request duration. + ValuePutFailed(record::Key, Duration), /// Started a random Kademlia query for each DHT identified by the given `ProtocolId`s. RandomKademliaStarted(Vec), @@ -620,7 +628,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { } } } - KademliaEvent::QueryResult { result: QueryResult::GetRecord(res), .. } => { + KademliaEvent::QueryResult { result: QueryResult::GetRecord(res), stats, .. 
} => { let ev = match res { Ok(ok) => { let results = ok.records @@ -628,28 +636,28 @@ impl NetworkBehaviour for DiscoveryBehaviour { .map(|r| (r.record.key, r.record.value)) .collect(); - DiscoveryOut::ValueFound(results) + DiscoveryOut::ValueFound(results, stats.duration().unwrap_or_else(Default::default)) } Err(e @ libp2p::kad::GetRecordError::NotFound { .. }) => { trace!(target: "sub-libp2p", "Libp2p => Failed to get record: {:?}", e); - DiscoveryOut::ValueNotFound(e.into_key()) + DiscoveryOut::ValueNotFound(e.into_key(), stats.duration().unwrap_or_else(Default::default)) } Err(e) => { warn!(target: "sub-libp2p", "Libp2p => Failed to get record: {:?}", e); - DiscoveryOut::ValueNotFound(e.into_key()) + DiscoveryOut::ValueNotFound(e.into_key(), stats.duration().unwrap_or_else(Default::default)) } }; return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); } - KademliaEvent::QueryResult { result: QueryResult::PutRecord(res), .. } => { + KademliaEvent::QueryResult { result: QueryResult::PutRecord(res), stats, .. } => { let ev = match res { - Ok(ok) => DiscoveryOut::ValuePut(ok.key), + Ok(ok) => DiscoveryOut::ValuePut(ok.key, stats.duration().unwrap_or_else(Default::default)), Err(e) => { warn!(target: "sub-libp2p", "Libp2p => Failed to put record: {:?}", e); - DiscoveryOut::ValuePutFailed(e.into_key()) + DiscoveryOut::ValuePutFailed(e.into_key(), stats.duration().unwrap_or_else(Default::default)) } }; return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); diff --git a/client/network/src/service.rs b/client/network/src/service.rs index c11a620c567..65f93f58a55 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -31,6 +31,7 @@ use crate::{ ExHashT, NetworkStateInfo, behaviour::{Behaviour, BehaviourOut}, config::{parse_addr, parse_str_addr, NonReservedPeerMode, Params, Role, TransportConfig}, + DhtEvent, discovery::DiscoveryConfig, error::Error, network_state::{ @@ -1119,6 +1120,7 @@ struct Metrics { incoming_connections_total: Counter, is_major_syncing: Gauge, issued_light_requests: Counter, + kademlia_query_duration: HistogramVec, kademlia_random_queries_total: CounterVec, kademlia_records_count: GaugeVec, kademlia_records_sizes_total: GaugeVec, @@ -1196,6 +1198,17 @@ impl Metrics { "issued_light_requests", "Number of light client requests that our node has issued.", )?, registry)?, + kademlia_query_duration: register(HistogramVec::new( + HistogramOpts { + common_opts: Opts::new( + "sub_libp2p_kademlia_query_duration", + "Duration of Kademlia queries per protocol and query type" + ), + buckets: prometheus_endpoint::exponential_buckets(0.5, 2.0, 10) + .expect("parameters are always valid values; qed"), + }, + &["type"] + )?, registry)?, kademlia_random_queries_total: register(CounterVec::new( Opts::new( "sub_libp2p_kademlia_random_queries_total", @@ -1508,8 +1521,19 @@ impl Future for NetworkWorker { messages, }); }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::Dht(ev))) => { - this.event_streams.send(Event::Dht(ev)); + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::Dht(event, duration))) => { + if let Some(metrics) = this.metrics.as_ref() { + let query_type = match event { + DhtEvent::ValueFound(_) => "value-found", + DhtEvent::ValueNotFound(_) => "value-not-found", + DhtEvent::ValuePut(_) => "value-put", + DhtEvent::ValuePutFailed(_) => "value-put-failed", + }; + metrics.kademlia_query_duration.with_label_values(&[query_type]) + .observe(duration.as_secs_f64()); + } + + this.event_streams.send(Event::Dht(event)); }, 
Poll::Ready(SwarmEvent::ConnectionEstablished { peer_id, endpoint, num_established }) => { trace!(target: "sub-libp2p", "Libp2p => Connected({:?})", peer_id); -- GitLab From 54e6298523ea896f2129546c26dd7a47dbbeedc9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 3 Aug 2020 10:46:53 +0200 Subject: [PATCH 040/132] Fix transaction payment runtime api (#6792) The transaction payment runtime api used its own extrinsic generic parameter. This is wrong, because this resulted in using always the native extrinsic. If there was a runtime upgrade that changed the extrinsic in some way, it would result in the api breaking. The correct way is to use the `Extrinsic` from the `Block` parameter. This is on the node side the opaque extrinsic and on the runtime side the real extrinsic. --- bin/node/rpc/src/lib.rs | 3 +-- bin/node/runtime/src/lib.rs | 3 +-- frame/transaction-payment/rpc/runtime-api/src/lib.rs | 5 ++--- frame/transaction-payment/rpc/src/lib.rs | 9 ++++----- 4 files changed, 8 insertions(+), 12 deletions(-) diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index 7f1457356d9..106353983fe 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -33,7 +33,6 @@ use std::sync::Arc; use node_primitives::{Block, BlockNumber, AccountId, Index, Balance, Hash}; -use node_runtime::UncheckedExtrinsic; use sp_api::ProvideRuntimeApi; use sp_transaction_pool::TransactionPool; use sp_blockchain::{Error as BlockChainError, HeaderMetadata, HeaderBackend}; @@ -106,7 +105,7 @@ pub fn create_full( C: Send + Sync + 'static, C::Api: substrate_frame_rpc_system::AccountNonceApi, C::Api: pallet_contracts_rpc::ContractsRuntimeApi, - C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, + C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, C::Api: BabeApi, C::Api: BlockBuilder, P: TransactionPool + 'static, diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 373a01b8ea2..fff0dd3427f 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1101,9 +1101,8 @@ impl_runtime_apis! { impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi< Block, Balance, - UncheckedExtrinsic, > for Runtime { - fn query_info(uxt: UncheckedExtrinsic, len: u32) -> RuntimeDispatchInfo { + fn query_info(uxt: ::Extrinsic, len: u32) -> RuntimeDispatchInfo { TransactionPayment::query_info(uxt, len) } } diff --git a/frame/transaction-payment/rpc/runtime-api/src/lib.rs b/frame/transaction-payment/rpc/runtime-api/src/lib.rs index 17a8bcdf44e..5575f8f7d09 100644 --- a/frame/transaction-payment/rpc/runtime-api/src/lib.rs +++ b/frame/transaction-payment/rpc/runtime-api/src/lib.rs @@ -56,11 +56,10 @@ fn deserialize_from_string<'de, D: Deserializer<'de>, T: std::str::FromStr>(dese } sp_api::decl_runtime_apis! 
{ - pub trait TransactionPaymentApi where + pub trait TransactionPaymentApi where Balance: Codec + MaybeDisplay + MaybeFromStr, - Extrinsic: Codec, { - fn query_info(uxt: Extrinsic, len: u32) -> RuntimeDispatchInfo; + fn query_info(uxt: Block::Extrinsic, len: u32) -> RuntimeDispatchInfo; } } diff --git a/frame/transaction-payment/rpc/src/lib.rs b/frame/transaction-payment/rpc/src/lib.rs index d99907a6ac3..5043f0257fc 100644 --- a/frame/transaction-payment/rpc/src/lib.rs +++ b/frame/transaction-payment/rpc/src/lib.rs @@ -69,14 +69,13 @@ impl From for i64 { } } -impl TransactionPaymentApi<::Hash, RuntimeDispatchInfo> - for TransactionPayment +impl TransactionPaymentApi<::Hash, RuntimeDispatchInfo> + for TransactionPayment where Block: BlockT, C: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, - C::Api: TransactionPaymentRuntimeApi, + C::Api: TransactionPaymentRuntimeApi, Balance: Codec + MaybeDisplay + MaybeFromStr, - Extrinsic: Codec + Send + Sync + 'static, { fn query_info( &self, @@ -91,7 +90,7 @@ where let encoded_len = encoded_xt.len() as u32; - let uxt: Extrinsic = Decode::decode(&mut &*encoded_xt).map_err(|e| RpcError { + let uxt: Block::Extrinsic = Decode::decode(&mut &*encoded_xt).map_err(|e| RpcError { code: ErrorCode::ServerError(Error::DecodeError.into()), message: "Unable to query dispatch info.".into(), data: Some(format!("{:?}", e).into()), -- GitLab From 125f77ddb7d8126318fe08f2c0ea967fb934d272 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Aug 2020 10:52:43 +0200 Subject: [PATCH 041/132] Bump elliptic from 6.5.2 to 6.5.3 in /.maintain/chaostest (#6791) Bumps [elliptic](https://github.com/indutny/elliptic) from 6.5.2 to 6.5.3. - [Release notes](https://github.com/indutny/elliptic/releases) - [Commits](https://github.com/indutny/elliptic/compare/v6.5.2...v6.5.3) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .maintain/chaostest/package-lock.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.maintain/chaostest/package-lock.json b/.maintain/chaostest/package-lock.json index d975c9faf9e..8855f221a13 100644 --- a/.maintain/chaostest/package-lock.json +++ b/.maintain/chaostest/package-lock.json @@ -1720,9 +1720,9 @@ } }, "elliptic": { - "version": "6.5.2", - "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.2.tgz", - "integrity": "sha512-f4x70okzZbIQl/NSRLkI/+tteV/9WqL98zx+SQ69KbXxmVrmjwsNUPn/gYJJ0sHvEak24cZgHIPegRePAtA/xw==", + "version": "6.5.3", + "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.3.tgz", + "integrity": "sha512-IMqzv5wNQf+E6aHeIqATs0tOLeOTwj1QKbRcS3jBbYkl5oLAserA8yJTT7/VyHUYG91PRmPyeQDObKLPpeS4dw==", "requires": { "bn.js": "^4.4.0", "brorand": "^1.0.1", -- GitLab From eb57b07a369441eee0dcd5065c508b5606f86d1d Mon Sep 17 00:00:00 2001 From: Max Inden Date: Mon, 3 Aug 2020 11:08:06 +0200 Subject: [PATCH 042/132] client/network: Fix wrong metric help text (#6794) The `sub_libp2p_kademlia_query_duration` metric only has the dimension `type` not `protocol`. 
--- client/network/src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 65f93f58a55..e4ba36be587 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -1202,7 +1202,7 @@ impl Metrics { HistogramOpts { common_opts: Opts::new( "sub_libp2p_kademlia_query_duration", - "Duration of Kademlia queries per protocol and query type" + "Duration of Kademlia queries per query type" ), buckets: prometheus_endpoint::exponential_buckets(0.5, 2.0, 10) .expect("parameters are always valid values; qed"), -- GitLab From e81a3160d963456c88ce05b9391103ed405e0851 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Mon, 3 Aug 2020 12:03:22 +0200 Subject: [PATCH 043/132] seal: Fix and improve error reporting (#6773) * seal: Rework ext_transfer, ext_instantiate, ext_call error handling * Deny calling plain accounts (must use transfer now) * Return proper module error rather than ad-hoc strings * Return the correct error codes from call,instantiate (documentation was wrong) * Make ext_transfer fallible again to make it consistent with ext_call * seal: Improve error messages on memory access failures * seal: Convert contract trapped to module error * seal: Add additional tests for transfer, call, instantiate These tests verify that those functions return the error types which are declared in its docs. * Make it more pronounced that to_execution_result handles trap_reason * Improve ReturnCode docs * Fix whitespace issues in wat files * Improve ReturnCode doc * Improve ErrorOrigin doc and variant naming * Improve docs on ExecResult and ExecError * Encode u32 sentinel value as hex * with_nested_context no longer accepts an Option for trie * Fix successful typo * Rename InvalidContractCalled to NotCallable --- frame/contracts/fixtures/call_return_code.wat | 44 +++ frame/contracts/fixtures/caller_contract.wat | 4 +- .../fixtures/destroy_and_transfer.wat | 8 +- frame/contracts/fixtures/drain.wat | 11 +- .../fixtures/instantiate_return_code.wat | 47 +++ frame/contracts/fixtures/ok_trap_revert.wat | 35 +++ .../fixtures/self_destructing_constructor.wat | 39 +-- frame/contracts/fixtures/set_rent.wat | 9 +- .../fixtures/transfer_return_code.wat | 31 ++ frame/contracts/src/exec.rs | 201 ++++++++----- frame/contracts/src/gas.rs | 11 +- frame/contracts/src/lib.rs | 24 +- frame/contracts/src/storage.rs | 1 + frame/contracts/src/tests.rs | 267 +++++++++++++++-- frame/contracts/src/wasm/mod.rs | 111 ++++++- frame/contracts/src/wasm/runtime.rs | 277 +++++++++++------- 16 files changed, 855 insertions(+), 265 deletions(-) create mode 100644 frame/contracts/fixtures/call_return_code.wat create mode 100644 frame/contracts/fixtures/instantiate_return_code.wat create mode 100644 frame/contracts/fixtures/ok_trap_revert.wat create mode 100644 frame/contracts/fixtures/transfer_return_code.wat diff --git a/frame/contracts/fixtures/call_return_code.wat b/frame/contracts/fixtures/call_return_code.wat new file mode 100644 index 00000000000..d724f904462 --- /dev/null +++ b/frame/contracts/fixtures/call_return_code.wat @@ -0,0 +1,44 @@ +;; This calls Django (4) and transfers 100 balance during this call and copies the return code +;; of this call to the output buffer. +;; It also forwards its input to the callee. 
+(module + (import "env" "ext_input" (func $ext_input (param i32 i32))) + (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) + (import "env" "ext_return" (func $ext_return (param i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + + ;; [0, 8) address of django + (data (i32.const 0) "\04\00\00\00\00\00\00\00") + + ;; [8, 16) 100 balance + (data (i32.const 8) "\64\00\00\00\00\00\00\00") + + ;; [16, 20) here we store the return code of the transfer + + ;; [20, 24) here we store the input data + + ;; [24, 28) size of the input data + (data (i32.const 24) "\04") + + (func (export "deploy")) + + (func (export "call") + (call $ext_input (i32.const 20) (i32.const 24)) + (i32.store + (i32.const 16) + (call $ext_call + (i32.const 0) ;; Pointer to "callee" address. + (i32.const 8) ;; Length of "callee" address. + (i64.const 0) ;; How much gas to devote for the execution. 0 = all. + (i32.const 8) ;; Pointer to the buffer with value to transfer + (i32.const 8) ;; Length of the buffer with value to transfer. + (i32.const 20) ;; Pointer to input data buffer address + (i32.load (i32.const 24)) ;; Length of input data buffer + (i32.const 0xffffffff) ;; u32 max sentinel value: do not copy output + (i32.const 0) ;; Ptr to output buffer len + ) + ) + ;; exit with success and take transfer return code to the output buffer + (call $ext_return (i32.const 0) (i32.const 16) (i32.const 4)) + ) +) diff --git a/frame/contracts/fixtures/caller_contract.wat b/frame/contracts/fixtures/caller_contract.wat index 369007834dc..ee2e16098d5 100644 --- a/frame/contracts/fixtures/caller_contract.wat +++ b/frame/contracts/fixtures/caller_contract.wat @@ -89,7 +89,7 @@ (call $ext_instantiate (i32.const 24) ;; Pointer to the code hash. (i32.const 32) ;; Length of the code hash. - (i64.const 200) ;; How much gas to devote for the execution. + (i64.const 187500000) ;; Just enough to pay for the instantiate (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer. (i32.const 8) ;; Pointer to input data buffer address @@ -206,7 +206,7 @@ (call $ext_call (i32.const 16) ;; Pointer to "callee" address. (i32.const 8) ;; Length of "callee" address. - (i64.const 100) ;; How much gas to devote for the execution. + (i64.const 117500000) ;; Just enough to make the call (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer. (i32.const 8) ;; Pointer to input data buffer address diff --git a/frame/contracts/fixtures/destroy_and_transfer.wat b/frame/contracts/fixtures/destroy_and_transfer.wat index ee191aa019b..3f8a8c89b02 100644 --- a/frame/contracts/fixtures/destroy_and_transfer.wat +++ b/frame/contracts/fixtures/destroy_and_transfer.wat @@ -3,6 +3,7 @@ (import "env" "ext_get_storage" (func $ext_get_storage (param i32 i32 i32) (result i32))) (import "env" "ext_set_storage" (func $ext_set_storage (param i32 i32 i32))) (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) + (import "env" "ext_transfer" (func $ext_transfer (param i32 i32 i32 i32) (result i32))) (import "env" "ext_instantiate" (func $ext_instantiate (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) @@ -139,16 +140,11 @@ ;; does not keep the contract alive. 
(call $assert (i32.eq - (call $ext_call + (call $ext_transfer (i32.const 80) ;; Pointer to destination address (i32.const 8) ;; Length of destination address - (i64.const 0) ;; How much gas to devote for the execution. 0 = all. (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer - (i32.const 0) ;; Pointer to input data buffer address - (i32.const 1) ;; Length of input data buffer - (i32.const 4294967295) ;; u32 max sentinel value: do not copy output - (i32.const 0) ;; Length is ignored in this case ) (i32.const 0) ) diff --git a/frame/contracts/fixtures/drain.wat b/frame/contracts/fixtures/drain.wat index 1b3172b2a01..22422bb859d 100644 --- a/frame/contracts/fixtures/drain.wat +++ b/frame/contracts/fixtures/drain.wat @@ -1,6 +1,6 @@ (module (import "env" "ext_balance" (func $ext_balance (param i32 i32))) - (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) + (import "env" "ext_transfer" (func $ext_transfer (param i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) ;; [0, 8) reserved for $ext_balance output @@ -36,18 +36,13 @@ ;; Self-destruct by sending full balance to the 0 address. (call $assert (i32.eq - (call $ext_call + (call $ext_transfer (i32.const 16) ;; Pointer to destination address (i32.const 8) ;; Length of destination address - (i64.const 0) ;; How much gas to devote for the execution. 0 = all. (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer - (i32.const 0) ;; Pointer to input data buffer address - (i32.const 0) ;; Length of input data buffer - (i32.const 4294967295) ;; u32 max sentinel value: do not copy output - (i32.const 0) ;; Length is ignored in this case ) - (i32.const 0) + (i32.const 4) ;; ReturnCode::BelowSubsistenceThreshold ) ) ) diff --git a/frame/contracts/fixtures/instantiate_return_code.wat b/frame/contracts/fixtures/instantiate_return_code.wat new file mode 100644 index 00000000000..bce80ca01fa --- /dev/null +++ b/frame/contracts/fixtures/instantiate_return_code.wat @@ -0,0 +1,47 @@ +;; This instantiats Charlie (3) and transfers 100 balance during this call and copies the return code +;; of this call to the output buffer. +;; The first 32 byte of input is the code hash to instantiate +;; The rest of the input is forwarded to the constructor of the callee +(module + (import "env" "ext_input" (func $ext_input (param i32 i32))) + (import "env" "ext_instantiate" (func $ext_instantiate (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32) (result i32))) + (import "env" "ext_return" (func $ext_return (param i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + + ;; [0, 8) address of django + (data (i32.const 0) "\04\00\00\00\00\00\00\00") + + ;; [8, 16) 100 balance + (data (i32.const 8) "\64\00\00\00\00\00\00\00") + + ;; [16, 20) here we store the return code of the transfer + + ;; [20, 24) size of the input buffer + (data (i32.const 20) "\FF") + + ;; [24, inf) input buffer + + (func (export "deploy")) + + (func (export "call") + (call $ext_input (i32.const 24) (i32.const 20)) + (i32.store + (i32.const 16) + (call $ext_instantiate + (i32.const 24) ;; Pointer to the code hash. + (i32.const 32) ;; Length of the code hash. + (i64.const 0) ;; How much gas to devote for the execution. 0 = all. + (i32.const 8) ;; Pointer to the buffer with value to transfer + (i32.const 8) ;; Length of the buffer with value to transfer. 
+ (i32.const 56) ;; Pointer to input data buffer address + (i32.sub (i32.load (i32.const 20)) (i32.const 32)) ;; Length of input data buffer + (i32.const 0xffffffff) ;; u32 max sentinel value: do not copy address + (i32.const 0) ;; Length is ignored in this case + (i32.const 0xffffffff) ;; u32 max sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case + ) + ) + ;; exit with success and take transfer return code to the output buffer + (call $ext_return (i32.const 0) (i32.const 16) (i32.const 4)) + ) +) diff --git a/frame/contracts/fixtures/ok_trap_revert.wat b/frame/contracts/fixtures/ok_trap_revert.wat new file mode 100644 index 00000000000..5877e55d0e7 --- /dev/null +++ b/frame/contracts/fixtures/ok_trap_revert.wat @@ -0,0 +1,35 @@ +(module + (import "env" "ext_input" (func $ext_input (param i32 i32))) + (import "env" "ext_return" (func $ext_return (param i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + + (func (export "deploy") + (call $ok_trap_revert) + ) + + (func (export "call") + (call $ok_trap_revert) + ) + + (func $ok_trap_revert + (i32.store (i32.const 4) (i32.const 4)) + (call $ext_input (i32.const 0) (i32.const 4)) + (block $IF_2 + (block $IF_1 + (block $IF_0 + (br_table $IF_0 $IF_1 $IF_2 + (i32.load8_u (i32.const 0)) + ) + (unreachable) + ) + ;; 0 = return with success + return + ) + ;; 1 = revert + (call $ext_return (i32.const 1) (i32.const 0) (i32.const 0)) + (unreachable) + ) + ;; 2 = trap + (unreachable) + ) +) \ No newline at end of file diff --git a/frame/contracts/fixtures/self_destructing_constructor.wat b/frame/contracts/fixtures/self_destructing_constructor.wat index 3b99db001cd..ece5679f4f6 100644 --- a/frame/contracts/fixtures/self_destructing_constructor.wat +++ b/frame/contracts/fixtures/self_destructing_constructor.wat @@ -1,15 +1,7 @@ (module - (import "env" "ext_balance" (func $ext_balance (param i32 i32))) - (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) + (import "env" "ext_terminate" (func $ext_terminate (param i32 i32))) (import "env" "memory" (memory 1 1)) - ;; [0, 8) reserved for $ext_balance output - - ;; [8, 16) length of the buffer - (data (i32.const 8) "\08") - - ;; [16, inf) zero initialized - (func $assert (param i32) (block $ok (br_if $ok @@ -20,33 +12,10 @@ ) (func (export "deploy") - ;; Send entire remaining balance to the 0 address. - (call $ext_balance (i32.const 0) (i32.const 8)) - - ;; Balance should be encoded as a u64. - (call $assert - (i32.eq - (i32.load (i32.const 8)) - (i32.const 8) - ) - ) - ;; Self-destruct by sending full balance to the 0 address. - (call $assert - (i32.eq - (call $ext_call - (i32.const 16) ;; Pointer to destination address - (i32.const 8) ;; Length of destination address - (i64.const 0) ;; How much gas to devote for the execution. 0 = all. 
- (i32.const 0) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer - (i32.const 0) ;; Pointer to input data buffer address - (i32.const 0) ;; Length of input data buffer - (i32.const 4294967295) ;; u32 max sentinel value: do not copy output - (i32.const 0) ;; Length is ignored in this case - ) - (i32.const 0) - ) + (call $ext_terminate + (i32.const 0) ;; Pointer to destination address + (i32.const 8) ;; Length of destination address ) ) diff --git a/frame/contracts/fixtures/set_rent.wat b/frame/contracts/fixtures/set_rent.wat index 4e6424e7201..ba52e9ed752 100644 --- a/frame/contracts/fixtures/set_rent.wat +++ b/frame/contracts/fixtures/set_rent.wat @@ -1,5 +1,5 @@ (module - (import "env" "ext_transfer" (func $ext_transfer (param i32 i32 i32 i32))) + (import "env" "ext_transfer" (func $ext_transfer (param i32 i32 i32 i32) (result i32))) (import "env" "ext_set_storage" (func $ext_set_storage (param i32 i32 i32))) (import "env" "ext_clear_storage" (func $ext_clear_storage (param i32))) (import "env" "ext_set_rent_allowance" (func $ext_set_rent_allowance (param i32 i32))) @@ -24,7 +24,12 @@ ;; transfer 50 to CHARLIE (func $call_2 - (call $ext_transfer (i32.const 68) (i32.const 8) (i32.const 76) (i32.const 8)) + (call $assert + (i32.eq + (call $ext_transfer (i32.const 68) (i32.const 8) (i32.const 76) (i32.const 8)) + (i32.const 0) + ) + ) ) ;; do nothing diff --git a/frame/contracts/fixtures/transfer_return_code.wat b/frame/contracts/fixtures/transfer_return_code.wat new file mode 100644 index 00000000000..87d186811e7 --- /dev/null +++ b/frame/contracts/fixtures/transfer_return_code.wat @@ -0,0 +1,31 @@ +;; This transfers 100 balance to the zero account and copies the return code +;; of this transfer to the output buffer. +(module + (import "env" "ext_transfer" (func $ext_transfer (param i32 i32 i32 i32) (result i32))) + (import "env" "ext_return" (func $ext_return (param i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + + ;; [0, 8) zero-adress + (data (i32.const 0) "\00\00\00\00\00\00\00\00") + + ;; [8, 16) 100 balance + (data (i32.const 8) "\64\00\00\00\00\00\00\00") + + ;; [16, 20) here we store the return code of the transfer + + (func (export "deploy")) + + (func (export "call") + (i32.store + (i32.const 16) + (call $ext_transfer + (i32.const 0) ;; ptr to destination address + (i32.const 8) ;; length of destination address + (i32.const 8) ;; ptr to value to transfer + (i32.const 8) ;; length of value to transfer + ) + ) + ;; exit with success and take transfer return code to the output buffer + (call $ext_return (i32.const 0) (i32.const 16) (i32.const 4)) + ) +) diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index e8965692aa2..a2fb50dd3f3 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -14,9 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-use super::{CodeHash, Config, ContractAddressFor, Event, RawEvent, Trait, - TrieId, BalanceOf, ContractInfo, TrieIdGenerator}; -use crate::{gas::{Gas, GasMeter, Token}, rent, storage, Error, ContractInfoOf}; +use crate::{ + CodeHash, Config, ContractAddressFor, Event, RawEvent, Trait, + TrieId, BalanceOf, ContractInfo, TrieIdGenerator, + gas::{Gas, GasMeter, Token}, rent, storage, Error, ContractInfoOf +}; use bitflags::bitflags; use sp_std::prelude::*; use sp_runtime::traits::{Bounded, Zero, Convert, Saturating}; @@ -69,7 +71,39 @@ impl ExecReturnValue { } } -pub type ExecResult = Result; +/// Call or instantiate both call into other contracts and pass through errors happening +/// in those to the caller. This enum is for the caller to distinguish whether the error +/// happened during the execution of the callee or in the current execution context. +#[cfg_attr(test, derive(PartialEq, Eq, Debug))] +pub enum ErrorOrigin { + /// The error happened in the current exeuction context rather than in the one + /// of the contract that is called into. + Caller, + /// The error happened during execution of the called contract. + Callee, +} + +/// Error returned by contract exection. +#[cfg_attr(test, derive(PartialEq, Eq, Debug))] +pub struct ExecError { + /// The reason why the execution failed. + pub error: DispatchError, + /// Origin of the error. + pub origin: ErrorOrigin, +} + +impl> From for ExecError { + fn from(error: T) -> Self { + Self { + error: error.into(), + origin: ErrorOrigin::Caller, + } + } +} + +/// The result that is returned from contract execution. It either contains the output +/// buffer or an error describing the reason for failure. +pub type ExecResult = Result; /// An interface that provides access to the external environment in which the /// smart-contract is executed. @@ -99,7 +133,7 @@ pub trait Ext { value: BalanceOf, gas_meter: &mut GasMeter, input_data: Vec, - ) -> Result<(AccountIdOf, ExecReturnValue), DispatchError>; + ) -> Result<(AccountIdOf, ExecReturnValue), ExecError>; /// Transfer some amount of funds into the specified account. fn transfer( @@ -282,12 +316,12 @@ where } } - fn nested<'b, 'c: 'b>(&'c self, dest: T::AccountId, trie_id: Option) + fn nested<'b, 'c: 'b>(&'c self, dest: T::AccountId, trie_id: TrieId) -> ExecutionContext<'b, T, V, L> { ExecutionContext { caller: Some(self), - self_trie_id: trie_id, + self_trie_id: Some(trie_id), self_account: dest, depth: self.depth + 1, config: self.config, @@ -307,31 +341,31 @@ where input_data: Vec, ) -> ExecResult { if self.depth == self.config.max_depth as usize { - Err("reached maximum depth, cannot make a call")? + Err(Error::::MaxCallDepthReached)? } if gas_meter .charge(self.config, ExecFeeToken::Call) .is_out_of_gas() { - Err("not enough gas to pay base call fee")? + Err(Error::::OutOfGas)? } // Assumption: `collect_rent` doesn't collide with overlay because // `collect_rent` will be done on first call and destination contract and balance // cannot be changed before the first call - let contract_info = rent::collect_rent::(&dest); - - // Calls to dead contracts always fail. - if let Some(ContractInfo::Tombstone(_)) = contract_info { - Err("contract has been evicted")? + // We do not allow 'calling' plain accounts. For transfering value + // `ext_transfer` must be used. + let contract = if let Some(ContractInfo::Alive(info)) = rent::collect_rent::(&dest) { + info + } else { + Err(Error::::NotCallable)? 
}; let transactor_kind = self.transactor_kind(); let caller = self.self_account.clone(); - let dest_trie_id = contract_info.and_then(|i| i.as_alive().map(|i| i.trie_id.clone())); - self.with_nested_context(dest.clone(), dest_trie_id, |nested| { + self.with_nested_context(dest.clone(), contract.trie_id.clone(), |nested| { if value > BalanceOf::::zero() { transfer( gas_meter, @@ -344,22 +378,15 @@ where )? } - // If code_hash is not none, then the destination account is a live contract, otherwise - // it is a regular account since tombstone accounts have already been rejected. - match storage::code_hash::(&dest) { - Ok(dest_code_hash) => { - let executable = nested.loader.load_main(&dest_code_hash)?; - let output = nested.vm - .execute( - &executable, - nested.new_call_context(caller, value), - input_data, - gas_meter, - )?; - Ok(output) - } - Err(storage::ContractAbsentError) => Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }), - } + let executable = nested.loader.load_main(&contract.code_hash) + .map_err(|_| Error::::CodeNotFound)?; + let output = nested.vm.execute( + &executable, + nested.new_call_context(caller, value), + input_data, + gas_meter, + ).map_err(|e| ExecError { error: e.error, origin: ErrorOrigin::Callee })?; + Ok(output) }) } @@ -369,16 +396,16 @@ where gas_meter: &mut GasMeter, code_hash: &CodeHash, input_data: Vec, - ) -> Result<(T::AccountId, ExecReturnValue), DispatchError> { + ) -> Result<(T::AccountId, ExecReturnValue), ExecError> { if self.depth == self.config.max_depth as usize { - Err("reached maximum depth, cannot instantiate")? + Err(Error::::MaxCallDepthReached)? } if gas_meter .charge(self.config, ExecFeeToken::Instantiate) .is_out_of_gas() { - Err("not enough gas to pay base instantiate fee")? + Err(Error::::OutOfGas)? } let transactor_kind = self.transactor_kind(); @@ -394,7 +421,7 @@ where // Generate it now. let dest_trie_id = ::TrieIdGenerator::trie_id(&dest); - let output = self.with_nested_context(dest.clone(), Some(dest_trie_id), |nested| { + let output = self.with_nested_context(dest.clone(), dest_trie_id, |nested| { storage::place_contract::( &dest, nested @@ -416,21 +443,21 @@ where nested, )?; - let executable = nested.loader.load_init(&code_hash)?; + let executable = nested.loader.load_init(&code_hash) + .map_err(|_| Error::::CodeNotFound)?; let output = nested.vm .execute( &executable, nested.new_call_context(caller.clone(), endowment), input_data, gas_meter, - )?; + ).map_err(|e| ExecError { error: e.error, origin: ErrorOrigin::Callee })?; - // Error out if insufficient remaining balance. // We need each contract that exists to be above the subsistence threshold // in order to keep up the guarantuee that we always leave a tombstone behind // with the exception of a contract that called `ext_terminate`. - if T::Currency::free_balance(&dest) < nested.config.subsistence_threshold() { - Err("insufficient remaining balance")? + if T::Currency::total_balance(&dest) < nested.config.subsistence_threshold() { + Err(Error::::NewContractNotFunded)? } // Deposit an instantiation event. @@ -459,7 +486,7 @@ where } /// Execute the given closure within a nested execution context. 
- fn with_nested_context(&mut self, dest: T::AccountId, trie_id: Option, func: F) + fn with_nested_context(&mut self, dest: T::AccountId, trie_id: TrieId, func: F) -> ExecResult where F: FnOnce(&mut ExecutionContext) -> ExecResult { @@ -569,7 +596,7 @@ fn transfer<'a, T: Trait, V: Vm, L: Loader>( }; if gas_meter.charge(ctx.config, token).is_out_of_gas() { - Err("not enough gas to pay transfer fee")? + Err(Error::::OutOfGas)? } // Only ext_terminate is allowed to bring the sender below the subsistence @@ -580,13 +607,15 @@ fn transfer<'a, T: Trait, V: Vm, L: Loader>( ensure!( T::Currency::total_balance(transactor).saturating_sub(value) >= ctx.config.subsistence_threshold(), - Error::::InsufficientBalance, + Error::::BelowSubsistenceThreshold, ); ExistenceRequirement::KeepAlive }, (_, PlainAccount) => ExistenceRequirement::KeepAlive, }; - T::Currency::transfer(transactor, dest, value, existence_requirement)?; + + T::Currency::transfer(transactor, dest, value, existence_requirement) + .map_err(|_| Error::::TransferFailed)?; Ok(()) } @@ -653,7 +682,7 @@ where endowment: BalanceOf, gas_meter: &mut GasMeter, input_data: Vec, - ) -> Result<(AccountIdOf, ExecReturnValue), DispatchError> { + ) -> Result<(AccountIdOf, ExecReturnValue), ExecError> { self.ctx.instantiate(endowment, gas_meter, code_hash, input_data) } @@ -837,13 +866,13 @@ fn deposit_event( mod tests { use super::{ BalanceOf, Event, ExecFeeToken, ExecResult, ExecutionContext, Ext, Loader, - RawEvent, TransferFeeKind, TransferFeeToken, Vm, ReturnFlags, + RawEvent, TransferFeeKind, TransferFeeToken, Vm, ReturnFlags, ExecError, ErrorOrigin }; use crate::{ gas::GasMeter, tests::{ExtBuilder, Test, MetaEvent}, exec::ExecReturnValue, CodeHash, Config, gas::Gas, - storage, + storage, Error }; use crate::tests::test_utils::{place_contract, set_balance, get_balance}; use sp_runtime::DispatchError; @@ -999,11 +1028,19 @@ mod tests { let mut gas_meter = GasMeter::::new(GAS_LIMIT); - let result = ctx.call(dest, 0, &mut gas_meter, vec![]); + let result = super::transfer( + &mut gas_meter, + super::TransferCause::Call, + super::TransactorKind::PlainAccount, + &origin, + &dest, + 0, + &mut ctx, + ); assert_matches!(result, Ok(_)); let mut toks = gas_meter.tokens().iter(); - match_tokens!(toks, ExecFeeToken::Call,); + match_tokens!(toks, TransferFeeToken { kind: TransferFeeKind::Transfer },); }); // This test verifies that base fee for instantiation is taken. 
@@ -1043,14 +1080,18 @@ mod tests { set_balance(&origin, 100); set_balance(&dest, 0); - let output = ctx.call( - dest, + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + + super::transfer( + &mut gas_meter, + super::TransferCause::Call, + super::TransactorKind::PlainAccount, + &origin, + &dest, 55, - &mut GasMeter::::new(GAS_LIMIT), - vec![], + &mut ctx, ).unwrap(); - assert!(output.is_success()); assert_eq!(get_balance(&origin), 45); assert_eq!(get_balance(&dest), 55); }); @@ -1107,13 +1148,20 @@ mod tests { let mut gas_meter = GasMeter::::new(GAS_LIMIT); - let result = ctx.call(dest, 50, &mut gas_meter, vec![]); + let result = super::transfer( + &mut gas_meter, + super::TransferCause::Call, + super::TransactorKind::PlainAccount, + &origin, + &dest, + 50, + &mut ctx, + ); assert_matches!(result, Ok(_)); let mut toks = gas_meter.tokens().iter(); match_tokens!( toks, - ExecFeeToken::Call, TransferFeeToken { kind: TransferFeeKind::Transfer, }, @@ -1132,13 +1180,20 @@ mod tests { let mut gas_meter = GasMeter::::new(GAS_LIMIT); - let result = ctx.call(dest, 50, &mut gas_meter, vec![]); + let result = super::transfer( + &mut gas_meter, + super::TransferCause::Call, + super::TransactorKind::PlainAccount, + &origin, + &dest, + 50, + &mut ctx, + ); assert_matches!(result, Ok(_)); let mut toks = gas_meter.tokens().iter(); match_tokens!( toks, - ExecFeeToken::Call, TransferFeeToken { kind: TransferFeeKind::Transfer, }, @@ -1189,16 +1244,19 @@ mod tests { let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); set_balance(&origin, 0); - let result = ctx.call( - dest, - 100, + let result = super::transfer( &mut GasMeter::::new(GAS_LIMIT), - vec![], + super::TransferCause::Call, + super::TransactorKind::PlainAccount, + &origin, + &dest, + 100, + &mut ctx, ); - assert_matches!( + assert_eq!( result, - Err(DispatchError::Module { message: Some("InsufficientBalance"), .. }) + Err(Error::::TransferFailed.into()) ); assert_eq!(get_balance(&origin), 0); assert_eq!(get_balance(&dest), 0); @@ -1335,9 +1393,9 @@ mod tests { if !*reached_bottom { // We are first time here, it means we just reached bottom. // Verify that we've got proper error and set `reached_bottom`. - assert_matches!( + assert_eq!( r, - Err(DispatchError::Other("reached maximum depth, cannot make a call")) + Err(Error::::MaxCallDepthReached.into()) ); *reached_bottom = true; } else { @@ -1604,7 +1662,10 @@ mod tests { ctx.gas_meter, vec![] ), - Err(DispatchError::Other("It's a trap!")) + Err(ExecError { + error: DispatchError::Other("It's a trap!"), + origin: ErrorOrigin::Callee, + }) ); exec_success() @@ -1648,14 +1709,14 @@ mod tests { let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); set_balance(&ALICE, 1000); - assert_matches!( + assert_eq!( ctx.instantiate( 100, &mut GasMeter::::new(GAS_LIMIT), &terminate_ch, vec![], ), - Err(DispatchError::Other("insufficient remaining balance")) + Err(Error::::NewContractNotFunded.into()) ); assert_eq!( diff --git a/frame/contracts/src/gas.rs b/frame/contracts/src/gas.rs index d6c7f82753e..decaf11b796 100644 --- a/frame/contracts/src/gas.rs +++ b/frame/contracts/src/gas.rs @@ -14,11 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-use crate::Trait; +use crate::{Trait, exec::ExecError}; use sp_std::marker::PhantomData; use sp_runtime::traits::Zero; use frame_support::dispatch::{ - DispatchError, DispatchResultWithPostInfo, PostDispatchInfo, DispatchErrorWithPostInfo, + DispatchResultWithPostInfo, PostDispatchInfo, DispatchErrorWithPostInfo, }; #[cfg(test)] @@ -189,8 +189,9 @@ impl GasMeter { } /// Turn this GasMeter into a DispatchResult that contains the actually used gas. - pub fn into_dispatch_result(self, result: Result) -> DispatchResultWithPostInfo where - E: Into, + pub fn into_dispatch_result(self, result: Result) -> DispatchResultWithPostInfo + where + E: Into, { let post_info = PostDispatchInfo { actual_weight: Some(self.gas_spent()), @@ -199,7 +200,7 @@ impl GasMeter { result .map(|_| post_info) - .map_err(|e| DispatchErrorWithPostInfo { post_info, error: e.into() }) + .map_err(|e| DispatchErrorWithPostInfo { post_info, error: e.into().error }) } #[cfg(test)] diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 6d0b481dd0d..24e5ece5bb8 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -95,6 +95,7 @@ use crate::wasm::{WasmLoader, WasmVm}; pub use crate::gas::{Gas, GasMeter}; pub use crate::exec::{ExecResult, ExecReturnValue}; +pub use crate::wasm::ReturnCode as RuntimeReturnCode; #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; @@ -420,9 +421,30 @@ decl_error! { /// the subsistence threshold. No transfer is allowed to do this in order to allow /// for a tombstone to be created. Use `ext_terminate` to remove a contract without /// leaving a tombstone behind. - InsufficientBalance, + BelowSubsistenceThreshold, + /// The newly created contract is below the subsistence threshold after executing + /// its contructor. No contracts are allowed to exist below that threshold. + NewContractNotFunded, + /// Performing the requested transfer failed for a reason originating in the + /// chosen currency implementation of the runtime. Most probably the balance is + /// too low or locks are placed on it. + TransferFailed, + /// Performing a call was denied because the calling depth reached the limit + /// of what is specified in the schedule. + MaxCallDepthReached, + /// The contract that was called is either no contract at all (a plain account) + /// or is a tombstone. + NotCallable, /// The code supplied to `put_code` exceeds the limit specified in the current schedule. CodeTooLarge, + /// No code could be found at the supplied code hash. + CodeNotFound, + /// A buffer outside of sandbox memory was passed to a contract API function. + OutOfBounds, + /// Input passed to a contract API function failed to decode as expected type. + DecodingFailed, + /// Contract trapped during execution. + ContractTrapped, } } diff --git a/frame/contracts/src/storage.rs b/frame/contracts/src/storage.rs index 4c5ad892a96..3740952778f 100644 --- a/frame/contracts/src/storage.rs +++ b/frame/contracts/src/storage.rs @@ -149,6 +149,7 @@ pub fn set_rent_allowance( } /// Returns the code hash of the contract specified by `account` ID. 
+#[cfg(test)] pub fn code_hash(account: &AccountIdOf) -> Result, ContractAbsentError> { >::get(account) .and_then(|i| i.as_alive().map(|i| i.code_hash)) diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 5b2d7feb3d1..37ded30a693 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -17,7 +17,7 @@ use crate::{ BalanceOf, ContractAddressFor, ContractInfo, ContractInfoOf, GenesisConfig, Module, RawAliveContractInfo, RawEvent, Trait, TrieId, Schedule, TrieIdGenerator, gas::Gas, - Error, + Error, Config, RuntimeReturnCode, }; use assert_matches::assert_matches; use hex_literal::*; @@ -30,8 +30,9 @@ use sp_runtime::{ use frame_support::{ assert_ok, assert_err_ignore_postinfo, impl_outer_dispatch, impl_outer_event, impl_outer_origin, parameter_types, StorageMap, StorageValue, - traits::{Currency, Get}, + traits::{Currency, Get, ReservableCurrency}, weights::{Weight, PostDispatchInfo}, + dispatch::DispatchErrorWithPostInfo, }; use std::cell::RefCell; use frame_system::{self as system, EventRecord, Phase}; @@ -63,6 +64,7 @@ impl_outer_dispatch! { } } +#[macro_use] pub mod test_utils { use super::{Test, Balances}; use crate::{ContractInfoOf, TrieIdGenerator, CodeHash}; @@ -89,6 +91,12 @@ pub mod test_utils { pub fn get_balance(who: &u64) -> u64 { Balances::free_balance(who) } + macro_rules! assert_return_code { + ( $x:expr , $y:expr $(,)? ) => {{ + use sp_std::convert::TryInto; + assert_eq!(u32::from_le_bytes($x.data[..].try_into().unwrap()), $y as u32); + }} + } } thread_local! { @@ -279,19 +287,23 @@ where Ok((wasm_binary, code_hash)) } -// Perform a simple transfer to a non-existent account. +// Perform a call to a plain account. +// The actual transfer fails because we can only call contracts. // Then we check that only the base costs are returned as actual costs. #[test] -fn returns_base_call_cost() { +fn calling_plain_account_fails() { ExtBuilder::default().build().execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 100_000_000); assert_eq!( Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, Vec::new()), - Ok( - PostDispatchInfo { - actual_weight: Some(67500000), - pays_fee: Default::default(), + Err( + DispatchErrorWithPostInfo { + error: Error::::NotCallable.into(), + post_info: PostDispatchInfo { + actual_weight: Some(67500000), + pays_fee: Default::default(), + }, } ) ); @@ -987,7 +999,7 @@ fn call_removed_contract() { // Calling contract should remove contract and fail. assert_err_ignore_postinfo!( Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null()), - "contract has been evicted" + Error::::NotCallable ); // Calling a contract that is about to evict shall emit an event. assert_eq!(System::events(), vec![ @@ -1001,7 +1013,7 @@ fn call_removed_contract() { // Subsequent contract calls should also fail. assert_err_ignore_postinfo!( Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null()), - "contract has been evicted" + Error::::NotCallable ); }) } @@ -1128,7 +1140,7 @@ fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: // we expect that it will get removed leaving tombstone. 
assert_err_ignore_postinfo!( Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null()), - "contract has been evicted" + Error::::NotCallable ); assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); assert_eq!(System::events(), vec![ @@ -1181,7 +1193,7 @@ fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: assert_err_ignore_postinfo!( perform_the_restoration(), - "contract trapped during execution" + Error::::ContractTrapped, ); assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); @@ -1309,7 +1321,7 @@ fn storage_max_value_limit() { GAS_LIMIT, Encode::encode(&(self::MaxValueSize::get() + 1)), ), - "contract trapped during execution" + Error::::ContractTrapped, ); }); } @@ -1373,17 +1385,16 @@ fn cannot_self_destruct_through_draning() { Some(ContractInfo::Alive(_)) ); - // Call BOB with no input data, forcing it to run until out-of-balance - // and eventually trapping because below existential deposit. - assert_err_ignore_postinfo!( + // Call BOB which makes it send all funds to the zero address + // The contract code asserts that the correct error value is returned. + assert_ok!( Contracts::call( Origin::signed(ALICE), BOB, 0, GAS_LIMIT, vec![], - ), - "contract trapped during execution" + ) ); }); } @@ -1423,7 +1434,7 @@ fn cannot_self_destruct_while_live() { GAS_LIMIT, vec![0], ), - "contract trapped during execution" + Error::::ContractTrapped, ); // Check that BOB is still alive. @@ -1535,8 +1546,7 @@ fn cannot_self_destruct_in_constructor() { let _ = Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - // Fail to instantiate the BOB because the call that is issued in the deploy - // function exhausts all balances which puts it below the existential deposit. + // Fail to instantiate the BOB because the contructor calls ext_terminate. assert_err_ignore_postinfo!( Contracts::instantiate( Origin::signed(ALICE), @@ -1545,7 +1555,7 @@ fn cannot_self_destruct_in_constructor() { code_hash.into(), vec![], ), - "contract trapped during execution" + Error::::NewContractNotFunded, ); }); } @@ -1603,3 +1613,216 @@ fn crypto_hashes() { } }) } + +#[test] +fn transfer_return_code() { + let (wasm, code_hash) = compile_module::("transfer_return_code").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let subsistence = Config::::subsistence_threshold_uncached(); + let _ = Balances::deposit_creating(&ALICE, 10 * subsistence); + assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); + + assert_ok!( + Contracts::instantiate( + Origin::signed(ALICE), + subsistence, + GAS_LIMIT, + code_hash.into(), + vec![], + ), + ); + + // Contract has only the minimal balance so any transfer will return BelowSubsistence. + let result = Contracts::bare_call( + ALICE, + BOB, + 0, + GAS_LIMIT, + vec![], + ).0.unwrap(); + assert_return_code!(result, RuntimeReturnCode::BelowSubsistenceThreshold); + + // Contract has enough total balance in order to not go below the subsistence + // threshold when transfering 100 balance but this balance is reserved so + // the transfer still fails but with another return code. 
+ Balances::make_free_balance_be(&BOB, subsistence + 100); + Balances::reserve(&BOB, subsistence + 100).unwrap(); + let result = Contracts::bare_call( + ALICE, + BOB, + 0, + GAS_LIMIT, + vec![], + ).0.unwrap(); + assert_return_code!(result, RuntimeReturnCode::TransferFailed); + }); +} + +#[test] +fn call_return_code() { + let (caller_code, caller_hash) = compile_module::("call_return_code").unwrap(); + let (callee_code, callee_hash) = compile_module::("ok_trap_revert").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let subsistence = Config::::subsistence_threshold_uncached(); + let _ = Balances::deposit_creating(&ALICE, 10 * subsistence); + let _ = Balances::deposit_creating(&CHARLIE, 10 * subsistence); + assert_ok!(Contracts::put_code(Origin::signed(ALICE), caller_code)); + assert_ok!(Contracts::put_code(Origin::signed(ALICE), callee_code)); + + assert_ok!( + Contracts::instantiate( + Origin::signed(ALICE), + subsistence, + GAS_LIMIT, + caller_hash.into(), + vec![0], + ), + ); + + // Contract calls into Django which is no valid contract + let result = Contracts::bare_call( + ALICE, + BOB, + 0, + GAS_LIMIT, + vec![0], + ).0.unwrap(); + assert_return_code!(result, RuntimeReturnCode::NotCallable); + + assert_ok!( + Contracts::instantiate( + Origin::signed(CHARLIE), + subsistence, + GAS_LIMIT, + callee_hash.into(), + vec![0], + ), + ); + + // Contract has only the minimal balance so any transfer will return BelowSubsistence. + let result = Contracts::bare_call( + ALICE, + BOB, + 0, + GAS_LIMIT, + vec![0], + ).0.unwrap(); + assert_return_code!(result, RuntimeReturnCode::BelowSubsistenceThreshold); + + // Contract has enough total balance in order to not go below the subsistence + // threshold when transfering 100 balance but this balance is reserved so + // the transfer still fails but with another return code. + Balances::make_free_balance_be(&BOB, subsistence + 100); + Balances::reserve(&BOB, subsistence + 100).unwrap(); + let result = Contracts::bare_call( + ALICE, + BOB, + 0, + GAS_LIMIT, + vec![0], + ).0.unwrap(); + assert_return_code!(result, RuntimeReturnCode::TransferFailed); + + // Contract has enough balance but callee reverts because "1" is passed. + Balances::make_free_balance_be(&BOB, subsistence + 1000); + let result = Contracts::bare_call( + ALICE, + BOB, + 0, + GAS_LIMIT, + vec![1], + ).0.unwrap(); + assert_return_code!(result, RuntimeReturnCode::CalleeReverted); + + // Contract has enough balance but callee traps because "2" is passed. 
+ let result = Contracts::bare_call( + ALICE, + BOB, + 0, + GAS_LIMIT, + vec![2], + ).0.unwrap(); + assert_return_code!(result, RuntimeReturnCode::CalleeTrapped); + + }); +} + +#[test] +fn instantiate_return_code() { + let (caller_code, caller_hash) = compile_module::("instantiate_return_code").unwrap(); + let (callee_code, callee_hash) = compile_module::("ok_trap_revert").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let subsistence = Config::::subsistence_threshold_uncached(); + let _ = Balances::deposit_creating(&ALICE, 10 * subsistence); + let _ = Balances::deposit_creating(&CHARLIE, 10 * subsistence); + assert_ok!(Contracts::put_code(Origin::signed(ALICE), caller_code)); + assert_ok!(Contracts::put_code(Origin::signed(ALICE), callee_code)); + let callee_hash = callee_hash.as_ref().to_vec(); + + assert_ok!( + Contracts::instantiate( + Origin::signed(ALICE), + subsistence, + GAS_LIMIT, + caller_hash.into(), + vec![], + ), + ); + + // Contract has only the minimal balance so any transfer will return BelowSubsistence. + let result = Contracts::bare_call( + ALICE, + BOB, + 0, + GAS_LIMIT, + vec![0; 33], + ).0.unwrap(); + assert_return_code!(result, RuntimeReturnCode::BelowSubsistenceThreshold); + + // Contract has enough total balance in order to not go below the subsistence + // threshold when transfering 100 balance but this balance is reserved so + // the transfer still fails but with another return code. + Balances::make_free_balance_be(&BOB, subsistence + 100); + Balances::reserve(&BOB, subsistence + 100).unwrap(); + let result = Contracts::bare_call( + ALICE, + BOB, + 0, + GAS_LIMIT, + vec![0; 33], + ).0.unwrap(); + assert_return_code!(result, RuntimeReturnCode::TransferFailed); + + // Contract has enough balance but the passed code hash is invalid + Balances::make_free_balance_be(&BOB, subsistence + 1000); + let result = Contracts::bare_call( + ALICE, + BOB, + 0, + GAS_LIMIT, + vec![0; 33], + ).0.unwrap(); + assert_return_code!(result, RuntimeReturnCode::CodeNotFound); + + // Contract has enough balance but callee reverts because "1" is passed. + let result = Contracts::bare_call( + ALICE, + BOB, + 0, + GAS_LIMIT, + callee_hash.iter().cloned().chain(sp_std::iter::once(1)).collect(), + ).0.unwrap(); + assert_return_code!(result, RuntimeReturnCode::CalleeReverted); + + // Contract has enough balance but callee traps because "2" is passed. + let result = Contracts::bare_call( + ALICE, + BOB, + 0, + GAS_LIMIT, + callee_hash.iter().cloned().chain(sp_std::iter::once(2)).collect(), + ).0.unwrap(); + assert_return_code!(result, RuntimeReturnCode::CalleeTrapped); + + }); +} diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 68dbae896b0..7f985e90b66 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -36,6 +36,7 @@ use self::runtime::{to_execution_result, Runtime}; use self::code_cache::load as load_code; pub use self::code_cache::save as save_code; +pub use self::runtime::ReturnCode; /// A prepared wasm module ready for execution. 
#[derive(Clone, Encode, Decode)] @@ -152,13 +153,12 @@ mod tests { use super::*; use std::collections::HashMap; use sp_core::H256; - use crate::exec::{Ext, StorageKey, ExecReturnValue, ReturnFlags}; + use crate::exec::{Ext, StorageKey, ExecReturnValue, ReturnFlags, ExecError, ErrorOrigin}; use crate::gas::{Gas, GasMeter}; use crate::tests::{Test, Call}; use crate::wasm::prepare::prepare_contract; - use crate::{CodeHash, BalanceOf}; + use crate::{CodeHash, BalanceOf, Error}; use hex_literal::hex; - use assert_matches::assert_matches; use sp_runtime::DispatchError; use frame_support::weights::Weight; @@ -225,7 +225,7 @@ mod tests { endowment: u64, gas_meter: &mut GasMeter, data: Vec, - ) -> Result<(u64, ExecReturnValue), DispatchError> { + ) -> Result<(u64, ExecReturnValue), ExecError> { self.instantiates.push(InstantiateEntry { code_hash: code_hash.clone(), endowment, @@ -365,7 +365,7 @@ mod tests { value: u64, gas_meter: &mut GasMeter, input_data: Vec, - ) -> Result<(u64, ExecReturnValue), DispatchError> { + ) -> Result<(u64, ExecReturnValue), ExecError> { (**self).instantiate(code, value, gas_meter, input_data) } fn transfer( @@ -483,14 +483,16 @@ mod tests { ;; value_ptr: u32, ;; value_len: u32, ;;) -> u32 - (import "env" "ext_transfer" (func $ext_transfer (param i32 i32 i32 i32))) + (import "env" "ext_transfer" (func $ext_transfer (param i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) (func (export "call") - (call $ext_transfer - (i32.const 4) ;; Pointer to "account" address. - (i32.const 8) ;; Length of "account" address. - (i32.const 12) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer. + (drop + (call $ext_transfer + (i32.const 4) ;; Pointer to "account" address. + (i32.const 8) ;; Length of "account" address. + (i32.const 12) ;; Pointer to the buffer with value to transfer + (i32.const 8) ;; Length of the buffer with value to transfer. + ) ) ) (func (export "deploy")) @@ -521,7 +523,7 @@ mod tests { to: 7, value: 153, data: Vec::new(), - gas_left: 9989500000, + gas_left: 9989000000, }] ); } @@ -1503,14 +1505,17 @@ mod tests { // Checks that the runtime traps if there are more than `max_topic_events` topics. let mut gas_meter = GasMeter::new(GAS_LIMIT); - assert_matches!( + assert_eq!( execute( CODE_DEPOSIT_EVENT_MAX_TOPICS, vec![], MockExt::default(), &mut gas_meter ), - Err(DispatchError::Other("contract trapped during execution")) + Err(ExecError { + error: Error::::ContractTrapped.into(), + origin: ErrorOrigin::Caller, + }) ); } @@ -1545,14 +1550,17 @@ mod tests { // Checks that the runtime traps if there are duplicates. let mut gas_meter = GasMeter::new(GAS_LIMIT); - assert_matches!( + assert_eq!( execute( CODE_DEPOSIT_EVENT_DUPLICATES, vec![], MockExt::default(), &mut gas_meter ), - Err(DispatchError::Other("contract trapped during execution")) + Err(ExecError { + error: Error::::ContractTrapped.into(), + origin: ErrorOrigin::Caller, + }) ); } @@ -1666,4 +1674,75 @@ mod tests { assert_eq!(output, ExecReturnValue { flags: ReturnFlags::REVERT, data: hex!("5566778899").to_vec() }); assert!(!output.is_success()); } + + const CODE_OUT_OF_BOUNDS_ACCESS: &str = r#" +(module + (import "env" "ext_terminate" (func $ext_terminate (param i32 i32))) + (import "env" "memory" (memory 1 1)) + + (func (export "deploy")) + + (func (export "call") + (call $ext_terminate + (i32.const 65536) ;; Pointer to "account" address (out of bound). + (i32.const 8) ;; Length of "account" address. 
+ ) + ) +) +"#; + + #[test] + fn contract_out_of_bounds_access() { + let mut mock_ext = MockExt::default(); + let result = execute( + CODE_OUT_OF_BOUNDS_ACCESS, + vec![], + &mut mock_ext, + &mut GasMeter::new(GAS_LIMIT), + ); + + assert_eq!( + result, + Err(ExecError { + error: Error::::OutOfBounds.into(), + origin: ErrorOrigin::Caller, + }) + ); + } + + const CODE_DECODE_FAILURE: &str = r#" +(module + (import "env" "ext_terminate" (func $ext_terminate (param i32 i32))) + (import "env" "memory" (memory 1 1)) + + (func (export "deploy")) + + (func (export "call") + (call $ext_terminate + (i32.const 0) ;; Pointer to "account" address. + (i32.const 4) ;; Length of "account" address (too small -> decode fail). + ) + ) +) +"#; + + #[test] + fn contract_decode_failure() { + let mut mock_ext = MockExt::default(); + let result = execute( + CODE_DECODE_FAILURE, + vec![], + &mut mock_ext, + &mut GasMeter::new(GAS_LIMIT), + ); + + assert_eq!( + result, + Err(ExecError { + error: Error::::DecodingFailed.into(), + origin: ErrorOrigin::Caller, + }) + ); + } + } diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index ab93076f57b..ed97a4dae3c 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -18,7 +18,7 @@ use crate::{Schedule, Trait, CodeHash, BalanceOf, Error}; use crate::exec::{ - Ext, ExecResult, ExecReturnValue, StorageKey, TopicOf, ReturnFlags, + Ext, ExecResult, ExecReturnValue, StorageKey, TopicOf, ReturnFlags, ExecError }; use crate::gas::{Gas, GasMeter, Token, GasMeterResult}; use crate::wasm::env_def::ConvertibleToWasm; @@ -36,21 +36,33 @@ use sp_io::hashing::{ sha2_256, }; -/// Every error that can be returned from a runtime API call. +/// Every error that can be returned to a contract when it calls any of the host functions. #[repr(u32)] pub enum ReturnCode { /// API call successful. Success = 0, /// The called function trapped and has its state changes reverted. /// In this case no output buffer is returned. - /// Can only be returned from `ext_call` and `ext_instantiate`. CalleeTrapped = 1, /// The called function ran to completion but decided to revert its state. /// An output buffer is returned when one was supplied. - /// Can only be returned from `ext_call` and `ext_instantiate`. CalleeReverted = 2, /// The passed key does not exist in storage. KeyNotFound = 3, + /// Transfer failed because it would have brought the sender's total balance below the + /// subsistence threshold. + BelowSubsistenceThreshold = 4, + /// Transfer failed for other reasons. Most probably reserved or locked balance of the + /// sender prevents the transfer. + TransferFailed = 5, + /// The newly created contract is below the subsistence threshold after executing + /// its constructor. + NewContractNotFunded = 6, + /// No code could be found at the supplied code hash. + CodeNotFound = 7, + /// The contract that was called is either no contract at all (a plain account) + /// or is a tombstone. + NotCallable = 8, } impl ConvertibleToWasm for ReturnCode { @@ -66,7 +78,7 @@ impl ConvertibleToWasm for ReturnCode { } impl From for ReturnCode { - fn from(from: ExecReturnValue) -> ReturnCode { + fn from(from: ExecReturnValue) -> Self { if from.flags.contains(ReturnFlags::REVERT) { Self::CalleeReverted } else { @@ -96,7 +108,7 @@ enum TrapReason { SupervisorError(DispatchError), /// Signals that trap was generated in response to call `ext_return` host function. 
Return(ReturnData),
- /// Signals that a trap was generated in response to a succesful call to the
+ /// Signals that a trap was generated in response to a successful call to the
 /// `ext_terminate` host function.
 Termination,
 /// Signals that a trap was generated because of a successful restoration.
@@ -131,35 +143,42 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> {
 }
 }
+/// Converts the sandbox result and the runtime state into the execution outcome.
+///
+/// It evaluates information stored in the `trap_reason` variable of the runtime and
+/// bases the outcome on the value of this variable. Only if `trap_reason` is `None`
+/// is the result of the sandbox evaluated.
 pub(crate) fn to_execution_result(
 runtime: Runtime,
 sandbox_result: Result,
 ) -> ExecResult {
- match runtime.trap_reason {
- // The trap was the result of the execution `return` host function.
- Some(TrapReason::Return(ReturnData{ flags, data })) => {
- let flags = ReturnFlags::from_bits(flags).ok_or_else(||
- "used reserved bit in return flags"
- )?;
- return Ok(ExecReturnValue {
- flags,
- data,
- })
- },
- Some(TrapReason::Termination) => {
- return Ok(ExecReturnValue {
- flags: ReturnFlags::empty(),
- data: Vec::new(),
- })
- },
- Some(TrapReason::Restoration) => {
- return Ok(ExecReturnValue {
- flags: ReturnFlags::empty(),
- data: Vec::new(),
- })
+ // If a trap reason is set we base our decision solely on that.
+ if let Some(trap_reason) = runtime.trap_reason {
+ return match trap_reason {
+ // The trap was the result of the execution `return` host function.
+ TrapReason::Return(ReturnData{ flags, data }) => {
+ let flags = ReturnFlags::from_bits(flags).ok_or_else(||
+ "used reserved bit in return flags"
+ )?;
+ Ok(ExecReturnValue {
+ flags,
+ data,
+ })
+ },
+ TrapReason::Termination => {
+ Ok(ExecReturnValue {
+ flags: ReturnFlags::empty(),
+ data: Vec::new(),
+ })
+ },
+ TrapReason::Restoration => {
+ Ok(ExecReturnValue {
+ flags: ReturnFlags::empty(),
+ data: Vec::new(),
+ })
+ },
+ TrapReason::SupervisorError(error) => Err(error)?,
 }
- Some(TrapReason::SupervisorError(error)) => Err(error)?,
- None => (),
 }

 // Check the exact type of the error.
@@ -178,7 +197,7 @@ pub(crate) fn to_execution_result(
 Err("validation error")?,
 // Any other kind of a trap should result in a failure.
 Err(sp_sandbox::Error::Execution) | Err(sp_sandbox::Error::OutOfBounds) =>
- Err("contract trapped during execution")?,
+ Err(Error::::ContractTrapped)?
}
}
@@ -280,7 +299,8 @@ fn read_sandbox_memory(
 )?;
 let mut buf = vec![0u8; len as usize];
- ctx.memory.get(ptr, buf.as_mut_slice()).map_err(|_| sp_sandbox::HostError)?;
+ ctx.memory.get(ptr, buf.as_mut_slice())
+ .map_err(|_| store_err(ctx, Error::::OutOfBounds))?;
 Ok(buf)
 }
@@ -304,7 +324,7 @@ fn read_sandbox_memory_into_buf(
 RuntimeToken::ReadMemory(buf.len() as u32),
 )?;
- ctx.memory.get(ptr, buf).map_err(Into::into)
+ ctx.memory.get(ptr, buf).map_err(|_| store_err(ctx, Error::::OutOfBounds))
 }

 /// Read designated chunk from the sandbox memory, consuming an appropriate amount of
@@ -322,7 +342,7 @@ fn read_sandbox_memory_as(
 len: u32,
 ) -> Result {
 let buf = read_sandbox_memory(ctx, ptr, len)?;
- D::decode(&mut &buf[..]).map_err(|_| sp_sandbox::HostError)
+ D::decode(&mut &buf[..]).map_err(|_| store_err(ctx, Error::::DecodingFailed))
 }

 /// Write the given buffer to the designated location in the sandbox memory, consuming
@@ -345,9 +365,8 @@ fn write_sandbox_memory(
 RuntimeToken::WriteMemory(buf.len() as u32),
 )?;
- ctx.memory.set(ptr, buf)?;
-
- Ok(())
+ ctx.memory.set(ptr, buf)
+ .map_err(|_| store_err(ctx, Error::::OutOfBounds))
 }

 /// Write the given buffer and its length to the designated locations in sandbox memory.
@@ -379,7 +398,7 @@ fn write_sandbox_output(
 let len: u32 = read_sandbox_memory_as(ctx, out_len_ptr, 4)?;

 if len < buf_len {
- Err(map_err(ctx, Error::::OutputBufferTooSmall))?
+ Err(store_err(ctx, Error::::OutputBufferTooSmall))?
 }

 charge_gas(
@@ -398,7 +417,7 @@ fn write_sandbox_output(
 /// Stores a DispatchError returned from an Ext function into the trap_reason.
 ///
 /// This allows through supervisor generated errors to the caller.
-fn map_err(ctx: &mut Runtime, err: Error) -> sp_sandbox::HostError where
+fn store_err(ctx: &mut Runtime, err: Error) -> sp_sandbox::HostError where
 E: Ext,
 Error: Into,
 {
@@ -406,12 +425,86 @@ fn map_err(ctx: &mut Runtime, err: Error) -> sp_sandbox::HostError
 sp_sandbox::HostError
 }

+/// Fallible conversion of `DispatchError` to `ReturnCode`.
+fn err_into_return_code(from: DispatchError) -> Result {
+ use ReturnCode::*;
+
+ let below_sub = Error::::BelowSubsistenceThreshold.into();
+ let transfer_failed = Error::::TransferFailed.into();
+ let not_funded = Error::::NewContractNotFunded.into();
+ let no_code = Error::::CodeNotFound.into();
+ let invalid_contract = Error::::NotCallable.into();
+
+ match from {
+ x if x == below_sub => Ok(BelowSubsistenceThreshold),
+ x if x == transfer_failed => Ok(TransferFailed),
+ x if x == not_funded => Ok(NewContractNotFunded),
+ x if x == no_code => Ok(CodeNotFound),
+ x if x == invalid_contract => Ok(NotCallable),
+ err => Err(err)
+ }
+}
+
+/// Fallible conversion of an `ExecResult` to `ReturnCode`.
+fn exec_into_return_code(from: ExecResult) -> Result {
+ use crate::exec::ErrorOrigin::Callee;
+
+ let ExecError { error, origin } = match from {
+ Ok(retval) => return Ok(retval.into()),
+ Err(err) => err,
+ };
+
+ match (error, origin) {
+ (_, Callee) => Ok(ReturnCode::CalleeTrapped),
+ (err, _) => err_into_return_code::(err)
+ }
+}
+
+/// Used by Runtime APIs that call into other contracts.
+///
+/// Those need to transform the `ExecResult` returned from the execution into
+/// a `ReturnCode`. If this conversion fails because the `ExecResult` constitutes a
+/// fatal error then this error is stored in the `ExecutionContext` so it can be
+/// extracted for display in the UI.
+fn map_exec_result(ctx: &mut Runtime, result: ExecResult)
+ -> Result
+ {
+ match exec_into_return_code::(result) {
+ Ok(code) => Ok(code),
+ Err(err) => Err(store_err(ctx, err)),
+ }
+}
+
+/// Try to convert an error into a `ReturnCode`.
+///
+/// Used to decide between fatal and non-fatal errors.
+fn map_dispatch_result(ctx: &mut Runtime, result: Result)
+ -> Result
+ {
+ let err = if let Err(err) = result {
+ err
+ } else {
+ return Ok(ReturnCode::Success)
+ };
+
+ match err_into_return_code::(err) {
+ Ok(code) => Ok(code),
+ Err(err) => Err(store_err(ctx, err)),
+ }
+}
+
 // ***********************************************************
 // * AFTER MAKING A CHANGE MAKE SURE TO UPDATE COMPLEXITY.MD *
 // ***********************************************************

 // Define a function `fn init_env() -> HostFunctionSet` that returns
 // a function set which can be imported by an executed contract.
+//
+// # Note
+//
+// Any input that leads to an out of bounds error (reading or writing) or failing to decode
+// data passed to the supervisor will lead to a trap. This is not documented explicitly
+// for every function.
 define_env!(Env, ,
 // Account for used gas. Traps if gas used is greater than gas limit.
@@ -441,7 +534,7 @@ define_env!(Env, ,
 // - `value_ptr`: pointer into the linear memory where the value to set is placed.
 // - `value_len`: the length of the value in bytes.
 //
- // # Errors
+ // # Traps
 //
 // - If value length exceeds the configured maximum value length of a storage entry.
 // - Upon trying to set an empty storage entry (value length is 0).
@@ -480,12 +573,7 @@ define_env!(Env, ,
 //
 // # Errors
 //
- // If there is no entry under the given key then this function will return
- // `ReturnCode::KeyNotFound`.
- //
- // # Traps
- //
- // Traps if the supplied buffer length is smaller than the size of the stored value.
+ // `ReturnCode::KeyNotFound`
 ext_get_storage(ctx, key_ptr: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => {
 let mut key: StorageKey = [0; 32];
 read_sandbox_memory_into_buf(ctx, key_ptr, &mut key)?;
@@ -508,24 +596,24 @@ define_env!(Env, ,
 // Should be decodable as a `T::Balance`. Traps otherwise.
 // - value_len: length of the value buffer.
 //
- // # Traps
+ // # Errors
 //
- // Traps if the transfer wasn't succesful. This can happen when the value transfered
- // brings the sender below the existential deposit. Use `ext_terminate` to remove
- // the caller contract.
+ // `ReturnCode::BelowSubsistenceThreshold`
+ // `ReturnCode::TransferFailed`
 ext_transfer(
 ctx,
 account_ptr: u32,
 account_len: u32,
 value_ptr: u32,
 value_len: u32
- ) => {
+ ) -> ReturnCode => {
 let callee: <::T as frame_system::Trait>::AccountId =
 read_sandbox_memory_as(ctx, account_ptr, account_len)?;
 let value: BalanceOf<::T> =
 read_sandbox_memory_as(ctx, value_ptr, value_len)?;
- ctx.ext.transfer(&callee, value, ctx.gas_meter).map_err(|e| map_err(ctx, e))
+ let result = ctx.ext.transfer(&callee, value, ctx.gas_meter);
+ map_dispatch_result(ctx, result)
 },

 // Make a call to another contract.
@@ -551,17 +639,14 @@ define_env!(Env, ,
 //
 // # Errors
 //
- // `ReturnCode::CalleeReverted`: The callee ran to completion but decided to have its
- // changes reverted. The delivery of the output buffer is still possible.
- // `ReturnCode::CalleeTrapped`: The callee trapped during execution. All changes are reverted
- // and no output buffer is delivered.
- //
- // # Traps
+ // An error means that the call wasn't successful and no output buffer is returned unless
+ // stated otherwise.
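 // For example: with this change a balance transfer that fails inside the callee is
 // reported to the calling contract as `ReturnCode::TransferFailed` (or
 // `ReturnCode::BelowSubsistenceThreshold`) instead of trapping the caller.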
// - // - Transfer of balance failed. This call can not bring the sender below the existential - // deposit. Use `ext_terminate` to remove the caller. - // - Callee does not exist. - // - Supplied output buffer is too small. + // `ReturnCode::CalleeReverted`: Output buffer is returned. + // `ReturnCode::CalleeTrapped` + // `ReturnCode::BelowSubsistenceThreshold` + // `ReturnCode::TransferFailed` + // `ReturnCode::NotCallable` ext_call( ctx, callee_ptr: u32, @@ -594,22 +679,16 @@ define_env!(Env, , nested_meter, input_data, ) - .map_err(|_| ()) } // there is not enough gas to allocate for the nested call. - None => Err(()), + None => Err(Error::<::T>::OutOfGas.into()), } }); - match call_outcome { - Ok(output) => { - write_sandbox_output(ctx, output_ptr, output_len_ptr, &output.data, true)?; - Ok(output.into()) - }, - Err(_) => { - Ok(ReturnCode::CalleeTrapped) - }, + if let Ok(output) = &call_outcome { + write_sandbox_output(ctx, output_ptr, output_len_ptr, &output.data, true)?; } + map_exec_result(ctx, call_outcome) }, // Instantiate a contract with the specified code hash. @@ -643,19 +722,18 @@ define_env!(Env, , // // # Errors // - // `ReturnCode::CalleeReverted`: The callee's constructor ran to completion but decided to have - // its changes reverted. The delivery of the output buffer is still possible but the - // account was not created and no address is returned. - // `ReturnCode::CalleeTrapped`: The callee trapped during execution. All changes are reverted - // and no output buffer is delivered. The accounts was not created and no address is - // returned. + // Please consult the `ReturnCode` enum declaration for more information on those + // errors. Here we only note things specific to this function. // - // # Traps + // An error means that the account wasn't created and no address or output buffer + // is returned unless stated otherwise. // - // - Transfer of balance failed. This call can not bring the sender below the existential - // deposit. Use `ext_terminate` to remove the caller. - // - Code hash does not exist. - // - Supplied output buffers are too small. + // `ReturnCode::CalleeReverted`: Output buffer is returned. + // `ReturnCode::CalleeTrapped` + // `ReturnCode::BelowSubsistenceThreshold` + // `ReturnCode::TransferFailed` + // `ReturnCode::NewContractNotFunded` + // `ReturnCode::CodeNotFound` ext_instantiate( ctx, code_hash_ptr: u32, @@ -690,26 +768,20 @@ define_env!(Env, , nested_meter, input_data ) - .map_err(|_| ()) } // there is not enough gas to allocate for the nested call. - None => Err(()), + None => Err(Error::<::T>::OutOfGas.into()), } }); - match instantiate_outcome { - Ok((address, output)) => { - if !output.flags.contains(ReturnFlags::REVERT) { - write_sandbox_output( - ctx, address_ptr, address_len_ptr, &address.encode(), true - )?; - } - write_sandbox_output(ctx, output_ptr, output_len_ptr, &output.data, true)?; - Ok(output.into()) - }, - Err(_) => { - Ok(ReturnCode::CalleeTrapped) - }, + if let Ok((address, output)) = &instantiate_outcome { + if !output.flags.contains(ReturnFlags::REVERT) { + write_sandbox_output( + ctx, address_ptr, address_len_ptr, &address.encode(), true + )?; + } + write_sandbox_output(ctx, output_ptr, output_len_ptr, &output.data, true)?; } + map_exec_result(ctx, instantiate_outcome.map(|(_id, retval)| retval)) }, // Remove the calling account and transfer remaining balance. @@ -722,6 +794,10 @@ define_env!(Env, , // where all remaining funds of the caller are transfered. // Should be decodable as an `T::AccountId`. 
Traps otherwise. // - beneficiary_len: length of the address buffer. + // + // # Traps + // + // - The contract is live i.e is already on the call stack. ext_terminate( ctx, beneficiary_ptr: u32, @@ -939,6 +1015,11 @@ define_env!(Env, , // encodes the rent allowance that must be set in the case of successful restoration. // `delta_ptr` is the pointer to the start of a buffer that has `delta_count` storage keys // laid out sequentially. + // + // # Traps + // + // - Tombstone hashes do not match + // - Calling cantract is live i.e is already on the call stack. ext_restore_to( ctx, dest_ptr: u32, -- GitLab From 2bd4f4515fbfd9c7433eb67b5de87549fea0bc89 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Mon, 3 Aug 2020 14:41:54 +0200 Subject: [PATCH 044/132] Improve Benchmark Writer: Remove Unused Components, Remove Multiply by Zero, Files Split by Pallet (#6785) * initial improvements * better file management, ignore unused components * Output warning when components unused * update comment * Write even when base weight is zero * remove unwrap where possible * Dont sort components to dedup * undo delete * improve clarity of unused components * remove unused dep * Update Process.json --- Cargo.lock | 1 - Process.json | 5 + frame/benchmarking/src/utils.rs | 7 + utils/frame/benchmarking-cli/Cargo.toml | 1 - utils/frame/benchmarking-cli/src/command.rs | 3 +- utils/frame/benchmarking-cli/src/writer.rs | 193 +++++++++++--------- 6 files changed, 120 insertions(+), 90 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b86507cf072..fe237f2341b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1572,7 +1572,6 @@ dependencies = [ name = "frame-benchmarking-cli" version = "2.0.0-rc5" dependencies = [ - "Inflector", "frame-benchmarking", "parity-scale-codec", "sc-cli", diff --git a/Process.json b/Process.json index cd15e137df6..540bd644311 100644 --- a/Process.json +++ b/Process.json @@ -21,4 +21,9 @@ "project_name": "Smart Contracts", "owner": "pepyakin", "matrix_room_id": "!yBKstWVBkwzUkPslsp:matrix.parity.io" +}, +{ + "project_name": "Benchmarking and Weights", + "owner": "shawntabrizi", + "matrix_room_id": "!pZPWqCRLVtORZTEsEf:matrix.parity.io" }] diff --git a/frame/benchmarking/src/utils.rs b/frame/benchmarking/src/utils.rs index 7ed9a862a04..5a2bd55ff79 100644 --- a/frame/benchmarking/src/utils.rs +++ b/frame/benchmarking/src/utils.rs @@ -30,6 +30,13 @@ pub enum BenchmarkParameter { a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, } +#[cfg(feature = "std")] +impl std::fmt::Display for BenchmarkParameter { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self) + } +} + /// The results of a single of benchmark. 
#[derive(Encode, Decode, Clone, PartialEq, Debug)] pub struct BenchmarkBatch { diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index 4c522337259..c34404575e5 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -12,7 +12,6 @@ description = "CLI for benchmarking FRAME" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -Inflector = "0.11.4" frame-benchmarking = { version = "2.0.0-rc5", path = "../../../frame/benchmarking" } sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } sc-service = { version = "0.8.0-rc5", default-features = false, path = "../../../client/service" } diff --git a/utils/frame/benchmarking-cli/src/command.rs b/utils/frame/benchmarking-cli/src/command.rs index 553b68c453f..688e393bd60 100644 --- a/utils/frame/benchmarking-cli/src/command.rs +++ b/utils/frame/benchmarking-cli/src/command.rs @@ -95,8 +95,7 @@ impl BenchmarkCmd { let mut file = crate::writer::open_file("traits.rs")?; crate::writer::write_trait(&mut file, batches.clone())?; } else { - let mut file = crate::writer::open_file("benchmarks.rs")?; - crate::writer::write_results(&mut file, batches.clone())?; + crate::writer::write_results(&batches)?; } } diff --git a/utils/frame/benchmarking-cli/src/writer.rs b/utils/frame/benchmarking-cli/src/writer.rs index 199dbb795e5..2bc17aa85bd 100644 --- a/utils/frame/benchmarking-cli/src/writer.rs +++ b/utils/frame/benchmarking-cli/src/writer.rs @@ -20,13 +20,15 @@ use std::fs::{File, OpenOptions}; use std::io::prelude::*; use frame_benchmarking::{BenchmarkBatch, BenchmarkSelector, Analysis}; -use inflector::Inflector; +use sp_runtime::traits::Zero; + +const VERSION: &'static str = env!("CARGO_PKG_VERSION"); pub fn open_file(path: &str) -> Result { OpenOptions::new() .create(true) .write(true) - .append(true) + .truncate(true) .open(path) } @@ -47,81 +49,49 @@ pub fn write_trait(file: &mut File, batches: Vec) -> Result<(), if batch.pallet != current_pallet { if !current_pallet.is_empty() { // close trait - write!(file, "}}\n").unwrap(); + write!(file, "}}\n")?; } // trait wrapper - write!(file, "// {}\n", pallet_string).unwrap(); - write!(file, "pub trait WeightInfo {{\n").unwrap(); - - current_pallet = batch.pallet.clone() - } - - // function name - write!(file, "\tfn {}(", benchmark_string).unwrap(); - - // params - let components = &batch.results[0].components; - for component in components { - write!(file, "{:?}: u32, ", component.0).unwrap(); - } - // return value - write!(file, ") -> Weight;\n").unwrap(); - } - - // final close trait - write!(file, "}}\n").unwrap(); - - // Reset - current_pallet = Vec::::new(); - - for batch in &batches { - if batch.results.is_empty() { continue } - - let benchmark_string = String::from_utf8(batch.benchmark.clone()).unwrap(); - - // only create new trait definitions when we go to a new pallet - if batch.pallet != current_pallet { - if !current_pallet.is_empty() { - // close trait - write!(file, "}}\n").unwrap(); - } - - // impl trait - write!(file, "\n").unwrap(); - write!(file, "impl WeightInfo for () {{\n").unwrap(); + write!(file, "// {}\n", pallet_string)?; + write!(file, "pub trait WeightInfo {{\n")?; current_pallet = batch.pallet.clone() } // function name - write!(file, "\tfn {}(", benchmark_string).unwrap(); + write!(file, "\tfn {}(", benchmark_string)?; // params let components = &batch.results[0].components; for component in components { - write!(file, "_{:?}: u32, ", component.0).unwrap(); + write!(file, 
"{:?}: u32, ", component.0)?; } // return value - write!(file, ") -> Weight {{ 1_000_000_000 }}\n").unwrap(); + write!(file, ") -> Weight;\n")?; } // final close trait - write!(file, "}}\n").unwrap(); + write!(file, "}}\n")?; Ok(()) } -pub fn write_results(file: &mut File, batches: Vec) -> Result<(), std::io::Error> { +pub fn write_results(batches: &[BenchmarkBatch]) -> Result<(), std::io::Error> { let mut current_pallet = Vec::::new(); // Skip writing if there are no batches if batches.is_empty() { return Ok(()) } - // general imports - write!(file, "use frame_support::weights::{{Weight, constants::RocksDbWeight as DbWeight}};\n").unwrap(); + let mut batches_iter = batches.iter().peekable(); - for batch in &batches { + let first_pallet = String::from_utf8( + batches_iter.peek().expect("we checked that batches is not empty").pallet.clone() + ).unwrap(); + let mut file = open_file(&(first_pallet + ".rs"))?; + + + while let Some(batch) = batches_iter.next() { // Skip writing if there are no results if batch.results.is_empty() { continue } @@ -130,69 +100,120 @@ pub fn write_results(file: &mut File, batches: Vec) -> Result<() // only create new trait definitions when we go to a new pallet if batch.pallet != current_pallet { - if !current_pallet.is_empty() { - // close trait - write!(file, "}}\n").unwrap(); - } + // auto-generation note + write!( + file, + "//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {}\n\n", + VERSION, + )?; + + // general imports + write!( + file, + "use frame_support::weights::{{Weight, constants::RocksDbWeight as DbWeight}};\n\n" + )?; // struct for weights - write!(file, "pub struct WeightFor{};\n", - pallet_string.to_pascal_case(), - ).unwrap(); + write!(file, "pub struct WeightInfo;\n")?; // trait wrapper - write!(file, "impl {}::WeightInfo for WeightFor{} {{\n", - pallet_string, - pallet_string.to_pascal_case(), - ).unwrap(); + write!(file, "impl {}::WeightInfo for WeightInfo {{\n", pallet_string)?; current_pallet = batch.pallet.clone() } - // function name - write!(file, "\tfn {}(", benchmark_string).unwrap(); + // Analysis results + let extrinsic_time = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::ExtrinsicTime).unwrap(); + let reads = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Reads).unwrap(); + let writes = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Writes).unwrap(); + // Analysis data may include components that are not used, this filters out anything whose value is zero. 
+ let mut used_components = Vec::new(); + let mut used_extrinsic_time = Vec::new(); + let mut used_reads = Vec::new(); + let mut used_writes = Vec::new(); + extrinsic_time.slopes.iter().zip(extrinsic_time.names.iter()).for_each(|(slope, name)| { + if !slope.is_zero() { + if !used_components.contains(&name) { used_components.push(name); } + used_extrinsic_time.push((slope, name)); + } + }); + reads.slopes.iter().zip(reads.names.iter()).for_each(|(slope, name)| { + if !slope.is_zero() { + if !used_components.contains(&name) { used_components.push(name); } + used_reads.push((slope, name)); + } + }); + writes.slopes.iter().zip(writes.names.iter()).for_each(|(slope, name)| { + if !slope.is_zero() { + if !used_components.contains(&name) { used_components.push(name); } + used_writes.push((slope, name)); + } + }); + + let all_components = batch.results[0].components + .iter() + .map(|(name, _)| -> String { return name.to_string() }) + .collect::>(); + if all_components.len() != used_components.len() { + let mut unused_components = all_components; + unused_components.retain(|x| !used_components.contains(&x)); + write!(file, "\t// WARNING! Some components were not used: {:?}\n", unused_components)?; + } + + // function name + write!(file, "\tfn {}(", benchmark_string)?; // params - let components = &batch.results[0].components; - for component in components { - write!(file, "{:?}: u32, ", component.0).unwrap(); + for component in used_components { + write!(file, "{}: u32, ", component)?; } // return value - write!(file, ") -> Weight {{\n").unwrap(); + write!(file, ") -> Weight {{\n")?; - let extrinsic_time = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::ExtrinsicTime).unwrap(); - write!(file, "\t\t({} as Weight)\n", extrinsic_time.base.saturating_mul(1000)).unwrap(); - extrinsic_time.slopes.iter().zip(extrinsic_time.names.iter()).for_each(|(slope, name)| { + write!(file, "\t\t({} as Weight)\n", extrinsic_time.base.saturating_mul(1000))?; + used_extrinsic_time.iter().try_for_each(|(slope, name)| -> Result<(), std::io::Error> { write!(file, "\t\t\t.saturating_add(({} as Weight).saturating_mul({} as Weight))\n", slope.saturating_mul(1000), name, - ).unwrap(); - }); + ) + })?; - let reads = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Reads).unwrap(); - write!(file, "\t\t\t.saturating_add(DbWeight::get().reads({} as Weight))\n", reads.base).unwrap(); - reads.slopes.iter().zip(reads.names.iter()).for_each(|(slope, name)| { + if !reads.base.is_zero() { + write!(file, "\t\t\t.saturating_add(DbWeight::get().reads({} as Weight))\n", reads.base)?; + } + used_reads.iter().try_for_each(|(slope, name)| -> Result<(), std::io::Error> { write!(file, "\t\t\t.saturating_add(DbWeight::get().reads(({} as Weight).saturating_mul({} as Weight)))\n", slope, name, - ).unwrap(); - }); + ) + })?; - let writes = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Writes).unwrap(); - write!(file, "\t\t\t.saturating_add(DbWeight::get().writes({} as Weight))\n", writes.base).unwrap(); - writes.slopes.iter().zip(writes.names.iter()).for_each(|(slope, name)| { + if !writes.base.is_zero() { + write!(file, "\t\t\t.saturating_add(DbWeight::get().writes({} as Weight))\n", writes.base)?; + } + used_writes.iter().try_for_each(|(slope, name)| -> Result<(), std::io::Error> { write!(file, "\t\t\t.saturating_add(DbWeight::get().writes(({} as Weight).saturating_mul({} as Weight)))\n", slope, name, - ).unwrap(); - }); + ) + })?; // close function - write!(file, "\t}}\n").unwrap(); + write!(file, 
"\t}}\n")?; + + // Check if this is the end of the iterator + if let Some(next) = batches_iter.peek() { + // Next pallet is different than current pallet, so we close up the file and open a new one. + if next.pallet != current_pallet { + write!(file, "}}\n")?; + let next_pallet = String::from_utf8(next.pallet.clone()).unwrap(); + file = open_file(&(next_pallet + ".rs"))?; + } + } else { + // This is the end of the iterator, so we close up the final file. + write!(file, "}}\n")?; + } } - // final close trait - write!(file, "}}\n").unwrap(); - Ok(()) } -- GitLab From 2afbcf7e57c005f5601f6fc40353594923060a88 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Mon, 3 Aug 2020 15:26:09 +0200 Subject: [PATCH 045/132] Add integrity test for slash defer duration (#6782) * Add integrity test for slash defer duration * Wrap in externalities * Update frame/staking/src/lib.rs --- frame/staking/src/lib.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index cd820051b15..60943be82f1 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -1459,6 +1459,17 @@ decl_module! { // `on_finalize` weight is tracked in `on_initialize` } + fn integrity_test() { + sp_io::TestExternalities::new_empty().execute_with(|| + assert!( + T::SlashDeferDuration::get() < T::BondingDuration::get() || T::BondingDuration::get() == 0, + "As per documentation, slash defer duration ({}) should be less than bonding duration ({}).", + T::SlashDeferDuration::get(), + T::BondingDuration::get(), + ) + ); + } + /// Take the origin account as a stash and lock up `value` of its balance. `controller` will /// be the account that controls it. /// -- GitLab From 584588b7d9e88a8253ef5f48abd477505714f462 Mon Sep 17 00:00:00 2001 From: Ashley Date: Mon, 3 Aug 2020 15:48:32 +0200 Subject: [PATCH 046/132] Convert spaces to tabs (#6799) --- bin/node/bench/src/core.rs | 24 +- bin/node/bench/src/txpool.rs | 6 +- bin/node/browser-testing/src/lib.rs | 44 +-- client/proposer-metrics/src/lib.rs | 22 +- client/service/test/src/client/light.rs | 4 +- frame/contracts/src/benchmarking.rs | 358 +++++++++--------- frame/staking/src/tests.rs | 6 +- primitives/consensus/common/src/metrics.rs | 2 +- primitives/core/src/sr25519.rs | 12 +- .../runtime/src/offchain/storage_lock.rs | 44 +-- utils/fork-tree/src/lib.rs | 2 +- 11 files changed, 262 insertions(+), 262 deletions(-) diff --git a/bin/node/bench/src/core.rs b/bin/node/bench/src/core.rs index c1b1711549b..6faa7b72721 100644 --- a/bin/node/bench/src/core.rs +++ b/bin/node/bench/src/core.rs @@ -93,26 +93,26 @@ pub enum Mode { } impl std::str::FromStr for Mode { - type Err = &'static str; - fn from_str(day: &str) -> Result { - match day { - "regular" => Ok(Mode::Regular), - "profile" => Ok(Mode::Profile), - _ => Err("Could not parse mode"), - } - } + type Err = &'static str; + fn from_str(day: &str) -> Result { + match day { + "regular" => Ok(Mode::Regular), + "profile" => Ok(Mode::Profile), + _ => Err("Could not parse mode"), + } + } } impl fmt::Display for BenchmarkOutput { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( f, "{}: avg {}, w_avg {}", self.name, NsFormatter(self.raw_average), NsFormatter(self.average), ) - } + } } pub fn run_benchmark( @@ -159,4 +159,4 @@ macro_rules! 
matrix( } }; () => { vec![] } -); \ No newline at end of file +); diff --git a/bin/node/bench/src/txpool.rs b/bin/node/bench/src/txpool.rs index d6e5578192e..7ea13fc15ec 100644 --- a/bin/node/bench/src/txpool.rs +++ b/bin/node/bench/src/txpool.rs @@ -32,7 +32,7 @@ use sp_transaction_pool::{TransactionPool, TransactionSource}; use crate::core::{self, Path, Mode}; pub struct PoolBenchmarkDescription { - pub database_type: DatabaseType, + pub database_type: DatabaseType, } pub struct PoolBenchmark { @@ -41,7 +41,7 @@ pub struct PoolBenchmark { impl core::BenchmarkDescription for PoolBenchmarkDescription { fn path(&self) -> Path { - Path::new(&["node", "txpool"]) + Path::new(&["node", "txpool"]) } fn setup(self: Box) -> Box { @@ -55,7 +55,7 @@ impl core::BenchmarkDescription for PoolBenchmarkDescription { } fn name(&self) -> Cow<'static, str> { - "Transaction pool benchmark".into() + "Transaction pool benchmark".into() } } diff --git a/bin/node/browser-testing/src/lib.rs b/bin/node/browser-testing/src/lib.rs index c943a383aef..777e5ea9f13 100644 --- a/bin/node/browser-testing/src/lib.rs +++ b/bin/node/browser-testing/src/lib.rs @@ -37,34 +37,34 @@ use serde::de::DeserializeOwned; wasm_bindgen_test_configure!(run_in_browser); fn rpc_call(method: &str) -> String { - serde_json::to_string(&MethodCall { - jsonrpc: Some(Version::V2), - method: method.into(), - params: Params::None, - id: Id::Num(1) - }).unwrap() + serde_json::to_string(&MethodCall { + jsonrpc: Some(Version::V2), + method: method.into(), + params: Params::None, + id: Id::Num(1) + }).unwrap() } fn deserialize_rpc_result(js_value: JsValue) -> T { - let string = js_value.as_string().unwrap(); - let value = serde_json::from_str::(&string).unwrap().result; - // We need to convert a `Value::Object` into a proper type. - let value_string = serde_json::to_string(&value).unwrap(); - serde_json::from_str(&value_string).unwrap() + let string = js_value.as_string().unwrap(); + let value = serde_json::from_str::(&string).unwrap().result; + // We need to convert a `Value::Object` into a proper type. + let value_string = serde_json::to_string(&value).unwrap(); + serde_json::from_str(&value_string).unwrap() } #[wasm_bindgen_test] async fn runs() { - let mut client = node_cli::start_client(None, "info".into()) - .await - .unwrap(); + let mut client = node_cli::start_client(None, "info".into()) + .await + .unwrap(); - // Check that the node handles rpc calls. - // TODO: Re-add the code that checks if the node is syncing. - let chain_name: String = deserialize_rpc_result( - JsFuture::from(client.rpc_send(&rpc_call("system_chain"))) - .await - .unwrap() - ); - assert_eq!(chain_name, "Development"); + // Check that the node handles rpc calls. + // TODO: Re-add the code that checks if the node is syncing. + let chain_name: String = deserialize_rpc_result( + JsFuture::from(client.rpc_send(&rpc_call("system_chain"))) + .await + .unwrap() + ); + assert_eq!(chain_name, "Development"); } diff --git a/client/proposer-metrics/src/lib.rs b/client/proposer-metrics/src/lib.rs index 5cb749f4a26..50498d40b62 100644 --- a/client/proposer-metrics/src/lib.rs +++ b/client/proposer-metrics/src/lib.rs @@ -41,8 +41,8 @@ impl MetricsLink { /// Authorship metrics. 
#[derive(Clone)] pub struct Metrics { - pub block_constructed: Histogram, - pub number_of_transactions: Gauge, + pub block_constructed: Histogram, + pub number_of_transactions: Gauge, } impl Metrics { @@ -54,14 +54,14 @@ impl Metrics { "Histogram of time taken to construct new block", ))?, registry, - )?, - number_of_transactions: register( - Gauge::new( - "proposer_number_of_transactions", - "Number of transactions included in block", - )?, - registry, - )?, + )?, + number_of_transactions: register( + Gauge::new( + "proposer_number_of_transactions", + "Number of transactions included in block", + )?, + registry, + )?, }) - } + } } diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index 031c234c1ab..ffc84ad47b8 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -686,7 +686,7 @@ fn changes_proof_is_generated_and_checked_when_headers_are_not_pruned() { match local_result == expected_result { true => (), false => panic!(format!("Failed test {}: local = {:?}, expected = {:?}", - index, local_result, expected_result)), + index, local_result, expected_result)), } } } @@ -843,7 +843,7 @@ fn check_changes_tries_proof_fails_if_proof_is_wrong() { Box::new(TaskExecutor::new()), ); assert!(local_checker.check_changes_tries_proof(4, &remote_proof.roots, - remote_proof.roots_proof.clone()).is_err()); + remote_proof.roots_proof.clone()).is_err()); // fails when proof is broken let mut local_storage = DummyStorage::new(); diff --git a/frame/contracts/src/benchmarking.rs b/frame/contracts/src/benchmarking.rs index 29f992643ef..4bdb14576ee 100644 --- a/frame/contracts/src/benchmarking.rs +++ b/frame/contracts/src/benchmarking.rs @@ -28,21 +28,21 @@ use parity_wasm::elements::FuncBody; use sp_runtime::traits::Hash; macro_rules! 
load_module { - ($name:expr) => {{ - let code = include_bytes!(concat!("../fixtures/benchmarks/", $name, ".wat")); - compile_module::(code) - }}; + ($name:expr) => {{ + let code = include_bytes!(concat!("../fixtures/benchmarks/", $name, ".wat")); + compile_module::(code) + }}; } fn compile_module(code: &[u8]) -> (Vec, ::Output) { - let code = sp_std::str::from_utf8(code).expect("Invalid utf8 in wat file."); - let binary = wat::parse_str(code).expect("Failed to compile wat file."); - let hash = T::Hashing::hash(&binary); - (binary, hash) + let code = sp_std::str::from_utf8(code).expect("Invalid utf8 in wat file."); + let binary = wat::parse_str(code).expect("Failed to compile wat file."); + let hash = T::Hashing::hash(&binary); + (binary, hash) } fn funding() -> BalanceOf { - T::Currency::minimum_balance() * 10_000.into() + T::Currency::minimum_balance() * 10_000.into() } fn create_funded_user(string: &'static str, n: u32) -> T::AccountId { @@ -52,55 +52,55 @@ fn create_funded_user(string: &'static str, n: u32) -> T::AccountId { } fn contract_with_call_body(body: FuncBody) -> (Vec, ::Output) { - use parity_wasm::elements::{ - Instructions, Instruction::End, - }; - let contract = parity_wasm::builder::ModuleBuilder::new() - // deploy function (idx 0) - .function() - .signature().with_params(vec![]).with_return_type(None).build() - .body().with_instructions(Instructions::new(vec![End])).build() - .build() - // call function (idx 1) - .function() - .signature().with_params(vec![]).with_return_type(None).build() - .with_body(body) - .build() - .export().field("deploy").internal().func(0).build() - .export().field("call").internal().func(1).build() - .build(); - let bytes = contract.to_bytes().unwrap(); - let hash = T::Hashing::hash(&bytes); - (bytes, hash) + use parity_wasm::elements::{ + Instructions, Instruction::End, + }; + let contract = parity_wasm::builder::ModuleBuilder::new() + // deploy function (idx 0) + .function() + .signature().with_params(vec![]).with_return_type(None).build() + .body().with_instructions(Instructions::new(vec![End])).build() + .build() + // call function (idx 1) + .function() + .signature().with_params(vec![]).with_return_type(None).build() + .with_body(body) + .build() + .export().field("deploy").internal().func(0).build() + .export().field("call").internal().func(1).build() + .build(); + let bytes = contract.to_bytes().unwrap(); + let hash = T::Hashing::hash(&bytes); + (bytes, hash) } fn expanded_contract(target_bytes: u32) -> (Vec, ::Output) { - use parity_wasm::elements::{ - Instruction::{self, If, I32Const, Return, End}, - BlockType, Instructions, - }; - // Base size of a contract is 47 bytes and each expansion adds 6 bytes. - // We do one expansion less to account for the code section and function body - // size fields inside the binary wasm module representation which are leb128 encoded - // and therefore grow in size when the contract grows. We are not allowed to overshoot - // because of the maximum code size that is enforced by `put_code`. 
- let expansions = (target_bytes.saturating_sub(47) / 6).saturating_sub(1) as usize; - const EXPANSION: [Instruction; 4] = [ - I32Const(0), - If(BlockType::NoResult), - Return, - End, - ]; - let instructions = Instructions::new( - EXPANSION - .iter() - .cycle() - .take(EXPANSION.len() * expansions) - .cloned() - .chain(sp_std::iter::once(End)) - .collect() - ); - contract_with_call_body::(FuncBody::new(Vec::new(), instructions)) + use parity_wasm::elements::{ + Instruction::{self, If, I32Const, Return, End}, + BlockType, Instructions, + }; + // Base size of a contract is 47 bytes and each expansion adds 6 bytes. + // We do one expansion less to account for the code section and function body + // size fields inside the binary wasm module representation which are leb128 encoded + // and therefore grow in size when the contract grows. We are not allowed to overshoot + // because of the maximum code size that is enforced by `put_code`. + let expansions = (target_bytes.saturating_sub(47) / 6).saturating_sub(1) as usize; + const EXPANSION: [Instruction; 4] = [ + I32Const(0), + If(BlockType::NoResult), + Return, + End, + ]; + let instructions = Instructions::new( + EXPANSION + .iter() + .cycle() + .take(EXPANSION.len() * expansions) + .cloned() + .chain(sp_std::iter::once(End)) + .collect() + ); + contract_with_call_body::(FuncBody::new(Vec::new(), instructions)) } fn advance_block(num: ::BlockNumber) { @@ -109,161 +109,161 @@ fn advance_block(num: ::BlockNumber) { } benchmarks! { - _ { - } + _ { + } - // This extrinsic is pretty much constant as it is only a simple setter. - update_schedule { - let schedule = Schedule { - version: 1, - .. Default::default() - }; - }: _(RawOrigin::Root, schedule) + // This extrinsic is pretty much constant as it is only a simple setter. + update_schedule { + let schedule = Schedule { + version: 1, + .. Default::default() + }; + }: _(RawOrigin::Root, schedule) - // This constructs a contract that is maximal expensive to instrument. - // It creates a maximum number of metering blocks per byte. - put_code { - let n in 0 .. Contracts::::current_schedule().max_code_size; - let caller = create_funded_user::("caller", 0); - let (binary, hash) = expanded_contract::(n); - }: _(RawOrigin::Signed(caller), binary) + // This constructs a contract that is maximal expensive to instrument. + // It creates a maximum number of metering blocks per byte. + put_code { + let n in 0 .. Contracts::::current_schedule().max_code_size; + let caller = create_funded_user::("caller", 0); + let (binary, hash) = expanded_contract::(n); + }: _(RawOrigin::Signed(caller), binary) - // Instantiate uses a dummy contract constructor to measure the overhead of the instantiate. - // The size of the data has no influence on the costs of this extrinsic as long as the contract - // won't call `ext_input` in its constructor to copy the data to contract memory. - // The dummy contract used here does not do this. The costs for the data copy is billed as - // part of `ext_input`. - instantiate { - let data = vec![0u8; 128]; - let endowment = Config::::subsistence_threshold_uncached(); - let caller = create_funded_user::("caller", 0); - let (binary, hash) = load_module!("dummy"); - Contracts::::put_code(RawOrigin::Signed(caller.clone()).into(), binary.to_vec()) - .unwrap(); + // Instantiate uses a dummy contract constructor to measure the overhead of the instantiate. 
+ // The size of the data has no influence on the costs of this extrinsic as long as the contract + // won't call `ext_input` in its constructor to copy the data to contract memory. + // The dummy contract used here does not do this. The costs for the data copy is billed as + // part of `ext_input`. + instantiate { + let data = vec![0u8; 128]; + let endowment = Config::::subsistence_threshold_uncached(); + let caller = create_funded_user::("caller", 0); + let (binary, hash) = load_module!("dummy"); + Contracts::::put_code(RawOrigin::Signed(caller.clone()).into(), binary.to_vec()) + .unwrap(); - }: _( - RawOrigin::Signed(caller.clone()), - endowment, - Weight::max_value(), - hash, - data - ) - verify { - assert_eq!( - funding::() - endowment, - T::Currency::free_balance(&caller), - ) - } + }: _( + RawOrigin::Signed(caller.clone()), + endowment, + Weight::max_value(), + hash, + data + ) + verify { + assert_eq!( + funding::() - endowment, + T::Currency::free_balance(&caller), + ) + } - // We just call a dummy contract to measure to overhead of the call extrinsic. - // As for instantiate the size of the data does not influence the costs. - call { - let data = vec![0u8; 128]; - let endowment = Config::::subsistence_threshold_uncached(); - let value = T::Currency::minimum_balance() * 100.into(); - let caller = create_funded_user::("caller", 0); - let (binary, hash) = load_module!("dummy"); - let addr = T::DetermineContractAddress::contract_address_for(&hash, &[], &caller); - Contracts::::put_code(RawOrigin::Signed(caller.clone()).into(), binary.to_vec()) - .unwrap(); - Contracts::::instantiate( - RawOrigin::Signed(caller.clone()).into(), - endowment, - Weight::max_value(), - hash, - vec![], - ).unwrap(); - }: _( - RawOrigin::Signed(caller.clone()), - T::Lookup::unlookup(addr), - value, - Weight::max_value(), - data - ) - verify { - assert_eq!( - funding::() - endowment - value, - T::Currency::free_balance(&caller), - ) - } + // We just call a dummy contract to measure to overhead of the call extrinsic. + // As for instantiate the size of the data does not influence the costs. + call { + let data = vec![0u8; 128]; + let endowment = Config::::subsistence_threshold_uncached(); + let value = T::Currency::minimum_balance() * 100.into(); + let caller = create_funded_user::("caller", 0); + let (binary, hash) = load_module!("dummy"); + let addr = T::DetermineContractAddress::contract_address_for(&hash, &[], &caller); + Contracts::::put_code(RawOrigin::Signed(caller.clone()).into(), binary.to_vec()) + .unwrap(); + Contracts::::instantiate( + RawOrigin::Signed(caller.clone()).into(), + endowment, + Weight::max_value(), + hash, + vec![], + ).unwrap(); + }: _( + RawOrigin::Signed(caller.clone()), + T::Lookup::unlookup(addr), + value, + Weight::max_value(), + data + ) + verify { + assert_eq!( + funding::() - endowment - value, + T::Currency::free_balance(&caller), + ) + } - // We benchmark the costs for sucessfully evicting an empty contract. - // The actual costs are depending on how many storage items the evicted contract - // does have. However, those costs are not to be payed by the sender but - // will be distributed over multiple blocks using a scheduler. Otherwise there is - // no incentive to remove large contracts when the removal is more expensive than - // the reward for removing them. 
- claim_surcharge { - let endowment = Config::::subsistence_threshold_uncached(); - let value = T::Currency::minimum_balance() * 100.into(); - let caller = create_funded_user::("caller", 0); - let (binary, hash) = load_module!("dummy"); - let addr = T::DetermineContractAddress::contract_address_for(&hash, &[], &caller); - Contracts::::put_code(RawOrigin::Signed(caller.clone()).into(), binary.to_vec()) - .unwrap(); - Contracts::::instantiate( - RawOrigin::Signed(caller.clone()).into(), - endowment, - Weight::max_value(), - hash, - vec![], - ).unwrap(); + // We benchmark the costs for sucessfully evicting an empty contract. + // The actual costs are depending on how many storage items the evicted contract + // does have. However, those costs are not to be payed by the sender but + // will be distributed over multiple blocks using a scheduler. Otherwise there is + // no incentive to remove large contracts when the removal is more expensive than + // the reward for removing them. + claim_surcharge { + let endowment = Config::::subsistence_threshold_uncached(); + let value = T::Currency::minimum_balance() * 100.into(); + let caller = create_funded_user::("caller", 0); + let (binary, hash) = load_module!("dummy"); + let addr = T::DetermineContractAddress::contract_address_for(&hash, &[], &caller); + Contracts::::put_code(RawOrigin::Signed(caller.clone()).into(), binary.to_vec()) + .unwrap(); + Contracts::::instantiate( + RawOrigin::Signed(caller.clone()).into(), + endowment, + Weight::max_value(), + hash, + vec![], + ).unwrap(); - // instantiate should leave us with an alive contract - ContractInfoOf::::get(addr.clone()).unwrap().get_alive().unwrap(); + // instantiate should leave us with an alive contract + ContractInfoOf::::get(addr.clone()).unwrap().get_alive().unwrap(); - // generate some rent - advance_block::(::SignedClaimHandicap::get() + 1.into()); + // generate some rent + advance_block::(::SignedClaimHandicap::get() + 1.into()); - }: _(RawOrigin::Signed(caller.clone()), addr.clone(), None) - verify { - // the claim surcharge should have evicted the contract - ContractInfoOf::::get(addr.clone()).unwrap().get_tombstone().unwrap(); + }: _(RawOrigin::Signed(caller.clone()), addr.clone(), None) + verify { + // the claim surcharge should have evicted the contract + ContractInfoOf::::get(addr.clone()).unwrap().get_tombstone().unwrap(); - // the caller should get the reward for being a good snitch - assert_eq!( - funding::() - endowment + ::SurchargeReward::get(), - T::Currency::free_balance(&caller), - ); - } + // the caller should get the reward for being a good snitch + assert_eq!( + funding::() - endowment + ::SurchargeReward::get(), + T::Currency::free_balance(&caller), + ); + } } #[cfg(test)] mod tests { - use super::*; - use crate::tests::{ExtBuilder, Test}; - use frame_support::assert_ok; + use super::*; + use crate::tests::{ExtBuilder, Test}; + use frame_support::assert_ok; - #[test] - fn update_schedule() { + #[test] + fn update_schedule() { ExtBuilder::default().build().execute_with(|| { assert_ok!(test_benchmark_update_schedule::()); }); - } + } - #[test] - fn put_code() { + #[test] + fn put_code() { ExtBuilder::default().build().execute_with(|| { assert_ok!(test_benchmark_put_code::()); }); - } + } - #[test] - fn instantiate() { + #[test] + fn instantiate() { ExtBuilder::default().build().execute_with(|| { assert_ok!(test_benchmark_instantiate::()); }); - } + } - #[test] - fn call() { + #[test] + fn call() { ExtBuilder::default().build().execute_with(|| { 
assert_ok!(test_benchmark_call::()); }); - } + } - #[test] - fn claim_surcharge() { + #[test] + fn claim_surcharge() { ExtBuilder::default().build().execute_with(|| { assert_ok!(test_benchmark_claim_surcharge::()); }); diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index a957b6ef33a..e5015cbdc92 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -2861,9 +2861,9 @@ mod offchain_phragmen { let (offchain, offchain_state) = TestOffchainExt::new(); let (pool, pool_state) = TestTransactionPoolExt::new(); - let mut seed = [0_u8; 32]; - seed[0..4].copy_from_slice(&iterations.to_le_bytes()); - offchain_state.write().seed = seed; + let mut seed = [0_u8; 32]; + seed[0..4].copy_from_slice(&iterations.to_le_bytes()); + offchain_state.write().seed = seed; ext.register_extension(OffchainExt::new(offchain)); ext.register_extension(TransactionPoolExt::new(pool)); diff --git a/primitives/consensus/common/src/metrics.rs b/primitives/consensus/common/src/metrics.rs index 90df85a2948..f9326fac062 100644 --- a/primitives/consensus/common/src/metrics.rs +++ b/primitives/consensus/common/src/metrics.rs @@ -48,7 +48,7 @@ impl Metrics { &["result"], )?, registry, - )?, + )?, }) } diff --git a/primitives/core/src/sr25519.rs b/primitives/core/src/sr25519.rs index 9e9aaf53bbf..b015347e9aa 100644 --- a/primitives/core/src/sr25519.rs +++ b/primitives/core/src/sr25519.rs @@ -399,15 +399,15 @@ impl TraitPublic for Public { } impl From for CryptoTypePublicPair { - fn from(key: Public) -> Self { - (&key).into() - } + fn from(key: Public) -> Self { + (&key).into() + } } impl From<&Public> for CryptoTypePublicPair { - fn from(key: &Public) -> Self { - CryptoTypePublicPair(CRYPTO_ID, key.to_raw_vec()) - } + fn from(key: &Public) -> Self { + CryptoTypePublicPair(CRYPTO_ID, key.to_raw_vec()) + } } #[cfg(feature = "std")] diff --git a/primitives/runtime/src/offchain/storage_lock.rs b/primitives/runtime/src/offchain/storage_lock.rs index 9d4e671db6e..a3838f21fd1 100644 --- a/primitives/runtime/src/offchain/storage_lock.rs +++ b/primitives/runtime/src/offchain/storage_lock.rs @@ -560,34 +560,34 @@ mod tests { offchain::sleep_until(offchain::timestamp().add(Duration::from_millis(200))); // the lock is still active, extend it successfully - assert_eq!(guard.extend_lock().is_ok(), true); + assert_eq!(guard.extend_lock().is_ok(), true); - // sleep_until < deadline - offchain::sleep_until(offchain::timestamp().add(Duration::from_millis(200))); + // sleep_until < deadline + offchain::sleep_until(offchain::timestamp().add(Duration::from_millis(200))); - // the lock is still active, try_lock will fail - let mut lock = StorageLock::<'_, Time>::with_deadline(b"lock_4", lock_expiration); - let res = lock.try_lock(); - assert_eq!(res.is_ok(), false); + // the lock is still active, try_lock will fail + let mut lock = StorageLock::<'_, Time>::with_deadline(b"lock_4", lock_expiration); + let res = lock.try_lock(); + assert_eq!(res.is_ok(), false); - // sleep again untill sleep_until > deadline - offchain::sleep_until(offchain::timestamp().add(Duration::from_millis(200))); + // sleep again untill sleep_until > deadline + offchain::sleep_until(offchain::timestamp().add(Duration::from_millis(200))); - // the lock has expired, failed to extend it - assert_eq!(guard.extend_lock().is_ok(), false); - guard.forget(); + // the lock has expired, failed to extend it + assert_eq!(guard.extend_lock().is_ok(), false); + guard.forget(); - // try_lock will succeed - let mut lock = StorageLock::<'_, 
Time>::with_deadline(b"lock_4", lock_expiration); - let res = lock.try_lock(); - assert!(res.is_ok()); - let guard = res.unwrap(); + // try_lock will succeed + let mut lock = StorageLock::<'_, Time>::with_deadline(b"lock_4", lock_expiration); + let res = lock.try_lock(); + assert!(res.is_ok()); + let guard = res.unwrap(); - guard.forget(); - }); + guard.forget(); + }); - // lock must have been cleared at this point - let opt = state.read().persistent_storage.get(b"", b"lock_4"); - assert_eq!(opt.unwrap(), vec![132_u8, 3u8, 0, 0, 0, 0, 0, 0]); // 132 + 256 * 3 = 900 + // lock must have been cleared at this point + let opt = state.read().persistent_storage.get(b"", b"lock_4"); + assert_eq!(opt.unwrap(), vec![132_u8, 3u8, 0, 0, 0, 0, 0, 0]); // 132 + 256 * 3 = 900 } } diff --git a/utils/fork-tree/src/lib.rs b/utils/fork-tree/src/lib.rs index e11c1138f49..1d01c534176 100644 --- a/utils/fork-tree/src/lib.rs +++ b/utils/fork-tree/src/lib.rs @@ -132,7 +132,7 @@ impl ForkTree where let mut root = root .expect("find_node_index_where will return array with at least one index; \ - this results in at least one item in removed; qed"); + this results in at least one item in removed; qed"); let mut removed = old_roots; -- GitLab From 0330f629dd1aed9d7dbe44e9b89d995444aa99d0 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Mon, 3 Aug 2020 16:55:32 +0200 Subject: [PATCH 047/132] Add details to legacy requests (#6747) --- client/network/src/protocol.rs | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index d3a729cc8d5..630471414b2 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -48,7 +48,10 @@ use sp_runtime::traits::{ use sp_arithmetic::traits::SaturatedConversion; use message::{BlockAnnounce, Message}; use message::generic::{Message as GenericMessage, Roles}; -use prometheus_endpoint::{Registry, Gauge, Counter, GaugeVec, PrometheusError, Opts, register, U64}; +use prometheus_endpoint::{ + Registry, Gauge, Counter, CounterVec, GaugeVec, + PrometheusError, Opts, register, U64 +}; use sync::{ChainSync, SyncState}; use std::borrow::Cow; use std::collections::{BTreeMap, HashMap, HashSet, VecDeque, hash_map::Entry}; @@ -142,7 +145,7 @@ struct Metrics { finality_proofs: GaugeVec, justifications: GaugeVec, propagated_transactions: Counter, - legacy_requests_received: Counter, + legacy_requests_received: CounterVec, } impl Metrics { @@ -188,9 +191,12 @@ impl Metrics { "sync_propagated_transactions", "Number of transactions propagated to at least one peer", )?, r)?, - legacy_requests_received: register(Counter::new( - "sync_legacy_requests_received", - "Number of block/finality/light-client requests received on the legacy substream", + legacy_requests_received: register(CounterVec::new( + Opts::new( + "sync_legacy_requests_received", + "Number of block/finality/light-client requests received on the legacy substream", + ), + &["kind"] )?, r)?, }) } @@ -719,7 +725,7 @@ impl Protocol { fn on_block_request(&mut self, peer: PeerId, request: message::BlockRequest) { if let Some(metrics) = &self.metrics { - metrics.legacy_requests_received.inc(); + metrics.legacy_requests_received.with_label_values(&["block-request"]).inc(); } trace!(target: "sync", "BlockRequest {} from {}: from {:?} to {:?} max {:?} for {:?}", @@ -1395,7 +1401,7 @@ impl Protocol { ); if let Some(metrics) = &self.metrics { - metrics.legacy_requests_received.inc(); + 
metrics.legacy_requests_received.with_label_values(&["remote-call"]).inc(); } let proof = match self.context_data.chain.execution_proof( @@ -1519,7 +1525,7 @@ impl Protocol { request: message::RemoteReadRequest, ) { if let Some(metrics) = &self.metrics { - metrics.legacy_requests_received.inc(); + metrics.legacy_requests_received.with_label_values(&["remote-read"]).inc(); } if request.keys.is_empty() { @@ -1572,7 +1578,7 @@ impl Protocol { request: message::RemoteReadChildRequest, ) { if let Some(metrics) = &self.metrics { - metrics.legacy_requests_received.inc(); + metrics.legacy_requests_received.with_label_values(&["remote-child"]).inc(); } if request.keys.is_empty() { @@ -1632,7 +1638,7 @@ impl Protocol { request: message::RemoteHeaderRequest>, ) { if let Some(metrics) = &self.metrics { - metrics.legacy_requests_received.inc(); + metrics.legacy_requests_received.with_label_values(&["remote-header"]).inc(); } trace!(target: "sync", "Remote header proof request {} from {} ({})", @@ -1666,7 +1672,7 @@ impl Protocol { request: message::RemoteChangesRequest, ) { if let Some(metrics) = &self.metrics { - metrics.legacy_requests_received.inc(); + metrics.legacy_requests_received.with_label_values(&["remote-changes"]).inc(); } trace!(target: "sync", "Remote changes proof request {} from {} for key {} ({}..{})", @@ -1733,7 +1739,7 @@ impl Protocol { request: message::FinalityProofRequest, ) { if let Some(metrics) = &self.metrics { - metrics.legacy_requests_received.inc(); + metrics.legacy_requests_received.with_label_values(&["finality-proof"]).inc(); } trace!(target: "sync", "Finality proof request from {} for {}", who, request.block); -- GitLab From de8a0d5a2697b0c43700536b9854ccfa50ca93c8 Mon Sep 17 00:00:00 2001 From: Alex Siman Date: Mon, 3 Aug 2020 18:04:56 +0300 Subject: [PATCH 048/132] Add ss58 address for Subsocial (#6800) --- primitives/core/src/crypto.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 0cdbebde9f2..2e71e676b3e 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -446,6 +446,8 @@ ss58_address_format!( (18, "darwinia", "Darwinia Chain mainnet, standard account (*25519).") StafiAccount => (20, "stafi", "Stafi mainnet, standard account (*25519).") + SubsocialAccount => + (28, "subsocial", "Subsocial network, standard account (*25519).") RobonomicsAccount => (32, "robonomics", "Any Robonomics network standard account (*25519).") DataHighwayAccount => -- GitLab From 270a992bf299cd41b2ed4e211814d2a958e3bfc8 Mon Sep 17 00:00:00 2001 From: Shaopeng Wang Date: Tue, 4 Aug 2020 20:02:24 +1200 Subject: [PATCH 049/132] Add mutate_exists to StorageDoubleMap. 
(#6704) --- frame/support/src/lib.rs | 17 +++++++++++++++++ .../support/src/storage/generator/double_map.rs | 9 +++++++++ frame/support/src/storage/mod.rs | 7 +++++++ 3 files changed, 33 insertions(+) diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index bb8aacd1a48..f0ffdc90a74 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -629,6 +629,23 @@ mod tests { }); } + #[test] + fn double_map_mutate_exists_should_work() { + new_test_ext().execute_with(|| { + type DoubleMap = DataDM; + + let (key1, key2) = (11, 13); + + // mutated + DoubleMap::mutate_exists(key1, key2, |v| *v = Some(1)); + assert_eq!(DoubleMap::get(&key1, key2), 1); + + // removed if mutated to `None` + DoubleMap::mutate_exists(key1, key2, |v| *v = None); + assert!(!DoubleMap::contains_key(&key1, key2)); + }); + } + #[test] fn double_map_try_mutate_exists_should_work() { new_test_ext().execute_with(|| { diff --git a/frame/support/src/storage/generator/double_map.rs b/frame/support/src/storage/generator/double_map.rs index 8fbef16204f..3c82f4156a2 100644 --- a/frame/support/src/storage/generator/double_map.rs +++ b/frame/support/src/storage/generator/double_map.rs @@ -228,6 +228,15 @@ impl storage::StorageDoubleMap for G where Self::try_mutate(k1, k2, |v| Ok::(f(v))).expect("`Never` can not be constructed; qed") } + fn mutate_exists(k1: KArg1, k2: KArg2, f: F) -> R + where + KArg1: EncodeLike, + KArg2: EncodeLike, + F: FnOnce(&mut Option) -> R, + { + Self::try_mutate_exists(k1, k2, |v| Ok::(f(v))).expect("`Never` can not be constructed; qed") + } + fn try_mutate(k1: KArg1, k2: KArg2, f: F) -> Result where KArg1: EncodeLike, KArg2: EncodeLike, diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs index 4623f81859b..347fd814136 100644 --- a/frame/support/src/storage/mod.rs +++ b/frame/support/src/storage/mod.rs @@ -366,6 +366,13 @@ pub trait StorageDoubleMap { KArg2: EncodeLike, F: FnOnce(&mut Self::Query) -> Result; + /// Mutate the value under the given keys. Deletes the item if mutated to a `None`. + fn mutate_exists(k1: KArg1, k2: KArg2, f: F) -> R + where + KArg1: EncodeLike, + KArg2: EncodeLike, + F: FnOnce(&mut Option) -> R; + /// Mutate the item, only if an `Ok` value is returned. Deletes the item if mutated to a `None`. 
fn try_mutate_exists(k1: KArg1, k2: KArg2, f: F) -> Result where -- GitLab From 549050b7f1740c90855e777daf3f9700750ad7ff Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Tue, 4 Aug 2020 19:58:03 +0200 Subject: [PATCH 050/132] pallet-democracy use of weightinfo (#6783) * democracy use of weightinfo * fix some doc and benchs * todo generate from parity machine * factorize and add license * use final weights * add slightly more sensible default weight * refactor * rename benchmark to avoid confusion * just make remove_other_vote benchmark being the worst case of the extrinsic --- bin/node/runtime/src/lib.rs | 2 +- bin/node/runtime/src/weights/mod.rs | 1 + .../runtime/src/weights/pallet_democracy.rs | 155 ++++++++++++ frame/democracy/src/benchmarking.rs | 86 ++----- frame/democracy/src/default_weight.rs | 158 +++++++++++++ frame/democracy/src/lib.rs | 223 +++++------------- 6 files changed, 395 insertions(+), 230 deletions(-) create mode 100644 bin/node/runtime/src/weights/pallet_democracy.rs create mode 100644 frame/democracy/src/default_weight.rs diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index fff0dd3427f..acc1b072818 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -495,7 +495,7 @@ impl pallet_democracy::Trait for Runtime { type Scheduler = Scheduler; type PalletsOrigin = OriginCaller; type MaxVotes = MaxVotes; - type WeightInfo = (); + type WeightInfo = weights::pallet_democracy::WeightInfo; } parameter_types! { diff --git a/bin/node/runtime/src/weights/mod.rs b/bin/node/runtime/src/weights/mod.rs index 70e10d5342f..70bae879ce0 100644 --- a/bin/node/runtime/src/weights/mod.rs +++ b/bin/node/runtime/src/weights/mod.rs @@ -16,3 +16,4 @@ //! A list of the different weight modules for our runtime. pub mod pallet_balances; +pub mod pallet_democracy; diff --git a/bin/node/runtime/src/weights/pallet_democracy.rs b/bin/node/runtime/src/weights/pallet_democracy.rs new file mode 100644 index 00000000000..2c55a848061 --- /dev/null +++ b/bin/node/runtime/src/weights/pallet_democracy.rs @@ -0,0 +1,155 @@ +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Weights for the Democracy Pallet +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl pallet_democracy::WeightInfo for WeightInfo { + fn propose() -> Weight { + (49113000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn second(s: u32, ) -> Weight { + (42067000 as Weight) + .saturating_add((220000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn vote_new(r: u32, ) -> Weight { + (54159000 as Weight) + .saturating_add((252000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn vote_existing(r: u32, ) -> Weight { + (54145000 as Weight) + .saturating_add((262000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn emergency_cancel() -> Weight { + (31071000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn external_propose(v: u32, ) -> Weight { + (14282000 as Weight) + .saturating_add((109000 as Weight).saturating_mul(v as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn external_propose_majority() -> Weight { + (3478000 as Weight) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn external_propose_default() -> Weight { + (3442000 as Weight) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn fast_track() -> Weight { + (30820000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn veto_external(v: u32, ) -> Weight { + (30971000 as Weight) + .saturating_add((184000 as Weight).saturating_mul(v as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn cancel_referendum() -> Weight { + (20431000 as Weight) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn cancel_queued(r: u32, ) -> Weight { + (42438000 as Weight) + .saturating_add((3284000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn on_initialize_base(r: u32, ) -> Weight { + (70826000 as Weight) + .saturating_add((10716000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(6 as Weight)) + .saturating_add(DbWeight::get().reads((2 as Weight).saturating_mul(r as Weight))) + .saturating_add(DbWeight::get().writes(5 as Weight)) + } + fn delegate(r: u32, ) -> Weight { + (72046000 as Weight) + .saturating_add((7837000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) + .saturating_add(DbWeight::get().writes(4 as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) + } + fn undelegate(r: u32, ) -> Weight { + (41028000 as Weight) + .saturating_add((7810000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().reads((1 as Weight).saturating_mul(r 
as Weight))) + .saturating_add(DbWeight::get().writes(2 as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) + } + fn clear_public_proposals() -> Weight { + (3643000 as Weight) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn note_preimage(b: u32, ) -> Weight { + (46629000 as Weight) + .saturating_add((4000 as Weight).saturating_mul(b as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn note_imminent_preimage(b: u32, ) -> Weight { + (31147000 as Weight) + .saturating_add((3000 as Weight).saturating_mul(b as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn reap_preimage(b: u32, ) -> Weight { + (42848000 as Weight) + .saturating_add((3000 as Weight).saturating_mul(b as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn unlock_remove(r: u32, ) -> Weight { + (45333000 as Weight) + .saturating_add((171000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn unlock_set(r: u32, ) -> Weight { + (44424000 as Weight) + .saturating_add((291000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn remove_vote(r: u32, ) -> Weight { + (28250000 as Weight) + .saturating_add((283000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn remove_other_vote(r: u32, ) -> Weight { + (28250000 as Weight) + .saturating_add((283000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } +} diff --git a/frame/democracy/src/benchmarking.rs b/frame/democracy/src/benchmarking.rs index 421eb07e32c..1fa0988fbbd 100644 --- a/frame/democracy/src/benchmarking.rs +++ b/frame/democracy/src/benchmarking.rs @@ -31,7 +31,6 @@ use crate::Module as Democracy; const SEED: u32 = 0; const MAX_REFERENDUMS: u32 = 100; -const MAX_PROPOSALS: u32 = 100; const MAX_SECONDERS: u32 = 100; const MAX_BYTES: u32 = 16_384; @@ -101,21 +100,12 @@ benchmarks! { _ { } propose { - let p in 1 .. MAX_PROPOSALS; - - // Add p proposals - for i in 0 .. p { - add_proposal::(i)?; - } - - assert_eq!(Democracy::::public_props().len(), p as usize, "Proposals not created."); - let caller = funded_account::("caller", 0); - let proposal_hash: T::Hash = T::Hashing::hash_of(&p); + let proposal_hash: T::Hash = T::Hashing::hash_of(&0); let value = T::MinimumDeposit::get(); }: _(RawOrigin::Signed(caller), proposal_hash, value.into()) verify { - assert_eq!(Democracy::::public_props().len(), (p + 1) as usize, "Proposals not created."); + assert_eq!(Democracy::::public_props().len(), 1, "Proposals not created."); } second { @@ -206,18 +196,8 @@ benchmarks! { } emergency_cancel { - let r in 1 .. MAX_REFERENDUMS; let origin = T::CancellationOrigin::successful_origin(); - - // Create and cancel a bunch of referendums - for i in 0 .. 
r { - let ref_idx = add_referendum::(i)?; - let call = Call::::emergency_cancel(ref_idx); - call.dispatch_bypass_filter(origin.clone())?; - } - - // Lets now measure one more - let referendum_index = add_referendum::(r)?; + let referendum_index = add_referendum::(0)?; let call = Call::::emergency_cancel(referendum_index); assert!(Democracy::::referendum_status(referendum_index).is_ok()); }: { call.dispatch_bypass_filter(origin)? } @@ -228,11 +208,10 @@ benchmarks! { // Worst case scenario, we external propose a previously blacklisted proposal external_propose { - let p in 1 .. MAX_PROPOSALS; let v in 1 .. MAX_VETOERS as u32; let origin = T::ExternalOrigin::successful_origin(); - let proposal_hash = T::Hashing::hash_of(&p); + let proposal_hash = T::Hashing::hash_of(&0); // Add proposal to blacklist with block number 0 Blacklist::::insert( proposal_hash, @@ -247,10 +226,8 @@ benchmarks! { } external_propose_majority { - let p in 1 .. MAX_PROPOSALS; - let origin = T::ExternalMajorityOrigin::successful_origin(); - let proposal_hash = T::Hashing::hash_of(&p); + let proposal_hash = T::Hashing::hash_of(&0); let call = Call::::external_propose_majority(proposal_hash); }: { call.dispatch_bypass_filter(origin)? } verify { @@ -259,10 +236,8 @@ benchmarks! { } external_propose_default { - let p in 1 .. MAX_PROPOSALS; - let origin = T::ExternalDefaultOrigin::successful_origin(); - let proposal_hash = T::Hashing::hash_of(&p); + let proposal_hash = T::Hashing::hash_of(&0); let call = Call::::external_propose_default(proposal_hash); }: { call.dispatch_bypass_filter(origin)? } verify { @@ -271,10 +246,8 @@ benchmarks! { } fast_track { - let p in 1 .. MAX_PROPOSALS; - let origin_propose = T::ExternalDefaultOrigin::successful_origin(); - let proposal_hash: T::Hash = T::Hashing::hash_of(&p); + let proposal_hash: T::Hash = T::Hashing::hash_of(&0); Democracy::::external_propose_default(origin_propose, proposal_hash.clone())?; // NOTE: Instant origin may invoke a little bit more logic, but may not always succeed. @@ -315,24 +288,21 @@ benchmarks! { } cancel_referendum { - let r in 0 .. MAX_REFERENDUMS; - // Should have no effect on the execution time. - for i in 0..r { - add_referendum::(i)?; - } - let referendum_index = add_referendum::(r)?; + let referendum_index = add_referendum::(0)?; }: _(RawOrigin::Root, referendum_index) cancel_queued { let r in 1 .. MAX_REFERENDUMS; - // Should have no effect on the execution time. + for i in 0..r { - add_referendum::(i)?; + add_referendum::(i)?; // This add one element in the scheduler } + let referendum_index = add_referendum::(r)?; }: _(RawOrigin::Root, referendum_index) // Note that we have a separate benchmark for `launch_next` + #[extra] on_initialize_external { let r in 0 .. MAX_REFERENDUMS; @@ -371,6 +341,7 @@ benchmarks! { } } + #[extra] on_initialize_public { let r in 1 .. MAX_REFERENDUMS; @@ -401,7 +372,8 @@ benchmarks! { } } - on_initialize_no_launch_no_maturing { + // No launch no maturing referenda. + on_initialize_base { let r in 1 .. MAX_REFERENDUMS; for i in 0..r { @@ -526,11 +498,7 @@ benchmarks! { } clear_public_proposals { - let p in 0 .. MAX_PROPOSALS; - - for i in 0 .. p { - add_proposal::(i)?; - } + add_proposal::(0)?; }: _(RawOrigin::Root) @@ -687,41 +655,36 @@ benchmarks! { assert_eq!(votes.len(), (r - 1) as usize, "Vote was not removed"); } + // Worst case is when target == caller and referendum is ongoing remove_other_vote { let r in 1 .. 
MAX_REFERENDUMS; - let other = funded_account::("other", r); + let caller = funded_account::("caller", r); let account_vote = account_vote::(100.into()); for i in 0 .. r { let ref_idx = add_referendum::(i)?; - Democracy::::vote(RawOrigin::Signed(other.clone()).into(), ref_idx, account_vote.clone())?; + Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_idx, account_vote.clone())?; } - let votes = match VotingOf::::get(&other) { + let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, _ => return Err("Votes are not direct"), }; assert_eq!(votes.len(), r as usize, "Votes not created"); let referendum_index = r - 1; - ReferendumInfoOf::::insert( - referendum_index, - ReferendumInfo::Finished { end: T::BlockNumber::zero(), approved: true } - ); - let caller = funded_account::("caller", 0); - - System::::set_block_number(T::EnactmentPeriod::get() * 10u32.into()); - }: _(RawOrigin::Signed(caller), other.clone(), referendum_index) + }: _(RawOrigin::Signed(caller.clone()), caller.clone(), referendum_index) verify { - let votes = match VotingOf::::get(&other) { + let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, _ => return Err("Votes are not direct"), }; assert_eq!(votes.len(), (r - 1) as usize, "Vote was not removed"); } + #[extra] enact_proposal_execute { // Num of bytes in encoded proposal let b in 0 .. MAX_BYTES; @@ -743,6 +706,7 @@ benchmarks! { assert_last_event::(RawEvent::Executed(0, false).into()); } + #[extra] enact_proposal_slash { // Num of bytes in encoded proposal let b in 0 .. MAX_BYTES; @@ -788,7 +752,7 @@ mod tests { assert_ok!(test_benchmark_cancel_queued::()); assert_ok!(test_benchmark_on_initialize_external::()); assert_ok!(test_benchmark_on_initialize_public::()); - assert_ok!(test_benchmark_on_initialize_no_launch_no_maturing::()); + assert_ok!(test_benchmark_on_initialize_base::()); assert_ok!(test_benchmark_delegate::()); assert_ok!(test_benchmark_undelegate::()); assert_ok!(test_benchmark_clear_public_proposals::()); diff --git a/frame/democracy/src/default_weight.rs b/frame/democracy/src/default_weight.rs new file mode 100644 index 00000000000..2c74a4af202 --- /dev/null +++ b/frame/democracy/src/default_weight.rs @@ -0,0 +1,158 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Default weights for the Democracy Pallet +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +/// Default implementation of weight, this is just from an example return, values may change +/// depending on the runtime. This is not meant to be used in production. 
+impl crate::WeightInfo for () { + fn propose() -> Weight { + (49113000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn second(s: u32, ) -> Weight { + (42067000 as Weight) + .saturating_add((220000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn vote_new(r: u32, ) -> Weight { + (54159000 as Weight) + .saturating_add((252000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn vote_existing(r: u32, ) -> Weight { + (54145000 as Weight) + .saturating_add((262000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn emergency_cancel() -> Weight { + (31071000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn external_propose(v: u32, ) -> Weight { + (14282000 as Weight) + .saturating_add((109000 as Weight).saturating_mul(v as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn external_propose_majority() -> Weight { + (3478000 as Weight) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn external_propose_default() -> Weight { + (3442000 as Weight) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn fast_track() -> Weight { + (30820000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn veto_external(v: u32, ) -> Weight { + (30971000 as Weight) + .saturating_add((184000 as Weight).saturating_mul(v as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn cancel_referendum() -> Weight { + (20431000 as Weight) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn cancel_queued(r: u32, ) -> Weight { + (42438000 as Weight) + .saturating_add((3284000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn on_initialize_base(r: u32, ) -> Weight { + (70826000 as Weight) + .saturating_add((10716000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(6 as Weight)) + .saturating_add(DbWeight::get().reads((2 as Weight).saturating_mul(r as Weight))) + .saturating_add(DbWeight::get().writes(5 as Weight)) + } + fn delegate(r: u32, ) -> Weight { + (72046000 as Weight) + .saturating_add((7837000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) + .saturating_add(DbWeight::get().writes(4 as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) + } + fn undelegate(r: u32, ) -> Weight { + (41028000 as Weight) + .saturating_add((7810000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) + .saturating_add(DbWeight::get().writes(2 as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) + } + fn clear_public_proposals() -> Weight { + 
(3643000 as Weight) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn note_preimage(b: u32, ) -> Weight { + (46629000 as Weight) + .saturating_add((4000 as Weight).saturating_mul(b as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn note_imminent_preimage(b: u32, ) -> Weight { + (31147000 as Weight) + .saturating_add((3000 as Weight).saturating_mul(b as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn reap_preimage(b: u32, ) -> Weight { + (42848000 as Weight) + .saturating_add((3000 as Weight).saturating_mul(b as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn unlock_remove(r: u32, ) -> Weight { + (45333000 as Weight) + .saturating_add((171000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn unlock_set(r: u32, ) -> Weight { + (44424000 as Weight) + .saturating_add((291000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn remove_vote(r: u32, ) -> Weight { + (28250000 as Weight) + .saturating_add((283000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn remove_other_vote(r: u32, ) -> Weight { + (28250000 as Weight) + .saturating_add((283000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } +} diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index f546b87dc60..f3a5960eb2f 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -173,6 +173,7 @@ mod vote_threshold; mod vote; mod conviction; mod types; +mod default_weight; pub use vote_threshold::{Approved, VoteThreshold}; pub use vote::{Vote, AccountVote, Voting}; pub use conviction::Conviction; @@ -189,7 +190,7 @@ const DEMOCRACY_ID: LockIdentifier = *b"democrac"; /// The maximum number of vetoers on a single proposal used to compute Weight. /// /// NOTE: This is not enforced by any logic. -pub const MAX_VETOERS: Weight = 100; +pub const MAX_VETOERS: u32 = 100; /// A proposal index. 
pub type PropIndex = u32; @@ -202,24 +203,22 @@ type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; pub trait WeightInfo { - fn propose(p: u32, ) -> Weight; + fn propose() -> Weight; fn second(s: u32, ) -> Weight; fn vote_new(r: u32, ) -> Weight; fn vote_existing(r: u32, ) -> Weight; - fn emergency_cancel(r: u32, ) -> Weight; - fn external_propose(p: u32, v: u32, ) -> Weight; - fn external_propose_majority(p: u32, ) -> Weight; - fn external_propose_default(p: u32, ) -> Weight; - fn fast_track(p: u32, ) -> Weight; + fn emergency_cancel() -> Weight; + fn external_propose(v: u32, ) -> Weight; + fn external_propose_majority() -> Weight; + fn external_propose_default() -> Weight; + fn fast_track() -> Weight; fn veto_external(v: u32, ) -> Weight; - fn cancel_referendum(r: u32, ) -> Weight; + fn cancel_referendum() -> Weight; fn cancel_queued(r: u32, ) -> Weight; - fn on_initialize_external(r: u32, ) -> Weight; - fn on_initialize_public(r: u32, ) -> Weight; - fn on_initialize_no_launch_no_maturing(r: u32, ) -> Weight; + fn on_initialize_base(r: u32, ) -> Weight; fn delegate(r: u32, ) -> Weight; fn undelegate(r: u32, ) -> Weight; - fn clear_public_proposals(p: u32, ) -> Weight; + fn clear_public_proposals() -> Weight; fn note_preimage(b: u32, ) -> Weight; fn note_imminent_preimage(b: u32, ) -> Weight; fn reap_preimage(b: u32, ) -> Weight; @@ -227,38 +226,6 @@ pub trait WeightInfo { fn unlock_set(r: u32, ) -> Weight; fn remove_vote(r: u32, ) -> Weight; fn remove_other_vote(r: u32, ) -> Weight; - fn enact_proposal_execute(b: u32, ) -> Weight; - fn enact_proposal_slash(b: u32, ) -> Weight; -} - -impl WeightInfo for () { - fn propose(_p: u32, ) -> Weight { 1_000_000_000 } - fn second(_s: u32, ) -> Weight { 1_000_000_000 } - fn vote_new(_r: u32, ) -> Weight { 1_000_000_000 } - fn vote_existing(_r: u32, ) -> Weight { 1_000_000_000 } - fn emergency_cancel(_r: u32, ) -> Weight { 1_000_000_000 } - fn external_propose(_p: u32, _v: u32, ) -> Weight { 1_000_000_000 } - fn external_propose_majority(_p: u32, ) -> Weight { 1_000_000_000 } - fn external_propose_default(_p: u32, ) -> Weight { 1_000_000_000 } - fn fast_track(_p: u32, ) -> Weight { 1_000_000_000 } - fn veto_external(_v: u32, ) -> Weight { 1_000_000_000 } - fn cancel_referendum(_r: u32, ) -> Weight { 1_000_000_000 } - fn cancel_queued(_r: u32, ) -> Weight { 1_000_000_000 } - fn on_initialize_external(_r: u32, ) -> Weight { 1_000_000_000 } - fn on_initialize_public(_r: u32, ) -> Weight { 1_000_000_000 } - fn on_initialize_no_launch_no_maturing(_r: u32, ) -> Weight { 1_000_000_000 } - fn delegate(_r: u32, ) -> Weight { 1_000_000_000 } - fn undelegate(_r: u32, ) -> Weight { 1_000_000_000 } - fn clear_public_proposals(_p: u32, ) -> Weight { 1_000_000_000 } - fn note_preimage(_b: u32, ) -> Weight { 1_000_000_000 } - fn note_imminent_preimage(_b: u32, ) -> Weight { 1_000_000_000 } - fn reap_preimage(_b: u32, ) -> Weight { 1_000_000_000 } - fn unlock_remove(_r: u32, ) -> Weight { 1_000_000_000 } - fn unlock_set(_r: u32, ) -> Weight { 1_000_000_000 } - fn remove_vote(_r: u32, ) -> Weight { 1_000_000_000 } - fn remove_other_vote(_r: u32, ) -> Weight { 1_000_000_000 } - fn enact_proposal_execute(_b: u32, ) -> Weight { 1_000_000_000 } - fn enact_proposal_slash(_b: u32, ) -> Weight { 1_000_000_000 } } pub trait Trait: frame_system::Trait + Sized { @@ -578,63 +545,6 @@ decl_error! { } } -/// Functions for calcuating the weight of some dispatchables. 
-mod weight_for { - use frame_support::{traits::Get, weights::Weight}; - use super::Trait; - - /// Calculate the weight for `delegate`. - /// - Db reads: 2*`VotingOf`, `balances locks` - /// - Db writes: 2*`VotingOf`, `balances locks` - /// - Db reads per votes: `ReferendumInfoOf` - /// - Db writes per votes: `ReferendumInfoOf` - /// - Base Weight: 65.78 + 8.229 * R µs - // NOTE: weight must cover an incorrect voting of origin with 100 votes. - pub fn delegate(votes: Weight) -> Weight { - T::DbWeight::get().reads_writes(votes.saturating_add(3), votes.saturating_add(3)) - .saturating_add(66_000_000) - .saturating_add(votes.saturating_mul(8_100_000)) - } - - /// Calculate the weight for `undelegate`. - /// - Db reads: 2*`VotingOf` - /// - Db writes: 2*`VotingOf` - /// - Db reads per votes: `ReferendumInfoOf` - /// - Db writes per votes: `ReferendumInfoOf` - /// - Base Weight: 33.29 + 8.104 * R µs - pub fn undelegate(votes: Weight) -> Weight { - T::DbWeight::get().reads_writes(votes.saturating_add(2), votes.saturating_add(2)) - .saturating_add(33_000_000) - .saturating_add(votes.saturating_mul(8_000_000)) - } - - /// Calculate the weight for `note_preimage`. - /// # - /// - Complexity: `O(E)` with E size of `encoded_proposal` (protected by a required deposit). - /// - Db reads: `Preimages` - /// - Db writes: `Preimages` - /// - Base Weight: 37.93 + .004 * b µs - /// # - pub fn note_preimage(encoded_proposal_len: Weight) -> Weight { - T::DbWeight::get().reads_writes(1, 1) - .saturating_add(38_000_000) - .saturating_add(encoded_proposal_len.saturating_mul(4_000)) - } - - /// Calculate the weight for `note_imminent_preimage`. - /// # - /// - Complexity: `O(E)` with E size of `encoded_proposal` (protected by a required deposit). - /// - Db reads: `Preimages` - /// - Db writes: `Preimages` - /// - Base Weight: 28.04 + .003 * b µs - /// # - pub fn note_imminent_preimage(encoded_proposal_len: Weight) -> Weight { - T::DbWeight::get().reads_writes(1, 1) - .saturating_add(28_000_000) - .saturating_add(encoded_proposal_len.saturating_mul(3_000)) - } -} - decl_module! { pub struct Module for enum Call where origin: T::Origin { type Error = Error; @@ -683,10 +593,8 @@ decl_module! { /// - Complexity: `O(1)` /// - Db reads: `PublicPropCount`, `PublicProps` /// - Db writes: `PublicPropCount`, `PublicProps`, `DepositOf` - /// ------------------- - /// Base Weight: 42.58 + .127 * P µs with `P` the number of proposals `PublicProps` /// # - #[weight = 50_000_000 + T::DbWeight::get().reads_writes(2, 3)] + #[weight = T::WeightInfo::propose()] fn propose(origin, proposal_hash: T::Hash, #[compact] value: BalanceOf) { let who = ensure_signed(origin)?; ensure!(value >= T::MinimumDeposit::get(), Error::::ValueLow); @@ -715,13 +623,8 @@ decl_module! { /// - Complexity: `O(S)` where S is the number of seconds a proposal already has. /// - Db reads: `DepositOf` /// - Db writes: `DepositOf` - /// --------- - /// - Base Weight: 22.28 + .229 * S µs /// # - #[weight = 23_000_000 - .saturating_add(230_000.saturating_mul(Weight::from(*seconds_upper_bound))) - .saturating_add(T::DbWeight::get().reads_writes(1, 1)) - ] + #[weight = T::WeightInfo::second(*seconds_upper_bound)] fn second(origin, #[compact] proposal: PropIndex, #[compact] seconds_upper_bound: u32) { let who = ensure_signed(origin)?; @@ -748,12 +651,9 @@ decl_module! { /// weight is charged as if maximum votes. 
/// - Db reads: `ReferendumInfoOf`, `VotingOf`, `balances locks` /// - Db writes: `ReferendumInfoOf`, `VotingOf`, `balances locks` - /// -------------------- - /// - Base Weight: - /// - Vote New: 49.24 + .333 * R µs - /// - Vote Existing: 49.94 + .343 * R µs /// # - #[weight = 50_000_000 + 350_000 * Weight::from(T::MaxVotes::get()) + T::DbWeight::get().reads_writes(3, 3)] + #[weight = T::WeightInfo::vote_new(T::MaxVotes::get()) + .max(T::WeightInfo::vote_existing(T::MaxVotes::get()))] fn vote(origin, #[compact] ref_index: ReferendumIndex, vote: AccountVote>, @@ -773,10 +673,8 @@ decl_module! { /// - Complexity: `O(1)`. /// - Db reads: `ReferendumInfoOf`, `Cancellations` /// - Db writes: `ReferendumInfoOf`, `Cancellations` - /// ------------- - /// - Base Weight: 34.25 µs /// # - #[weight = (35_000_000 + T::DbWeight::get().reads_writes(2, 2), DispatchClass::Operational)] + #[weight = (T::WeightInfo::emergency_cancel(), DispatchClass::Operational)] fn emergency_cancel(origin, ref_index: ReferendumIndex) { T::CancellationOrigin::ensure_origin(origin)?; @@ -800,9 +698,8 @@ decl_module! { /// Decoding vec of length V. Charged as maximum /// - Db reads: `NextExternal`, `Blacklist` /// - Db writes: `NextExternal` - /// - Base Weight: 13.8 + .106 * V µs /// # - #[weight = 15_000_000 + 110_000 * MAX_VETOERS + T::DbWeight::get().reads_writes(2, 1)] + #[weight = T::WeightInfo::external_propose(MAX_VETOERS)] fn external_propose(origin, proposal_hash: T::Hash) { T::ExternalOrigin::ensure_origin(origin)?; ensure!(!>::exists(), Error::::DuplicateProposal); @@ -828,9 +725,8 @@ decl_module! { /// # /// - Complexity: `O(1)` /// - Db write: `NextExternal` - /// - Base Weight: 3.065 µs /// # - #[weight = 3_100_000 + T::DbWeight::get().writes(1)] + #[weight = T::WeightInfo::external_propose_majority()] fn external_propose_majority(origin, proposal_hash: T::Hash) { T::ExternalMajorityOrigin::ensure_origin(origin)?; >::put((proposal_hash, VoteThreshold::SimpleMajority)); @@ -849,9 +745,8 @@ decl_module! { /// # /// - Complexity: `O(1)` /// - Db write: `NextExternal` - /// - Base Weight: 3.087 µs /// # - #[weight = 3_100_000 + T::DbWeight::get().writes(1)] + #[weight = T::WeightInfo::external_propose_default()] fn external_propose_default(origin, proposal_hash: T::Hash) { T::ExternalDefaultOrigin::ensure_origin(origin)?; >::put((proposal_hash, VoteThreshold::SuperMajorityAgainst)); @@ -877,7 +772,7 @@ decl_module! { /// - Db writes: `NextExternal`, `ReferendumCount`, `ReferendumInfoOf` /// - Base Weight: 30.1 µs /// # - #[weight = 30_000_000 + T::DbWeight::get().reads_writes(2, 3)] + #[weight = T::WeightInfo::fast_track()] fn fast_track(origin, proposal_hash: T::Hash, voting_period: T::BlockNumber, @@ -926,9 +821,8 @@ decl_module! { /// Performs a binary search on `existing_vetoers` which should not be very large. /// - Db reads: `NextExternal`, `Blacklist` /// - Db writes: `NextExternal`, `Blacklist` - /// - Base Weight: 29.87 + .188 * V µs /// # - #[weight = 30_000_000 + 180_000 * MAX_VETOERS + T::DbWeight::get().reads_writes(2, 2)] + #[weight = T::WeightInfo::veto_external(MAX_VETOERS)] fn veto_external(origin, proposal_hash: T::Hash) { let who = T::VetoOrigin::ensure_origin(origin)?; @@ -961,9 +855,8 @@ decl_module! { /// # /// - Complexity: `O(1)`. 
/// - Db writes: `ReferendumInfoOf` - /// - Base Weight: 21.57 µs /// # - #[weight = (22_000_000 + T::DbWeight::get().writes(1), DispatchClass::Operational)] + #[weight = T::WeightInfo::cancel_referendum()] fn cancel_referendum(origin, #[compact] ref_index: ReferendumIndex) { ensure_root(origin)?; Self::internal_cancel_referendum(ref_index); @@ -979,9 +872,8 @@ decl_module! { /// - `O(D)` where `D` is the items in the dispatch queue. Weighted as `D = 10`. /// - Db reads: `scheduler lookup`, scheduler agenda` /// - Db writes: `scheduler lookup`, scheduler agenda` - /// - Base Weight: 36.78 + 3.277 * D µs /// # - #[weight = (68_000_000 + T::DbWeight::get().reads_writes(2, 2), DispatchClass::Operational)] + #[weight = (T::WeightInfo::cancel_queued(10), DispatchClass::Operational)] fn cancel_queued(origin, which: ReferendumIndex) { ensure_root(origin)?; T::Scheduler::cancel_named((DEMOCRACY_ID, which).encode()) @@ -1017,14 +909,14 @@ decl_module! { /// # /// - Complexity: `O(R)` where R is the number of referendums the voter delegating to has /// voted on. Weight is charged as if maximum votes. - /// - Db reads: 2*`VotingOf`, `balances locks` - /// - Db writes: 2*`VotingOf`, `balances locks` + /// - Db reads: 3*`VotingOf`, `origin account locks` + /// - Db writes: 3*`VotingOf`, `origin account locks` /// - Db reads per votes: `ReferendumInfoOf` /// - Db writes per votes: `ReferendumInfoOf` - /// - Base Weight: 65.78 + 8.229 * R µs - // NOTE: weight must cover an incorrect voting of origin with 100 votes. + // NOTE: weight must cover an incorrect voting of origin with max votes, this is ensure + // because a valid delegation cover decoding a direct voting with max votes. /// # - #[weight = weight_for::delegate::(T::MaxVotes::get().into())] + #[weight = T::WeightInfo::delegate(T::MaxVotes::get())] pub fn delegate( origin, to: T::AccountId, @@ -1034,7 +926,7 @@ decl_module! { let who = ensure_signed(origin)?; let votes = Self::try_delegate(who, to, conviction, balance)?; - Ok(Some(weight_for::delegate::(votes.into())).into()) + Ok(Some(T::WeightInfo::delegate(votes)).into()) } /// Undelegate the voting power of the sending account. @@ -1054,14 +946,14 @@ decl_module! { /// - Db writes: 2*`VotingOf` /// - Db reads per votes: `ReferendumInfoOf` /// - Db writes per votes: `ReferendumInfoOf` - /// - Base Weight: 33.29 + 8.104 * R µs - // NOTE: weight must cover an incorrect voting of origin with 100 votes. + // NOTE: weight must cover an incorrect voting of origin with max votes, this is ensure + // because a valid delegation cover decoding a direct voting with max votes. /// # - #[weight = weight_for::undelegate::(T::MaxVotes::get().into())] + #[weight = T::WeightInfo::undelegate(T::MaxVotes::get().into())] fn undelegate(origin) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; let votes = Self::try_undelegate(who)?; - Ok(Some(weight_for::undelegate::(votes.into())).into()) + Ok(Some(T::WeightInfo::undelegate(votes)).into()) } /// Clears all public proposals. @@ -1071,9 +963,8 @@ decl_module! { /// # /// - `O(1)`. /// - Db writes: `PublicProps` - /// - Base Weight: 2.505 µs /// # - #[weight = 2_500_000 + T::DbWeight::get().writes(1)] + #[weight = T::WeightInfo::clear_public_proposals()] fn clear_public_proposals(origin) { ensure_root(origin)?; >::kill(); @@ -1089,16 +980,18 @@ decl_module! { /// Emits `PreimageNoted`. /// /// # - /// see `weight_for::note_preimage` + /// - Complexity: `O(E)` with E size of `encoded_proposal` (protected by a required deposit). 
+ /// - Db reads: `Preimages` + /// - Db writes: `Preimages` /// # - #[weight = weight_for::note_preimage::((encoded_proposal.len() as u32).into())] + #[weight = T::WeightInfo::note_preimage(encoded_proposal.len() as u32)] fn note_preimage(origin, encoded_proposal: Vec) { Self::note_preimage_inner(ensure_signed(origin)?, encoded_proposal)?; } /// Same as `note_preimage` but origin is `OperationalPreimageOrigin`. #[weight = ( - weight_for::note_preimage::((encoded_proposal.len() as u32).into()), + T::WeightInfo::note_preimage(encoded_proposal.len() as u32), DispatchClass::Operational, )] fn note_preimage_operational(origin, encoded_proposal: Vec) { @@ -1116,16 +1009,18 @@ decl_module! { /// Emits `PreimageNoted`. /// /// # - /// see `weight_for::note_preimage` + /// - Complexity: `O(E)` with E size of `encoded_proposal` (protected by a required deposit). + /// - Db reads: `Preimages` + /// - Db writes: `Preimages` /// # - #[weight = weight_for::note_imminent_preimage::((encoded_proposal.len() as u32).into())] + #[weight = T::WeightInfo::note_imminent_preimage(encoded_proposal.len() as u32)] fn note_imminent_preimage(origin, encoded_proposal: Vec) { Self::note_imminent_preimage_inner(ensure_signed(origin)?, encoded_proposal)?; } /// Same as `note_imminent_preimage` but origin is `OperationalPreimageOrigin`. #[weight = ( - weight_for::note_imminent_preimage::((encoded_proposal.len() as u32).into()), + T::WeightInfo::note_imminent_preimage(encoded_proposal.len() as u32), DispatchClass::Operational, )] fn note_imminent_preimage_operational(origin, encoded_proposal: Vec) { @@ -1149,12 +1044,10 @@ decl_module! { /// /// # /// - Complexity: `O(D)` where D is length of proposal. - /// - Db reads: `Preimages` - /// - Db writes: `Preimages` - /// - Base Weight: 39.31 + .003 * b µs + /// - Db reads: `Preimages`, provider account data + /// - Db writes: `Preimages` provider account data /// # - #[weight = (39_000_000 + T::DbWeight::get().reads_writes(1, 1)) - .saturating_add(3_000.saturating_mul(Weight::from(*proposal_len_upper_bound)))] + #[weight = T::WeightInfo::reap_preimage(*proposal_len_upper_bound)] fn reap_preimage(origin, proposal_hash: T::Hash, #[compact] proposal_len_upper_bound: u32) { let who = ensure_signed(origin)?; @@ -1191,12 +1084,9 @@ decl_module! { /// - Complexity `O(R)` with R number of vote of target. /// - Db reads: `VotingOf`, `balances locks`, `target account` /// - Db writes: `VotingOf`, `balances locks`, `target account` - /// - Base Weight: - /// - Unlock Remove: 42.96 + .048 * R - /// - Unlock Set: 37.63 + .327 * R /// # - #[weight = 43_000_000 + 330_000 * Weight::from(T::MaxVotes::get()) - + T::DbWeight::get().reads_writes(3, 3)] + #[weight = T::WeightInfo::unlock_set(T::MaxVotes::get()) + .max(T::WeightInfo::unlock_remove(T::MaxVotes::get()))] fn unlock(origin, target: T::AccountId) { ensure_signed(origin)?; Self::update_lock(&target); @@ -1232,9 +1122,8 @@ decl_module! { /// Weight is calculated for the maximum number of vote. /// - Db reads: `ReferendumInfoOf`, `VotingOf` /// - Db writes: `ReferendumInfoOf`, `VotingOf` - /// - Base Weight: 21.03 + .359 * R /// # - #[weight = 21_000_000 + 360_000 * Weight::from(T::MaxVotes::get()) + T::DbWeight::get().reads_writes(2, 2)] + #[weight = T::WeightInfo::remove_vote(T::MaxVotes::get())] fn remove_vote(origin, index: ReferendumIndex) -> DispatchResult { let who = ensure_signed(origin)?; Self::try_remove_vote(&who, index, UnvoteScope::Any) @@ -1258,9 +1147,8 @@ decl_module! 
{ /// Weight is calculated for the maximum number of vote. /// - Db reads: `ReferendumInfoOf`, `VotingOf` /// - Db writes: `ReferendumInfoOf`, `VotingOf` - /// - Base Weight: 19.15 + .372 * R /// # - #[weight = 19_000_000 + 370_000 * Weight::from(T::MaxVotes::get()) + T::DbWeight::get().reads_writes(2, 2)] + #[weight = T::WeightInfo::remove_other_vote(T::MaxVotes::get())] fn remove_other_vote(origin, target: T::AccountId, index: ReferendumIndex) -> DispatchResult { let who = ensure_signed(origin)?; let scope = if target == who { UnvoteScope::Any } else { UnvoteScope::OnlyExpired }; @@ -1716,10 +1604,9 @@ impl Module { /// `ReferendumCount`, `LowestUnbaked` /// - Db writes: `PublicProps`, `account`, `ReferendumCount`, `DepositOf`, `ReferendumInfoOf` /// - Db reads per R: `DepositOf`, `ReferendumInfoOf` - /// - Base Weight: 58.58 + 10.9 * R µs /// # fn begin_block(now: T::BlockNumber) -> Result { - let mut weight = 60_000_000 + T::DbWeight::get().reads_writes(6, 5); + let mut weight = 0; // pick out another public referendum if it's time. if (now % T::LaunchPeriod::get()).is_zero() { @@ -1729,11 +1616,11 @@ impl Module { weight = T::MaximumBlockWeight::get(); } - // tally up votes for any expiring referenda. let next = Self::lowest_unbaked(); let last = Self::referendum_count(); - let r = Weight::from(last.saturating_sub(next)); - weight += 11_000_000 * r + T::DbWeight::get().reads(2 * r); + let r = last.saturating_sub(next); + weight = weight.saturating_add(T::WeightInfo::on_initialize_base(r)); + // tally up votes for any expiring referenda. for (index, info) in Self::maturing_referenda_at_inner(now, next..last).into_iter() { let approved = Self::bake_referendum(now, index, info)?; ReferendumInfoOf::::insert(index, ReferendumInfo::Finished { end: now, approved }); -- GitLab From f71a5725cb7bc5c474eb20c32519e514f18c9872 Mon Sep 17 00:00:00 2001 From: ddorgan Date: Wed, 5 Aug 2020 10:39:13 +0200 Subject: [PATCH 051/132] Use DNS hostnames for flaming fir bootnodes (#6807) * Use dns hostnames for flaming fir bootnodes * Remove newline --- bin/node/cli/res/flaming-fir.json | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/bin/node/cli/res/flaming-fir.json b/bin/node/cli/res/flaming-fir.json index 376c6a0e6e7..e2ecac2b448 100644 --- a/bin/node/cli/res/flaming-fir.json +++ b/bin/node/cli/res/flaming-fir.json @@ -3,14 +3,14 @@ "id": "flamingfir8", "chainType": "Live", "bootNodes": [ - "/ip4/35.246.224.91/tcp/30333/p2p/12D3KooWLK2gMLhWsYJzjW3q35zAs9FDDVqfqVfVuskiGZGRSMvR", - "/ip4/35.246.224.91/tcp/30334/ws/p2p/12D3KooWLK2gMLhWsYJzjW3q35zAs9FDDVqfqVfVuskiGZGRSMvR", - "/ip4/35.246.210.11/tcp/30333/p2p/12D3KooWHyUSQkoL1WtnhLUYHuKbowZEZW1NNJe7TePKYZf9ucBY", - "/ip4/35.246.210.11/tcp/30334/ws/p2p/12D3KooWHyUSQkoL1WtnhLUYHuKbowZEZW1NNJe7TePKYZf9ucBY", - "/ip4/35.198.110.45/tcp/30333/p2p/12D3KooWFcry65ShtPT6roTTEPXD9H89A1iA2wPKgJCgXW1yZwyy", - "/ip4/35.198.110.45/tcp/30334/ws/p2p/12D3KooWFcry65ShtPT6roTTEPXD9H89A1iA2wPKgJCgXW1yZwyy", - "/ip4/35.198.114.154/tcp/30333/p2p/12D3KooWDfFapccu3KgvWyVMdXhMGPPpKiJ1yEhSMEupBZppfi9U", - "/ip4/35.198.114.154/tcp/30334/ws/p2p/12D3KooWDfFapccu3KgvWyVMdXhMGPPpKiJ1yEhSMEupBZppfi9U" + "/dns/0.flamingfir.paritytech.net/tcp/30333/p2p/12D3KooWLK2gMLhWsYJzjW3q35zAs9FDDVqfqVfVuskiGZGRSMvR", + "/dns/0.flamingfir.paritytech.net/tcp/30334/ws/p2p/12D3KooWLK2gMLhWsYJzjW3q35zAs9FDDVqfqVfVuskiGZGRSMvR", + "/dns/1.flamingfir.paritytech.net/tcp/30333/p2p/12D3KooWHyUSQkoL1WtnhLUYHuKbowZEZW1NNJe7TePKYZf9ucBY", + 
"/dns/1.flamingfir.paritytech.net/tcp/30334/ws/p2p/12D3KooWHyUSQkoL1WtnhLUYHuKbowZEZW1NNJe7TePKYZf9ucBY", + "/dns/2.flamingfir.paritytech.net/tcp/30333/p2p/12D3KooWFcry65ShtPT6roTTEPXD9H89A1iA2wPKgJCgXW1yZwyy", + "/dns/2.flamingfir.paritytech.net/tcp/30334/ws/p2p/12D3KooWFcry65ShtPT6roTTEPXD9H89A1iA2wPKgJCgXW1yZwyy", + "/dns/3.flamingfir.paritytech.net/tcp/30333/p2p/12D3KooWDfFapccu3KgvWyVMdXhMGPPpKiJ1yEhSMEupBZppfi9U", + "/dns/3.flamingfir.paritytech.net/tcp/30334/ws/p2p/12D3KooWDfFapccu3KgvWyVMdXhMGPPpKiJ1yEhSMEupBZppfi9U" ], "telemetryEndpoints": [ [ -- GitLab From f4bd80c4ca75e20ac028dfc57b1829b937976740 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Aug 2020 11:16:19 +0200 Subject: [PATCH 052/132] Fix warning being printed by authority-discovery (#6820) --- client/network/src/service.rs | 36 ++++++++++++++++++++--------------- 1 file changed, 21 insertions(+), 15 deletions(-) diff --git a/client/network/src/service.rs b/client/network/src/service.rs index e4ba36be587..230af3fb8e1 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -30,7 +30,7 @@ use crate::{ ExHashT, NetworkStateInfo, behaviour::{Behaviour, BehaviourOut}, - config::{parse_addr, parse_str_addr, NonReservedPeerMode, Params, Role, TransportConfig}, + config::{parse_str_addr, NonReservedPeerMode, Params, Role, TransportConfig}, DhtEvent, discovery::DiscoveryConfig, error::Error, @@ -43,7 +43,7 @@ use crate::{ transport, ReputationChange, }; use futures::prelude::*; -use libp2p::{PeerId, Multiaddr}; +use libp2p::{PeerId, multiaddr, Multiaddr}; use libp2p::core::{ConnectedPoint, Executor, connection::{ConnectionError, PendingConnectionError}, either::EitherError}; use libp2p::kad::record; use libp2p::ping::handler::PingFailure; @@ -879,21 +879,27 @@ impl NetworkService { /// Modify a peerset priority group. /// - /// Returns an `Err` if one of the given addresses contains an invalid - /// peer ID (which includes the local peer ID). + /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. + /// + /// Returns an `Err` if one of the given addresses is invalid or contains an + /// invalid peer ID (which includes the local peer ID). pub fn set_priority_group(&self, group_id: String, peers: HashSet) -> Result<(), String> { let peers = peers.into_iter() - .map(|p| match parse_addr(p) { - Err(e) => Err(format!("{:?}", e)), - Ok((peer, addr)) => - // Make sure the local peer ID is never added to the PSM - // or added as a "known address", even if given. - if peer == self.local_peer_id { - Err("Local peer ID in priority group.".to_string()) - } else { - Ok((peer, addr)) - } - }) + .map(|mut addr| { + let peer = match addr.pop() { + Some(multiaddr::Protocol::P2p(key)) => PeerId::from_multihash(key) + .map_err(|_| "Invalid PeerId format".to_string())?, + _ => return Err("Missing PeerId from address".to_string()), + }; + + // Make sure the local peer ID is never added to the PSM + // or added as a "known address", even if given. 
+ if peer == self.local_peer_id { + Err("Local peer ID in priority group.".to_string()) + } else { + Ok((peer, addr)) + } + }) .collect::, String>>()?; let peer_ids = peers.iter().map(|(peer_id, _addr)| peer_id.clone()).collect(); -- GitLab From f92a86a9506f331a867d36b0aa6b0bff3e462536 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Wed, 5 Aug 2020 11:40:31 +0200 Subject: [PATCH 053/132] Better default balances weights (#6813) --- frame/balances/src/default_weight.rs | 46 ++++++++++++++++++++++++++++ frame/balances/src/lib.rs | 9 +----- 2 files changed, 47 insertions(+), 8 deletions(-) create mode 100644 frame/balances/src/default_weight.rs diff --git a/frame/balances/src/default_weight.rs b/frame/balances/src/default_weight.rs new file mode 100644 index 00000000000..47a91996005 --- /dev/null +++ b/frame/balances/src/default_weight.rs @@ -0,0 +1,46 @@ +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Weights for the Balances Pallet + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +impl crate::WeightInfo for () { + fn transfer() -> Weight { + (65949000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn transfer_keep_alive() -> Weight { + (46665000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn set_balance_creating() -> Weight { + (27086000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn set_balance_killing() -> Weight { + (33424000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn force_transfer() -> Weight { + (65343000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } +} diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index cc9d9d179fa..c6b43677f2e 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -154,6 +154,7 @@ mod tests; mod tests_local; mod tests_composite; mod benchmarking; +mod default_weight; use sp_std::prelude::*; use sp_std::{cmp, result, mem, fmt::Debug, ops::BitOr, convert::Infallible}; @@ -187,14 +188,6 @@ pub trait WeightInfo { fn force_transfer() -> Weight; } -impl WeightInfo for () { - fn transfer() -> Weight { 1_000_000_000 } - fn transfer_keep_alive() -> Weight { 1_000_000_000 } - fn set_balance_creating() -> Weight { 1_000_000_000 } - fn set_balance_killing() -> Weight { 1_000_000_000 } - fn force_transfer() -> Weight { 1_000_000_000 } -} - pub trait Subtrait: frame_system::Trait { /// The balance of an account. 
type Balance: Parameter + Member + AtLeast32BitUnsigned + Codec + Default + Copy + -- GitLab From b8ffd568b3cf7a0f5bcd0776398767082d18792d Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Wed, 5 Aug 2020 13:37:01 +0200 Subject: [PATCH 054/132] Remove generation of instance trait by decl_storage. (#6812) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * remove generation of instance trait, no breaking change * doc * doc * Update frame/support/src/traits.rs Co-authored-by: Bastian Köcher * Update frame/support/procedural/src/storage/instance_trait.rs Co-authored-by: Bastian Köcher --- .../src/storage/genesis_config/mod.rs | 9 ++-- .../procedural/src/storage/instance_trait.rs | 42 +++++++++---------- frame/support/procedural/src/storage/parse.rs | 11 ++++- .../procedural/src/storage/storage_struct.rs | 10 ++--- frame/support/src/traits.rs | 11 +++++ frame/support/test/tests/final_keys.rs | 4 +- frame/support/test/tests/instance.rs | 7 ++-- 7 files changed, 54 insertions(+), 40 deletions(-) diff --git a/frame/support/procedural/src/storage/genesis_config/mod.rs b/frame/support/procedural/src/storage/genesis_config/mod.rs index 7cc0f7c3bef..27fbdd2cd38 100644 --- a/frame/support/procedural/src/storage/genesis_config/mod.rs +++ b/frame/support/procedural/src/storage/genesis_config/mod.rs @@ -20,7 +20,7 @@ use proc_macro2::{TokenStream, Span}; use quote::quote; -use super::{DeclStorageDefExt, instance_trait::DEFAULT_INSTANTIABLE_TRAIT_NAME}; +use super::DeclStorageDefExt; use genesis_config_def::GenesisConfigDef; use builder_def::BuilderDef; @@ -104,10 +104,9 @@ fn impl_build_storage( let name = syn::Ident::new(DEFAULT_INSTANCE_NAME, Span::call_site()); quote!( #name ) }); - let inherent_instance_bound = def.optional_instance_bound.clone().unwrap_or_else(|| { - let bound = syn::Ident::new(DEFAULT_INSTANTIABLE_TRAIT_NAME, Span::call_site()); - quote!( #inherent_instance: #bound ) - }); + let inherent_instance_bound = quote!( + #inherent_instance: #scrate::traits::Instance + ); let build_storage_impl = quote!( <#runtime_generic: #runtime_trait, #inherent_instance_bound> diff --git a/frame/support/procedural/src/storage/instance_trait.rs b/frame/support/procedural/src/storage/instance_trait.rs index 1e5e198a8c5..a28c3ae6220 100644 --- a/frame/support/procedural/src/storage/instance_trait.rs +++ b/frame/support/procedural/src/storage/instance_trait.rs @@ -24,7 +24,6 @@ use super::DeclStorageDefExt; const NUMBER_OF_INSTANCE: usize = 16; pub(crate) const INHERENT_INSTANCE_NAME: &str = "__InherentHiddenInstance"; -pub(crate) const DEFAULT_INSTANTIABLE_TRAIT_NAME: &str = "__GeneratedInstantiable"; // Used to generate an instance implementation. struct InstanceDef { @@ -36,7 +35,7 @@ struct InstanceDef { pub fn decl_and_impl(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStream { let mut impls = TokenStream::new(); - impls.extend(create_instance_trait(def)); + impls.extend(reexport_instance_trait(scrate, def)); // Implementation of instances. if let Some(module_instance) = &def.module_instance { @@ -70,6 +69,8 @@ pub fn decl_and_impl(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStre .and_then(|i| i.instance_default.as_ref()) { impls.extend(quote! { + /// Hidden instance generated to be internally used when module is used without + /// instance. 
#[doc(hidden)] pub type #inherent_instance = #default_instance; }); @@ -77,7 +78,11 @@ pub fn decl_and_impl(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStre let instance_def = InstanceDef { prefix: String::new(), instance_struct: inherent_instance, - doc: quote!(#[doc(hidden)]), + doc: quote!( + /// Hidden instance generated to be internally used when module is used without + /// instance. + #[doc(hidden)] + ), }; impls.extend(create_and_impl_instance_struct(scrate, &instance_def, def)); } @@ -85,27 +90,19 @@ pub fn decl_and_impl(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStre impls } -fn create_instance_trait( +fn reexport_instance_trait( + scrate: &TokenStream, def: &DeclStorageDefExt, ) -> TokenStream { - let instance_trait = def.module_instance.as_ref().map(|i| i.instance_trait.clone()) - .unwrap_or_else(|| syn::Ident::new(DEFAULT_INSTANTIABLE_TRAIT_NAME, Span::call_site())); - - let optional_hide = if def.module_instance.is_some() { - quote!() + if let Some(i) = def.module_instance.as_ref() { + let instance_trait = &i.instance_trait; + quote!( + /// Local import of frame_support::traits::Instance + // This import is not strictly needed but made in order not to have breaking change. + use #scrate::traits::Instance as #instance_trait; + ) } else { - quote!(#[doc(hidden)]) - }; - - quote! { - /// Tag a type as an instance of a module. - /// - /// Defines storage prefixes, they must be unique. - #optional_hide - pub trait #instance_trait: 'static { - /// The prefix used by any storage entry of an instance. - const PREFIX: &'static str; - } + quote!() } } @@ -114,8 +111,7 @@ fn create_and_impl_instance_struct( instance_def: &InstanceDef, def: &DeclStorageDefExt, ) -> TokenStream { - let instance_trait = def.module_instance.as_ref().map(|i| i.instance_trait.clone()) - .unwrap_or_else(|| syn::Ident::new(DEFAULT_INSTANTIABLE_TRAIT_NAME, Span::call_site())); + let instance_trait = quote!( #scrate::traits::Instance ); let instance_struct = &instance_def.instance_struct; let prefix = format!("{}{}", instance_def.prefix, def.crate_name.to_string()); diff --git a/frame/support/procedural/src/storage/parse.rs b/frame/support/procedural/src/storage/parse.rs index b1ef2916ad8..504af6d0ffc 100644 --- a/frame/support/procedural/src/storage/parse.rs +++ b/frame/support/procedural/src/storage/parse.rs @@ -324,7 +324,16 @@ fn get_module_instance( instantiable: Option, default_instance: Option, ) -> syn::Result> { - let right_syntax = "Should be $Instance: $Instantiable = $DefaultInstance"; + let right_syntax = "Should be $I: $Instance = $DefaultInstance"; + + if instantiable.as_ref().map_or(false, |i| i != "Instance") { + let msg = format!( + "Instance trait must be named `Instance`, other names are no longer supported, because \ + it is now defined at frame_support::traits::Instance. 
Expect `Instance` found `{}`", + instantiable.as_ref().unwrap(), + ); + return Err(syn::Error::new(instantiable.span(), msg)); + } match (instance, instantiable, default_instance) { (Some(instance), Some(instantiable), default_instance) => { diff --git a/frame/support/procedural/src/storage/storage_struct.rs b/frame/support/procedural/src/storage/storage_struct.rs index 4cacb35c49d..e89b06770a6 100644 --- a/frame/support/procedural/src/storage/storage_struct.rs +++ b/frame/support/procedural/src/storage/storage_struct.rs @@ -106,7 +106,7 @@ pub fn decl_and_impl(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStre type Query = #query_type; fn module_prefix() -> &'static [u8] { - #instance_or_inherent::PREFIX.as_bytes() + <#instance_or_inherent as #scrate::traits::Instance>::PREFIX.as_bytes() } fn storage_prefix() -> &'static [u8] { @@ -130,7 +130,7 @@ pub fn decl_and_impl(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStre for #storage_struct #optional_storage_where_clause { fn module_prefix() -> &'static [u8] { - #instance_or_inherent::PREFIX.as_bytes() + <#instance_or_inherent as #scrate::traits::Instance>::PREFIX.as_bytes() } fn storage_prefix() -> &'static [u8] { @@ -145,7 +145,7 @@ pub fn decl_and_impl(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStre type Hasher = #scrate::#hasher; fn module_prefix() -> &'static [u8] { - #instance_or_inherent::PREFIX.as_bytes() + <#instance_or_inherent as #scrate::traits::Instance>::PREFIX.as_bytes() } fn storage_prefix() -> &'static [u8] { @@ -170,7 +170,7 @@ pub fn decl_and_impl(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStre for #storage_struct #optional_storage_where_clause { fn module_prefix() -> &'static [u8] { - #instance_or_inherent::PREFIX.as_bytes() + <#instance_or_inherent as #scrate::traits::Instance>::PREFIX.as_bytes() } fn storage_prefix() -> &'static [u8] { @@ -188,7 +188,7 @@ pub fn decl_and_impl(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStre type Hasher2 = #scrate::#hasher2; fn module_prefix() -> &'static [u8] { - #instance_or_inherent::PREFIX.as_bytes() + <#instance_or_inherent as #scrate::traits::Instance>::PREFIX.as_bytes() } fn storage_prefix() -> &'static [u8] { diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 752725ab466..72a3850d2d3 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -1653,6 +1653,17 @@ impl IsType for T { fn into_mut(&mut self) -> &mut T { self } } +/// An instance of a pallet in the storage. +/// +/// It is required that these instances are unique, to support multiple instances per pallet in the same runtime! +/// +/// E.g. for module MyModule default instance will have prefix "MyModule" and other instances +/// "InstanceNMyModule". +pub trait Instance: 'static { + /// Unique module prefix. E.g. "InstanceNMyModule" or "MyModule" + const PREFIX: &'static str ; +} + #[cfg(test)] mod tests { use super::*; diff --git a/frame/support/test/tests/final_keys.rs b/frame/support/test/tests/final_keys.rs index e88389ade77..34da1752da0 100644 --- a/frame/support/test/tests/final_keys.rs +++ b/frame/support/test/tests/final_keys.rs @@ -53,12 +53,12 @@ mod instance { pub trait Trait: super::no_instance::Trait {} frame_support::decl_module! 
{ - pub struct Module, I: Instantiable = DefaultInstance> + pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin {} } frame_support::decl_storage!{ - trait Store for Module, I: Instantiable = DefaultInstance> + trait Store for Module, I: Instance = DefaultInstance> as FinalKeysSome { pub Value config(value): u32; diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index 08389eed3aa..33e8cc1fd6c 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -36,7 +36,6 @@ pub trait Currency {} // Test for: // * No default instance -// * Custom InstantiableTrait // * Origin, Inherent, Event mod module1 { use super::*; @@ -49,7 +48,7 @@ mod module1 { } frame_support::decl_module! { - pub struct Module, I: InstantiableThing> for enum Call where + pub struct Module, I: Instance> for enum Call where origin: ::Origin, system = system, T::BlockNumber: From @@ -67,7 +66,7 @@ mod module1 { } frame_support::decl_storage! { - trait Store for Module, I: InstantiableThing> as Module1 where + trait Store for Module, I: Instance> as Module1 where T::BlockNumber: From + std::fmt::Display { pub Value config(value): T::GenericType; @@ -97,7 +96,7 @@ mod module1 { pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"12345678"; - impl, I: InstantiableThing> ProvideInherent for Module where + impl, I: Instance> ProvideInherent for Module where T::BlockNumber: From { type Call = Call; -- GitLab From be40a78490b31ce82cb3f175f218e745a3e88833 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Wed, 5 Aug 2020 16:22:21 +0200 Subject: [PATCH 055/132] Successful `note_imminent_preimage` is free (#6793) * Successful `note_imminent_preimage` is free * update docs * Add test for duplicate preimage --- frame/democracy/src/lib.rs | 20 +++++++++++------ frame/democracy/src/tests/preimage.rs | 31 +++++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 6 deletions(-) diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index f3a5960eb2f..e298b1e4508 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -160,7 +160,7 @@ use sp_runtime::{ use codec::{Encode, Decode, Input}; use frame_support::{ decl_module, decl_storage, decl_event, decl_error, ensure, Parameter, - weights::{Weight, DispatchClass}, + weights::{Weight, DispatchClass, Pays}, traits::{ Currency, ReservableCurrency, LockableCurrency, WithdrawReason, LockIdentifier, Get, OnUnbalanced, BalanceStatus, schedule::{Named as ScheduleNamed, DispatchTime}, EnsureOrigin @@ -458,14 +458,14 @@ decl_event! { Vetoed(AccountId, Hash, BlockNumber), /// A proposal's preimage was noted, and the deposit taken. [proposal_hash, who, deposit] PreimageNoted(Hash, AccountId, Balance), - /// A proposal preimage was removed and used (the deposit was returned). + /// A proposal preimage was removed and used (the deposit was returned). /// [proposal_hash, provider, deposit] PreimageUsed(Hash, AccountId, Balance), /// A proposal could not be executed because its preimage was invalid. [proposal_hash, ref_index] PreimageInvalid(Hash, ReferendumIndex), /// A proposal could not be executed because its preimage was missing. [proposal_hash, ref_index] PreimageMissing(Hash, ReferendumIndex), - /// A registered preimage was removed and the deposit collected by the reaper. + /// A registered preimage was removed and the deposit collected by the reaper. 
/// [proposal_hash, provider, deposit, reaper] PreimageReaped(Hash, AccountId, Balance, AccountId), /// An [account] has been unlocked successfully. @@ -1000,7 +1000,9 @@ decl_module! { } /// Register the preimage for an upcoming proposal. This requires the proposal to be - /// in the dispatch queue. No deposit is needed. + /// in the dispatch queue. No deposit is needed. When this call is successful, i.e. + /// the preimage has not been uploaded before and matches some imminent proposal, + /// no fee is paid. /// /// The dispatch origin of this call must be _Signed_. /// @@ -1014,8 +1016,11 @@ decl_module! { /// - Db writes: `Preimages` /// # #[weight = T::WeightInfo::note_imminent_preimage(encoded_proposal.len() as u32)] - fn note_imminent_preimage(origin, encoded_proposal: Vec) { + fn note_imminent_preimage(origin, encoded_proposal: Vec) -> DispatchResultWithPostInfo { Self::note_imminent_preimage_inner(ensure_signed(origin)?, encoded_proposal)?; + // We check that this preimage was not uploaded before in `note_imminent_preimage_inner`, + // thus this call can only be successful once. If successful, user does not pay a fee. + Ok(Pays::No.into()) } /// Same as `note_imminent_preimage` but origin is `OperationalPreimageOrigin`. @@ -1023,9 +1028,12 @@ decl_module! { T::WeightInfo::note_imminent_preimage(encoded_proposal.len() as u32), DispatchClass::Operational, )] - fn note_imminent_preimage_operational(origin, encoded_proposal: Vec) { + fn note_imminent_preimage_operational(origin, encoded_proposal: Vec) -> DispatchResultWithPostInfo { let who = T::OperationalPreimageOrigin::ensure_origin(origin)?; Self::note_imminent_preimage_inner(who, encoded_proposal)?; + // We check that this preimage was not uploaded before in `note_imminent_preimage_inner`, + // thus this call can only be successful once. If successful, user does not pay a fee. + Ok(Pays::No.into()) } /// Remove an expired proposal preimage and collect the deposit. 
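
The fee waiver in the hunk above hinges on the `DispatchResultWithPostInfo` return type: a dispatchable can report, after executing, that the caller should not pay. The following is a minimal sketch of that mechanism only — it is not code from the pallet, the function name is illustrative, and it assumes the `frame_support` 2.0.0-rc5 items already imported by this patch (`Pays`) plus `dispatch::DispatchResultWithPostInfo`.

```rust
use frame_support::dispatch::DispatchResultWithPostInfo;
use frame_support::weights::Pays;

// Illustrative only: the shape of a dispatchable body that waives the fee
// when (and only when) it succeeds.
fn successful_call_is_free() -> DispatchResultWithPostInfo {
    // `Pays::No.into()` builds a `PostDispatchInfo` whose `pays_fee` is
    // `Pays::No`, so the runtime's transaction-payment logic does not charge
    // the usual fee for this call. An `Err` returned earlier (e.g. via `?` on
    // an inner check) keeps the default `Pays::Yes`, so failed or duplicate
    // submissions still pay, which is what keeps the free success path
    // spam-resistant.
    Ok(Pays::No.into())
}
```

This is also why both `note_imminent_preimage` variants switch to returning `DispatchResultWithPostInfo` instead of simply lowering their declared weight: the refund decision can only be made after the duplicate-preimage check has passed.
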
diff --git a/frame/democracy/src/tests/preimage.rs b/frame/democracy/src/tests/preimage.rs index 4100a6a6b63..8a2cbaf5340 100644 --- a/frame/democracy/src/tests/preimage.rs +++ b/frame/democracy/src/tests/preimage.rs @@ -164,3 +164,34 @@ fn reaping_imminent_preimage_should_fail() { assert_noop!(Democracy::reap_preimage(Origin::signed(6), h, u32::max_value()), Error::::Imminent); }); } + +#[test] +fn note_imminent_preimage_can_only_be_successful_once() { + new_test_ext().execute_with(|| { + PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); + + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash(2), + VoteThreshold::SuperMajorityApprove, + 1 + ); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + next_block(); + + // First time works + assert_ok!(Democracy::note_imminent_preimage(Origin::signed(6), set_balance_proposal(2))); + + // Second time fails + assert_noop!( + Democracy::note_imminent_preimage(Origin::signed(6), set_balance_proposal(2)), + Error::::DuplicatePreimage + ); + + // Fails from any user + assert_noop!( + Democracy::note_imminent_preimage(Origin::signed(5), set_balance_proposal(2)), + Error::::DuplicatePreimage + ); + }); +} -- GitLab From 5ce8f093a5c0dc077977a042d34a51e68de7b85e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Wed, 5 Aug 2020 16:58:07 +0100 Subject: [PATCH 056/132] service: remove collection of system/process metrics (#6822) --- Cargo.lock | 346 +++++++++++----------------------- client/service/Cargo.toml | 7 - client/service/src/metrics.rs | 299 ++++------------------------- 3 files changed, 148 insertions(+), 504 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fe237f2341b..fb3cd395cf2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -151,7 +151,7 @@ checksum = "4f823d037a7ec6ea2197046bafd4ae150e6bc36f9ca347404f46a46823fa84f2" dependencies = [ "approx", "num-complex", - "num-traits 0.2.11", + "num-traits", ] [[package]] @@ -184,7 +184,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0e60b75072ecd4168020818c0107f2857bb6c4e64252d8d3983f6263b40a5c3" dependencies = [ - "num-traits 0.2.11", + "num-traits", ] [[package]] @@ -235,8 +235,8 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d0864d84b8e07b145449be9a8537db86bf9de5ce03b913214694643b4743502" dependencies = [ - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -382,7 +382,7 @@ dependencies = [ "log", "peeking_take_while", "proc-macro2", - "quote 1.0.6", + "quote", "regex", "rustc-hash", "shlex", @@ -686,7 +686,7 @@ checksum = "80094f509cf8b5ae86a4966a39b3ff66cd7e2a3e594accec3743ff3fabeab5b2" dependencies = [ "js-sys", "num-integer", - "num-traits 0.2.11", + "num-traits", "time", "wasm-bindgen", ] @@ -927,7 +927,7 @@ dependencies = [ "itertools 0.8.2", "lazy_static", "libc", - "num-traits 0.2.11", + "num-traits", "rand_core 0.3.1", "rand_os", "rand_xoshiro", @@ -953,7 +953,7 @@ dependencies = [ "csv", "itertools 0.8.2", "lazy_static", - "num-traits 0.2.11", + "num-traits", "oorandom", "plotters", "rayon", @@ -1096,8 +1096,8 @@ version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47c5e5ac752e18207b12e16b10631ae5f7f68f8805f335f9b817ead83d9ffce1" dependencies = [ - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -1136,8 +1136,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e2323f3f47db9a0e77ce7a300605d8d2098597fc451ed1a97bb1f6411bb550a7" dependencies = [ "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -1219,8 +1219,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" dependencies = [ "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -1258,17 +1258,6 @@ version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3" -[[package]] -name = "enum-primitive-derive" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2b90e520ec62c1864c8c78d637acbfe8baf5f63240f2fb8165b8325c07812dd" -dependencies = [ - "num-traits 0.1.43", - "quote 0.3.15", - "syn 0.11.11", -] - [[package]] name = "enumflags2" version = "0.6.3" @@ -1285,8 +1274,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2ed9afacaea0301eefb738c9deea725e6d53938004597cdc518a8cf9a7aa2f03" dependencies = [ "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -1450,8 +1439,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "030a733c8287d6213886dd487564ff5c8f6aae10278b3588ed177f9d18f8d231" dependencies = [ "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", "synstructure", ] @@ -1502,7 +1491,7 @@ dependencies = [ "futures 0.3.5", "futures-timer 2.0.2", "log", - "num-traits 0.2.11", + "num-traits", "parity-scale-codec", "parking_lot 0.9.0", "rand 0.6.5", @@ -1648,8 +1637,8 @@ version = "2.0.0-rc5" dependencies = [ "frame-support-procedural-tools", "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -1659,8 +1648,8 @@ dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -1668,8 +1657,8 @@ name = "frame-support-procedural-tools-derive" version = "2.0.0-rc5" dependencies = [ "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -1885,8 +1874,8 @@ checksum = "d0b5a30a4328ab5473878237c447333c093297bded83a4983d10f4deea240d39" dependencies = [ "proc-macro-hack", "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -2431,8 +2420,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef5550a42e3740a0e71f909d4c861056a284060af885ae7aa6242820f920d9d" dependencies = [ "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -2571,8 +2560,8 @@ checksum = "0fadf6945e227246825a583514534d864554e9f23d80b3c77d034b10983db5ef" dependencies = [ "proc-macro-crate", "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -2784,18 +2773,6 @@ version = "0.2.71" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9457b06509d27052635f90d6466700c65095fdf75409b3fbdd903e988b886f49" -[[package]] -name = "libflate" -version = "0.1.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9135df43b1f5d0e333385cb6e7897ecd1a43d7d11b91ac003f4d2c2d2401fdd" -dependencies = [ - "adler32", - "crc32fast", - "rle-decode-fast", - "take_mut", -] - [[package]] name = "libloading" version = "0.5.2" @@ -2884,8 +2861,8 @@ version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"515c4a7cba5d321bb88ed3ed803997bdd5634ce35c9c5e8e9ace9c512e57eceb" dependencies = [ - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -3467,7 +3444,7 @@ dependencies = [ "matrixmultiply", "num-complex", "num-rational", - "num-traits 0.2.11", + "num-traits", "rand 0.6.5", "typenum", ] @@ -3492,20 +3469,6 @@ dependencies = [ "winapi 0.3.8", ] -[[package]] -name = "netstat2" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29449d242064c48d3057a194b049a2bdcccadda16faa18a91468677b44e8d422" -dependencies = [ - "bitflags", - "byteorder", - "enum-primitive-derive", - "libc", - "num-traits 0.2.11", - "thiserror", -] - [[package]] name = "nix" version = "0.17.0" @@ -3946,15 +3909,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "ntapi" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26e041cd983acbc087e30fcba770380cfa352d0e392e175b2344ebaf7ea0602" -dependencies = [ - "winapi 0.3.8", -] - [[package]] name = "num-bigint" version = "0.2.6" @@ -3963,7 +3917,7 @@ checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" dependencies = [ "autocfg 1.0.0", "num-integer", - "num-traits 0.2.11", + "num-traits", ] [[package]] @@ -3973,7 +3927,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6b19411a9719e753aff12e5187b74d60d3dc449ec3f4dc21e3989c3f554bc95" dependencies = [ "autocfg 1.0.0", - "num-traits 0.2.11", + "num-traits", ] [[package]] @@ -3983,7 +3937,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba" dependencies = [ "autocfg 1.0.0", - "num-traits 0.2.11", + "num-traits", ] [[package]] @@ -3995,16 +3949,7 @@ dependencies = [ "autocfg 1.0.0", "num-bigint", "num-integer", - "num-traits 0.2.11", -] - -[[package]] -name = "num-traits" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31" -dependencies = [ - "num-traits 0.2.11", + "num-traits", ] [[package]] @@ -4824,9 +4769,9 @@ version = "2.0.0-rc5" dependencies = [ "proc-macro-crate", "proc-macro2", - "quote 1.0.6", + "quote", "sp-runtime", - "syn 1.0.33", + "syn", ] [[package]] @@ -5026,8 +4971,8 @@ checksum = "cd20ff7e0399b274a5f5bb37b712fccb5b3a64b9128200d1c3cc40fe709cb073" dependencies = [ "proc-macro-crate", "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -5093,7 +5038,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" dependencies = [ "proc-macro2", - "syn 1.0.33", + "syn", "synstructure", ] @@ -5186,8 +5131,8 @@ checksum = "a62486e111e571b1e93b710b61e8f493c0013be39629b714cb166bdb06aa5a8a" dependencies = [ "proc-macro-hack", "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -5250,8 +5195,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a0ffd45cf79d88737d7cc85bfd5d2894bee1139b356e616fe85dc389c61aaf7" dependencies = [ "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -5285,7 +5230,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4e3bb8da247d27ae212529352020f3e5ee16e83c0c258061d27b08ab92675eeb" dependencies = [ "js-sys", - "num-traits 0.2.11", + "num-traits", "wasm-bindgen", "web-sys", ] @@ -5383,8 
+5328,8 @@ checksum = "98e9e4b82e0ef281812565ea4751049f1bdcdfccda7d3f459f2e138a40c08678" dependencies = [ "proc-macro-error-attr", "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", "version_check", ] @@ -5395,8 +5340,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4f5444ead4e9935abd7f27dc51f7e852a0569ac888096d5ec2499470794e2e53" dependencies = [ "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", "syn-mid", "version_check", ] @@ -5419,22 +5364,7 @@ version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "beae6331a816b1f65d04c45b078fd8e6c93e8071771f41b8163255bbd8d7c8fa" dependencies = [ - "unicode-xid 0.2.0", -] - -[[package]] -name = "procfs" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe50036aa1b71e553a4a0c48ab7baabf8aa8c7a5a61aae06bf38c2eab7430475" -dependencies = [ - "bitflags", - "byteorder", - "chrono", - "hex", - "lazy_static", - "libc", - "libflate", + "unicode-xid", ] [[package]] @@ -5487,8 +5417,8 @@ dependencies = [ "anyhow", "itertools 0.8.2", "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -5541,12 +5471,6 @@ dependencies = [ "pin-project-lite", ] -[[package]] -name = "quote" -version = "0.3.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a" - [[package]] name = "quote" version = "1.0.6" @@ -5849,8 +5773,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "602eb59cda66fcb9aec25841fb76bc01d2b34282dcdd705028da297db6f3eec8" dependencies = [ "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -5929,8 +5853,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "475e68978dc5b743f2f40d8e0a8fdc83f1c5e78cbf4b8fa5e74e73beebc340de" dependencies = [ "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -5965,12 +5889,6 @@ dependencies = [ "opaque-debug 0.3.0", ] -[[package]] -name = "rle-decode-fast" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cabe4fa914dec5870285fa7f71f602645da47c486e68486d2b4ceb4a343e90ac" - [[package]] name = "rlp" version = "0.4.5" @@ -6071,8 +5989,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3bba175698996010c4f6dce5e7f173b6eb781fce25d2cfc45e27091ce0b79f6" dependencies = [ "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -6205,8 +6123,8 @@ version = "2.0.0-rc5" dependencies = [ "proc-macro-crate", "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -6383,7 +6301,7 @@ dependencies = [ "merlin", "num-bigint", "num-rational", - "num-traits 0.2.11", + "num-traits", "parity-scale-codec", "parking_lot 0.10.2", "pdqselect", @@ -7015,12 +6933,10 @@ dependencies = [ "jsonrpc-pubsub", "lazy_static", "log", - "netstat2", "parity-scale-codec", "parity-util-mem 0.7.0", "parking_lot 0.10.2", "pin-project", - "procfs", "rand 0.7.3", "sc-block-builder", "sc-chain-spec", @@ -7061,7 +6977,6 @@ dependencies = [ "sp-version", "substrate-prometheus-endpoint", "substrate-test-runtime-client", - "sysinfo", "tempfile", "tokio 0.2.21", "tracing", @@ -7267,8 +7182,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8584eea9b9ff42825b46faf46a8c24d2cff13ec152fa2a50df788b87c07ee28" dependencies = [ 
"proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -7372,8 +7287,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a0be94b04690fbaed37cddffc5c134bf537c8e3329d53e982fe04c374978f8e" dependencies = [ "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -7505,8 +7420,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a945ec7f7ce853e89ffa36be1e27dce9a43e82ff9093bf3461c30d5da74ed11b" dependencies = [ "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -7624,8 +7539,8 @@ dependencies = [ "blake2-rfc", "proc-macro-crate", "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -7675,7 +7590,7 @@ version = "2.0.0-rc5" dependencies = [ "criterion 0.3.1", "integer-sqrt", - "num-traits 0.2.11", + "num-traits", "parity-scale-codec", "primitive-types", "rand 0.7.3", @@ -7691,7 +7606,7 @@ version = "2.0.0-rc5" dependencies = [ "honggfuzz", "num-bigint", - "num-traits 0.2.11", + "num-traits", "primitive-types", "sp-arithmetic", ] @@ -7860,7 +7775,7 @@ dependencies = [ "libsecp256k1", "log", "merlin", - "num-traits 0.2.11", + "num-traits", "parity-scale-codec", "parity-util-mem 0.7.0", "parking_lot 0.10.2", @@ -7901,8 +7816,8 @@ name = "sp-debug-derive" version = "2.0.0-rc5" dependencies = [ "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -8000,8 +7915,8 @@ version = "2.0.0-rc5" dependencies = [ "proc-macro-crate", "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -8092,8 +8007,8 @@ dependencies = [ "Inflector", "proc-macro-crate", "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -8184,7 +8099,7 @@ dependencies = [ "hex-literal", "itertools 0.9.0", "log", - "num-traits 0.2.11", + "num-traits", "parity-scale-codec", "parking_lot 0.10.2", "pretty_assertions", @@ -8392,8 +8307,8 @@ dependencies = [ "heck", "proc-macro-error", "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -8413,8 +8328,8 @@ checksum = "0054a7df764039a6cd8592b9de84be4bec368ff081d203a7d5371cbfa8e65c81" dependencies = [ "heck", "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -8708,17 +8623,6 @@ version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c65d530b10ccaeac294f349038a597e435b18fb456aadd0840a623f83b9e941" -[[package]] -name = "syn" -version = "0.11.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3b891b9015c88c576343b9b3e41c2c11a51c219ef067b264bd9c8aa9b441dad" -dependencies = [ - "quote 0.3.15", - "synom", - "unicode-xid 0.0.4", -] - [[package]] name = "syn" version = "1.0.33" @@ -8726,8 +8630,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8d5d96e8cbb005d6959f119f773bfaebb5684296108fb32600c00cde305b2cd" dependencies = [ "proc-macro2", - "quote 1.0.6", - "unicode-xid 0.2.0", + "quote", + "unicode-xid", ] [[package]] @@ -8737,17 +8641,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7be3539f6c128a931cf19dcee741c1af532c7fd387baa739c03dd2e96479338a" dependencies = [ "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", -] - -[[package]] -name = "synom" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a393066ed9010ebaed60b9eafa373d4b1baac186dd7e008555b0f702b51945b6" -dependencies = [ - 
"unicode-xid 0.0.4", + "quote", + "syn", ] [[package]] @@ -8757,24 +8652,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" dependencies = [ "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", - "unicode-xid 0.2.0", -] - -[[package]] -name = "sysinfo" -version = "0.14.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2983daff11a197c7c406b130579bc362177aa54cf2cc1f34d6ac88fccaa6a5e1" -dependencies = [ - "cfg-if", - "doc-comment", - "libc", - "ntapi", - "once_cell", - "rayon", - "winapi 0.3.8", + "quote", + "syn", + "unicode-xid", ] [[package]] @@ -8820,8 +8700,8 @@ checksum = "a605baa797821796a751f4a959e1206079b24a4b7e1ed302b7d785d81a9276c9" dependencies = [ "lazy_static", "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", "version_check", ] @@ -8850,8 +8730,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca972988113b7715266f91250ddb98070d033c62a011fa0fcc57434a649310dd" dependencies = [ "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -9056,8 +8936,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" dependencies = [ "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -9257,8 +9137,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99bbad0de3fd923c9c3232ead88510b783e5a4d16a6154adffa3d53308de984c" dependencies = [ "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] @@ -9421,12 +9301,6 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "caaa9d531767d1ff2150b9332433f32a24622147e5ebb1f26409d5da67afd479" -[[package]] -name = "unicode-xid" -version = "0.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c1f860d7d29cf02cb2f3f359fd35991af3d30bac52c57d265a3c461074cb4dc" - [[package]] name = "unicode-xid" version = "0.2.0" @@ -9611,8 +9485,8 @@ dependencies = [ "lazy_static", "log", "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", "wasm-bindgen-shared", ] @@ -9634,7 +9508,7 @@ version = "0.2.62" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2cd85aa2c579e8892442954685f0d801f9129de24fa2136b2c6a539c76b65776" dependencies = [ - "quote 1.0.6", + "quote", "wasm-bindgen-macro-support", ] @@ -9645,8 +9519,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8eb197bd3a47553334907ffd2f16507b4f4f01bbec3ac921a7719e0decdfe72a" dependencies = [ "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9678,7 +9552,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf2f86cd78a2aa7b1fb4bb6ed854eccb7f9263089c79542dca1576a1518a8467" dependencies = [ "proc-macro2", - "quote 1.0.6", + "quote", ] [[package]] @@ -9718,7 +9592,7 @@ dependencies = [ "libc", "memory_units", "num-rational", - "num-traits 0.2.11", + "num-traits", "parity-wasm 0.41.0", "wasmi-validation", ] @@ -10060,8 +9934,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de251eec69fc7c1bc3923403d18ececb929380e016afe103da75f396704f8ca2" dependencies = [ "proc-macro2", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", "synstructure", ] diff --git a/client/service/Cargo.toml 
b/client/service/Cargo.toml index 32f6532e7e0..212d4e4b59e 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -39,7 +39,6 @@ pin-project = "0.4.8" hash-db = "0.15.2" serde = "1.0.101" serde_json = "1.0.41" -sysinfo = "0.14.15" sc-keystore = { version = "2.0.0-rc5", path = "../keystore" } sp-io = { version = "2.0.0-rc5", path = "../../primitives/io" } sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } @@ -76,12 +75,6 @@ sc-tracing = { version = "2.0.0-rc5", path = "../tracing" } tracing = "0.1.10" parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } -[target.'cfg(all(any(unix, windows), not(target_os = "android"), not(target_os = "ios")))'.dependencies] -netstat2 = "0.8.1" - -[target.'cfg(target_os = "linux")'.dependencies] -procfs = '0.7.8' - [target.'cfg(not(target_os = "unknown"))'.dependencies] tempfile = "3.1.0" directories = "2.0.2" diff --git a/client/service/src/metrics.rs b/client/service/src/metrics.rs index bac8b38d423..7336d3862a6 100644 --- a/client/service/src/metrics.rs +++ b/client/service/src/metrics.rs @@ -19,7 +19,7 @@ use std::{convert::TryFrom, time::SystemTime}; use crate::{NetworkStatus, config::Configuration}; -use prometheus_endpoint::{register, Gauge, U64, F64, Registry, PrometheusError, Opts, GaugeVec}; +use prometheus_endpoint::{register, Gauge, U64, Registry, PrometheusError, Opts, GaugeVec}; use sc_telemetry::{telemetry, SUBSTRATE_INFO}; use sp_runtime::traits::{NumberFor, Block, SaturatedConversion, UniqueSaturatedInto}; use sp_transaction_pool::PoolStatus; @@ -27,28 +27,7 @@ use sp_utils::metrics::register_globals; use sc_client_api::ClientInfo; use sc_network::config::Role; -use sysinfo::{self, ProcessExt, SystemExt}; - -#[cfg(all(any(unix, windows), not(target_os = "android"), not(target_os = "ios")))] -use netstat2::{ - TcpState, ProtocolSocketInfo, iterate_sockets_info, AddressFamilyFlags, ProtocolFlags, -}; - struct PrometheusMetrics { - // system - #[cfg(all(any(unix, windows), not(target_os = "android"), not(target_os = "ios")))] - load_avg: GaugeVec, - - // process - cpu_usage_percentage: Gauge, - memory_usage_bytes: Gauge, - threads: Gauge, - open_files: GaugeVec, - - #[cfg(all(any(unix, windows), not(target_os = "android"), not(target_os = "ios")))] - netstat: GaugeVec, - - // -- inner counters // generic info block_height: GaugeVec, number_leaves: Gauge, @@ -62,9 +41,12 @@ struct PrometheusMetrics { } impl PrometheusMetrics { - fn setup(registry: &Registry, name: &str, version: &str, roles: u64) - -> Result - { + fn setup( + registry: &Registry, + name: &str, + version: &str, + roles: u64, + ) -> Result { register(Gauge::::with_opts( Opts::new( "build_info", @@ -88,39 +70,6 @@ impl PrometheusMetrics { )?, registry)?.set(start_time_since_epoch.as_secs()); Ok(Self { - // system - #[cfg(all(any(unix, windows), not(target_os = "android"), not(target_os = "ios")))] - load_avg: register(GaugeVec::new( - Opts::new("load_avg", "System load average"), - &["over"] - )?, registry)?, - - // process - memory_usage_bytes: register(Gauge::new( - "memory_usage_bytes", "Process memory (resident set size) usage", - )?, registry)?, - - cpu_usage_percentage: register(Gauge::new( - "cpu_usage_percentage", "Process CPU usage, percentage per core summed over all cores", - )?, registry)?, - - #[cfg(all(any(unix, windows), not(target_os = "android"), not(target_os = "ios")))] - netstat: register(GaugeVec::new( - Opts::new("netstat_tcp", "Number of TCP sockets of the process"), - 
&["status"] - )?, registry)?, - - threads: register(Gauge::new( - "threads", "Number of threads used by the process", - )?, registry)?, - - open_files: register(GaugeVec::new( - Opts::new("open_file_handles", "Number of open file handlers held by the process"), - &["fd_type"] - )?, registry)?, - - // --- internal - // generic internals block_height: register(GaugeVec::new( Opts::new("block_height", "Block height info of the chain"), @@ -154,116 +103,19 @@ impl PrometheusMetrics { } } -#[cfg(all(any(unix, windows), not(target_os = "android"), not(target_os = "ios")))] -#[derive(Default)] -struct ConnectionsCount { - listen: u64, - established: u64, - starting: u64, - closing: u64, - closed: u64, - other: u64 -} - -#[derive(Default)] -struct FdCounter { - paths: u64, - sockets: u64, - net: u64, - pipes: u64, - anon_inode: u64, - mem: u64, - other: u64, -} - -#[derive(Default)] -struct ProcessInfo { - cpu_usage: f64, - memory: u64, - threads: Option, - open_fd: Option, -} - pub struct MetricsService { metrics: Option, - #[cfg(all(any(unix, windows), not(target_os = "android"), not(target_os = "ios")))] - system: sysinfo::System, - pid: Option, -} - -#[cfg(target_os = "linux")] -impl MetricsService { - fn inner_new(metrics: Option) -> Self { - let process = procfs::process::Process::myself() - .expect("Procfs doesn't fail on unix. qed"); - - Self { - metrics, - system: sysinfo::System::new_with_specifics(sysinfo::RefreshKind::new().with_processes()), - pid: Some(process.pid), - } - } - - fn process_info(&mut self) -> ProcessInfo { - let pid = self.pid.clone().expect("unix always has a pid. qed"); - let mut info = self.process_info_for(&pid); - let process = procfs::process::Process::new(pid).expect("Our process exists. qed."); - info.threads = process.stat().ok().map(|s| - u64::try_from(s.num_threads).expect("There are no negative thread counts. 
qed"), - ); - info.open_fd = process.fd().ok().map(|i| - i.into_iter().fold(FdCounter::default(), |mut f, info| { - match info.target { - procfs::process::FDTarget::Path(_) => f.paths += 1, - procfs::process::FDTarget::Socket(_) => f.sockets += 1, - procfs::process::FDTarget::Net(_) => f.net += 1, - procfs::process::FDTarget::Pipe(_) => f.pipes += 1, - procfs::process::FDTarget::AnonInode(_) => f.anon_inode += 1, - procfs::process::FDTarget::MemFD(_) => f.mem += 1, - procfs::process::FDTarget::Other(_,_) => f.other += 1, - }; - f - }) - ); - info - } } -#[cfg(all(any(unix, windows), not(target_os = "android"), not(target_os = "ios"), not(target_os = "linux")))] impl MetricsService { - fn inner_new(metrics: Option) -> Self { - Self { - metrics, - system: sysinfo::System::new_with_specifics(sysinfo::RefreshKind::new().with_processes()), - pid: sysinfo::get_current_pid().ok(), - } - } - - fn process_info(&mut self) -> ProcessInfo { - self.pid.map(|pid| self.process_info_for(&pid)).unwrap_or_default() - } -} - - -#[cfg(not(all(any(unix, windows), not(target_os = "android"), not(target_os = "ios"))))] -impl MetricsService { - fn inner_new(metrics: Option) -> Self { - Self { - metrics, - pid: None, - } - } - - fn process_info(&mut self) -> ProcessInfo { - ProcessInfo::default() + pub fn new() -> Self { + MetricsService { metrics: None } } -} - -impl MetricsService { - pub fn with_prometheus(registry: &Registry, config: &Configuration) - -> Result - { + pub fn with_prometheus( + registry: &Registry, + config: &Configuration, + ) -> Result { let role_bits = match config.role { Role::Full => 1u64, Role::Light => 2u64, @@ -271,57 +123,13 @@ impl MetricsService { Role::Authority { .. } => 4u64, }; - PrometheusMetrics::setup(registry, &config.network.node_name, &config.impl_version, role_bits).map(|p| { - Self::inner_new(Some(p)) - }) - } - - pub fn new() -> Self { - Self::inner_new(None) - } - - #[cfg(all(any(unix, windows), not(target_os = "android"), not(target_os = "ios")))] - fn process_info_for(&mut self, pid: &sysinfo::Pid) -> ProcessInfo { - let mut info = ProcessInfo::default(); - self.system.refresh_process(*pid); - self.system.get_process(*pid).map(|prc| { - info.cpu_usage = prc.cpu_usage().into(); - info.memory = prc.memory(); - }); - info - } - - #[cfg(all(any(unix, windows), not(target_os = "android"), not(target_os = "ios")))] - fn connections_info(&self) -> Option { - self.pid.as_ref().and_then(|pid| { - let af_flags = AddressFamilyFlags::IPV4 | AddressFamilyFlags::IPV6; - let proto_flags = ProtocolFlags::TCP; - let netstat_pid = *pid as u32; - - iterate_sockets_info(af_flags, proto_flags).ok().map(|iter| - iter.filter_map(|r| - r.ok().and_then(|s| { - match s.protocol_socket_info { - ProtocolSocketInfo::Tcp(info) - if s.associated_pids.contains(&netstat_pid) => Some(info.state), - _ => None - } - }) - ).fold(ConnectionsCount::default(), |mut counter, socket_state| { - match socket_state { - TcpState::Listen => counter.listen += 1, - TcpState::Established => counter.established += 1, - TcpState::Closed => counter.closed += 1, - TcpState::SynSent | TcpState::SynReceived => counter.starting += 1, - TcpState::FinWait1 | TcpState::FinWait2 | TcpState::CloseWait - | TcpState::Closing | TcpState::LastAck => counter.closing += 1, - _ => counter.other += 1 - } - - counter - }) - ) - }) + PrometheusMetrics::setup( + registry, + &config.network.node_name, + &config.impl_version, + role_bits, + ) + .map(|p| MetricsService { metrics: Some(p) }) } pub fn tick( @@ -330,16 +138,15 @@ impl 
MetricsService { txpool_status: &PoolStatus, net_status: &NetworkStatus, ) { - let best_number = info.chain.best_number.saturated_into::(); let best_hash = info.chain.best_hash; let num_peers = net_status.num_connected_peers; let finalized_number: u64 = info.chain.finalized_number.saturated_into::(); let bandwidth_download = net_status.average_download_per_sec; let bandwidth_upload = net_status.average_upload_per_sec; - let best_seen_block = net_status.best_seen_block + let best_seen_block = net_status + .best_seen_block .map(|num: NumberFor| num.unique_saturated_into() as u64); - let process_info = self.process_info(); telemetry!( SUBSTRATE_INFO; @@ -348,8 +155,6 @@ impl MetricsService { "height" => best_number, "best" => ?best_hash, "txcount" => txpool_status.ready, - "cpu" => process_info.cpu_usage, - "memory" => process_info.memory, "finalized_height" => finalized_number, "finalized_hash" => ?info.chain.finalized_hash, "bandwidth_download" => bandwidth_download, @@ -369,34 +174,23 @@ impl MetricsService { ); if let Some(metrics) = self.metrics.as_ref() { - metrics.cpu_usage_percentage.set(process_info.cpu_usage as f64); - // `sysinfo::Process::memory` returns memory usage in KiB and not bytes. - metrics.memory_usage_bytes.set(process_info.memory * 1024); - - if let Some(threads) = process_info.threads { - metrics.threads.set(threads); - } - - if let Some(fd_info) = process_info.open_fd { - metrics.open_files.with_label_values(&["paths"]).set(fd_info.paths); - metrics.open_files.with_label_values(&["mem"]).set(fd_info.mem); - metrics.open_files.with_label_values(&["sockets"]).set(fd_info.sockets); - metrics.open_files.with_label_values(&["net"]).set(fd_info.net); - metrics.open_files.with_label_values(&["pipe"]).set(fd_info.pipes); - metrics.open_files.with_label_values(&["anon_inode"]).set(fd_info.anon_inode); - metrics.open_files.with_label_values(&["other"]).set(fd_info.other); - } - - - metrics.network_per_sec_bytes.with_label_values(&["download"]).set( - net_status.average_download_per_sec, - ); - metrics.network_per_sec_bytes.with_label_values(&["upload"]).set( - net_status.average_upload_per_sec, - ); + metrics + .network_per_sec_bytes + .with_label_values(&["download"]) + .set(net_status.average_download_per_sec); + metrics + .network_per_sec_bytes + .with_label_values(&["upload"]) + .set(net_status.average_upload_per_sec); + metrics + .block_height + .with_label_values(&["finalized"]) + .set(finalized_number); + metrics + .block_height + .with_label_values(&["best"]) + .set(best_number); - metrics.block_height.with_label_values(&["finalized"]).set(finalized_number); - metrics.block_height.with_label_values(&["best"]).set(best_number); if let Ok(leaves) = u64::try_from(info.chain.number_leaves) { metrics.number_leaves.set(leaves); } @@ -421,23 +215,6 @@ impl MetricsService { info.memory.state_db.pinned.as_bytes() as u64, ); } - - #[cfg(all(any(unix, windows), not(target_os = "android"), not(target_os = "ios")))] - { - let load = self.system.get_load_average(); - metrics.load_avg.with_label_values(&["1min"]).set(load.one); - metrics.load_avg.with_label_values(&["5min"]).set(load.five); - metrics.load_avg.with_label_values(&["15min"]).set(load.fifteen); - - if let Some(conns) = self.connections_info() { - metrics.netstat.with_label_values(&["listen"]).set(conns.listen); - metrics.netstat.with_label_values(&["established"]).set(conns.established); - metrics.netstat.with_label_values(&["starting"]).set(conns.starting); - 
metrics.netstat.with_label_values(&["closing"]).set(conns.closing); - metrics.netstat.with_label_values(&["closed"]).set(conns.closed); - metrics.netstat.with_label_values(&["other"]).set(conns.other); - } - } } } } -- GitLab From 35f3ba44819c2e0de54d01beddb24b3f0e24e8ef Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Aug 2020 18:20:09 +0200 Subject: [PATCH 057/132] Fix legacy substream fallback not working (#6826) * Fix legacy substream fallback not working * Make it nicer --- .../network/src/protocol/generic_proto/handler/group.rs | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs index 2826f7a19c8..dfa06693698 100644 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -600,15 +600,10 @@ impl ProtocolsHandler for NotifsHandler { message } => { for (handler, _) in &mut self.out_handlers { - if handler.protocol_name() != &protocol_name[..] { - continue; - } - - if handler.is_open() { + if handler.protocol_name() == &protocol_name[..] && handler.is_open() { handler.send_or_discard(message); + continue 'poll_notifs_sink; } - - continue 'poll_notifs_sink; } self.legacy.inject_event(LegacyProtoHandlerIn::SendCustomMessage { -- GitLab From 60d67dcf02f09001ab99a3a0511d5bf906d71146 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Thu, 6 Aug 2020 10:30:29 +0100 Subject: [PATCH 058/132] grandpa: never overwrite current rounds voter state (#6823) * grandpa: never overwrite current rounds voter state * grandpa: add test for voter state overwrite --- client/finality-grandpa/Cargo.toml | 2 +- client/finality-grandpa/src/environment.rs | 7 +- client/finality-grandpa/src/tests.rs | 202 ++++++++++++++------- 3 files changed, 148 insertions(+), 63 deletions(-) diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index 7b2e58b8be9..7cd3548a762 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -21,7 +21,6 @@ futures-timer = "3.0.1" log = "0.4.8" parking_lot = "0.10.0" rand = "0.7.2" -assert_matches = "1.3.0" parity-scale-codec = { version = "1.3.4", features = ["derive"] } sp-application-crypto = { version = "2.0.0-rc5", path = "../../primitives/application-crypto" } sp-arithmetic = { version = "2.0.0-rc5", path = "../../primitives/arithmetic" } @@ -47,6 +46,7 @@ finality-grandpa = { version = "0.12.3", features = ["derive-codec"] } pin-project = "0.4.6" [dev-dependencies] +assert_matches = "1.3.0" finality-grandpa = { version = "0.12.3", features = ["derive-codec", "test-helpers"] } sc-network = { version = "0.8.0-rc5", path = "../network" } sc-network-test = { version = "0.8.0-rc5", path = "../network/test" } diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index 0cfab13a6fa..ca47e5e2cc4 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -930,7 +930,12 @@ where // remove the round from live rounds and start tracking the next round let mut current_rounds = current_rounds.clone(); current_rounds.remove(&round); - current_rounds.insert(round + 1, HasVoted::No); + + // NOTE: this condition should always hold as GRANDPA rounds are always + // started in increasing order, still it's better to play it safe. 
+ if !current_rounds.contains_key(&(round + 1)) { + current_rounds.insert(round + 1, HasVoted::No); + } let set_state = VoterSetState::::Live { completed_rounds, diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index e2b9671f04d..e2cdd7653a6 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -19,10 +19,11 @@ //! Tests and test helpers for GRANDPA. use super::*; +use assert_matches::assert_matches; use environment::HasVoted; use sc_network_test::{ - Block, Hash, TestNetFactory, BlockImportAdapter, Peer, - PeersClient, PassThroughVerifier, PeersFullClient, + Block, BlockImportAdapter, Hash, PassThroughVerifier, Peer, PeersClient, PeersFullClient, + TestClient, TestNetFactory, }; use sc_network::config::{ProtocolConfig, BoxFinalityProofRequestBuilder}; use parking_lot::Mutex; @@ -53,16 +54,9 @@ use consensus_changes::ConsensusChanges; use sc_block_builder::BlockBuilderProvider; use sc_consensus::LongestChain; -type PeerData = - Mutex< - Option< - LinkHalf< - Block, - PeersFullClient, - LongestChain - > - > - >; +type TestLinkHalf = + LinkHalf>; +type PeerData = Mutex>; type GrandpaPeer = Peer; struct GrandpaTestNet { @@ -1519,10 +1513,67 @@ fn voter_catches_up_to_latest_round_when_behind() { ); } +type TestEnvironment = Environment< + substrate_test_runtime_client::Backend, + Block, + TestClient, + N, + LongestChain, + VR, +>; + +fn test_environment( + link: &TestLinkHalf, + keystore: Option, + network_service: N, + voting_rule: VR, +) -> TestEnvironment +where + N: NetworkT, + VR: VotingRule, +{ + let PersistentData { + ref authority_set, + ref consensus_changes, + ref set_state, + .. + } = link.persistent_data; + + let config = Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + keystore, + name: None, + is_authority: true, + observer_enabled: true, + }; + + let network = NetworkBridge::new( + network_service.clone(), + config.clone(), + set_state.clone(), + None, + ); + + Environment { + authority_set: authority_set.clone(), + config: config.clone(), + consensus_changes: consensus_changes.clone(), + client: link.client.clone(), + select_chain: link.select_chain.clone(), + set_id: authority_set.set_id(), + voter_set_state: set_state.clone(), + voters: Arc::new(authority_set.current_authorities()), + network, + voting_rule, + metrics: None, + _phantom: PhantomData, + } +} + #[test] fn grandpa_environment_respects_voting_rules() { use finality_grandpa::Chain; - use sc_network_test::TestClient; let peers = &[Ed25519Keyring::Alice]; let voters = make_ids(peers); @@ -1532,63 +1583,28 @@ fn grandpa_environment_respects_voting_rules() { let network_service = peer.network_service().clone(); let link = peer.data.lock().take().unwrap(); - // create a voter environment with a given voting rule - let environment = |voting_rule: Box>| { - let PersistentData { - ref authority_set, - ref consensus_changes, - ref set_state, - .. 
- } = link.persistent_data; - - let config = Config { - gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, - keystore: None, - name: None, - is_authority: true, - observer_enabled: true, - }; - - let network = NetworkBridge::new( - network_service.clone(), - config.clone(), - set_state.clone(), - None, - ); - - Environment { - authority_set: authority_set.clone(), - config: config.clone(), - consensus_changes: consensus_changes.clone(), - client: link.client.clone(), - select_chain: link.select_chain.clone(), - set_id: authority_set.set_id(), - voter_set_state: set_state.clone(), - voters: Arc::new(authority_set.current_authorities()), - network, - voting_rule, - metrics: None, - _phantom: PhantomData, - } - }; - // add 21 blocks peer.push_blocks(21, false); // create an environment with no voting rule restrictions - let unrestricted_env = environment(Box::new(())); + let unrestricted_env = test_environment(&link, None, network_service.clone(), ()); // another with 3/4 unfinalized chain voting rule restriction - let three_quarters_env = environment(Box::new( - voting_rule::ThreeQuartersOfTheUnfinalizedChain - )); + let three_quarters_env = test_environment( + &link, + None, + network_service.clone(), + voting_rule::ThreeQuartersOfTheUnfinalizedChain, + ); // and another restricted with the default voting rules: i.e. 3/4 rule and // always below best block - let default_env = environment(Box::new( - VotingRulesBuilder::default().build() - )); + let default_env = test_environment( + &link, + None, + network_service.clone(), + VotingRulesBuilder::default().build(), + ); // the unrestricted environment should just return the best block assert_eq!( @@ -1648,6 +1664,70 @@ fn grandpa_environment_respects_voting_rules() { ); } +#[test] +fn grandpa_environment_never_overwrites_round_voter_state() { + use finality_grandpa::voter::Environment; + + let peers = &[Ed25519Keyring::Alice]; + let voters = make_ids(peers); + + let mut net = GrandpaTestNet::new(TestApi::new(voters), 1); + let peer = net.peer(0); + let network_service = peer.network_service().clone(); + let link = peer.data.lock().take().unwrap(); + + let (keystore, _keystore_path) = create_keystore(peers[0]); + let environment = test_environment(&link, Some(keystore), network_service.clone(), ()); + + let round_state = || finality_grandpa::round::State::genesis(Default::default()); + let base = || Default::default(); + let historical_votes = || finality_grandpa::HistoricalVotes::new(); + + let get_current_round = |n| { + let current_rounds = environment + .voter_set_state + .read() + .with_current_round(n) + .map(|(_, current_rounds)| current_rounds.clone()) + .ok()?; + + Some(current_rounds.get(&n).unwrap().clone()) + }; + + // round 2 should not be tracked + assert_eq!(get_current_round(2), None); + + // after completing round 1 we should start tracking round 2 + environment + .completed(1, round_state(), base(), &historical_votes()) + .unwrap(); + + assert_eq!(get_current_round(2).unwrap(), HasVoted::No); + + let info = peer.client().info(); + + let prevote = finality_grandpa::Prevote { + target_hash: info.best_hash, + target_number: info.best_number, + }; + + // we prevote for round 2 which should lead to us updating the voter state + environment.prevoted(2, prevote.clone()).unwrap(); + + let has_voted = get_current_round(2).unwrap(); + + assert_matches!(has_voted, HasVoted::Yes(_, _)); + assert_eq!(*has_voted.prevote().unwrap(), prevote); + + // if we report round 1 as completed again we should not overwrite the + // 
voter state for round 2 + environment + .completed(1, round_state(), base(), &historical_votes()) + .unwrap(); + + assert_matches!(get_current_round(2).unwrap(), HasVoted::Yes(_, _)); +} + #[test] fn imports_justification_for_regular_blocks_on_import() { // NOTE: this is a regression test since initially we would only import -- GitLab From bcf9dc57ac9b5f29f5668d596ca525547eadba85 Mon Sep 17 00:00:00 2001 From: Hamza Tokuchi Date: Thu, 6 Aug 2020 12:31:03 +0200 Subject: [PATCH 059/132] Add RPC Builder to Substrate Node Template (#6808) * Pulled RPC from node and populated the node-template's RPC builder with one example implementation * surpress build errror * dead_code * Fixed module usage, removed copyright, removed rpc builder for light client + some comments * added a comment for rpc extension * Update bin/node-template/node/src/rpc.rs Co-authored-by: Shawn Tabrizi * Update rpc.rs * fix spacing * more space to tabs * more space to tabs * Documenation nitpick * Documentation nitpick * Documentation nitpick * Documentation nitpick * Documentation nitpick * pre-format * Updated transaction payment API implemented for node template * fix space and commented code * fix long line Co-authored-by: Shawn Tabrizi Co-authored-by: Dan Forbes --- Cargo.lock | 10 +++++ bin/node-template/node/Cargo.toml | 12 ++++- bin/node-template/node/src/lib.rs | 1 + bin/node-template/node/src/main.rs | 1 + bin/node-template/node/src/rpc.rs | 64 +++++++++++++++++++++++++++ bin/node-template/node/src/service.rs | 35 ++++++++++----- bin/node-template/runtime/Cargo.toml | 7 ++- bin/node-template/runtime/src/lib.rs | 15 +++++++ 8 files changed, 133 insertions(+), 12 deletions(-) create mode 100644 bin/node-template/node/src/rpc.rs diff --git a/Cargo.lock b/Cargo.lock index fb3cd395cf2..20ed87326b3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3789,7 +3789,9 @@ dependencies = [ name = "node-template" version = "2.0.0-rc5" dependencies = [ + "jsonrpc-core", "node-template-runtime", + "pallet-transaction-payment-rpc", "sc-basic-authorship", "sc-cli", "sc-client-api", @@ -3797,8 +3799,13 @@ dependencies = [ "sc-consensus-aura", "sc-executor", "sc-finality-grandpa", + "sc-rpc", + "sc-rpc-api", "sc-service", "sc-transaction-pool", + "sp-api", + "sp-block-builder", + "sp-blockchain", "sp-consensus", "sp-consensus-aura", "sp-core", @@ -3808,6 +3815,7 @@ dependencies = [ "sp-transaction-pool", "structopt", "substrate-build-script-utils", + "substrate-frame-rpc-system", ] [[package]] @@ -3817,6 +3825,7 @@ dependencies = [ "frame-executive", "frame-support", "frame-system", + "frame-system-rpc-runtime-api", "pallet-aura", "pallet-balances", "pallet-grandpa", @@ -3825,6 +3834,7 @@ dependencies = [ "pallet-template", "pallet-timestamp", "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", "parity-scale-codec", "serde", "sp-api", diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index 82c2d7ad43b..0c988ebd1a2 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -33,7 +33,17 @@ sc-finality-grandpa = { version = "0.8.0-rc5", path = "../../../client/finality- sp-finality-grandpa = { version = "2.0.0-rc5", path = "../../../primitives/finality-grandpa" } sc-client-api = { version = "2.0.0-rc5", path = "../../../client/api" } sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sc-basic-authorship = { path = "../../../client/basic-authorship", version = "0.8.0-rc5"} + +# These dependencies are used for the node template's 
RPCs +jsonrpc-core = "14.0.3" +sc-rpc = { version = "2.0.0-rc5", path = "../../../client/rpc" } +sp-api = { version = "2.0.0-rc5", path = "../../../primitives/api" } +sc-rpc-api = { version = "0.8.0-rc5", path = "../../../client/rpc-api" } +sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } +sp-block-builder = { version = "2.0.0-rc5", path = "../../../primitives/block-builder" } +sc-basic-authorship = { version = "0.8.0-rc5", path = "../../../client/basic-authorship" } +substrate-frame-rpc-system = { version = "2.0.0-rc5", path = "../../../utils/frame/rpc/system" } +pallet-transaction-payment-rpc = { version = "2.0.0-rc5", path = "../../../frame/transaction-payment/rpc/" } node-template-runtime = { version = "2.0.0-rc5", path = "../runtime" } diff --git a/bin/node-template/node/src/lib.rs b/bin/node-template/node/src/lib.rs index 38e43372ca3..777c4f0a771 100644 --- a/bin/node-template/node/src/lib.rs +++ b/bin/node-template/node/src/lib.rs @@ -1,2 +1,3 @@ pub mod chain_spec; pub mod service; +pub mod rpc; diff --git a/bin/node-template/node/src/main.rs b/bin/node-template/node/src/main.rs index 369e6932a03..4449d28b9fa 100644 --- a/bin/node-template/node/src/main.rs +++ b/bin/node-template/node/src/main.rs @@ -6,6 +6,7 @@ mod chain_spec; mod service; mod cli; mod command; +mod rpc; fn main() -> sc_cli::Result<()> { command::run() diff --git a/bin/node-template/node/src/rpc.rs b/bin/node-template/node/src/rpc.rs new file mode 100644 index 00000000000..c1f0e0a8457 --- /dev/null +++ b/bin/node-template/node/src/rpc.rs @@ -0,0 +1,64 @@ +//! A collection of node-specific RPC methods. +//! Substrate provides the `sc-rpc` crate, which defines the core RPC layer +//! used by Substrate nodes. This file extends those RPC definitions with +//! capabilities that are specific to this project's runtime configuration. + +#![warn(missing_docs)] + +use std::sync::Arc; + +use node_template_runtime::{opaque::Block, AccountId, Balance, Index}; +use sp_api::ProvideRuntimeApi; +use sp_blockchain::{Error as BlockChainError, HeaderMetadata, HeaderBackend}; +use sp_block_builder::BlockBuilder; +pub use sc_rpc_api::DenyUnsafe; +use sp_transaction_pool::TransactionPool; + + +/// Full client dependencies. +pub struct FullDeps { + /// The client instance to use. + pub client: Arc, + /// Transaction pool instance. + pub pool: Arc
<P>
, + /// Whether to deny unsafe calls + pub deny_unsafe: DenyUnsafe, +} + +/// Instantiate all full RPC extensions. +pub fn create_full( + deps: FullDeps, +) -> jsonrpc_core::IoHandler where + C: ProvideRuntimeApi, + C: HeaderBackend + HeaderMetadata + 'static, + C: Send + Sync + 'static, + C::Api: substrate_frame_rpc_system::AccountNonceApi, + C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, + C::Api: BlockBuilder, + P: TransactionPool + 'static, +{ + use substrate_frame_rpc_system::{FullSystem, SystemApi}; + use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApi}; + + let mut io = jsonrpc_core::IoHandler::default(); + let FullDeps { + client, + pool, + deny_unsafe, + } = deps; + + io.extend_with( + SystemApi::to_delegate(FullSystem::new(client.clone(), pool, deny_unsafe)) + ); + + io.extend_with( + TransactionPaymentApi::to_delegate(TransactionPayment::new(client.clone())) + ); + + // Extend this RPC with a custom API by using the following syntax. + // `YourRpcStruct` should have a reference to a client, which is needed + // to call into the runtime. + // `io.extend_with(YourRpcTrait::to_delegate(YourRpcStruct::new(ReferenceToClient, ...)));` + + io +} diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 0de17103b05..339aafbefcd 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -73,7 +73,7 @@ pub fn new_partial(config: &Configuration) -> Result Result { +pub fn new_full(config: Configuration) -> Result { let sc_service::PartialComponents { client, backend, mut task_manager, import_queue, keystore, select_chain, transaction_pool, inherent_data_providers, @@ -93,7 +93,7 @@ pub fn new_full(config: Configuration) -> Result { on_demand: None, block_announce_validator_builder: None, finality_proof_request_builder: None, - finality_proof_provider: Some(finality_proof_provider.clone()), + finality_proof_provider: Some(finality_proof_provider.clone()), })?; if config.offchain_worker.enabled { @@ -109,6 +109,21 @@ pub fn new_full(config: Configuration) -> Result { let prometheus_registry = config.prometheus_registry().cloned(); let telemetry_connection_sinks = sc_service::TelemetryConnectionSinks::default(); + let rpc_extensions_builder = { + let client = client.clone(); + let pool = transaction_pool.clone(); + + Box::new(move |deny_unsafe| { + let deps = crate::rpc::FullDeps { + client: client.clone(), + pool: pool.clone(), + deny_unsafe, + }; + + crate::rpc::create_full(deps) + }) + }; + sc_service::spawn_tasks(sc_service::SpawnTasksParams { network: network.clone(), client: client.clone(), @@ -116,7 +131,7 @@ pub fn new_full(config: Configuration) -> Result { task_manager: &mut task_manager, transaction_pool: transaction_pool.clone(), telemetry_connection_sinks: telemetry_connection_sinks.clone(), - rpc_extensions_builder: Box::new(|_| ()), + rpc_extensions_builder: rpc_extensions_builder, on_demand: None, remote_blockchain: None, backend, network_status_sinks, system_rpc_tx, config, @@ -256,7 +271,7 @@ pub fn new_light(config: Configuration) -> Result { &config, backend.clone(), task_manager.spawn_handle(), client.clone(), network.clone(), ); } - + sc_service::spawn_tasks(sc_service::SpawnTasksParams { remote_blockchain: Some(backend.remote_blockchain()), transaction_pool, @@ -264,12 +279,12 @@ pub fn new_light(config: Configuration) -> Result { on_demand: Some(on_demand), rpc_extensions_builder: Box::new(|_| ()), telemetry_connection_sinks: 
sc_service::TelemetryConnectionSinks::default(), - config, - client, - keystore, - backend, - network, - network_status_sinks, + config, + client, + keystore, + backend, + network, + network_status_sinks, system_rpc_tx, })?; diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index 2bf31825428..011916880bf 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -12,7 +12,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } - aura = { version = "2.0.0-rc5", default-features = false, package = "pallet-aura", path = "../../../frame/aura" } balances = { version = "2.0.0-rc5", default-features = false, package = "pallet-balances", path = "../../../frame/balances" } frame-support = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/support" } @@ -36,6 +35,10 @@ sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../../pri sp-transaction-pool = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/transaction-pool" } sp-version = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/version" } +# Used for the node template's RPCs +frame-system-rpc-runtime-api = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } +pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } + template = { version = "2.0.0-rc5", default-features = false, path = "../pallets/template", package = "pallet-template" } [build-dependencies] @@ -67,5 +70,7 @@ std = [ "system/std", "timestamp/std", "transaction-payment/std", + "frame-system-rpc-runtime-api/std", + "pallet-transaction-payment-rpc-runtime-api/std", "template/std", ] diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index c46d515a3ef..c7cd80868bf 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -417,4 +417,19 @@ impl_runtime_apis! 
{ None } } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Index { + System::account_nonce(account) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { + fn query_info( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + } } -- GitLab From 52494a465ab68904343c0525e86261ebcb166808 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 6 Aug 2020 14:46:34 +0200 Subject: [PATCH 060/132] Delay network startup to after complete initialization (#6833) * Delay network startup to after complete initialization * Update client/service/src/builder.rs Co-authored-by: Ashley Co-authored-by: Ashley --- bin/node-template/node/src/service.rs | 7 ++-- bin/node/cli/src/service.rs | 18 +++++----- client/service/src/builder.rs | 50 ++++++++++++++++++++++++--- client/service/src/lib.rs | 2 +- 4 files changed, 62 insertions(+), 15 deletions(-) diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 339aafbefcd..021d0ac8f7d 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -83,7 +83,7 @@ pub fn new_full(config: Configuration) -> Result { let finality_proof_provider = GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); - let (network, network_status_sinks, system_rpc_tx) = + let (network, network_status_sinks, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, client: client.clone(), @@ -215,6 +215,7 @@ pub fn new_full(config: Configuration) -> Result { )?; } + network_starter.start_network(); Ok(task_manager) } @@ -253,7 +254,7 @@ pub fn new_light(config: Configuration) -> Result { let finality_proof_provider = GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); - let (network, network_status_sinks, system_rpc_tx) = + let (network, network_status_sinks, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, client: client.clone(), @@ -288,5 +289,7 @@ pub fn new_light(config: Configuration) -> Result { system_rpc_tx, })?; + network_starter.start_network(); + Ok(task_manager) } diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index a47869ed832..fdfa6816296 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -164,8 +164,8 @@ pub fn new_full_base( let finality_proof_provider = GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); - - let (network, network_status_sinks, system_rpc_tx) = + + let (network, network_status_sinks, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, client: client.clone(), @@ -206,7 +206,7 @@ pub fn new_full_base( network_status_sinks, system_rpc_tx, })?; - + let (block_import, grandpa_link, babe_link) = import_setup; let shared_voter_state = rpc_setup; @@ -322,6 +322,7 @@ pub fn new_full_base( )?; } + network_starter.start_network(); Ok((task_manager, inherent_data_providers, client, network, transaction_pool)) } @@ -383,7 +384,7 @@ pub fn new_light_base(config: Configuration) -> Result<( let finality_proof_provider = GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); - let (network, network_status_sinks, system_rpc_tx) = + let (network, network_status_sinks, 
system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, client: client.clone(), @@ -395,7 +396,8 @@ pub fn new_light_base(config: Configuration) -> Result<( finality_proof_request_builder: Some(finality_proof_request_builder), finality_proof_provider: Some(finality_proof_provider), })?; - + network_starter.start_network(); + if config.offchain_worker.enabled { sc_service::build_offchain_workers( &config, backend.clone(), task_manager.spawn_handle(), client.clone(), network.clone(), @@ -412,7 +414,7 @@ pub fn new_light_base(config: Configuration) -> Result<( let rpc_extensions = node_rpc::create_light(light_deps); let rpc_handlers = - sc_service::spawn_tasks(sc_service::SpawnTasksParams { + sc_service::spawn_tasks(sc_service::SpawnTasksParams { on_demand: Some(on_demand), remote_blockchain: Some(backend.remote_blockchain()), rpc_extensions_builder: Box::new(sc_service::NoopRpcExtensionBuilder(rpc_extensions)), @@ -423,7 +425,7 @@ pub fn new_light_base(config: Configuration) -> Result<( telemetry_connection_sinks: sc_service::TelemetryConnectionSinks::default(), task_manager: &mut task_manager, })?; - + Ok((task_manager, rpc_handlers, client, network, transaction_pool)) } @@ -498,7 +500,7 @@ mod tests { setup_handles = Some((block_import.clone(), babe_link.clone())); } )?; - + let node = sc_service_test::TestNetComponents::new( keep_alive, client, network, transaction_pool ); diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 4c7c1f57ee0..fc9303498d6 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -33,7 +33,7 @@ use sp_consensus::{ block_validation::{BlockAnnounceValidator, DefaultBlockAnnounceValidator, Chain}, import_queue::ImportQueue, }; -use futures::{FutureExt, StreamExt, future::ready}; +use futures::{FutureExt, StreamExt, future::ready, channel::oneshot}; use jsonrpc_pubsub::manager::SubscriptionManager; use sc_keystore::Store as Keystore; use log::{info, warn, error}; @@ -668,7 +668,7 @@ fn build_telemetry( let startup_time = SystemTime::UNIX_EPOCH.elapsed() .map(|dur| dur.as_millis()) .unwrap_or(0); - + spawn_handle.spawn( "telemetry-worker", telemetry.clone() @@ -822,6 +822,7 @@ pub fn build_network( Arc::Hash>>, NetworkStatusSinks, TracingUnboundedSender>, + NetworkStarter, ), Error > @@ -900,6 +901,22 @@ pub fn build_network( config.announce_block, ); + // TODO: Normally, one is supposed to pass a list of notifications protocols supported by the + // node through the `NetworkConfiguration` struct. But because this function doesn't know in + // advance which components, such as GrandPa or Polkadot, will be plugged on top of the + // service, it is unfortunately not possible to do so without some deep refactoring. To bypass + // this problem, the `NetworkService` provides a `register_notifications_protocol` method that + // can be called even after the network has been initialized. However, we want to avoid the + // situation where `register_notifications_protocol` is called *after* the network actually + // connects to other peers. For this reason, we delay the process of the network future until + // the user calls `NetworkStarter::start_network`. + // + // This entire hack should eventually be removed in favour of passing the list of protocols + // through the configuration. 
+ // + // See also https://github.com/paritytech/substrate/issues/6827 + let (network_start_tx, network_start_rx) = oneshot::channel(); + // The network worker is responsible for gathering all network messages and processing // them. This is quite a heavy task, and at the time of the writing of this comment it // frequently happens that this future takes several seconds or in some situations @@ -907,7 +924,32 @@ pub fn build_network( // issue, and ideally we would like to fix the network future to take as little time as // possible, but we also take the extra harm-prevention measure to execute the networking // future using `spawn_blocking`. - spawn_handle.spawn_blocking("network-worker", future); + spawn_handle.spawn_blocking("network-worker", async move { + if network_start_rx.await.is_err() { + debug_assert!(false); + log::warn!( + "The NetworkStart returned as part of `build_network` has been silently dropped" + ); + // This `return` might seem unnecessary, but we don't want to make it look like + // everything is working as normal even though the user is clearly misusing the API. + return; + } + + future.await + }); + + Ok((network, network_status_sinks, system_rpc_tx, NetworkStarter(network_start_tx))) +} - Ok((network, network_status_sinks, system_rpc_tx)) +/// Object used to start the network. +#[must_use] +pub struct NetworkStarter(oneshot::Sender<()>); + +impl NetworkStarter { + /// Start the network. Call this after all sub-components have been initialized. + /// + /// > **Note**: If you don't call this function, the networking will not work. + pub fn start_network(self) { + let _ = self.0.send(()); + } } diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 40826a70d45..415a5de4f93 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -53,7 +53,7 @@ use sp_utils::{status_sinks, mpsc::{tracing_unbounded, TracingUnboundedReceiver, pub use self::error::Error; pub use self::builder::{ new_full_client, new_client, new_full_parts, new_light_parts, - spawn_tasks, build_network, BuildNetworkParams, build_offchain_workers, + spawn_tasks, build_network, BuildNetworkParams, NetworkStarter, build_offchain_workers, SpawnTasksParams, TFullClient, TLightClient, TFullBackend, TLightBackend, TLightBackendWithHash, TLightClientWithBackend, TFullCallExecutor, TLightCallExecutor, RpcExtensionBuilder, NoopRpcExtensionBuilder, -- GitLab From e2589a862c69ec487df6c1954f208205c9e6117c Mon Sep 17 00:00:00 2001 From: Joshy Orndorff Date: Thu, 6 Aug 2020 12:18:35 -0400 Subject: [PATCH 061/132] De-alias frame_system in node template runtime (#6829) * de-alias frame_system in node template * Fix line length * Fix chainspec --- bin/node-template/node/src/chain_spec.rs | 2 +- bin/node-template/runtime/Cargo.toml | 4 ++-- bin/node-template/runtime/src/lib.rs | 24 +++++++++++++++--------- 3 files changed, 18 insertions(+), 12 deletions(-) diff --git a/bin/node-template/node/src/chain_spec.rs b/bin/node-template/node/src/chain_spec.rs index e49457b0b9a..bfa1b8b8ce6 100644 --- a/bin/node-template/node/src/chain_spec.rs +++ b/bin/node-template/node/src/chain_spec.rs @@ -134,7 +134,7 @@ fn testnet_genesis( _enable_println: bool, ) -> GenesisConfig { GenesisConfig { - system: Some(SystemConfig { + frame_system: Some(SystemConfig { // Add Wasm runtime to storage. 
code: wasm_binary.to_vec(), changes_trie_config: Default::default(), diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index 011916880bf..7523ad776dd 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -18,7 +18,7 @@ frame-support = { version = "2.0.0-rc5", default-features = false, path = "../.. grandpa = { version = "2.0.0-rc5", default-features = false, package = "pallet-grandpa", path = "../../../frame/grandpa" } randomness-collective-flip = { version = "2.0.0-rc5", default-features = false, package = "pallet-randomness-collective-flip", path = "../../../frame/randomness-collective-flip" } sudo = { version = "2.0.0-rc5", default-features = false, package = "pallet-sudo", path = "../../../frame/sudo" } -system = { version = "2.0.0-rc5", default-features = false, package = "frame-system", path = "../../../frame/system" } +frame-system = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/system" } timestamp = { version = "2.0.0-rc5", default-features = false, package = "pallet-timestamp", path = "../../../frame/timestamp" } transaction-payment = { version = "2.0.0-rc5", default-features = false, package = "pallet-transaction-payment", path = "../../../frame/transaction-payment" } frame-executive = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/executive" } @@ -67,7 +67,7 @@ std = [ "sp-transaction-pool/std", "sp-version/std", "sudo/std", - "system/std", + "frame-system/std", "timestamp/std", "transaction-payment/std", "frame-system-rpc-runtime-api/std", diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index c7cd80868bf..eebbfe2c5de 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -133,7 +133,7 @@ parameter_types! { // Configure FRAME pallets to include in runtime. -impl system::Trait for Runtime { +impl frame_system::Trait for Runtime { /// The basic call filter to use in dispatchable. type BaseCallFilter = (); /// The identifier used to distinguish between accounts. @@ -269,7 +269,7 @@ construct_runtime!( NodeBlock = opaque::Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module, Call, Config, Storage, Event}, + System: frame_system::{Module, Call, Config, Storage, Event}, RandomnessCollectiveFlip: randomness_collective_flip::{Module, Call, Storage}, Timestamp: timestamp::{Module, Call, Storage, Inherent}, Aura: aura::{Module, Config, Inherent}, @@ -294,12 +294,12 @@ pub type SignedBlock = generic::SignedBlock; pub type BlockId = generic::BlockId; /// The SignedExtension to the basic transaction logic. pub type SignedExtra = ( - system::CheckSpecVersion, - system::CheckTxVersion, - system::CheckGenesis, - system::CheckEra, - system::CheckNonce, - system::CheckWeight, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, transaction_payment::ChargeTransactionPayment ); /// Unchecked extrinsic type as expected by this runtime. @@ -307,7 +307,13 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; /// Executive: handles dispatch to the various modules. -pub type Executive = frame_executive::Executive, Runtime, AllModules>; +pub type Executive = frame_executive::Executive< + Runtime, + Block, + frame_system::ChainContext, + Runtime, + AllModules, +>; impl_runtime_apis! 
{ impl sp_api::Core for Runtime { -- GitLab From bd1a16c318a5f42f1a0176f33bad61bad491c771 Mon Sep 17 00:00:00 2001 From: Cecile Tonglet Date: Thu, 6 Aug 2020 21:20:46 +0200 Subject: [PATCH 062/132] Allow task manager to have children (#6771) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Initial commit Forked at: 19c1d9028d8d6eabef41693433b56e14da025247 Parent branch: origin/master * WIP Forked at: 19c1d9028d8d6eabef41693433b56e14da025247 Parent branch: origin/master * WIP Forked at: 19c1d9028d8d6eabef41693433b56e14da025247 Parent branch: origin/master * WIP Forked at: 19c1d9028d8d6eabef41693433b56e14da025247 Parent branch: origin/master * WIP Forked at: 19c1d9028d8d6eabef41693433b56e14da025247 Parent branch: origin/master * WIP Forked at: 19c1d9028d8d6eabef41693433b56e14da025247 Parent branch: origin/master * WIP Forked at: 19c1d9028d8d6eabef41693433b56e14da025247 Parent branch: origin/master * changelog * Remove Box * Make future nicer * Revert "Make future nicer" This reverts commit 49fb8fb6f245c3ca2c384468df14b34f34616736. * Simplify * Additional check * Simplify more Co-authored-by: Bastian Köcher --- client/service/src/task_manager/mod.rs | 39 ++++++++- client/service/src/task_manager/tests.rs | 106 ++++++++++++++++++++++- docs/CHANGELOG.md | 5 ++ 3 files changed, 144 insertions(+), 6 deletions(-) diff --git a/client/service/src/task_manager/mod.rs b/client/service/src/task_manager/mod.rs index e0e8699ce1d..729b43bce94 100644 --- a/client/service/src/task_manager/mod.rs +++ b/client/service/src/task_manager/mod.rs @@ -18,7 +18,7 @@ use exit_future::Signal; use log::{debug, error}; use futures::{ Future, FutureExt, StreamExt, - future::{select, Either, BoxFuture}, + future::{select, Either, BoxFuture, join_all, try_join_all, pending}, sink::SinkExt, }; use prometheus_endpoint::{ @@ -214,8 +214,14 @@ pub struct TaskManager { essential_failed_rx: TracingUnboundedReceiver<()>, /// Things to keep alive until the task manager is dropped. keep_alive: Box, + /// A sender to a stream of background tasks. This is used for the completion future. task_notifier: TracingUnboundedSender, + /// This future will complete when all the tasks are joined and the stream is closed. completion_future: JoinFuture, + /// A list of other `TaskManager`'s to terminate and gracefully shutdown when the parent + /// terminates and gracefully shutdown. Also ends the parent `future()` if a child's essential + /// task fails. + children: Vec, } impl TaskManager { @@ -251,6 +257,7 @@ impl TaskManager { keep_alive: Box::new(()), task_notifier, completion_future, + children: Vec::new(), }) } @@ -271,12 +278,21 @@ impl TaskManager { /// Send the signal for termination, prevent new tasks to be created, await for all the existing /// tasks to be finished and drop the object. You can consider this as an async drop. + /// + /// It's always better to call and await this function before exiting the process as background + /// tasks may be running in the background. If the process exit and the background tasks are not + /// cancelled, this will lead to objects not getting dropped properly. + /// + /// This is an issue in some cases as some of our dependencies do require that we drop all the + /// objects properly otherwise it triggers a SIGABRT on exit. 
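A minimal shutdown-sequence sketch for an embedder (illustrative only, not code from this patch; it assumes a tokio `runtime` and an already-built `task_manager`, mirroring the pattern used in the tests further below):

```rust
// Run until an exit signal is received or an essential task fails ...
let _ = runtime.block_on(task_manager.future());
// ... then await all background tasks (including any child task managers)
// before the process exits, so that everything is dropped properly.
runtime.block_on(task_manager.clean_shutdown());
```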
pub fn clean_shutdown(mut self) -> Pin + Send>> { self.terminate(); + let children_shutdowns = self.children.into_iter().map(|x| x.clean_shutdown()); let keep_alive = self.keep_alive; let completion_future = self.completion_future; Box::pin(async move { + join_all(children_shutdowns).await; completion_future.await; drop(keep_alive); }) @@ -293,10 +309,17 @@ impl TaskManager { Box::pin(async move { let mut t1 = self.essential_failed_rx.next().fuse(); let mut t2 = self.on_exit.clone().fuse(); + let mut t3 = try_join_all( + self.children.iter_mut().map(|x| x.future()) + // Never end this future if there is no error because if there is no children, + // it must not stop + .chain(std::iter::once(pending().boxed())) + ).fuse(); futures::select! { _ = t1 => Err(Error::Other("Essential task failed.".into())), _ = t2 => Ok(()), + res = t3 => Err(res.map(|_| ()).expect_err("this future never ends; qed")), } }) } @@ -305,15 +328,25 @@ impl TaskManager { pub fn terminate(&mut self) { if let Some(signal) = self.signal.take() { let _ = signal.fire(); - // NOTE: task will prevent new tasks to be spawned + // NOTE: this will prevent new tasks to be spawned self.task_notifier.close_channel(); + for child in self.children.iter_mut() { + child.terminate(); + } } } - /// Set what the task manager should keep alivei + /// Set what the task manager should keep alive. pub(super) fn keep_alive(&mut self, to_keep_alive: T) { self.keep_alive = Box::new(to_keep_alive); } + + /// Register another TaskManager to terminate and gracefully shutdown when the parent + /// terminates and gracefully shutdown. Also ends the parent `future()` if a child's essential + /// task fails. (But don't end the parent if a child's normal task fails.) + pub fn add_children(&mut self, child: TaskManager) { + self.children.push(child); + } } #[derive(Clone)] diff --git a/client/service/src/task_manager/tests.rs b/client/service/src/task_manager/tests.rs index c60d15b3394..a2bd84802aa 100644 --- a/client/service/src/task_manager/tests.rs +++ b/client/service/src/task_manager/tests.rs @@ -18,7 +18,7 @@ use crate::config::TaskExecutor; use crate::task_manager::TaskManager; -use futures::future::FutureExt; +use futures::{future::FutureExt, pin_mut, select}; use parking_lot::Mutex; use std::any::Any; use std::sync::Arc; @@ -82,7 +82,7 @@ async fn run_background_task_blocking(duration: Duration, _keep_alive: impl Any) } #[test] -fn ensure_futures_are_awaited_on_shutdown() { +fn ensure_tasks_are_awaited_on_shutdown() { let mut runtime = tokio::runtime::Runtime::new().unwrap(); let handle = runtime.handle().clone(); let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); @@ -187,7 +187,7 @@ fn ensure_task_manager_future_ends_when_task_manager_terminated() { } #[test] -fn ensure_task_manager_future_ends_with_error_when_essential_task_ends() { +fn ensure_task_manager_future_ends_with_error_when_essential_task_fails() { let mut runtime = tokio::runtime::Runtime::new().unwrap(); let handle = runtime.handle().clone(); let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); @@ -208,3 +208,103 @@ fn ensure_task_manager_future_ends_with_error_when_essential_task_ends() { runtime.block_on(task_manager.clean_shutdown()); assert_eq!(drop_tester, 0); } + +#[test] +fn ensure_children_tasks_ends_when_task_manager_terminated() { + let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let handle = runtime.handle().clone(); + let task_executor: TaskExecutor = (move |future, _| 
handle.spawn(future).map(|_| ())).into(); + + let mut task_manager = TaskManager::new(task_executor.clone(), None).unwrap(); + let child_1 = TaskManager::new(task_executor.clone(), None).unwrap(); + let spawn_handle_child_1 = child_1.spawn_handle(); + let child_2 = TaskManager::new(task_executor.clone(), None).unwrap(); + let spawn_handle_child_2 = child_2.spawn_handle(); + task_manager.add_children(child_1); + task_manager.add_children(child_2); + let spawn_handle = task_manager.spawn_handle(); + let drop_tester = DropTester::new(); + spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref())); + spawn_handle.spawn("task2", run_background_task(drop_tester.new_ref())); + spawn_handle_child_1.spawn("task3", run_background_task(drop_tester.new_ref())); + spawn_handle_child_2.spawn("task4", run_background_task(drop_tester.new_ref())); + assert_eq!(drop_tester, 4); + // allow the tasks to even start + runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + assert_eq!(drop_tester, 4); + task_manager.terminate(); + runtime.block_on(task_manager.future()).expect("future has ended without error"); + runtime.block_on(task_manager.clean_shutdown()); + assert_eq!(drop_tester, 0); +} + +#[test] +fn ensure_task_manager_future_ends_with_error_when_childs_essential_task_fails() { + let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let handle = runtime.handle().clone(); + let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); + + let mut task_manager = TaskManager::new(task_executor.clone(), None).unwrap(); + let child_1 = TaskManager::new(task_executor.clone(), None).unwrap(); + let spawn_handle_child_1 = child_1.spawn_handle(); + let spawn_essential_handle_child_1 = child_1.spawn_essential_handle(); + let child_2 = TaskManager::new(task_executor.clone(), None).unwrap(); + let spawn_handle_child_2 = child_2.spawn_handle(); + task_manager.add_children(child_1); + task_manager.add_children(child_2); + let spawn_handle = task_manager.spawn_handle(); + let drop_tester = DropTester::new(); + spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref())); + spawn_handle.spawn("task2", run_background_task(drop_tester.new_ref())); + spawn_handle_child_1.spawn("task3", run_background_task(drop_tester.new_ref())); + spawn_handle_child_2.spawn("task4", run_background_task(drop_tester.new_ref())); + assert_eq!(drop_tester, 4); + // allow the tasks to even start + runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + assert_eq!(drop_tester, 4); + spawn_essential_handle_child_1.spawn("task5", async { panic!("task failed") }); + runtime.block_on(task_manager.future()).expect_err("future()'s Result must be Err"); + assert_eq!(drop_tester, 4); + runtime.block_on(task_manager.clean_shutdown()); + assert_eq!(drop_tester, 0); +} + +#[test] +fn ensure_task_manager_future_continues_when_childs_not_essential_task_fails() { + let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let handle = runtime.handle().clone(); + let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); + + let mut task_manager = TaskManager::new(task_executor.clone(), None).unwrap(); + let child_1 = TaskManager::new(task_executor.clone(), None).unwrap(); + let spawn_handle_child_1 = child_1.spawn_handle(); + let child_2 = TaskManager::new(task_executor.clone(), None).unwrap(); + let spawn_handle_child_2 = child_2.spawn_handle(); + task_manager.add_children(child_1); + 
task_manager.add_children(child_2); + let spawn_handle = task_manager.spawn_handle(); + let drop_tester = DropTester::new(); + spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref())); + spawn_handle.spawn("task2", run_background_task(drop_tester.new_ref())); + spawn_handle_child_1.spawn("task3", run_background_task(drop_tester.new_ref())); + spawn_handle_child_2.spawn("task4", run_background_task(drop_tester.new_ref())); + assert_eq!(drop_tester, 4); + // allow the tasks to even start + runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + assert_eq!(drop_tester, 4); + spawn_handle_child_1.spawn("task5", async { panic!("task failed") }); + runtime.block_on(async { + let t1 = task_manager.future().fuse(); + let t2 = tokio::time::delay_for(Duration::from_secs(3)).fuse(); + + pin_mut!(t1, t2); + + select! { + res = t1 => panic!("task should not have stopped: {:?}", res), + _ = t2 => {}, + } + }); + assert_eq!(drop_tester, 4); + runtime.block_on(task_manager.clean_shutdown()); + assert_eq!(drop_tester, 0); +} diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 333733d1aee..719059477e1 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -6,6 +6,11 @@ The format is based on [Keep a Changelog]. ## Unreleased +Client +------ + +* Child nodes can be handled by adding a child `TaskManager` to the parent's `TaskManager` (#6771) + ## 2.0.0-rc4 -> 2.0.0-rc5 – River Dolphin Runtime -- GitLab From f17569fa4c7fa930eab1dd07a359fde7e3019093 Mon Sep 17 00:00:00 2001 From: Joshy Orndorff Date: Thu, 6 Aug 2020 15:23:31 -0400 Subject: [PATCH 063/132] De-alias pallets in node template runtime (#6836) * dealias pallets * Restore accidentally deleted code blocks --- bin/node-template/node/src/chain_spec.rs | 8 ++--- bin/node-template/runtime/Cargo.toml | 31 ++++++++--------- bin/node-template/runtime/src/lib.rs | 42 ++++++++++++------------ 3 files changed, 41 insertions(+), 40 deletions(-) diff --git a/bin/node-template/node/src/chain_spec.rs b/bin/node-template/node/src/chain_spec.rs index bfa1b8b8ce6..41f582fb64a 100644 --- a/bin/node-template/node/src/chain_spec.rs +++ b/bin/node-template/node/src/chain_spec.rs @@ -139,17 +139,17 @@ fn testnet_genesis( code: wasm_binary.to_vec(), changes_trie_config: Default::default(), }), - balances: Some(BalancesConfig { + pallet_balances: Some(BalancesConfig { // Configure endowed accounts with initial balance of 1 << 60. balances: endowed_accounts.iter().cloned().map(|k|(k, 1 << 60)).collect(), }), - aura: Some(AuraConfig { + pallet_aura: Some(AuraConfig { authorities: initial_authorities.iter().map(|x| (x.0.clone())).collect(), }), - grandpa: Some(GrandpaConfig { + pallet_grandpa: Some(GrandpaConfig { authorities: initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect(), }), - sudo: Some(SudoConfig { + pallet_sudo: Some(SudoConfig { // Assign network admin rights. 
key: root_key, }), diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index 7523ad776dd..f4e8697a47d 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -12,15 +12,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -aura = { version = "2.0.0-rc5", default-features = false, package = "pallet-aura", path = "../../../frame/aura" } -balances = { version = "2.0.0-rc5", default-features = false, package = "pallet-balances", path = "../../../frame/balances" } + +pallet-aura = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/aura" } +pallet-balances = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/balances" } frame-support = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/support" } -grandpa = { version = "2.0.0-rc5", default-features = false, package = "pallet-grandpa", path = "../../../frame/grandpa" } -randomness-collective-flip = { version = "2.0.0-rc5", default-features = false, package = "pallet-randomness-collective-flip", path = "../../../frame/randomness-collective-flip" } -sudo = { version = "2.0.0-rc5", default-features = false, package = "pallet-sudo", path = "../../../frame/sudo" } +pallet-grandpa = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/grandpa" } +pallet-randomness-collective-flip = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/randomness-collective-flip" } +pallet-sudo = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/sudo" } frame-system = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/system" } -timestamp = { version = "2.0.0-rc5", default-features = false, package = "pallet-timestamp", path = "../../../frame/timestamp" } -transaction-payment = { version = "2.0.0-rc5", default-features = false, package = "pallet-transaction-payment", path = "../../../frame/transaction-payment" } +pallet-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/timestamp" } +pallet-transaction-payment = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/transaction-payment" } frame-executive = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/executive" } serde = { version = "1.0.101", optional = true, features = ["derive"] } sp-api = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/api" } @@ -47,13 +48,17 @@ wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-run [features] default = ["std"] std = [ - "aura/std", - "balances/std", "codec/std", "frame-executive/std", "frame-support/std", - "grandpa/std", - "randomness-collective-flip/std", + "pallet-aura/std", + "pallet-balances/std", + "pallet-grandpa/std", + "pallet-randomness-collective-flip/std", + "pallet-sudo/std", + "pallet-timestamp/std", + "pallet-transaction-payment/std", + "pallet-transaction-payment-rpc-runtime-api/std", "serde", "sp-api/std", "sp-block-builder/std", @@ -66,11 +71,7 @@ std = [ "sp-std/std", "sp-transaction-pool/std", "sp-version/std", - "sudo/std", "frame-system/std", - "timestamp/std", - "transaction-payment/std", "frame-system-rpc-runtime-api/std", - "pallet-transaction-payment-rpc-runtime-api/std", "template/std", ] diff --git a/bin/node-template/runtime/src/lib.rs 
b/bin/node-template/runtime/src/lib.rs index eebbfe2c5de..06e34e45516 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -17,8 +17,8 @@ use sp_runtime::traits::{ }; use sp_api::impl_runtime_apis; use sp_consensus_aura::sr25519::AuthorityId as AuraId; -use grandpa::{AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList}; -use grandpa::fg_primitives; +use pallet_grandpa::{AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList}; +use pallet_grandpa::fg_primitives; use sp_version::RuntimeVersion; #[cfg(feature = "std")] use sp_version::NativeVersion; @@ -26,8 +26,8 @@ use sp_version::NativeVersion; // A few exports that help ease life for downstream crates. #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; -pub use timestamp::Call as TimestampCall; -pub use balances::Call as BalancesCall; +pub use pallet_timestamp::Call as TimestampCall; +pub use pallet_balances::Call as BalancesCall; pub use sp_runtime::{Permill, Perbill}; pub use frame_support::{ construct_runtime, parameter_types, StorageValue, @@ -187,16 +187,16 @@ impl frame_system::Trait for Runtime { /// What to do if an account is fully reaped from the system. type OnKilledAccount = (); /// The data to be stored in an account. - type AccountData = balances::AccountData; + type AccountData = pallet_balances::AccountData; /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = (); } -impl aura::Trait for Runtime { +impl pallet_aura::Trait for Runtime { type AuthorityId = AuraId; } -impl grandpa::Trait for Runtime { +impl pallet_grandpa::Trait for Runtime { type Event = Event; type Call = Call; @@ -217,7 +217,7 @@ parameter_types! { pub const MinimumPeriod: u64 = SLOT_DURATION / 2; } -impl timestamp::Trait for Runtime { +impl pallet_timestamp::Trait for Runtime { /// A timestamp: milliseconds since the unix epoch. type Moment = u64; type OnTimestampSet = Aura; @@ -229,7 +229,7 @@ parameter_types! { pub const ExistentialDeposit: u128 = 500; } -impl balances::Trait for Runtime { +impl pallet_balances::Trait for Runtime { /// The type for recording an account's balance. type Balance = Balance; /// The ubiquitous event type. @@ -244,15 +244,15 @@ parameter_types! 
{ pub const TransactionByteFee: Balance = 1; } -impl transaction_payment::Trait for Runtime { - type Currency = balances::Module; +impl pallet_transaction_payment::Trait for Runtime { + type Currency = Balances; type OnTransactionPayment = (); type TransactionByteFee = TransactionByteFee; type WeightToFee = IdentityFee; type FeeMultiplierUpdate = (); } -impl sudo::Trait for Runtime { +impl pallet_sudo::Trait for Runtime { type Event = Event; type Call = Call; } @@ -270,13 +270,13 @@ construct_runtime!( UncheckedExtrinsic = UncheckedExtrinsic { System: frame_system::{Module, Call, Config, Storage, Event}, - RandomnessCollectiveFlip: randomness_collective_flip::{Module, Call, Storage}, - Timestamp: timestamp::{Module, Call, Storage, Inherent}, - Aura: aura::{Module, Config, Inherent}, - Grandpa: grandpa::{Module, Call, Storage, Config, Event}, - Balances: balances::{Module, Call, Storage, Config, Event}, - TransactionPayment: transaction_payment::{Module, Storage}, - Sudo: sudo::{Module, Call, Config, Storage, Event}, + RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Module, Call, Storage}, + Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, + Aura: pallet_aura::{Module, Config, Inherent}, + Grandpa: pallet_grandpa::{Module, Call, Storage, Config, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + TransactionPayment: pallet_transaction_payment::{Module, Storage}, + Sudo: pallet_sudo::{Module, Call, Config, Storage, Event}, // Include the custom logic from the template pallet in the runtime. TemplateModule: template::{Module, Call, Storage, Event}, } @@ -300,7 +300,7 @@ pub type SignedExtra = ( frame_system::CheckEra, frame_system::CheckNonce, frame_system::CheckWeight, - transaction_payment::ChargeTransactionPayment + pallet_transaction_payment::ChargeTransactionPayment ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; @@ -423,7 +423,7 @@ impl_runtime_apis! { None } } - + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { fn account_nonce(account: AccountId) -> Index { System::account_nonce(account) -- GitLab From 5719b8cab597e04ee0d4c30f8aed9acc3921e7d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Thu, 6 Aug 2020 20:43:36 +0100 Subject: [PATCH 064/132] grandpa: fix enacting forced changes with no delay (#6828) * grandpa: fix enacting forced changes with no delay * grandpa: fix formatting --- client/finality-grandpa/src/authorities.rs | 42 ++++++++++++++++++++-- 1 file changed, 40 insertions(+), 2 deletions(-) diff --git a/client/finality-grandpa/src/authorities.rs b/client/finality-grandpa/src/authorities.rs index 117f5cad5e3..7a064d7a622 100644 --- a/client/finality-grandpa/src/authorities.rs +++ b/client/finality-grandpa/src/authorities.rs @@ -356,8 +356,9 @@ where .take_while(|c| c.effective_number() <= best_number) // to prevent iterating too far .filter(|c| c.effective_number() == best_number) { - // check if the given best block is in the same branch as the block that signaled the change. - if is_descendent_of(&change.canon_hash, &best_hash)? { + // check if the given best block is in the same branch as + // the block that signaled the change. + if change.canon_hash == best_hash || is_descendent_of(&change.canon_hash, &best_hash)? 
{ // apply this change: make the set canonical afg_log!(initial_sync, "👴 Applying authority set change forced at block #{:?}", @@ -984,6 +985,43 @@ mod tests { ); } + #[test] + fn forced_changes_with_no_delay() { + // NOTE: this is a regression test + let mut authorities = AuthoritySet { + current_authorities: Vec::new(), + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + }; + + let set_a = vec![(AuthorityId::from_slice(&[1; 32]), 5)]; + + // we create a forced change with no delay + let change_a = PendingChange { + next_authorities: set_a.clone(), + delay: 0, + canon_height: 5, + canon_hash: "hash_a", + delay_kind: DelayKind::Best { + median_last_finalized: 0, + }, + }; + + // and import it + authorities + .add_pending_change(change_a, &static_is_descendent_of(false)) + .unwrap(); + + // it should be enacted at the same block that signaled it + assert!( + authorities + .apply_forced_changes("hash_a", 5, &static_is_descendent_of(false), false) + .unwrap() + .is_some() + ); + } + #[test] fn next_change_works() { let current_authorities = vec![(AuthorityId::from_slice(&[1; 32]), 1)]; -- GitLab From 2e9b63b1f1fc4d5a018e9801cdef4a54e9839857 Mon Sep 17 00:00:00 2001 From: Cecile Tonglet Date: Fri, 7 Aug 2020 10:56:32 +0200 Subject: [PATCH 065/132] Renamed add_children to add_child since it adds only one child (#6838) --- client/service/src/task_manager/mod.rs | 2 +- client/service/src/task_manager/tests.rs | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/client/service/src/task_manager/mod.rs b/client/service/src/task_manager/mod.rs index 729b43bce94..6925d27f4e5 100644 --- a/client/service/src/task_manager/mod.rs +++ b/client/service/src/task_manager/mod.rs @@ -344,7 +344,7 @@ impl TaskManager { /// Register another TaskManager to terminate and gracefully shutdown when the parent /// terminates and gracefully shutdown. Also ends the parent `future()` if a child's essential /// task fails. (But don't end the parent if a child's normal task fails.) 
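A small sketch of the parent/child relationship described above (illustrative only, not taken from this diff; it assumes a `task_executor` is in scope):

```rust
let mut parent = TaskManager::new(task_executor.clone(), None).expect("creates parent task manager");
let child = TaskManager::new(task_executor.clone(), None).expect("creates child task manager");

// The child is terminated and shut down together with the parent, and a
// failing *essential* child task also ends `parent.future()`.
parent.add_child(child);
```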
- pub fn add_children(&mut self, child: TaskManager) { + pub fn add_child(&mut self, child: TaskManager) { self.children.push(child); } } diff --git a/client/service/src/task_manager/tests.rs b/client/service/src/task_manager/tests.rs index a2bd84802aa..27d9b0b9e9a 100644 --- a/client/service/src/task_manager/tests.rs +++ b/client/service/src/task_manager/tests.rs @@ -220,8 +220,8 @@ fn ensure_children_tasks_ends_when_task_manager_terminated() { let spawn_handle_child_1 = child_1.spawn_handle(); let child_2 = TaskManager::new(task_executor.clone(), None).unwrap(); let spawn_handle_child_2 = child_2.spawn_handle(); - task_manager.add_children(child_1); - task_manager.add_children(child_2); + task_manager.add_child(child_1); + task_manager.add_child(child_2); let spawn_handle = task_manager.spawn_handle(); let drop_tester = DropTester::new(); spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref())); @@ -250,8 +250,8 @@ fn ensure_task_manager_future_ends_with_error_when_childs_essential_task_fails() let spawn_essential_handle_child_1 = child_1.spawn_essential_handle(); let child_2 = TaskManager::new(task_executor.clone(), None).unwrap(); let spawn_handle_child_2 = child_2.spawn_handle(); - task_manager.add_children(child_1); - task_manager.add_children(child_2); + task_manager.add_child(child_1); + task_manager.add_child(child_2); let spawn_handle = task_manager.spawn_handle(); let drop_tester = DropTester::new(); spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref())); @@ -280,8 +280,8 @@ fn ensure_task_manager_future_continues_when_childs_not_essential_task_fails() { let spawn_handle_child_1 = child_1.spawn_handle(); let child_2 = TaskManager::new(task_executor.clone(), None).unwrap(); let spawn_handle_child_2 = child_2.spawn_handle(); - task_manager.add_children(child_1); - task_manager.add_children(child_2); + task_manager.add_child(child_1); + task_manager.add_child(child_2); let spawn_handle = task_manager.spawn_handle(); let drop_tester = DropTester::new(); spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref())); -- GitLab From 74804b5649eccfb83c90aec87bdca58e5d5c8789 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 7 Aug 2020 13:58:51 +0200 Subject: [PATCH 066/132] Revalidate transactions only on latest best block (#6824) * Revalidate transactions only on latest best block We should revalidate transactions only on the latest best block and not on any arbitrary block. The revalidation before failed when there were multiple blocks on the height given to the revalidation function, but no block was imported as best block. 
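To make the intended behaviour concrete (a hedged sketch, not code from this patch; `pool` and `best_hash` are assumed to exist): the pool is now maintained only for new best blocks via `ChainEvent::NewBestBlock`, instead of receiving every imported block together with an `is_new_best` flag.

```rust
use sp_transaction_pool::{ChainEvent, MaintainedTransactionPool};

async fn notify_best_block<P: MaintainedTransactionPool>(
    pool: &P,
    best_hash: <P::Block as sp_runtime::traits::Block>::Hash,
) {
    // Non-best imports are skipped entirely rather than passed with a flag.
    pool.maintain(ChainEvent::NewBestBlock { hash: best_hash, tree_route: None }).await;
}
```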
* Update test-utils/runtime/transaction-pool/src/lib.rs Co-authored-by: Jaco Greeff * Fix tests * Only process best blocks in the transaction pool Co-authored-by: Jaco Greeff --- bin/node/cli/src/service.rs | 4 +- client/api/src/client.rs | 20 +- .../basic-authorship/src/basic_authorship.rs | 4 +- client/consensus/manual-seal/src/lib.rs | 14 +- client/transaction-pool/src/lib.rs | 17 +- client/transaction-pool/src/revalidation.rs | 21 +- client/transaction-pool/src/testing/pool.rs | 227 +++++++----------- primitives/transaction-pool/src/pool.rs | 8 +- .../runtime/transaction-pool/src/lib.rs | 96 ++++++-- 9 files changed, 205 insertions(+), 206 deletions(-) diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index fdfa6816296..fd2c240a44e 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -523,11 +523,9 @@ mod tests { futures::executor::block_on( service.transaction_pool().maintain( - ChainEvent::NewBlock { - is_new_best: true, + ChainEvent::NewBestBlock { hash: parent_header.hash(), tree_route: None, - header: parent_header.clone(), }, ) ); diff --git a/client/api/src/client.rs b/client/api/src/client.rs index 35d40965e64..f97daa48763 100644 --- a/client/api/src/client.rs +++ b/client/api/src/client.rs @@ -16,7 +16,7 @@ //! A set of APIs supported by the client along with their primitives. -use std::{fmt, collections::HashSet, sync::Arc}; +use std::{fmt, collections::HashSet, sync::Arc, convert::TryFrom}; use sp_core::storage::StorageKey; use sp_runtime::{ traits::{Block as BlockT, NumberFor}, @@ -252,13 +252,17 @@ pub struct FinalityNotification { pub header: Block::Header, } -impl From> for sp_transaction_pool::ChainEvent { - fn from(n: BlockImportNotification) -> Self { - Self::NewBlock { - is_new_best: n.is_new_best, - hash: n.hash, - header: n.header, - tree_route: n.tree_route, +impl TryFrom> for sp_transaction_pool::ChainEvent { + type Error = (); + + fn try_from(n: BlockImportNotification) -> Result { + if n.is_new_best { + Ok(Self::NewBestBlock { + hash: n.hash, + tree_route: n.tree_route, + }) + } else { + Err(()) } } } diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 3c56bdd33db..41d12970464 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -346,11 +346,9 @@ mod tests { fn chain_event(header: B::Header) -> ChainEvent where NumberFor: From { - ChainEvent::NewBlock { + ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None, - is_new_best: true, - header, } } diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index 2799a498c1f..36aeffd9794 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -207,6 +207,7 @@ mod tests { use sp_consensus::ImportedAux; use sp_inherents::InherentDataProviders; use sc_basic_authorship::ProposerFactory; + use sc_client_api::BlockBackend; fn api() -> Arc { Arc::new(TestApi::empty()) @@ -415,15 +416,13 @@ mod tests { } } ); - // assert that there's a new block in the db. 
- assert!(client.header(&BlockId::Number(0)).unwrap().is_some()); + let block = client.block(&BlockId::Number(1)).unwrap().unwrap().block; + pool_api.add_block(block, true); assert!(pool.submit_one(&BlockId::Number(1), SOURCE, uxt(Alice, 1)).await.is_ok()); let header = client.header(&BlockId::Number(1)).expect("db error").expect("imported above"); - pool.maintain(sp_transaction_pool::ChainEvent::NewBlock { + pool.maintain(sp_transaction_pool::ChainEvent::NewBestBlock { hash: header.hash(), - header, - is_new_best: true, tree_route: None, }).await; @@ -438,10 +437,11 @@ mod tests { rx1.await.expect("should be no error receiving"), Ok(_) ); - assert!(client.header(&BlockId::Number(1)).unwrap().is_some()); + let block = client.block(&BlockId::Number(2)).unwrap().unwrap().block; + pool_api.add_block(block, true); pool_api.increment_nonce(Alice.into()); - assert!(pool.submit_one(&BlockId::Number(2), SOURCE, uxt(Alice, 2)).await.is_ok()); + assert!(pool.submit_one(&BlockId::Number(1), SOURCE, uxt(Alice, 2)).await.is_ok()); let (tx2, rx2) = futures::channel::oneshot::channel(); assert!(sink.send(EngineCommand::SealNewBlock { parent_hash: Some(created_block.hash), diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index 6255fd478b7..0b6a1e935b9 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -34,7 +34,7 @@ pub mod testing; pub use sc_transaction_graph as txpool; pub use crate::api::{FullChainApi, LightChainApi}; -use std::{collections::{HashMap, HashSet}, sync::Arc, pin::Pin}; +use std::{collections::{HashMap, HashSet}, sync::Arc, pin::Pin, convert::TryInto}; use futures::{prelude::*, future::{self, ready}, channel::oneshot}; use parking_lot::Mutex; @@ -549,7 +549,7 @@ impl MaintainedTransactionPool for BasicPool { fn maintain(&self, event: ChainEvent) -> Pin + Send>> { match event { - ChainEvent::NewBlock { hash, tree_route, is_new_best, .. } => { + ChainEvent::NewBestBlock { hash, tree_route } => { let pool = self.pool.clone(); let api = self.api.clone(); @@ -608,10 +608,7 @@ impl MaintainedTransactionPool for BasicPool }) } - // If this is a new best block, we need to prune its transactions from the pool. 
- if is_new_best { - pruned_log.extend(prune_known_txs_for_block(id.clone(), &*api, &*pool).await); - } + pruned_log.extend(prune_known_txs_for_block(id.clone(), &*api, &*pool).await); metrics.report( |metrics| metrics.block_transactions_pruned.inc_by(pruned_log.len() as u64) @@ -690,9 +687,9 @@ impl MaintainedTransactionPool for BasicPool .map(|tx| tx.hash.clone()) .collect(); revalidation_queue.revalidate_later(block_number, hashes).await; - } - revalidation_strategy.lock().clear(); + revalidation_strategy.lock().clear(); + } }.boxed() } ChainEvent::Finalized { hash } => { @@ -721,7 +718,9 @@ pub async fn notification_future( Client: sc_client_api::BlockchainEvents, Pool: MaintainedTransactionPool, { - let import_stream = client.import_notification_stream().map(Into::into).fuse(); + let import_stream = client.import_notification_stream() + .filter_map(|n| ready(n.try_into().ok())) + .fuse(); let finality_stream = client.finality_notification_stream() .map(Into::into) .fuse(); diff --git a/client/transaction-pool/src/revalidation.rs b/client/transaction-pool/src/revalidation.rs index af9a76c055b..7be8688eaea 100644 --- a/client/transaction-pool/src/revalidation.rs +++ b/client/transaction-pool/src/revalidation.rs @@ -211,8 +211,7 @@ impl RevalidationWorker { mut self, from_queue: TracingUnboundedReceiver>, interval: R, - ) where R: Send, R::Guard: Send - { + ) where R: Send, R::Guard: Send { let interval = interval.into_stream().fuse(); let from_queue = from_queue.fuse(); futures::pin_mut!(interval, from_queue); @@ -253,7 +252,7 @@ impl RevalidationWorker { if this.members.len() > 0 { log::debug!( target: "txpool", - "Updated revalidation queue at {}. Transactions: {:?}", + "Updated revalidation queue at {:?}. Transactions: {:?}", this.best_block, this.members, ); @@ -298,9 +297,7 @@ where api: Arc, pool: Arc>, interval: R, - ) -> (Self, Pin + Send>>) - where R: Send + 'static, R::Guard: Send - { + ) -> (Self, Pin + Send>>) where R: Send + 'static, R::Guard: Send { let (to_worker, from_queue) = tracing_unbounded("mpsc_revalidation_queue"); let worker = RevalidationWorker::new(api.clone(), pool.clone()); @@ -338,16 +335,22 @@ where /// If queue configured with background worker, this will return immediately. /// If queue configured without background worker, this will resolve after /// revalidation is actually done. 
- pub async fn revalidate_later(&self, at: NumberFor, transactions: Vec>) { + pub async fn revalidate_later( + &self, + at: NumberFor, + transactions: Vec>, + ) { if transactions.len() > 0 { - log::debug!(target: "txpool", "Sent {} transactions to revalidation queue", transactions.len()); + log::debug!( + target: "txpool", "Sent {} transactions to revalidation queue", + transactions.len(), + ); } if let Some(ref to_worker) = self.background { if let Err(e) = to_worker.unbounded_send(WorkerPayload { at, transactions }) { log::warn!(target: "txpool", "Failed to update background worker: {:?}", e); } - return; } else { let pool = self.pool.clone(); let api = self.api.clone(); diff --git a/client/transaction-pool/src/testing/pool.rs b/client/transaction-pool/src/testing/pool.rs index a938313733e..8fa742cd419 100644 --- a/client/transaction-pool/src/testing/pool.rs +++ b/client/transaction-pool/src/testing/pool.rs @@ -106,7 +106,7 @@ fn prune_tags_should_work() { let pending: Vec<_> = pool.validated_pool().ready().map(|a| a.data.transfer().nonce).collect(); assert_eq!(pending, vec![209, 210]); - pool.validated_pool().api().push_block(1, Vec::new()); + pool.validated_pool().api().push_block(1, Vec::new(), true); block_on( pool.prune_tags( &BlockId::number(1), @@ -141,25 +141,14 @@ fn only_prune_on_new_best() { let uxt = uxt(Alice, 209); let _ = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, uxt.clone()) + pool.submit_and_watch(&BlockId::number(0), SOURCE, uxt.clone()) ).expect("1. Imported"); - let header = pool.api.push_block(1, vec![uxt.clone()]); + pool.api.push_block(1, vec![uxt.clone()], true); assert_eq!(pool.status().ready, 1); - let event = ChainEvent::NewBlock { + let header = pool.api.push_block(2, vec![uxt], true); + let event = ChainEvent::NewBestBlock { hash: header.hash(), - is_new_best: false, - header: header.clone(), - tree_route: None, - }; - block_on(pool.maintain(event)); - assert_eq!(pool.status().ready, 1); - - let header = pool.api.push_block(2, vec![uxt]); - let event = ChainEvent::NewBlock { - hash: header.hash(), - is_new_best: true, - header: header.clone(), tree_route: None, }; block_on(pool.maintain(event)); @@ -179,7 +168,7 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { // remove the transaction that just got imported. api.increment_nonce(Alice.into()); - api.push_block(1, Vec::new()); + api.push_block(1, Vec::new(), true); block_on(pool.prune_tags(&BlockId::number(1), vec![vec![209]], vec![])).expect("1. Pruned"); assert_eq!(pool.validated_pool().status().ready, 0); // it's re-imported to future @@ -187,7 +176,7 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { // so now let's insert another transaction that also provides the 155 api.increment_nonce(Alice.into()); - api.push_block(2, Vec::new()); + api.push_block(2, Vec::new(), true); let xt = uxt(Alice, 211); block_on(pool.submit_one(&BlockId::number(2), SOURCE, xt.clone())).expect("2. Imported"); assert_eq!(pool.validated_pool().status().ready, 1); @@ -197,18 +186,16 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { // prune it and make sure the pool is empty api.increment_nonce(Alice.into()); - api.push_block(3, Vec::new()); + api.push_block(3, Vec::new(), true); block_on(pool.prune_tags(&BlockId::number(3), vec![vec![155]], vec![])).expect("2. 
Pruned"); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 2); } fn block_event(header: Header) -> ChainEvent { - ChainEvent::NewBlock { + ChainEvent::NewBestBlock { hash: header.hash(), - is_new_best: true, tree_route: None, - header, } } @@ -219,11 +206,9 @@ fn block_event_with_retracted( ) -> ChainEvent { let tree_route = api.tree_route(retracted_start, header.parent_hash).expect("Tree route exists"); - ChainEvent::NewBlock { + ChainEvent::NewBestBlock { hash: header.hash(), - is_new_best: true, tree_route: Some(Arc::new(tree_route)), - header, } } @@ -236,7 +221,7 @@ fn should_prune_old_during_maintenance() { block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); assert_eq!(pool.status().ready, 1); - let header = pool.api.push_block(1, vec![xt.clone()]); + let header = pool.api.push_block(1, vec![xt.clone()], true); block_on(pool.maintain(block_event(header))); assert_eq!(pool.status().ready, 0); @@ -253,7 +238,7 @@ fn should_revalidate_during_maintenance() { assert_eq!(pool.status().ready, 2); assert_eq!(pool.api.validation_requests().len(), 2); - let header = pool.api.push_block(1, vec![xt1.clone()]); + let header = pool.api.push_block(1, vec![xt1.clone()], true); block_on(pool.maintain(block_event(header))); assert_eq!(pool.status().ready, 1); @@ -272,8 +257,8 @@ fn should_resubmit_from_retracted_during_maintenance() { block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); assert_eq!(pool.status().ready, 1); - let header = pool.api.push_block(1, vec![]); - let fork_header = pool.api.push_block(1, vec![]); + let header = pool.api.push_block(1, vec![], true); + let fork_header = pool.api.push_block(1, vec![], false); let event = block_event_with_retracted(header, fork_header.hash(), &*pool.api); @@ -291,8 +276,8 @@ fn should_not_resubmit_from_retracted_during_maintenance_if_tx_is_also_in_enacte block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); assert_eq!(pool.status().ready, 1); - let header = pool.api.push_block(1, vec![xt.clone()]); - let fork_header = pool.api.push_block(1, vec![xt]); + let header = pool.api.push_block(1, vec![xt.clone()], true); + let fork_header = pool.api.push_block(1, vec![xt], false); let event = block_event_with_retracted(header, fork_header.hash(), &*pool.api); @@ -309,8 +294,8 @@ fn should_not_retain_invalid_hashes_from_retracted() { block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); assert_eq!(pool.status().ready, 1); - let header = pool.api.push_block(1, vec![]); - let fork_header = pool.api.push_block(1, vec![xt.clone()]); + let header = pool.api.push_block(1, vec![], true); + let fork_header = pool.api.push_block(1, vec![xt.clone()], false); pool.api.add_invalid(&xt); let event = block_event_with_retracted(header, fork_header.hash(), &*pool.api); @@ -330,14 +315,14 @@ fn should_revalidate_transaction_multiple_times() { block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); assert_eq!(pool.status().ready, 1); - let header = pool.api.push_block(1, vec![xt.clone()]); + let header = pool.api.push_block(1, vec![xt.clone()], true); block_on(pool.maintain(block_event(header))); block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. 
Imported"); assert_eq!(pool.status().ready, 1); - let header = pool.api.push_block(2, vec![]); + let header = pool.api.push_block(2, vec![], true); pool.api.add_invalid(&xt); block_on(pool.maintain(block_event(header))); @@ -354,18 +339,18 @@ fn should_revalidate_across_many_blocks() { let (pool, _guard, mut notifier) = maintained_pool(); - block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt1.clone())).expect("1. Imported"); - block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt2.clone())).expect("1. Imported"); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt1.clone())).expect("1. Imported"); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt2.clone())).expect("1. Imported"); assert_eq!(pool.status().ready, 2); - let header = pool.api.push_block(1, vec![]); + let header = pool.api.push_block(1, vec![], true); block_on(pool.maintain(block_event(header))); block_on(notifier.next()); - block_on(pool.submit_one(&BlockId::number(2), SOURCE, xt3.clone())).expect("1. Imported"); + block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt3.clone())).expect("1. Imported"); assert_eq!(pool.status().ready, 3); - let header = pool.api.push_block(2, vec![xt1.clone()]); + let header = pool.api.push_block(2, vec![xt1.clone()], true); block_on(pool.maintain(block_event(header))); block_on(notifier.next()); @@ -411,7 +396,7 @@ fn should_push_watchers_during_maintaince() { pool.api.add_invalid(&tx4); // clear timer events if any - let header = pool.api.push_block(1, vec![]); + let header = pool.api.push_block(1, vec![], true); block_on(pool.maintain(block_event(header))); block_on(notifier.next()); @@ -429,7 +414,7 @@ fn should_push_watchers_during_maintaince() { ); // when - let header = pool.api.push_block(2, vec![tx0, tx1, tx2]); + let header = pool.api.push_block(2, vec![tx0, tx1, tx2], true); let header_hash = header.hash(); block_on(pool.maintain(block_event(header))); @@ -478,18 +463,16 @@ fn can_track_heap_size() { fn finalization() { let xt = uxt(Alice, 209); let api = TestApi::with_alice_nonce(209); - api.push_block(1, vec![]); + api.push_block(1, vec![], true); let (pool, _background, _) = BasicPool::new_test(api.into()); let watcher = block_on( pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone()) ).expect("1. Imported"); - pool.api.push_block(2, vec![xt.clone()]); + pool.api.push_block(2, vec![xt.clone()], true); - let header = pool.api.chain().read().block_by_number.get(&2).unwrap()[0].header().clone(); - let event = ChainEvent::NewBlock { + let header = pool.api.chain().read().block_by_number.get(&2).unwrap()[0].0.header().clone(); + let event = ChainEvent::NewBestBlock { hash: header.hash(), - is_new_best: true, - header: header.clone(), tree_route: None, }; block_on(pool.maintain(event)); @@ -508,7 +491,7 @@ fn finalization() { fn fork_aware_finalization() { let api = TestApi::empty(); // starting block A1 (last finalized.) - api.push_block(1, vec![]); + api.push_block(1, vec![], true); let (pool, _background, _) = BasicPool::new_test(api.into()); let mut canon_watchers = vec![]; @@ -534,14 +517,12 @@ fn fork_aware_finalization() { let watcher = block_on( pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone()) ).expect("1. 
Imported"); - let header = pool.api.push_block(2, vec![from_alice.clone()]); + let header = pool.api.push_block(2, vec![from_alice.clone()], true); canon_watchers.push((watcher, header.hash())); assert_eq!(pool.status().ready, 1); - let event = ChainEvent::NewBlock { + let event = ChainEvent::NewBestBlock { hash: header.hash(), - is_new_best: true, - header: header.clone(), tree_route: None, }; b1 = header.hash(); @@ -553,15 +534,13 @@ fn fork_aware_finalization() { // block C2 { - let header = pool.api.push_block_with_parent(b1, vec![from_dave.clone()]); + let header = pool.api.push_block_with_parent(b1, vec![from_dave.clone()], true); from_dave_watcher = block_on( pool.submit_and_watch(&BlockId::number(1), SOURCE, from_dave.clone()) ).expect("1. Imported"); assert_eq!(pool.status().ready, 1); - let event = ChainEvent::NewBlock { + let event = ChainEvent::NewBestBlock { hash: header.hash(), - is_new_best: true, - header: header.clone(), tree_route: None, }; c2 = header.hash(); @@ -575,12 +554,10 @@ fn fork_aware_finalization() { pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone()) ).expect("1. Imported"); assert_eq!(pool.status().ready, 1); - let header = pool.api.push_block_with_parent(c2, vec![from_bob.clone()]); + let header = pool.api.push_block_with_parent(c2, vec![from_bob.clone()], true); - let event = ChainEvent::NewBlock { + let event = ChainEvent::NewBestBlock { hash: header.hash(), - is_new_best: true, - header: header.clone(), tree_route: None, }; d2 = header.hash(); @@ -594,7 +571,7 @@ fn fork_aware_finalization() { pool.submit_and_watch(&BlockId::number(1), SOURCE, from_charlie.clone()) ).expect("1.Imported"); assert_eq!(pool.status().ready, 1); - let header = pool.api.push_block(3, vec![from_charlie.clone()]); + let header = pool.api.push_block(3, vec![from_charlie.clone()], true); canon_watchers.push((watcher, header.hash())); let event = block_event_with_retracted(header.clone(), d2, &*pool.api); @@ -612,13 +589,11 @@ fn fork_aware_finalization() { pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone()) ).expect("1. Imported"); assert_eq!(pool.status().ready, 3); - let header = pool.api.push_block(4, vec![xt.clone()]); + let header = pool.api.push_block(4, vec![xt.clone()], true); canon_watchers.push((w, header.hash())); - let event = ChainEvent::NewBlock { + let event = ChainEvent::NewBestBlock { hash: header.hash(), - is_new_best: true, - header: header.clone(), tree_route: None, }; d1 = header.hash(); @@ -632,12 +607,10 @@ fn fork_aware_finalization() { // block e1 { - let header = pool.api.push_block(5, vec![from_dave, from_bob]); + let header = pool.api.push_block(5, vec![from_dave, from_bob], true); e1 = header.hash(); - let event = ChainEvent::NewBlock { + let event = ChainEvent::NewBestBlock { hash: header.hash(), - is_new_best: true, - header: header.clone(), tree_route: None, }; block_on(pool.maintain(event)); @@ -684,7 +657,7 @@ fn fork_aware_finalization() { fn prune_and_retract_tx_at_same_time() { let api = TestApi::empty(); // starting block A1 (last finalized.) 
- api.push_block(1, vec![]); + api.push_block(1, vec![], true); let (pool, _background, _) = BasicPool::new_test(api.into()); @@ -697,13 +670,11 @@ fn prune_and_retract_tx_at_same_time() { // Block B1 let b1 = { - let header = pool.api.push_block(2, vec![from_alice.clone()]); + let header = pool.api.push_block(2, vec![from_alice.clone()], true); assert_eq!(pool.status().ready, 1); - let event = ChainEvent::NewBlock { + let event = ChainEvent::NewBestBlock { hash: header.hash(), - is_new_best: true, - header: header.clone(), tree_route: None, }; block_on(pool.maintain(event)); @@ -713,7 +684,7 @@ fn prune_and_retract_tx_at_same_time() { // Block B2 let b2 = { - let header = pool.api.push_block(2, vec![from_alice.clone()]); + let header = pool.api.push_block(2, vec![from_alice.clone()], false); assert_eq!(pool.status().ready, 0); let event = block_event_with_retracted(header.clone(), b1, &*pool.api); @@ -757,7 +728,7 @@ fn prune_and_retract_tx_at_same_time() { fn resubmit_tx_of_fork_that_is_not_part_of_retracted() { let api = TestApi::empty(); // starting block A1 (last finalized.) - api.push_block(1, vec![]); + api.push_block(1, vec![], true); let (pool, _background, _) = BasicPool::new_test(api.into()); @@ -773,13 +744,11 @@ fn resubmit_tx_of_fork_that_is_not_part_of_retracted() { let _ = block_on( pool.submit_and_watch(&BlockId::number(1), SOURCE, tx0.clone()) ).expect("1. Imported"); - let header = pool.api.push_block(2, vec![tx0.clone()]); + let header = pool.api.push_block(2, vec![tx0.clone()], true); assert_eq!(pool.status().ready, 1); - let event = ChainEvent::NewBlock { + let event = ChainEvent::NewBestBlock { hash: header.hash(), - is_new_best: true, - header: header.clone(), tree_route: None, }; d0 = header.hash(); @@ -792,23 +761,13 @@ fn resubmit_tx_of_fork_that_is_not_part_of_retracted() { let _ = block_on( pool.submit_and_watch(&BlockId::number(1), SOURCE, tx1.clone()) ).expect("1. Imported"); - let header = pool.api.push_block(2, vec![tx1.clone()]); - assert_eq!(pool.status().ready, 1); - let event = ChainEvent::NewBlock { - hash: header.hash(), - is_new_best: false, - header: header.clone(), - tree_route: None, - }; - block_on(pool.maintain(event)); - - // Only transactions from new best should be pruned + pool.api.push_block(2, vec![tx1.clone()], false); assert_eq!(pool.status().ready, 1); } // Block D2 { - let header = pool.api.push_block(2, vec![]); + let header = pool.api.push_block(2, vec![], false); let event = block_event_with_retracted(header, d0, &*pool.api); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 2); @@ -819,7 +778,7 @@ fn resubmit_tx_of_fork_that_is_not_part_of_retracted() { fn resubmit_from_retracted_fork() { let api = TestApi::empty(); // starting block A1 (last finalized.) - api.push_block(1, vec![]); + api.push_block(1, vec![], true); let (pool, _background, _) = BasicPool::new_test(api.into()); @@ -844,16 +803,10 @@ fn resubmit_from_retracted_fork() { let _ = block_on( pool.submit_and_watch(&BlockId::number(1), SOURCE, tx0.clone()) ).expect("1. 
Imported"); - let header = pool.api.push_block(2, vec![tx0.clone()]); + let header = pool.api.push_block(2, vec![tx0.clone()], true); assert_eq!(pool.status().ready, 1); - let event = ChainEvent::NewBlock { - hash: header.hash(), - is_new_best: true, - header: header.clone(), - tree_route: None, - }; - block_on(pool.maintain(event)); + block_on(pool.maintain(block_event(header))); assert_eq!(pool.status().ready, 0); } @@ -862,14 +815,8 @@ fn resubmit_from_retracted_fork() { let _ = block_on( pool.submit_and_watch(&BlockId::number(1), SOURCE, tx1.clone()) ).expect("1. Imported"); - let header = pool.api.push_block(3, vec![tx1.clone()]); - let event = ChainEvent::NewBlock { - hash: header.hash(), - is_new_best: true, - header: header.clone(), - tree_route: None, - }; - block_on(pool.maintain(event)); + let header = pool.api.push_block(3, vec![tx1.clone()], true); + block_on(pool.maintain(block_event(header))); assert_eq!(pool.status().ready, 0); } @@ -878,14 +825,8 @@ fn resubmit_from_retracted_fork() { let _ = block_on( pool.submit_and_watch(&BlockId::number(1), SOURCE, tx2.clone()) ).expect("1. Imported"); - let header = pool.api.push_block(4, vec![tx2.clone()]); - let event = ChainEvent::NewBlock { - hash: header.hash(), - is_new_best: true, - header: header.clone(), - tree_route: None, - }; - block_on(pool.maintain(event)); + let header = pool.api.push_block(4, vec![tx2.clone()], true); + block_on(pool.maintain(block_event(header.clone()))); assert_eq!(pool.status().ready, 0); header.hash() }; @@ -895,14 +836,7 @@ fn resubmit_from_retracted_fork() { let _ = block_on( pool.submit_and_watch(&BlockId::number(1), SOURCE, tx3.clone()) ).expect("1. Imported"); - let header = pool.api.push_block(2, vec![tx3.clone()]); - let event = ChainEvent::NewBlock { - hash: header.hash(), - is_new_best: false, - header: header.clone(), - tree_route: None, - }; - block_on(pool.maintain(event)); + let header = pool.api.push_block(2, vec![tx3.clone()], true); assert_eq!(pool.status().ready, 1); header.hash() }; @@ -912,14 +846,7 @@ fn resubmit_from_retracted_fork() { let _ = block_on( pool.submit_and_watch(&BlockId::number(1), SOURCE, tx4.clone()) ).expect("1. Imported"); - let header = pool.api.push_block_with_parent(d1.clone(), vec![tx4.clone()]); - let event = ChainEvent::NewBlock { - hash: header.hash(), - is_new_best: false, - header: header.clone(), - tree_route: None, - }; - block_on(pool.maintain(event)); + let header = pool.api.push_block_with_parent(d1.clone(), vec![tx4.clone()], true); assert_eq!(pool.status().ready, 2); header.hash() }; @@ -929,7 +856,7 @@ fn resubmit_from_retracted_fork() { let _ = block_on( pool.submit_and_watch(&BlockId::number(1), SOURCE, tx5.clone()) ).expect("1. Imported"); - let header = pool.api.push_block_with_parent(e1.clone(), vec![tx5.clone()]); + let header = pool.api.push_block_with_parent(e1.clone(), vec![tx5.clone()], true); // Don't announce the block event to the pool directly, because we will // re-org to this block. assert_eq!(pool.status().ready, 3); @@ -953,7 +880,7 @@ fn resubmit_from_retracted_fork() { fn ready_set_should_not_resolve_before_block_update() { let (pool, _guard, _notifier) = maintained_pool(); let xt1 = uxt(Alice, 209); - block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt1.clone())).expect("1. Imported"); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt1.clone())).expect("1. 
Imported"); assert!(pool.ready_at(1).now_or_never().is_none()); } @@ -961,7 +888,7 @@ fn ready_set_should_not_resolve_before_block_update() { #[test] fn ready_set_should_resolve_after_block_update() { let (pool, _guard, _notifier) = maintained_pool(); - let header = pool.api.push_block(1, vec![]); + let header = pool.api.push_block(1, vec![], true); let xt1 = uxt(Alice, 209); @@ -974,7 +901,7 @@ fn ready_set_should_resolve_after_block_update() { #[test] fn ready_set_should_eventually_resolve_when_block_update_arrives() { let (pool, _guard, _notifier) = maintained_pool(); - let header = pool.api.push_block(1, vec![]); + let header = pool.api.push_block(1, vec![], true); let xt1 = uxt(Alice, 209); @@ -1063,7 +990,7 @@ fn import_notification_to_pool_maintain_works() { // Get the notification of the block import and maintain the pool with it, // Now, the pool should not contain any transactions. let evt = import_stream.next().expect("Importing a block leads to an event"); - block_on(pool.maintain(evt.into())); + block_on(pool.maintain(evt.try_into().expect("Imported as new best block"))); assert_eq!(pool.status().ready, 0); } @@ -1075,7 +1002,7 @@ fn pruning_a_transaction_should_remove_it_from_best_transaction() { let xt1 = Extrinsic::IncludeData(Vec::new()); block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt1.clone())).expect("1. Imported"); - let header = pool.api.push_block(1, vec![xt1.clone()]); + let header = pool.api.push_block(1, vec![xt1.clone()], true); // This will prune `xt1`. block_on(pool.maintain(block_event(header))); @@ -1091,3 +1018,23 @@ fn pruning_a_transaction_should_remove_it_from_best_transaction() { // returned a second time by the iterator. assert!(iterator.next().is_none()); } + +#[test] +fn only_revalidate_on_best_block() { + let xt = uxt(Alice, 209); + + let (pool, _guard, mut notifier) = maintained_pool(); + + block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); + assert_eq!(pool.status().ready, 1); + + let header = pool.api.push_block(1, vec![], true); + + pool.api.push_block(2, vec![], false); + pool.api.push_block(2, vec![], false); + + block_on(pool.maintain(block_event(header))); + block_on(notifier.next()); + + assert_eq!(pool.status().ready, 1); +} diff --git a/primitives/transaction-pool/src/pool.rs b/primitives/transaction-pool/src/pool.rs index 7d1d5537dc9..6235ca7cdfc 100644 --- a/primitives/transaction-pool/src/pool.rs +++ b/primitives/transaction-pool/src/pool.rs @@ -248,14 +248,10 @@ pub trait TransactionPool: Send + Sync { /// Events that the transaction pool listens for. pub enum ChainEvent { - /// New blocks have been added to the chain - NewBlock { - /// Is this the new best block. - is_new_best: bool, + /// New best block have been added to the chain + NewBestBlock { /// Hash of the block. hash: B::Hash, - /// Header of the just imported block - header: B::Header, /// Tree route from old best to new best parent that was calculated on import. /// /// If `None`, no re-org happened on import. diff --git a/test-utils/runtime/transaction-pool/src/lib.rs b/test-utils/runtime/transaction-pool/src/lib.rs index 17cecd394ab..f772ba9b02d 100644 --- a/test-utils/runtime/transaction-pool/src/lib.rs +++ b/test-utils/runtime/transaction-pool/src/lib.rs @@ -35,6 +35,7 @@ use substrate_test_runtime_client::{ AccountKeyring::{self, *}, }; use sp_blockchain::CachedHeaderMetadata; +use futures::future::ready; /// Error type used by [`TestApi`]. 
#[derive(Debug, derive_more::From, derive_more::Display)] @@ -52,9 +53,30 @@ impl std::error::Error for Error { } } +pub enum IsBestBlock { + Yes, + No, +} + +impl IsBestBlock { + pub fn is_best(&self) -> bool { + matches!(self, Self::Yes) + } +} + +impl From for IsBestBlock { + fn from(is_best: bool) -> Self { + if is_best { + Self::Yes + } else { + Self::No + } + } +} + #[derive(Default)] pub struct ChainState { - pub block_by_number: BTreeMap>, + pub block_by_number: BTreeMap>, pub block_by_hash: HashMap, pub nonces: HashMap, pub invalid_hashes: HashSet, @@ -86,7 +108,7 @@ impl TestApi { }; // Push genesis block - api.push_block(0, Vec::new()); + api.push_block(0, Vec::new(), true); api } @@ -97,10 +119,12 @@ impl TestApi { } /// Push block under given number. - /// - /// If multiple blocks exists with the same block number, the first inserted block will be - /// interpreted as part of the canonical chain. - pub fn push_block(&self, block_number: BlockNumber, xts: Vec) -> Header { + pub fn push_block( + &self, + block_number: BlockNumber, + xts: Vec, + is_best_block: bool, + ) -> Header { let parent_hash = { let chain = self.chain.read(); block_number @@ -109,12 +133,12 @@ impl TestApi { chain.block_by_number .get(&num) .map(|blocks| { - blocks[0].header.hash() + blocks[0].0.header.hash() }) }).unwrap_or_default() }; - self.push_block_with_parent(parent_hash, xts) + self.push_block_with_parent(parent_hash, xts, is_best_block) } /// Push a block using the given `parent`. @@ -124,14 +148,14 @@ impl TestApi { &self, parent: Hash, xts: Vec, + is_best_block: bool, ) -> Header { - let mut chain = self.chain.write(); - // `Hash::default()` is the genesis parent hash let block_number = if parent == Hash::default() { 0 } else { - *chain.block_by_hash + *self.chain.read() + .block_by_hash .get(&parent) .expect("`parent` exists") .header() @@ -146,14 +170,21 @@ impl TestApi { state_root: Default::default(), }; - let hash = header.hash(); - let block = Block::new(header.clone(), xts); - chain.block_by_hash.insert(hash, block.clone()); - chain.block_by_number.entry(block_number).or_default().push(block); + self.add_block(Block::new(header.clone(), xts), is_best_block); header } + /// Add a block to the internal state. + pub fn add_block(&self, block: Block, is_best_block: bool) { + let hash = block.header.hash(); + let block_number = block.header.number().clone(); + + let mut chain = self.chain.write(); + chain.block_by_hash.insert(hash, block.clone()); + chain.block_by_number.entry(block_number).or_default().push((block, is_best_block.into())); + } + fn hash_and_length_inner(ex: &Extrinsic) -> (Hash, usize) { let encoded = ex.encode(); (BlakeTwo256::hash(&encoded), encoded.len()) @@ -203,12 +234,36 @@ impl sc_transaction_graph::ChainApi for TestApi { fn validate_transaction( &self, - _at: &BlockId, + at: &BlockId, _source: TransactionSource, uxt: sc_transaction_graph::ExtrinsicFor, ) -> Self::ValidationFuture { self.validation_requests.write().push(uxt.clone()); + match self.block_id_to_number(at) { + Ok(Some(number)) => { + let found_best = self.chain + .read() + .block_by_number + .get(&number) + .map(|blocks| blocks.iter().any(|b| b.1.is_best())) + .unwrap_or(false); + + // If there is no best block, we don't know based on which block we should validate + // the transaction. (This is not required for this test function, but in real + // environment it would fail because of this). 
+ if !found_best { + return ready(Ok( + Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(1)).into()) + )) + } + }, + Ok(None) => return ready(Ok( + Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(2)).into()) + )), + Err(e) => return ready(Err(e)), + } + let (requires, provides) = if let Some(transfer) = uxt.try_transfer() { let chain_nonce = self.chain.read().nonces.get(&transfer.from).cloned().unwrap_or(0); let requires = if chain_nonce == transfer.nonce { @@ -224,7 +279,7 @@ impl sc_transaction_graph::ChainApi for TestApi { }; if self.chain.read().invalid_hashes.contains(&self.hash_and_length(&uxt).0) { - return futures::future::ready(Ok( + return ready(Ok( Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(0)).into()) )) } @@ -239,7 +294,7 @@ impl sc_transaction_graph::ChainApi for TestApi { (self.valid_modifier.read())(&mut validity); - futures::future::ready(Ok(Ok(validity))) + ready(Ok(Ok(validity))) } fn block_id_to_number( @@ -266,7 +321,7 @@ impl sc_transaction_graph::ChainApi for TestApi { .read() .block_by_number .get(num) - .map(|blocks| blocks[0].header().hash()), + .and_then(|blocks| blocks.iter().find(|b| b.1.is_best()).map(|b| b.0.header().hash())), }) } @@ -283,7 +338,7 @@ impl sc_transaction_graph::ChainApi for TestApi { .read() .block_by_number .get(num) - .map(|b| b[0].extrinsics().to_vec()), + .map(|b| b[0].0.extrinsics().to_vec()), BlockId::Hash(hash) => self.chain .read() .block_by_hash @@ -332,4 +387,3 @@ pub fn uxt(who: AccountKeyring, nonce: Index) -> Extrinsic { let signature = transfer.using_encoded(|e| who.sign(e)).into(); Extrinsic::Transfer { transfer, signature, exhaust_resources_when_not_first: false } } - -- GitLab From 7b002350a26b5013979b5b963ecea7626f860645 Mon Sep 17 00:00:00 2001 From: Cecile Tonglet Date: Fri, 7 Aug 2020 21:21:24 +0200 Subject: [PATCH 067/132] Fix `wait_for_blocks` counting blocks that didn't go through consensus (#6850) * Initial commit Forked at: 74804b5649eccfb83c90aec87bdca58e5d5c8789 Parent branch: origin/master * WIP Forked at: 74804b5649eccfb83c90aec87bdca58e5d5c8789 Parent branch: origin/master --- test-utils/client/src/lib.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index 060d4879675..f64e7e3cfb7 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -384,9 +384,11 @@ where Box::pin(async move { while let Some(notification) = import_notification_stream.next().await { - blocks.insert(notification.hash); - if blocks.len() == count { - break; + if notification.is_new_best { + blocks.insert(notification.hash); + if blocks.len() == count { + break; + } } } }) -- GitLab From 2508795149f957dc234406ae9fff4026f21988df Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sun, 9 Aug 2020 01:24:34 +0200 Subject: [PATCH 068/132] pallet-evm: move gas price check to execute_evm (#6837) --- frame/evm/src/lib.rs | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/frame/evm/src/lib.rs b/frame/evm/src/lib.rs index 0dcc4526c7f..013da0cca97 100644 --- a/frame/evm/src/lib.rs +++ b/frame/evm/src/lib.rs @@ -326,7 +326,6 @@ decl_module! { gas_price: U256, nonce: Option, ) -> DispatchResult { - ensure!(gas_price >= T::FeeCalculator::min_gas_price(), Error::::GasPriceTooLow); T::CallOrigin::ensure_address_origin(&source, origin)?; match Self::execute_call( @@ -335,7 +334,7 @@ decl_module! 
{ input, value, gas_limit, - gas_price, + Some(gas_price), nonce, true, )? { @@ -362,7 +361,6 @@ decl_module! { gas_price: U256, nonce: Option, ) -> DispatchResult { - ensure!(gas_price >= T::FeeCalculator::min_gas_price(), Error::::GasPriceTooLow); T::CallOrigin::ensure_address_origin(&source, origin)?; match Self::execute_create( @@ -370,7 +368,7 @@ decl_module! { init, value, gas_limit, - gas_price, + Some(gas_price), nonce, true, )? { @@ -397,7 +395,6 @@ decl_module! { gas_price: U256, nonce: Option, ) -> DispatchResult { - ensure!(gas_price >= T::FeeCalculator::min_gas_price(), Error::::GasPriceTooLow); T::CallOrigin::ensure_address_origin(&source, origin)?; match Self::execute_create2( @@ -406,7 +403,7 @@ decl_module! { salt, value, gas_limit, - gas_price, + Some(gas_price), nonce, true, )? { @@ -486,7 +483,7 @@ impl Module { init: Vec, value: U256, gas_limit: u32, - gas_price: U256, + gas_price: Option, nonce: Option, apply_state: bool, ) -> Result<(ExitReason, H160, U256), Error> { @@ -518,7 +515,7 @@ impl Module { salt: H256, value: U256, gas_limit: u32, - gas_price: U256, + gas_price: Option, nonce: Option, apply_state: bool, ) -> Result<(ExitReason, H160, U256), Error> { @@ -552,7 +549,7 @@ impl Module { input: Vec, value: U256, gas_limit: u32, - gas_price: U256, + gas_price: Option, nonce: Option, apply_state: bool, ) -> Result<(ExitReason, Vec, U256), Error> { @@ -578,13 +575,21 @@ impl Module { source: H160, value: U256, gas_limit: u32, - gas_price: U256, + gas_price: Option, nonce: Option, apply_state: bool, f: F, ) -> Result<(ExitReason, R, U256), Error> where F: FnOnce(&mut StackExecutor>) -> (ExitReason, R), { + let gas_price = match gas_price { + Some(gas_price) => { + ensure!(gas_price >= T::FeeCalculator::min_gas_price(), Error::::GasPriceTooLow); + gas_price + }, + None => U256::zero(), + }; + let vicinity = Vicinity { gas_price, origin: source, -- GitLab From 6f31766f9bddc542aac31423aec0e961a82260cf Mon Sep 17 00:00:00 2001 From: Joshy Orndorff Date: Mon, 10 Aug 2020 03:52:21 -0400 Subject: [PATCH 069/132] Update pinned wasm-bindgen dependency (#6861) * Update dependency. 
* Update readme docs * update python webserver command --- Cargo.lock | 22 +++++++++++----------- bin/node/browser-testing/Cargo.toml | 2 +- bin/node/cli/browser-demo/README.md | 6 +++++- bin/node/cli/browser-demo/build.sh | 2 +- 4 files changed, 18 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 20ed87326b3..7bb44a0e6da 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9251,7 +9251,7 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3bfd5b7557925ce778ff9b9ef90e3ade34c524b5ff10e239c69a42d546d2af56" dependencies = [ - "rand 0.3.23", + "rand 0.7.3", ] [[package]] @@ -9475,9 +9475,9 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasm-bindgen" -version = "0.2.62" +version = "0.2.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3c7d40d09cdbf0f4895ae58cf57d92e1e57a9dd8ed2e8390514b54a47cc5551" +checksum = "f0563a9a4b071746dd5aedbc3a28c6fe9be4586fb3fbadb67c400d4f53c6b16c" dependencies = [ "cfg-if", "serde", @@ -9487,9 +9487,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.62" +version = "0.2.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3972e137ebf830900db522d6c8fd74d1900dcfc733462e9a12e942b00b4ac94" +checksum = "bc71e4c5efa60fb9e74160e89b93353bc24059999c0ae0fb03affc39770310b0" dependencies = [ "bumpalo", "lazy_static", @@ -9514,9 +9514,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.62" +version = "0.2.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cd85aa2c579e8892442954685f0d801f9129de24fa2136b2c6a539c76b65776" +checksum = "97c57cefa5fa80e2ba15641578b44d36e7a64279bc5ed43c6dbaf329457a2ed2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -9524,9 +9524,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.62" +version = "0.2.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eb197bd3a47553334907ffd2f16507b4f4f01bbec3ac921a7719e0decdfe72a" +checksum = "841a6d1c35c6f596ccea1f82504a192a60378f64b3bb0261904ad8f2f5657556" dependencies = [ "proc-macro2", "quote", @@ -9537,9 +9537,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.62" +version = "0.2.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a91c2916119c17a8e316507afaaa2dd94b47646048014bbdf6bef098c1bb58ad" +checksum = "93b162580e34310e5931c4b792560108b10fd14d64915d7fff8ff00180e70092" [[package]] name = "wasm-bindgen-test" diff --git a/bin/node/browser-testing/Cargo.toml b/bin/node/browser-testing/Cargo.toml index a5004df20e8..c3136ab74a4 100644 --- a/bin/node/browser-testing/Cargo.toml +++ b/bin/node/browser-testing/Cargo.toml @@ -12,7 +12,7 @@ libp2p = { version = "0.22.0", default-features = false } jsonrpc-core = "14.2.0" serde = "1.0.106" serde_json = "1.0.48" -wasm-bindgen = { version = "=0.2.62", features = ["serde-serialize"] } +wasm-bindgen = { version = "=0.2.67", features = ["serde-serialize"] } wasm-bindgen-futures = "0.4.10" wasm-bindgen-test = "0.3.10" futures = "0.3.4" diff --git a/bin/node/cli/browser-demo/README.md b/bin/node/cli/browser-demo/README.md index 08f1646f114..a11b250ba1f 100644 --- a/bin/node/cli/browser-demo/README.md +++ b/bin/node/cli/browser-demo/README.md @@ -1,6 +1,10 @@ # How to run this demo ```sh -cargo install wasm-bindgen-cli # If necessary +# If necessary, install wasm-bindgen +# The version 
must match that used when building the browser demo. +cargo install --version 0.2.67 wasm-bindgen-cli + +# Run the build script ./build.sh ``` diff --git a/bin/node/cli/browser-demo/build.sh b/bin/node/cli/browser-demo/build.sh index be52b7a523f..ea0380b760e 100755 --- a/bin/node/cli/browser-demo/build.sh +++ b/bin/node/cli/browser-demo/build.sh @@ -1,4 +1,4 @@ #!/usr/bin/env sh cargo +nightly build --release -p node-cli --target wasm32-unknown-unknown --no-default-features --features browser -Z features=itarget wasm-bindgen ../../../../target/wasm32-unknown-unknown/release/node_cli.wasm --out-dir pkg --target web -python -m http.server 8000 +python -m SimpleHTTPServer 8000 -- GitLab From eb0e05e126a027499d0993c0df8833c55bc359e0 Mon Sep 17 00:00:00 2001 From: Hernando Castano Date: Mon, 10 Aug 2020 06:31:36 -0400 Subject: [PATCH 070/132] Add Subscription RPC for Grandpa Finality (#5732) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Rough skeleton for what I think the RPC should look like * Create channel for sending justifications Sends finalized header and justification from Grandpa to the client. This lays the groundwork for hooking into the RPC module. * WIP: Add subscribers for justifications to Grandpa Adds the Sender end of a channel into Grandpa, through which notifications about block finality events can be sent. * WIP: Add a struct for managing subscriptions Slightly different approach from the last commit, but same basic idea. Still a rough sketch, very much doesn't compile yet. * Make naming more clear and lock data in Arc * Rough idea of what RPC would look like * Remove code from previous approach * Missed some things * Update client/rpc-api/src/chain/mod.rs Co-Authored-By: Tomasz Drwięga * Update client/rpc-api/src/chain/mod.rs Co-Authored-By: Tomasz Drwięga * Split justification subscription into sender and receiver halves * Replace RwLock with a Mutex * Add sample usage from the Service's point of view * Remove code that referred to "chain_" RPC * Use the Justification sender/receivers from Grandpa LinkHalf * Add some PubSub boilerplate * Add guiding comments * TMP: comment out to fix compilation * Return MetaIoHandler from PubSubHandler in create_full * Uncomment pubsub methods in rpc handler (fails to build) * node/rpc: make Metadata concrete in create_full to fix compilation * node: pass in SubscriptionManger to grandpa rpc handler * grandpa-rpc: use SubscriptionManger to add subscriber * grandpa-rpc: attempt at setting up the justification stream (fails to build) * grandpa-rpc: fix compilation of connecting stream to sink * grandpa-rpc: implement unsubscribe * grandpa-rpc: update older tests * grandpa-rpc: add full prefix to avoid confusing rust-analyzer * grandpa-rpc: add test for pubsub not available * grandpa-rpc: tidy up leftover code * grandpa-rpc: add test for sub and unsub of justifications * grandpa-rpc: minor stylistic changes * grandpa-rpc: split unit test * grandpa-rpc: minor stylistic changes in test * grandpa-rpc: skip returning future when cancelling * grandpa-rpc: reuse testing executor from sc-rpc * grandpa-rpc: don't need to use PubSubHandler in tests * node-rpc: use MetaIoHandler rather than PubSubHandler * grandpa: log if getting header failed * grandpa: move justification channel creation into factory function * grandpa: make the justification sender optional * grandpa: fix compilation warnings * grandpa: move justification notification types to new file * grandpa-rpc: move JustificationNotification to 
grandpa-rpc * grandpa-rpc: move JustificationNotification to its own file * grandpa: rename justification channel pairs * grandpa: rename notifier types * grandpa: pass justification as GrandpaJustification to the rpc module * Move Metadata to sc-rpc-api * grandpa-rpc: remove unsed error code * grandpa: fix bug for checking if channel is closed before sendind * grandpa-rpc: unit test for sending justifications * grandpa-rpc: update comments for the pubsub test * grandpa-rpc: update pubsub tests with more steps * grandpa-rpc: fix pubsub test * grandpa-rpc: minor indendation * grandpa-rpc: decode instead of encode in test * grandpa: fix review comments * grandpa: remove unused serde dependency Co-authored-by: Tomasz Drwięga Co-authored-by: Jon Häggblad Co-authored-by: Tomasz Drwięga --- Cargo.lock | 15 +- bin/node-template/node/src/service.rs | 4 +- bin/node/cli/Cargo.toml | 1 + bin/node/cli/src/service.rs | 10 +- bin/node/rpc/Cargo.toml | 20 +- bin/node/rpc/src/lib.rs | 38 ++- client/finality-grandpa/rpc/Cargo.toml | 13 + client/finality-grandpa/rpc/src/lib.rs | 299 ++++++++++++++++-- .../finality-grandpa/rpc/src/notification.rs | 32 ++ client/finality-grandpa/src/environment.rs | 19 +- client/finality-grandpa/src/finality_proof.rs | 2 +- client/finality-grandpa/src/import.rs | 6 + client/finality-grandpa/src/justification.rs | 4 +- client/finality-grandpa/src/lib.rs | 21 ++ client/finality-grandpa/src/notification.rs | 102 ++++++ client/finality-grandpa/src/observer.rs | 14 +- client/finality-grandpa/src/tests.rs | 1 + client/rpc-api/src/lib.rs | 4 +- client/{rpc => rpc-api}/src/metadata.rs | 4 +- client/rpc/Cargo.toml | 5 +- client/rpc/src/author/mod.rs | 2 +- client/rpc/src/chain/mod.rs | 14 +- client/rpc/src/lib.rs | 10 +- client/rpc/src/state/mod.rs | 12 +- client/rpc/src/state/state_full.rs | 8 +- client/rpc/src/state/state_light.rs | 8 +- client/rpc/src/testing.rs | 1 + client/service/src/builder.rs | 14 +- 28 files changed, 589 insertions(+), 94 deletions(-) create mode 100644 client/finality-grandpa/rpc/src/notification.rs create mode 100644 client/finality-grandpa/src/notification.rs rename client/{rpc => rpc-api}/src/metadata.rs (95%) diff --git a/Cargo.lock b/Cargo.lock index 7bb44a0e6da..c82803d7bc9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3548,6 +3548,7 @@ dependencies = [ "futures 0.3.5", "hex-literal", "jsonrpc-core", + "jsonrpc-pubsub", "log", "nix", "node-executor", @@ -3680,6 +3681,7 @@ name = "node-rpc" version = "2.0.0-rc5" dependencies = [ "jsonrpc-core", + "jsonrpc-pubsub", "node-primitives", "node-runtime", "pallet-contracts-rpc", @@ -3698,7 +3700,6 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-consensus-babe", - "sp-runtime", "sp-transaction-pool", "substrate-frame-rpc-system", ] @@ -6622,11 +6623,23 @@ dependencies = [ "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", + "jsonrpc-pubsub", + "lazy_static", "log", + "parity-scale-codec", + "sc-block-builder", "sc-finality-grandpa", + "sc-network-test", + "sc-rpc", "serde", "serde_json", + "sp-blockchain", + "sp-consensus", "sp-core", + "sp-finality-grandpa", + "sp-keyring", + "sp-runtime", + "substrate-test-runtime-client", ] [[package]] diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 021d0ac8f7d..4eba4fdd093 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -113,7 +113,7 @@ pub fn new_full(config: Configuration) -> Result { let client = client.clone(); let pool = transaction_pool.clone(); - 
Box::new(move |deny_unsafe| { + Box::new(move |deny_unsafe, _| { let deps = crate::rpc::FullDeps { client: client.clone(), pool: pool.clone(), @@ -278,7 +278,7 @@ pub fn new_light(config: Configuration) -> Result { transaction_pool, task_manager: &mut task_manager, on_demand: Some(on_demand), - rpc_extensions_builder: Box::new(|_| ()), + rpc_extensions_builder: Box::new(|_, _| ()), telemetry_connection_sinks: sc_service::TelemetryConnectionSinks::default(), config, client, diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 4fbb48513b3..2f0124482e2 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -39,6 +39,7 @@ serde = { version = "1.0.102", features = ["derive"] } futures = { version = "0.3.1", features = ["compat"] } hex-literal = "0.2.1" jsonrpc-core = "14.2.0" +jsonrpc-pubsub = "14.2.0" log = "0.4.8" rand = "0.7.2" structopt = { version = "0.3.8", optional = true } diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index fd2c240a44e..be95ed6de53 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -49,7 +49,10 @@ pub fn new_partial(config: &Configuration) -> Result, sc_transaction_pool::FullPool, ( - impl Fn(node_rpc::DenyUnsafe) -> node_rpc::IoHandler, + impl Fn( + node_rpc::DenyUnsafe, + jsonrpc_pubsub::manager::SubscriptionManager + ) -> node_rpc::IoHandler, ( sc_consensus_babe::BabeBlockImport, grandpa::LinkHalf, @@ -101,6 +104,7 @@ pub fn new_partial(config: &Configuration) -> Result Result Result { @@ -75,6 +76,10 @@ pub struct GrandpaDeps { pub shared_voter_state: SharedVoterState, /// Authority set info. pub shared_authority_set: SharedAuthoritySet, + /// Receives notifications about justification events from Grandpa. + pub justification_stream: GrandpaJustificationStream, + /// Subscription manager to keep track of pubsub subscribers. + pub subscriptions: SubscriptionManager, } /// Full client dependencies. @@ -97,9 +102,9 @@ pub struct FullDeps { pub type IoHandler = jsonrpc_core::IoHandler; /// Instantiate all Full RPC extensions. 
-pub fn create_full( +pub fn create_full( deps: FullDeps, -) -> jsonrpc_core::IoHandler where +) -> jsonrpc_core::IoHandler where C: ProvideRuntimeApi, C: HeaderBackend + HeaderMetadata + 'static, C: Send + Sync + 'static, @@ -109,7 +114,6 @@ pub fn create_full( C::Api: BabeApi, C::Api: BlockBuilder, P: TransactionPool + 'static, - M: jsonrpc_core::Metadata + Default, SC: SelectChain +'static, { use substrate_frame_rpc_system::{FullSystem, SystemApi}; @@ -125,6 +129,7 @@ pub fn create_full( babe, grandpa, } = deps; + let BabeDeps { keystore, babe_config, @@ -133,6 +138,8 @@ pub fn create_full( let GrandpaDeps { shared_voter_state, shared_authority_set, + justification_stream, + subscriptions, } = grandpa; io.extend_with( @@ -161,7 +168,12 @@ pub fn create_full( ); io.extend_with( sc_finality_grandpa_rpc::GrandpaApi::to_delegate( - GrandpaRpcHandler::new(shared_authority_set, shared_voter_state) + GrandpaRpcHandler::new( + shared_authority_set, + shared_voter_state, + justification_stream, + subscriptions, + ) ) ); diff --git a/client/finality-grandpa/rpc/Cargo.toml b/client/finality-grandpa/rpc/Cargo.toml index f8f567c02e7..ca405eaec9d 100644 --- a/client/finality-grandpa/rpc/Cargo.toml +++ b/client/finality-grandpa/rpc/Cargo.toml @@ -8,16 +8,29 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] +sc-rpc = { version = "2.0.0-rc5", path = "../../rpc" } +sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } sc-finality-grandpa = { version = "0.8.0-rc5", path = "../" } finality-grandpa = { version = "0.12.3", features = ["derive-codec"] } jsonrpc-core = "14.2.0" jsonrpc-core-client = "14.2.0" jsonrpc-derive = "14.2.1" +jsonrpc-pubsub = "14.2.0" futures = { version = "0.3.4", features = ["compat"] } serde = { version = "1.0.105", features = ["derive"] } serde_json = "1.0.50" log = "0.4.8" derive_more = "0.99.2" +parity-scale-codec = { version = "1.3.0", features = ["derive"] } [dev-dependencies] +sc-block-builder = { version = "0.8.0-rc5", path = "../../block-builder" } +sc-network-test = { version = "0.8.0-rc5", path = "../../network/test" } +sc-rpc = { version = "2.0.0-rc5", path = "../../rpc", features = ["test-helpers"] } +sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } +sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } +sp-finality-grandpa = { version = "2.0.0-rc5", path = "../../../primitives/finality-grandpa" } +sp-keyring = { version = "2.0.0-rc5", path = "../../../primitives/keyring" } +substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../../test-utils/runtime/client" } +lazy_static = "1.4" diff --git a/client/finality-grandpa/rpc/src/lib.rs b/client/finality-grandpa/rpc/src/lib.rs index 1af84b7a844..c00c95c5f77 100644 --- a/client/finality-grandpa/rpc/src/lib.rs +++ b/client/finality-grandpa/rpc/src/lib.rs @@ -19,13 +19,25 @@ //! RPC API for GRANDPA. 
#![warn(missing_docs)] -use futures::{FutureExt, TryFutureExt}; +use futures::{FutureExt, TryFutureExt, TryStreamExt, StreamExt}; +use log::warn; use jsonrpc_derive::rpc; +use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId, manager::SubscriptionManager}; +use jsonrpc_core::futures::{ + sink::Sink as Sink01, + stream::Stream as Stream01, + future::Future as Future01, +}; mod error; +mod notification; mod report; +use sc_finality_grandpa::GrandpaJustificationStream; +use sp_runtime::traits::Block as BlockT; + use report::{ReportAuthoritySet, ReportVoterState, ReportedRoundStates}; +use notification::JustificationNotification; /// Returned when Grandpa RPC endpoint is not ready. pub const NOT_READY_ERROR_CODE: i64 = 1; @@ -35,48 +47,128 @@ type FutureResult = /// Provides RPC methods for interacting with GRANDPA. #[rpc] -pub trait GrandpaApi { +pub trait GrandpaApi { + /// RPC Metadata + type Metadata; + /// Returns the state of the current best round state as well as the /// ongoing background rounds. #[rpc(name = "grandpa_roundState")] fn round_state(&self) -> FutureResult; + + /// Returns the block most recently finalized by Grandpa, alongside + /// side its justification. + #[pubsub( + subscription = "grandpa_justifications", + subscribe, + name = "grandpa_subscribeJustifications" + )] + fn subscribe_justifications( + &self, + metadata: Self::Metadata, + subscriber: Subscriber + ); + + /// Unsubscribe from receiving notifications about recently finalized blocks. + #[pubsub( + subscription = "grandpa_justifications", + unsubscribe, + name = "grandpa_unsubscribeJustifications" + )] + fn unsubscribe_justifications( + &self, + metadata: Option, + id: SubscriptionId + ) -> jsonrpc_core::Result; } /// Implements the GrandpaApi RPC trait for interacting with GRANDPA. -pub struct GrandpaRpcHandler { +pub struct GrandpaRpcHandler { authority_set: AuthoritySet, voter_state: VoterState, + justification_stream: GrandpaJustificationStream, + manager: SubscriptionManager, } -impl GrandpaRpcHandler { - /// Creates a new GrandpaRpcHander instance. - pub fn new(authority_set: AuthoritySet, voter_state: VoterState) -> Self { +impl GrandpaRpcHandler { + /// Creates a new GrandpaRpcHandler instance. 
+ pub fn new( + authority_set: AuthoritySet, + voter_state: VoterState, + justification_stream: GrandpaJustificationStream, + manager: SubscriptionManager, + ) -> Self { Self { authority_set, voter_state, + justification_stream, + manager, } } } -impl GrandpaApi for GrandpaRpcHandler +impl GrandpaApi + for GrandpaRpcHandler where VoterState: ReportVoterState + Send + Sync + 'static, AuthoritySet: ReportAuthoritySet + Send + Sync + 'static, + Block: BlockT, { + type Metadata = sc_rpc::Metadata; + fn round_state(&self) -> FutureResult { let round_states = ReportedRoundStates::from(&self.authority_set, &self.voter_state); let future = async move { round_states }.boxed(); Box::new(future.map_err(jsonrpc_core::Error::from).compat()) } + + fn subscribe_justifications( + &self, + _metadata: Self::Metadata, + subscriber: Subscriber + ) { + let stream = self.justification_stream.subscribe() + .map(|x| Ok::<_,()>(JustificationNotification::from(x))) + .map_err(|e| warn!("Notification stream error: {:?}", e)) + .compat(); + + self.manager.add(subscriber, |sink| { + let stream = stream.map(|res| Ok(res)); + sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) + .send_all(stream) + .map(|_| ()) + }); + } + + fn unsubscribe_justifications( + &self, + _metadata: Option, + id: SubscriptionId + ) -> jsonrpc_core::Result { + Ok(self.manager.cancel(id)) + } } #[cfg(test)] mod tests { use super::*; - use jsonrpc_core::IoHandler; - use sc_finality_grandpa::{report, AuthorityId}; + use std::{collections::HashSet, convert::TryInto, sync::Arc}; + use jsonrpc_core::{Notification, Output, types::Params}; + + use parity_scale_codec::Decode; + use sc_block_builder::BlockBuilder; + use sc_finality_grandpa::{report, AuthorityId, GrandpaJustificationSender, GrandpaJustification}; + use sp_blockchain::HeaderBackend; + use sp_consensus::RecordProof; use sp_core::crypto::Public; - use std::{collections::HashSet, convert::TryInto}; + use sp_keyring::Ed25519Keyring; + use sp_runtime::traits::Header as HeaderT; + use substrate_test_runtime_client::{ + runtime::Block, + DefaultTestClientBuilderExt, + TestClientBuilderExt, + TestClientBuilder, + }; struct TestAuthoritySet; struct TestVoterState; @@ -106,7 +198,7 @@ mod tests { let voter_id_1 = AuthorityId::from_slice(&[1; 32]); let voters_best: HashSet<_> = vec![voter_id_1].into_iter().collect(); - let best_round_state = report::RoundState { + let best_round_state = sc_finality_grandpa::report::RoundState { total_weight: 100_u64.try_into().unwrap(), threshold_weight: 67_u64.try_into().unwrap(), prevote_current_weight: 50.into(), @@ -115,7 +207,7 @@ mod tests { precommit_ids: HashSet::new(), }; - let past_round_state = report::RoundState { + let past_round_state = sc_finality_grandpa::report::RoundState { total_weight: 100_u64.try_into().unwrap(), threshold_weight: 67_u64.try_into().unwrap(), prevote_current_weight: 100.into(), @@ -133,23 +225,42 @@ mod tests { } } + fn setup_io_handler(voter_state: VoterState) -> ( + jsonrpc_core::MetaIoHandler, + GrandpaJustificationSender, + ) where + VoterState: ReportVoterState + Send + Sync + 'static, + { + let (justification_sender, justification_stream) = GrandpaJustificationStream::channel(); + let manager = SubscriptionManager::new(Arc::new(sc_rpc::testing::TaskExecutor)); + + let handler = GrandpaRpcHandler::new( + TestAuthoritySet, + voter_state, + justification_stream, + manager, + ); + + let mut io = jsonrpc_core::MetaIoHandler::default(); + io.extend_with(GrandpaApi::to_delegate(handler)); + + (io, 
justification_sender) + } + #[test] fn uninitialized_rpc_handler() { - let handler = GrandpaRpcHandler::new(TestAuthoritySet, EmptyVoterState); - let mut io = IoHandler::new(); - io.extend_with(GrandpaApi::to_delegate(handler)); + let (io, _) = setup_io_handler(EmptyVoterState); let request = r#"{"jsonrpc":"2.0","method":"grandpa_roundState","params":[],"id":1}"#; let response = r#"{"jsonrpc":"2.0","error":{"code":1,"message":"GRANDPA RPC endpoint not ready"},"id":1}"#; - assert_eq!(Some(response.into()), io.handle_request_sync(request)); + let meta = sc_rpc::Metadata::default(); + assert_eq!(Some(response.into()), io.handle_request_sync(request, meta)); } #[test] fn working_rpc_handler() { - let handler = GrandpaRpcHandler::new(TestAuthoritySet, TestVoterState); - let mut io = IoHandler::new(); - io.extend_with(GrandpaApi::to_delegate(handler)); + let (io, _) = setup_io_handler(TestVoterState); let request = r#"{"jsonrpc":"2.0","method":"grandpa_roundState","params":[],"id":1}"#; let response = "{\"jsonrpc\":\"2.0\",\"result\":{\ @@ -166,6 +277,154 @@ mod tests { \"setId\":1\ },\"id\":1}"; - assert_eq!(io.handle_request_sync(request), Some(response.into())); + let meta = sc_rpc::Metadata::default(); + assert_eq!(io.handle_request_sync(request, meta), Some(response.into())); + } + + fn setup_session() -> (sc_rpc::Metadata, jsonrpc_core::futures::sync::mpsc::Receiver) { + let (tx, rx) = jsonrpc_core::futures::sync::mpsc::channel(1); + let meta = sc_rpc::Metadata::new(tx); + (meta, rx) + } + + #[test] + fn subscribe_and_unsubscribe_to_justifications() { + let (io, _) = setup_io_handler(TestVoterState); + let (meta, _) = setup_session(); + + // Subscribe + let sub_request = r#"{"jsonrpc":"2.0","method":"grandpa_subscribeJustifications","params":[],"id":1}"#; + let resp = io.handle_request_sync(sub_request, meta.clone()); + let resp: Output = serde_json::from_str(&resp.unwrap()).unwrap(); + + let sub_id = match resp { + Output::Success(success) => success.result, + _ => panic!(), + }; + + // Unsubscribe + let unsub_req = format!( + "{{\"jsonrpc\":\"2.0\",\"method\":\"grandpa_unsubscribeJustifications\",\"params\":[{}],\"id\":1}}", + sub_id + ); + assert_eq!( + io.handle_request_sync(&unsub_req, meta.clone()), + Some(r#"{"jsonrpc":"2.0","result":true,"id":1}"#.into()), + ); + + // Unsubscribe again and fail + assert_eq!( + io.handle_request_sync(&unsub_req, meta), + Some(r#"{"jsonrpc":"2.0","result":false,"id":1}"#.into()), + ); + } + + #[test] + fn subscribe_and_unsubscribe_with_wrong_id() { + let (io, _) = setup_io_handler(TestVoterState); + let (meta, _) = setup_session(); + + // Subscribe + let sub_request = r#"{"jsonrpc":"2.0","method":"grandpa_subscribeJustifications","params":[],"id":1}"#; + let resp = io.handle_request_sync(sub_request, meta.clone()); + let resp: Output = serde_json::from_str(&resp.unwrap()).unwrap(); + assert!(matches!(resp, Output::Success(_))); + + // Unsubscribe with wrong ID + assert_eq!( + io.handle_request_sync( + r#"{"jsonrpc":"2.0","method":"grandpa_unsubscribeJustifications","params":["FOO"],"id":1}"#, + meta.clone() + ), + Some(r#"{"jsonrpc":"2.0","result":false,"id":1}"#.into()) + ); + } + + fn create_justification() -> GrandpaJustification { + let peers = &[Ed25519Keyring::Alice]; + + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let client = builder.build(); + let client = Arc::new(client); + + let built_block = BlockBuilder::new( + &*client, + client.info().best_hash, + client.info().best_number, + RecordProof::Yes, + 
Default::default(), + &*backend, + ).unwrap().build().unwrap(); + + let block = built_block.block; + let block_hash = block.hash(); + + let justification = { + let round = 1; + let set_id = 0; + + let precommit = finality_grandpa::Precommit { + target_hash: block_hash, + target_number: *block.header.number(), + }; + + let msg = finality_grandpa::Message::Precommit(precommit.clone()); + let encoded = sp_finality_grandpa::localized_payload(round, set_id, &msg); + let signature = peers[0].sign(&encoded[..]).into(); + + let precommit = finality_grandpa::SignedPrecommit { + precommit, + signature, + id: peers[0].public().into(), + }; + + let commit = finality_grandpa::Commit { + target_hash: block_hash, + target_number: *block.header.number(), + precommits: vec![precommit], + }; + + GrandpaJustification::from_commit(&client, round, commit).unwrap() + }; + + justification + } + + #[test] + fn subscribe_and_listen_to_one_justification() { + let (io, justification_sender) = setup_io_handler(TestVoterState); + let (meta, receiver) = setup_session(); + + // Subscribe + let sub_request = + r#"{"jsonrpc":"2.0","method":"grandpa_subscribeJustifications","params":[],"id":1}"#; + + let resp = io.handle_request_sync(sub_request, meta.clone()); + let mut resp: serde_json::Value = serde_json::from_str(&resp.unwrap()).unwrap(); + let sub_id: String = serde_json::from_value(resp["result"].take()).unwrap(); + + // Notify with a header and justification + let justification = create_justification(); + let _ = justification_sender.notify(justification.clone()).unwrap(); + + // Inspect what we received + let recv = receiver.take(1).wait().flatten().collect::>(); + let recv: Notification = serde_json::from_str(&recv[0]).unwrap(); + let mut json_map = match recv.params { + Params::Map(json_map) => json_map, + _ => panic!(), + }; + + let recv_sub_id: String = + serde_json::from_value(json_map["subscription"].take()).unwrap(); + let recv_justification: Vec = + serde_json::from_value(json_map["result"].take()).unwrap(); + let recv_justification: GrandpaJustification = + Decode::decode(&mut &recv_justification[..]).unwrap(); + + assert_eq!(recv.method, "grandpa_justifications"); + assert_eq!(recv_sub_id, sub_id); + assert_eq!(recv_justification, justification); } } diff --git a/client/finality-grandpa/rpc/src/notification.rs b/client/finality-grandpa/rpc/src/notification.rs new file mode 100644 index 00000000000..831f4681549 --- /dev/null +++ b/client/finality-grandpa/rpc/src/notification.rs @@ -0,0 +1,32 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
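// A minimal sketch (not part of the new file below) of how a subscriber might
// turn the bytes delivered by a `grandpa_justifications` notification back into
// a `GrandpaJustification`, mirroring what the
// `subscribe_and_listen_to_one_justification` test above does. The helper name
// is hypothetical, and `Block` stands for the subscriber's block type.
use parity_scale_codec::Decode;
use sc_finality_grandpa::GrandpaJustification;

fn decode_justification<Block: sp_runtime::traits::Block>(
	bytes: &[u8],
) -> Result<GrandpaJustification<Block>, parity_scale_codec::Error> {
	// The subscription's `result` field carries the SCALE-encoded justification.
	GrandpaJustification::<Block>::decode(&mut &bytes[..])
}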
+ +use serde::{Serialize, Deserialize}; +use parity_scale_codec::Encode; +use sp_runtime::traits::Block as BlockT; +use sc_finality_grandpa::GrandpaJustification; + +/// An encoded justification proving that the given header has been finalized +#[derive(Clone, Serialize, Deserialize)] +pub struct JustificationNotification(Vec); + +impl From> for JustificationNotification { + fn from(notification: GrandpaJustification) -> Self { + JustificationNotification(notification.encode()) + } +} diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index ca47e5e2cc4..a7a29fe0e8a 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -51,6 +51,7 @@ use sp_consensus::SelectChain; use crate::authorities::{AuthoritySet, SharedAuthoritySet}; use crate::communication::Network as NetworkT; use crate::consensus_changes::SharedConsensusChanges; +use crate::notification::GrandpaJustificationSender; use crate::justification::GrandpaJustification; use crate::until_imported::UntilVoteTargetImported; use crate::voting_rule::VotingRule; @@ -390,7 +391,6 @@ impl Metrics { } } - /// The environment we run GRANDPA in. pub(crate) struct Environment, SC, VR> { pub(crate) client: Arc, @@ -404,6 +404,7 @@ pub(crate) struct Environment, SC, pub(crate) voter_set_state: SharedVoterSetState, pub(crate) voting_rule: VR, pub(crate) metrics: Option, + pub(crate) justification_sender: Option>, pub(crate) _phantom: PhantomData, } @@ -1022,6 +1023,7 @@ where number, (round, commit).into(), false, + &self.justification_sender, ) } @@ -1086,6 +1088,7 @@ pub(crate) fn finalize_block( number: NumberFor, justification_or_commit: JustificationOrCommit, initial_sync: bool, + justification_sender: &Option>, ) -> Result<(), CommandOrError>> where Block: BlockT, BE: Backend, @@ -1097,6 +1100,7 @@ pub(crate) fn finalize_block( let mut authority_set = authority_set.inner().write(); let status = client.info(); + if number <= status.finalized_number && client.hash(number)? == Some(hash) { // This can happen after a forced change (triggered by the finality tracker when finality is stalled), since // the voter will be restarted at the median last finalized block, which can be lower than the local best @@ -1157,7 +1161,7 @@ pub(crate) fn finalize_block( // justifications for transition blocks which will be requested by // syncing clients. 
let justification = match justification_or_commit { - JustificationOrCommit::Justification(justification) => Some(justification.encode()), + JustificationOrCommit::Justification(justification) => Some(justification), JustificationOrCommit::Commit((round_number, commit)) => { let mut justification_required = // justification is always required when block that enacts new authorities @@ -1184,13 +1188,22 @@ pub(crate) fn finalize_block( commit, )?; - Some(justification.encode()) + Some(justification) } else { None } }, }; + // Notify any registered listeners in case we have a justification + if let Some(sender) = justification_sender { + if let Some(ref justification) = justification { + let _ = sender.notify(justification.clone()); + } + } + + let justification = justification.map(|j| j.encode()); + debug!(target: "afg", "Finalizing blocks up to ({:?}, {})", number, hash); // ideally some handle to a synchronization oracle would be used diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index f334ddde2b9..2ac9ec57f3d 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -149,7 +149,7 @@ impl AuthoritySetForFinalityChecker for Arc { +pub struct FinalityProofProvider { backend: Arc, authority_provider: Arc>, } diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index b37ab7907a6..c9f2d8d2f7b 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -44,6 +44,7 @@ use crate::authorities::{AuthoritySet, SharedAuthoritySet, DelayKind, PendingCha use crate::consensus_changes::SharedConsensusChanges; use crate::environment::finalize_block; use crate::justification::GrandpaJustification; +use crate::notification::GrandpaJustificationSender; use std::marker::PhantomData; /// A block-import handler for GRANDPA. @@ -62,6 +63,7 @@ pub struct GrandpaBlockImport { send_voter_commands: TracingUnboundedSender>>, consensus_changes: SharedConsensusChanges>, authority_set_hard_forks: HashMap>>, + justification_sender: GrandpaJustificationSender, _phantom: PhantomData, } @@ -76,6 +78,7 @@ impl Clone for send_voter_commands: self.send_voter_commands.clone(), consensus_changes: self.consensus_changes.clone(), authority_set_hard_forks: self.authority_set_hard_forks.clone(), + justification_sender: self.justification_sender.clone(), _phantom: PhantomData, } } @@ -560,6 +563,7 @@ impl GrandpaBlockImport>>, consensus_changes: SharedConsensusChanges>, authority_set_hard_forks: Vec<(SetId, PendingChange>)>, + justification_sender: GrandpaJustificationSender, ) -> GrandpaBlockImport { // check for and apply any forced authority set hard fork that applies // to the *current* authority set. @@ -603,6 +607,7 @@ impl GrandpaBlockImport { round: u64, pub(crate) commit: Commit, @@ -47,7 +47,7 @@ pub struct GrandpaJustification { impl GrandpaJustification { /// Create a GRANDPA justification from the given commit. This method /// assumes the commit is valid and well-formed. 
- pub(crate) fn from_commit( + pub fn from_commit( client: &Arc, round: u64, commit: Commit, diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index 7d74d0eebfc..a586dc8e31f 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -119,12 +119,14 @@ mod finality_proof; mod import; mod justification; mod light_import; +mod notification; mod observer; mod until_imported; mod voting_rule; pub use authorities::SharedAuthoritySet; pub use finality_proof::{FinalityProofProvider, StorageAndProofProvider}; +pub use notification::{GrandpaJustificationSender, GrandpaJustificationStream}; pub use import::GrandpaBlockImport; pub use justification::GrandpaJustification; pub use light_import::{light_block_import, GrandpaLightBlockImport}; @@ -448,6 +450,8 @@ pub struct LinkHalf { select_chain: SC, persistent_data: PersistentData, voter_commands_rx: TracingUnboundedReceiver>>, + justification_sender: GrandpaJustificationSender, + justification_stream: GrandpaJustificationStream, } impl LinkHalf { @@ -455,6 +459,11 @@ impl LinkHalf { pub fn shared_authority_set(&self) -> &SharedAuthoritySet> { &self.persistent_data.authority_set } + + /// Get the receiving end of justification notifications. + pub fn justification_stream(&self) -> GrandpaJustificationStream { + self.justification_stream.clone() + } } /// Provider for the Grandpa authority set configured on the genesis block. @@ -553,6 +562,9 @@ where let (voter_commands_tx, voter_commands_rx) = tracing_unbounded("mpsc_grandpa_voter_command"); + let (justification_sender, justification_stream) = + GrandpaJustificationStream::channel(); + // create pending change objects with 0 delay and enacted on finality // (i.e. standard changes) for each authority set hard fork. 
let authority_set_hard_forks = authority_set_hard_forks @@ -579,12 +591,15 @@ where voter_commands_tx, persistent_data.consensus_changes.clone(), authority_set_hard_forks, + justification_sender.clone(), ), LinkHalf { client, select_chain, persistent_data, voter_commands_rx, + justification_sender, + justification_stream, }, )) } @@ -719,6 +734,8 @@ pub fn run_grandpa_voter( select_chain, persistent_data, voter_commands_rx, + justification_sender, + justification_stream: _, } = link; let network = NetworkBridge::new( @@ -767,6 +784,7 @@ pub fn run_grandpa_voter( voter_commands_rx, prometheus_registry, shared_voter_state, + justification_sender, ); let voter_work = voter_work @@ -827,6 +845,7 @@ where voter_commands_rx: TracingUnboundedReceiver>>, prometheus_registry: Option, shared_voter_state: SharedVoterState, + justification_sender: GrandpaJustificationSender, ) -> Self { let metrics = match prometheus_registry.as_ref().map(Metrics::register) { Some(Ok(metrics)) => Some(metrics), @@ -850,6 +869,7 @@ where consensus_changes: persistent_data.consensus_changes.clone(), voter_set_state: persistent_data.set_state, metrics: metrics.as_ref().map(|m| m.environment.clone()), + justification_sender: Some(justification_sender), _phantom: PhantomData, }); @@ -988,6 +1008,7 @@ where network: self.env.network.clone(), voting_rule: self.env.voting_rule.clone(), metrics: self.env.metrics.clone(), + justification_sender: self.env.justification_sender.clone(), _phantom: PhantomData, }); diff --git a/client/finality-grandpa/src/notification.rs b/client/finality-grandpa/src/notification.rs new file mode 100644 index 00000000000..16f705f0eeb --- /dev/null +++ b/client/finality-grandpa/src/notification.rs @@ -0,0 +1,102 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::sync::Arc; +use parking_lot::Mutex; + +use sp_runtime::traits::Block as BlockT; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; + +use crate::justification::GrandpaJustification; + +// Stream of justifications returned when subscribing. +type JustificationStream = TracingUnboundedReceiver>; + +// Sending endpoint for notifying about justifications. +type JustificationSender = TracingUnboundedSender>; + +// Collection of channel sending endpoints shared with the receiver side so they can register +// themselves. +type SharedJustificationSenders = Arc>>>; + +/// The sending half of the Grandpa justification channel(s). +/// +/// Used to send notifications about justifications generated +/// at the end of a Grandpa round. +#[derive(Clone)] +pub struct GrandpaJustificationSender { + subscribers: SharedJustificationSenders +} + +impl GrandpaJustificationSender { + /// The `subscribers` should be shared with a corresponding + /// `GrandpaJustificationStream`. 
+ fn new(subscribers: SharedJustificationSenders) -> Self { + Self { + subscribers, + } + } + + /// Send out a notification to all subscribers that a new justification + /// is available for a block. + pub fn notify(&self, notification: GrandpaJustification) -> Result<(), ()> { + self.subscribers.lock().retain(|n| { + !n.is_closed() && n.unbounded_send(notification.clone()).is_ok() + }); + Ok(()) + } +} + +/// The receiving half of the Grandpa justification channel. +/// +/// Used to receive notifications about justifications generated +/// at the end of a Grandpa round. +/// The `GrandpaJustificationStream` entity stores the `SharedJustificationSenders` +/// so it can be used to add more subscriptions. +#[derive(Clone)] +pub struct GrandpaJustificationStream { + subscribers: SharedJustificationSenders +} + +impl GrandpaJustificationStream { + /// Creates a new pair of receiver and sender of justification notifications. + pub fn channel() -> (GrandpaJustificationSender, Self) { + let subscribers = Arc::new(Mutex::new(vec![])); + let receiver = GrandpaJustificationStream::new(subscribers.clone()); + let sender = GrandpaJustificationSender::new(subscribers.clone()); + (sender, receiver) + } + + /// Create a new receiver of justification notifications. + /// + /// The `subscribers` should be shared with a corresponding + /// `GrandpaJustificationSender`. + fn new(subscribers: SharedJustificationSenders) -> Self { + Self { + subscribers, + } + } + + /// Subscribe to a channel through which justifications are sent + /// at the end of each Grandpa voting round. + pub fn subscribe(&self) -> JustificationStream { + let (sender, receiver) = tracing_unbounded("mpsc_justification_notification_stream"); + self.subscribers.lock().push(sender); + receiver + } +} diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index 6a7a1f07b05..8fb536a3697 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -40,6 +40,7 @@ use crate::{ use crate::authorities::SharedAuthoritySet; use crate::communication::{Network as NetworkT, NetworkBridge}; use crate::consensus_changes::SharedConsensusChanges; +use crate::notification::GrandpaJustificationSender; use sp_finality_grandpa::AuthorityId; use std::marker::{PhantomData, Unpin}; @@ -69,6 +70,7 @@ fn grandpa_observer( authority_set: &SharedAuthoritySet>, consensus_changes: &SharedConsensusChanges>, voters: &Arc>, + justification_sender: &Option>, last_finalized_number: NumberFor, commits: S, note_round: F, @@ -85,6 +87,7 @@ fn grandpa_observer( let consensus_changes = consensus_changes.clone(); let client = client.clone(); let voters = voters.clone(); + let justification_sender = justification_sender.clone(); let observer = commits.try_fold(last_finalized_number, move |last_finalized_number, global| { let (round, commit, callback) = match global { @@ -127,6 +130,7 @@ fn grandpa_observer( finalized_number, (round, commit).into(), false, + &justification_sender, ) { Ok(_) => {}, Err(e) => return future::err(e), @@ -177,6 +181,7 @@ where select_chain: _, persistent_data, voter_commands_rx, + justification_sender, .. 
} = link; @@ -192,7 +197,8 @@ where network, persistent_data, config.keystore, - voter_commands_rx + voter_commands_rx, + Some(justification_sender), ); let observer_work = observer_work @@ -213,6 +219,7 @@ struct ObserverWork> { persistent_data: PersistentData, keystore: Option, voter_commands_rx: TracingUnboundedReceiver>>, + justification_sender: Option>, _phantom: PhantomData, } @@ -230,6 +237,7 @@ where persistent_data: PersistentData, keystore: Option, voter_commands_rx: TracingUnboundedReceiver>>, + justification_sender: Option>, ) -> Self { let mut work = ObserverWork { @@ -241,6 +249,7 @@ where persistent_data, keystore: keystore.clone(), voter_commands_rx, + justification_sender, _phantom: PhantomData, }; work.rebuild_observer(); @@ -287,6 +296,7 @@ where &self.persistent_data.authority_set, &self.persistent_data.consensus_changes, &voters, + &self.justification_sender, last_finalized_number, global_in, note_round, @@ -422,12 +432,14 @@ mod tests { ).unwrap(); let (_tx, voter_command_rx) = tracing_unbounded(""); + let observer = ObserverWork::new( client, tester.net_handle.clone(), persistent_data, None, voter_command_rx, + None, ); // Trigger a reputation change through the gossip validator. diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index e2cdd7653a6..d2905e4da44 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -1567,6 +1567,7 @@ where network, voting_rule, metrics: None, + justification_sender: None, _phantom: PhantomData, } } diff --git a/client/rpc-api/src/lib.rs b/client/rpc-api/src/lib.rs index cd2608dda92..7bae7518105 100644 --- a/client/rpc-api/src/lib.rs +++ b/client/rpc-api/src/lib.rs @@ -22,10 +22,12 @@ mod errors; mod helpers; +mod metadata; mod policy; -pub use jsonrpc_core::IoHandlerExtension as RpcExtension; pub use helpers::Receiver; +pub use jsonrpc_core::IoHandlerExtension as RpcExtension; +pub use metadata::Metadata; pub use policy::DenyUnsafe; pub mod author; diff --git a/client/rpc/src/metadata.rs b/client/rpc-api/src/metadata.rs similarity index 95% rename from client/rpc/src/metadata.rs rename to client/rpc-api/src/metadata.rs index 0416b07a679..cffcbf61f54 100644 --- a/client/rpc/src/metadata.rs +++ b/client/rpc-api/src/metadata.rs @@ -19,8 +19,8 @@ //! RPC Metadata use std::sync::Arc; +use jsonrpc_core::futures::sync::mpsc; use jsonrpc_pubsub::{Session, PubSubMetadata}; -use rpc::futures::sync::mpsc; /// RPC Metadata. 
/// @@ -32,7 +32,7 @@ pub struct Metadata { session: Option>, } -impl rpc::Metadata for Metadata {} +impl jsonrpc_core::Metadata for Metadata {} impl PubSubMetadata for Metadata { fn session(&self) -> Option> { self.session.clone() diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index 9c91fa3bc07..5681672cc34 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -37,6 +37,7 @@ sp-transaction-pool = { version = "2.0.0-rc5", path = "../../primitives/transact sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } hash-db = { version = "0.15.2", default-features = false } parking_lot = "0.10.0" +lazy_static = { version = "1.4.0", optional = true } [dev-dependencies] assert_matches = "1.3.0" @@ -46,4 +47,6 @@ sp-io = { version = "2.0.0-rc5", path = "../../primitives/io" } substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../test-utils/runtime/client" } tokio = "0.1.22" sc-transaction-pool = { version = "2.0.0-rc5", path = "../transaction-pool" } -lazy_static = "1.4.0" + +[features] +test-helpers = ["lazy_static"] diff --git a/client/rpc/src/author/mod.rs b/client/rpc/src/author/mod.rs index 974c1b85e1b..e6384c995dc 100644 --- a/client/rpc/src/author/mod.rs +++ b/client/rpc/src/author/mod.rs @@ -94,7 +94,7 @@ impl AuthorApi, BlockHash
, inherent_data_providers: sp_inherents::InherentDataProviders, + can_author_with: CAW, } -impl AuraVerifier - where P: Send + Sync + 'static +impl AuraVerifier where + P: Send + Sync + 'static, + CAW: Send + Sync + 'static, { fn check_inherents( &self, @@ -507,11 +509,22 @@ impl AuraVerifier block_id: BlockId, inherent_data: InherentData, timestamp_now: u64, - ) -> Result<(), Error> - where C: ProvideRuntimeApi, C::Api: BlockBuilderApi + ) -> Result<(), Error> where + C: ProvideRuntimeApi, C::Api: BlockBuilderApi, + CAW: CanAuthorWith, { const MAX_TIMESTAMP_DRIFT_SECS: u64 = 60; + if let Err(e) = self.can_author_with.can_author_with(&block_id) { + debug!( + target: "aura", + "Skipping `check_inherents` as authoring version is not compatible: {}", + e, + ); + + return Ok(()) + } + let inherent_res = self.client.runtime_api().check_inherents( &block_id, block, @@ -553,7 +566,7 @@ impl AuraVerifier } #[forbid(deprecated)] -impl Verifier for AuraVerifier where +impl Verifier for AuraVerifier where C: ProvideRuntimeApi + Send + Sync + @@ -565,6 +578,7 @@ impl Verifier for AuraVerifier where P: Pair + Send + Sync + 'static, P::Public: Send + Sync + Hash + Eq + Clone + Decode + Encode + Debug + 'static, P::Signature: Encode + Decode, + CAW: CanAuthorWith + Send + Sync + 'static, { fn verify( &mut self, @@ -812,7 +826,7 @@ impl BlockImport for AuraBlockImport( +pub fn import_queue( slot_duration: SlotDuration, block_import: I, justification_import: Option>, @@ -821,6 +835,7 @@ pub fn import_queue( inherent_data_providers: InherentDataProviders, spawner: &S, registry: Option<&Registry>, + can_author_with: CAW, ) -> Result, sp_consensus::Error> where B: BlockT, C::Api: BlockBuilderApi + AuraApi> + ApiExt, @@ -831,6 +846,7 @@ pub fn import_queue( P::Public: Clone + Eq + Send + Sync + Hash + Debug + Encode + Decode, P::Signature: Encode + Decode, S: sp_core::traits::SpawnNamed, + CAW: CanAuthorWith + Send + Sync + 'static, { register_aura_inherent_data_provider(&inherent_data_providers, slot_duration.get())?; initialize_authorities_cache(&*client)?; @@ -839,6 +855,7 @@ pub fn import_queue( client, inherent_data_providers, phantom: PhantomData, + can_author_with, }; Ok(BasicQueue::new( @@ -854,7 +871,7 @@ pub fn import_queue( #[cfg(test)] mod tests { use super::*; - use sp_consensus::{NoNetwork as DummyOracle, Proposal, RecordProof}; + use sp_consensus::{NoNetwork as DummyOracle, Proposal, RecordProof, AlwaysCanAuthor}; use sc_network_test::{Block as TestBlock, *}; use sp_runtime::traits::{Block as BlockT, DigestFor}; use sc_network::config::ProtocolConfig; @@ -924,7 +941,7 @@ mod tests { } impl TestNetFactory for AuraTestNet { - type Verifier = AuraVerifier; + type Verifier = AuraVerifier; type PeerData = (); /// Create new test network with peers and given config. @@ -951,6 +968,7 @@ mod tests { client, inherent_data_providers, phantom: Default::default(), + can_author_with: AlwaysCanAuthor, } }, PeersClient::Light(_, _) => unreachable!("No (yet) tests for light client + Aura"), diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 951d1467b49..9e7c3c9081b 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -787,22 +787,24 @@ impl BabeLink { } /// A verifier for Babe blocks. 
-pub struct BabeVerifier { +pub struct BabeVerifier { client: Arc, select_chain: SelectChain, inherent_data_providers: sp_inherents::InherentDataProviders, config: Config, epoch_changes: SharedEpochChanges, time_source: TimeSource, + can_author_with: CAW, } -impl BabeVerifier +impl BabeVerifier where Block: BlockT, Client: AuxStore + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, Client::Api: BlockBuilderApi + BabeApi, SelectChain: sp_consensus::SelectChain, + CAW: CanAuthorWith, { fn check_inherents( &self, @@ -810,6 +812,16 @@ where block_id: BlockId, inherent_data: InherentData, ) -> Result<(), Error> { + if let Err(e) = self.can_author_with.can_author_with(&block_id) { + debug!( + target: "babe", + "Skipping `check_inherents` as authoring version is not compatible: {}", + e, + ); + + return Ok(()) + } + let inherent_res = self.client.runtime_api().check_inherents( &block_id, block, @@ -908,13 +920,15 @@ where } } -impl Verifier for BabeVerifier +impl Verifier + for BabeVerifier where Block: BlockT, Client: HeaderMetadata + HeaderBackend + ProvideRuntimeApi + Send + Sync + AuxStore + ProvideCache, Client::Api: BlockBuilderApi + BabeApi, SelectChain: sp_consensus::SelectChain, + CAW: CanAuthorWith + Send + Sync, { fn verify( &mut self, @@ -1422,7 +1436,7 @@ pub fn block_import( /// /// The block import object provided must be the `BabeBlockImport` or a wrapper /// of it, otherwise crucial import logic will be omitted. -pub fn import_queue( +pub fn import_queue( babe_link: BabeLink, block_import: Inner, justification_import: Option>, @@ -1432,6 +1446,7 @@ pub fn import_queue( inherent_data_providers: InherentDataProviders, spawner: &impl sp_core::traits::SpawnNamed, registry: Option<&Registry>, + can_author_with: CAW, ) -> ClientResult> where Inner: BlockImport> + Send + Sync + 'static, @@ -1439,6 +1454,7 @@ pub fn import_queue( Client: HeaderBackend + HeaderMetadata, Client::Api: BlockBuilderApi + BabeApi + ApiExt, SelectChain: sp_consensus::SelectChain + 'static, + CAW: CanAuthorWith + Send + Sync + 'static, { register_babe_inherent_data_provider(&inherent_data_providers, babe_link.config.slot_duration)?; @@ -1449,6 +1465,7 @@ pub fn import_queue( config: babe_link.config, epoch_changes: babe_link.epoch_changes, time_source: babe_link.time_source, + can_author_with, }; Ok(BasicQueue::new( diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 958d7845edb..e302a3b3d0a 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -31,7 +31,7 @@ use sp_consensus_babe::{ }; use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sp_consensus::{ - NoNetwork as DummyOracle, Proposal, RecordProof, + NoNetwork as DummyOracle, Proposal, RecordProof, AlwaysCanAuthor, import_queue::{BoxBlockImport, BoxJustificationImport, BoxFinalityProofImport}, }; use sc_network_test::*; @@ -220,7 +220,7 @@ type TestSelectChain = substrate_test_runtime_client::LongestChain< >; pub struct TestVerifier { - inner: BabeVerifier, + inner: BabeVerifier, mutator: Mutator, } @@ -320,6 +320,7 @@ impl TestNetFactory for BabeTestNet { config: data.link.config.clone(), epoch_changes: data.link.epoch_changes.clone(), time_source: data.link.time_source.clone(), + can_author_with: AlwaysCanAuthor, }, mutator: MUTATOR.with(|m| m.borrow().clone()), } diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index 42d1bc05019..f8da1877665 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -186,16 
+186,19 @@ pub trait PowAlgorithm { } /// A block importer for PoW. -pub struct PowBlockImport { +pub struct PowBlockImport { algorithm: Algorithm, inner: I, select_chain: Option, client: Arc, inherent_data_providers: sp_inherents::InherentDataProviders, check_inherents_after: <::Header as HeaderT>::Number, + can_author_with: CAW, } -impl Clone for PowBlockImport { +impl Clone + for PowBlockImport +{ fn clone(&self) -> Self { Self { algorithm: self.algorithm.clone(), @@ -204,17 +207,19 @@ impl Clone for PowBlockImpor client: self.client.clone(), inherent_data_providers: self.inherent_data_providers.clone(), check_inherents_after: self.check_inherents_after.clone(), + can_author_with: self.can_author_with.clone(), } } } -impl PowBlockImport where +impl PowBlockImport where B: BlockT, I: BlockImport> + Send + Sync, I::Error: Into, C: ProvideRuntimeApi + Send + Sync + HeaderBackend + AuxStore + ProvideCache + BlockOf, C::Api: BlockBuilderApi, Algorithm: PowAlgorithm, + CAW: CanAuthorWith, { /// Create a new block import suitable to be used in PoW pub fn new( @@ -224,9 +229,17 @@ impl PowBlockImport where check_inherents_after: <::Header as HeaderT>::Number, select_chain: Option, inherent_data_providers: sp_inherents::InherentDataProviders, + can_author_with: CAW, ) -> Self { - Self { inner, client, algorithm, check_inherents_after, - select_chain, inherent_data_providers } + Self { + inner, + client, + algorithm, + check_inherents_after, + select_chain, + inherent_data_providers, + can_author_with, + } } fn check_inherents( @@ -242,6 +255,16 @@ impl PowBlockImport where return Ok(()) } + if let Err(e) = self.can_author_with.can_author_with(&block_id) { + debug!( + target: "pow", + "Skipping `check_inherents` as authoring version is not compatible: {}", + e, + ); + + return Ok(()) + } + let inherent_res = self.client.runtime_api().check_inherents( &block_id, block, @@ -270,7 +293,7 @@ impl PowBlockImport where } } -impl BlockImport for PowBlockImport where +impl BlockImport for PowBlockImport where B: BlockT, I: BlockImport> + Send + Sync, I::Error: Into, @@ -279,6 +302,7 @@ impl BlockImport for PowBlockImport, Algorithm: PowAlgorithm, Algorithm::Difficulty: 'static, + CAW: CanAuthorWith, { type Error = ConsensusError; type Transaction = sp_api::TransactionFor; @@ -649,7 +673,7 @@ fn mine_loop( }; log::info!("✅ Successfully mined block: {}", best_hash); - + let (hash, seal) = { let seal = DigestItem::Seal(POW_ENGINE_ID, seal); let mut header = header.clone(); diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 04b65a723e4..0e4dd91dd49 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -247,6 +247,15 @@ impl CanAuthorWith for AlwaysCanAuthor { } } +/// Never can author. +pub struct NeverCanAuthor; + +impl CanAuthorWith for NeverCanAuthor { + fn can_author_with(&self, _: &BlockId) -> Result<(), String> { + Err("Authoring is always disabled.".to_string()) + } +} + /// A type from which a slot duration can be obtained. pub trait SlotData { /// Gets the slot duration. 
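// A minimal sketch (not taken from the patch above) of a further `CanAuthorWith`
// implementation in the spirit of `AlwaysCanAuthor` and `NeverCanAuthor`. The
// struct name and its `enabled` flag are hypothetical, and the generic bounds are
// assumed to match how the trait is used by the Aura/BABE/PoW verifiers above.
use sp_consensus::CanAuthorWith;
use sp_runtime::{generic::BlockId, traits::Block as BlockT};

/// Allows authoring only when explicitly enabled in the node configuration.
#[derive(Clone)]
pub struct CanAuthorWhenEnabled {
	pub enabled: bool,
}

impl<B: BlockT> CanAuthorWith<B> for CanAuthorWhenEnabled {
	fn can_author_with(&self, _: &BlockId<B>) -> Result<(), String> {
		if self.enabled {
			Ok(())
		} else {
			Err("Authoring is disabled by configuration.".to_string())
		}
	}
}
// Handed to an import queue in place of `AlwaysCanAuthor`, such an implementation
// makes the verifiers above skip `check_inherents` whenever authoring is disabled.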
-- GitLab From 287ecc2974f14aa5d51f4e2ba6efac32d4093a50 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 17 Aug 2020 19:36:29 +0200 Subject: [PATCH 102/132] pow: add access to pre-digest for algorithm verifiers (#6900) * pow: fetch pre-runtime digest to verifier * Add Other error type * Fix log target and change docs to refer to pre_runtime --- client/consensus/pow/src/lib.rs | 41 +++++++++++++++++++++++++++------ 1 file changed, 34 insertions(+), 7 deletions(-) diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index f8da1877665..ca1a8584e2a 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -88,10 +88,13 @@ pub enum Error { CreateInherents(sp_inherents::Error), #[display(fmt = "Checking inherents failed: {}", _0)] CheckInherents(String), + #[display(fmt = "Multiple pre-runtime digests")] + MultiplePreRuntimeDigests, Client(sp_blockchain::Error), Codec(codec::Error), Environment(String), - Runtime(RuntimeString) + Runtime(RuntimeString), + Other(String), } impl std::convert::From> for String { @@ -172,6 +175,7 @@ pub trait PowAlgorithm { &self, parent: &BlockId, pre_hash: &B::Hash, + pre_digest: Option<&[u8]>, seal: &Seal, difficulty: Self::Difficulty, ) -> Result>; @@ -180,6 +184,7 @@ pub trait PowAlgorithm { &self, parent: &BlockId, pre_hash: &B::Hash, + pre_digest: Option<&[u8]>, difficulty: Self::Difficulty, round: u32, ) -> Result, Error>; @@ -368,9 +373,11 @@ impl BlockImport for PowBlockImport(&block.header)?; if !self.algorithm.verify( &BlockId::hash(parent_hash), &pre_hash, + pre_digest.as_ref().map(|v| &v[..]), &inner_seal, difficulty, )? { @@ -519,7 +526,7 @@ pub fn import_queue( /// However, it's not recommended to use background threads in the rest of the /// codebase. /// -/// `preruntime` is a parameter that allows a custom additional pre-runtime +/// `pre_runtime` is a parameter that allows a custom additional pre-runtime /// digest to be inserted for blocks being built. This can encode authorship /// information, or just be a graffiti. `round` is for number of rounds the /// CPU miner runs each time. This parameter should be tweaked so that each @@ -529,7 +536,7 @@ pub fn start_mine( client: Arc, algorithm: Algorithm, mut env: E, - preruntime: Option>, + pre_runtime: Option>, round: u32, mut sync_oracle: SO, build_time: std::time::Duration, @@ -557,7 +564,7 @@ pub fn start_mine( client.as_ref(), &algorithm, &mut env, - preruntime.as_ref(), + pre_runtime.as_ref(), round, &mut sync_oracle, build_time.clone(), @@ -581,7 +588,7 @@ fn mine_loop( client: &C, algorithm: &Algorithm, env: &mut E, - preruntime: Option<&Vec>, + pre_runtime: Option<&Vec>, round: u32, sync_oracle: &mut SO, build_time: std::time::Duration, @@ -640,8 +647,8 @@ fn mine_loop( let inherent_data = inherent_data_providers .create_inherent_data().map_err(Error::CreateInherents)?; let mut inherent_digest = Digest::default(); - if let Some(preruntime) = &preruntime { - inherent_digest.push(DigestItem::PreRuntime(POW_ENGINE_ID, preruntime.to_vec())); + if let Some(pre_runtime) = &pre_runtime { + inherent_digest.push(DigestItem::PreRuntime(POW_ENGINE_ID, pre_runtime.to_vec())); } let proposal = futures::executor::block_on(proposer.propose( inherent_data, @@ -658,6 +665,7 @@ fn mine_loop( let seal = algorithm.mine( &BlockId::Hash(best_hash), &header.hash(), + pre_runtime.map(|v| &v[..]), difficulty, round, )?; @@ -702,3 +710,22 @@ fn mine_loop( .map_err(|e| Error::BlockBuiltError(best_hash, e))?; } } + +/// Find PoW pre-runtime. 
+fn find_pre_digest(header: &B::Header) -> Result>, Error> { + let mut pre_digest: Option<_> = None; + for log in header.digest().logs() { + trace!(target: "pow", "Checking log {:?}, looking for pre runtime digest", log); + match (log, pre_digest.is_some()) { + (DigestItem::PreRuntime(POW_ENGINE_ID, _), true) => { + return Err(Error::MultiplePreRuntimeDigests) + }, + (DigestItem::PreRuntime(POW_ENGINE_ID, v), false) => { + pre_digest = Some(v.clone()); + }, + (_, _) => trace!(target: "pow", "Ignoring digest not meant for us"), + } + } + + Ok(pre_digest) +} -- GitLab From 399421abeb3de5b6bf7bbd1531764c5f94206eaa Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 17 Aug 2020 21:07:30 +0200 Subject: [PATCH 103/132] Derive Clone for AlwaysCanAuthor, NeverCanAuthor, CanAuthorWithNativeVersion (#6906) --- primitives/consensus/common/src/lib.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 0e4dd91dd49..fa4f233c680 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -212,6 +212,7 @@ pub trait CanAuthorWith { /// Checks if the node can author blocks by using /// [`NativeVersion::can_author_with`](sp_version::NativeVersion::can_author_with). +#[derive(Clone)] pub struct CanAuthorWithNativeVersion(T); impl CanAuthorWithNativeVersion { @@ -239,6 +240,7 @@ impl, Block: BlockT> CanAuthorWith CanAuthorWith for AlwaysCanAuthor { @@ -248,6 +250,7 @@ impl CanAuthorWith for AlwaysCanAuthor { } /// Never can author. +#[derive(Clone)] pub struct NeverCanAuthor; impl CanAuthorWith for NeverCanAuthor { -- GitLab From 8e1ed7d96df71688820788eff6939b8cbec8803c Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Mon, 17 Aug 2020 22:59:23 +0200 Subject: [PATCH 104/132] WeightInfo for System, Timestamp, and Utility (#6868) * initial updates to system * fix compile * Update writer.rs * update weights * finish system weights * timestamp weights * utility weight * Fix overflow in weight calculations * add back weight notes * Update for whitelisted benchmarks * add trait bounds * Revert "add trait bounds" This reverts commit 12b08b7189aa3969f96fa19b211a370860fdb240. 
* Update weights for unaccounted for read --- bin/node/runtime/src/lib.rs | 6 +- bin/node/runtime/src/weights/frame_system.rs | 58 +++++++++++++++++++ bin/node/runtime/src/weights/mod.rs | 3 + .../runtime/src/weights/pallet_timestamp.rs | 34 +++++++++++ .../runtime/src/weights/pallet_utility.rs | 35 +++++++++++ frame/system/benchmarking/src/lib.rs | 21 +++---- frame/system/src/default_weights.rs | 57 ++++++++++++++++++ frame/system/src/lib.rs | 38 ++++-------- frame/timestamp/src/default_weights.rs | 35 +++++++++++ frame/timestamp/src/lib.rs | 18 ++---- frame/utility/src/default_weights.rs | 34 +++++++++++ frame/utility/src/lib.rs | 19 ++---- frame/utility/src/tests.rs | 20 ++++++- utils/frame/benchmarking-cli/src/writer.rs | 6 ++ 14 files changed, 316 insertions(+), 68 deletions(-) create mode 100644 bin/node/runtime/src/weights/frame_system.rs create mode 100644 bin/node/runtime/src/weights/pallet_timestamp.rs create mode 100644 bin/node/runtime/src/weights/pallet_utility.rs create mode 100644 frame/system/src/default_weights.rs create mode 100644 frame/timestamp/src/default_weights.rs create mode 100644 frame/utility/src/default_weights.rs diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index acc1b072818..aa0ddfc61a7 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -182,13 +182,13 @@ impl frame_system::Trait for Runtime { type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); - type SystemWeightInfo = (); + type SystemWeightInfo = weights::frame_system::WeightInfo; } impl pallet_utility::Trait for Runtime { type Event = Event; type Call = Call; - type WeightInfo = (); + type WeightInfo = weights::pallet_utility::WeightInfo; } parameter_types! { @@ -352,7 +352,7 @@ impl pallet_timestamp::Trait for Runtime { type Moment = Moment; type OnTimestampSet = Babe; type MinimumPeriod = MinimumPeriod; - type WeightInfo = (); + type WeightInfo = weights::pallet_timestamp::WeightInfo; } parameter_types! { diff --git a/bin/node/runtime/src/weights/frame_system.rs b/bin/node/runtime/src/weights/frame_system.rs new file mode 100644 index 00000000000..9522fa75203 --- /dev/null +++ b/bin/node/runtime/src/weights/frame_system.rs @@ -0,0 +1,58 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 + +#![allow(unused_parens)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl frame_system::WeightInfo for WeightInfo { + // WARNING! Some components were not used: ["b"] + fn remark() -> Weight { + (1305000 as Weight) + } + fn set_heap_pages() -> Weight { + (2023000 as Weight) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + // WARNING! 
Some components were not used: ["d"] + fn set_changes_trie_config() -> Weight { + (10026000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn set_storage(i: u32, ) -> Weight { + (0 as Weight) + .saturating_add((656000 as Weight).saturating_mul(i as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + } + fn kill_storage(i: u32, ) -> Weight { + (4327000 as Weight) + .saturating_add((478000 as Weight).saturating_mul(i as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + } + fn kill_prefix(p: u32, ) -> Weight { + (8349000 as Weight) + .saturating_add((838000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) + } + fn suicide() -> Weight { + (29247000 as Weight) + } +} diff --git a/bin/node/runtime/src/weights/mod.rs b/bin/node/runtime/src/weights/mod.rs index 70bae879ce0..0e078e7ac08 100644 --- a/bin/node/runtime/src/weights/mod.rs +++ b/bin/node/runtime/src/weights/mod.rs @@ -15,5 +15,8 @@ //! A list of the different weight modules for our runtime. +pub mod frame_system; pub mod pallet_balances; pub mod pallet_democracy; +pub mod pallet_timestamp; +pub mod pallet_utility; diff --git a/bin/node/runtime/src/weights/pallet_timestamp.rs b/bin/node/runtime/src/weights/pallet_timestamp.rs new file mode 100644 index 00000000000..cfd5f192d35 --- /dev/null +++ b/bin/node/runtime/src/weights/pallet_timestamp.rs @@ -0,0 +1,34 @@ +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 + +#![allow(unused_parens)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl pallet_timestamp::WeightInfo for WeightInfo { + // WARNING! Some components were not used: ["t"] + fn set() -> Weight { + (9133000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + // WARNING! Some components were not used: ["t"] + fn on_finalize() -> Weight { + (5915000 as Weight) + } +} diff --git a/bin/node/runtime/src/weights/pallet_utility.rs b/bin/node/runtime/src/weights/pallet_utility.rs new file mode 100644 index 00000000000..c9ae0d7d233 --- /dev/null +++ b/bin/node/runtime/src/weights/pallet_utility.rs @@ -0,0 +1,35 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl pallet_utility::WeightInfo for WeightInfo { + fn batch(c: u32, ) -> Weight { + (16461000 as Weight) + .saturating_add((1982000 as Weight).saturating_mul(c as Weight)) + } + // WARNING! Some components were not used: ["u"] + fn as_derivative() -> Weight { + (4086000 as Weight) + } +} diff --git a/frame/system/benchmarking/src/lib.rs b/frame/system/benchmarking/src/lib.rs index 049fa5298c6..a3e7797996a 100644 --- a/frame/system/benchmarking/src/lib.rs +++ b/frame/system/benchmarking/src/lib.rs @@ -25,6 +25,7 @@ use sp_std::prelude::*; use sp_core::{ChangesTrieConfiguration, storage::well_known_keys}; use sp_runtime::traits::Hash; use frame_benchmarking::{benchmarks, account}; +use frame_support::traits::Get; use frame_support::storage::{self, StorageMap}; use frame_system::{Module as System, Call, RawOrigin, DigestItemOf, AccountInfo}; @@ -39,29 +40,26 @@ benchmarks! { _ { } remark { - // # of Bytes - let b in 0 .. 16_384; + let b in 0 .. T::MaximumBlockLength::get(); let remark_message = vec![1; b as usize]; let caller = account("caller", 0, SEED); }: _(RawOrigin::Signed(caller), remark_message) set_heap_pages { - // Heap page size - let i in 0 .. u32::max_value(); - }: _(RawOrigin::Root, i.into()) + }: _(RawOrigin::Root, Default::default()) // `set_code` was not benchmarked because it is pretty hard to come up with a real // Wasm runtime to test the upgrade with. But this is okay because we will make // `set_code` take a full block anyway. + #[extra] set_code_without_checks { - // Version number - let b in 0 .. 16_384; - let code = vec![1; b as usize]; + // Assume Wasm ~4MB + let code = vec![1; 4_000_000 as usize]; }: _(RawOrigin::Root, code) verify { let current_code = storage::unhashed::get_raw(well_known_keys::CODE).ok_or("Code not stored.")?; - assert_eq!(current_code.len(), b as usize); + assert_eq!(current_code.len(), 4_000_000 as usize); } set_changes_trie_config { @@ -141,16 +139,15 @@ benchmarks! { } suicide { - let n in 1 .. 1000; let caller: T::AccountId = account("caller", 0, SEED); let account_info = AccountInfo:: { - nonce: n.into(), + nonce: 1337.into(), refcount: 0, data: T::AccountData::default() }; frame_system::Account::::insert(&caller, account_info); let new_account_info = System::::account(caller.clone()); - assert_eq!(new_account_info.nonce, n.into()); + assert_eq!(new_account_info.nonce, 1337.into()); }: _(RawOrigin::Signed(caller.clone())) verify { let account_info = System::::account(&caller); diff --git a/frame/system/src/default_weights.rs b/frame/system/src/default_weights.rs new file mode 100644 index 00000000000..8a84cb0b790 --- /dev/null +++ b/frame/system/src/default_weights.rs @@ -0,0 +1,57 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 + +#![allow(unused_parens)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +impl crate::WeightInfo for () { + // WARNING! Some components were not used: ["b"] + fn remark() -> Weight { + (1305000 as Weight) + } + fn set_heap_pages() -> Weight { + (2023000 as Weight) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + // WARNING! Some components were not used: ["d"] + fn set_changes_trie_config() -> Weight { + (10026000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn set_storage(i: u32, ) -> Weight { + (0 as Weight) + .saturating_add((656000 as Weight).saturating_mul(i as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + } + fn kill_storage(i: u32, ) -> Weight { + (4327000 as Weight) + .saturating_add((478000 as Weight).saturating_mul(i as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + } + fn kill_prefix(p: u32, ) -> Weight { + (8349000 as Weight) + .saturating_add((838000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) + } + fn suicide() -> Weight { + (29247000 as Weight) + } +} diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 30d5d019fc5..fcd31923a24 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -139,6 +139,7 @@ mod extensions; mod weights; #[cfg(test)] mod tests; +mod default_weights; pub use extensions::{ check_mortality::CheckMortality, check_genesis::CheckGenesis, check_nonce::CheckNonce, @@ -159,25 +160,13 @@ pub fn extrinsics_data_root(xts: Vec>) -> H::Output { } pub trait WeightInfo { - fn remark(b: u32, ) -> Weight; - fn set_heap_pages(i: u32, ) -> Weight; - fn set_code_without_checks(b: u32, ) -> Weight; - fn set_changes_trie_config(d: u32, ) -> Weight; + fn remark() -> Weight; + fn set_heap_pages() -> Weight; + fn set_changes_trie_config() -> Weight; fn set_storage(i: u32, ) -> Weight; fn kill_storage(i: u32, ) -> Weight; fn kill_prefix(p: u32, ) -> Weight; - fn suicide(n: u32, ) -> Weight; -} - -impl WeightInfo for () { - fn remark(_b: u32, ) -> Weight { 1_000_000_000 } - fn set_heap_pages(_i: u32, ) -> Weight { 1_000_000_000 } - fn set_code_without_checks(_b: u32, ) -> Weight { 1_000_000_000 } - fn set_changes_trie_config(_d: u32, ) -> Weight { 1_000_000_000 } - fn set_storage(_i: u32, ) -> Weight { 1_000_000_000 } - fn kill_storage(_i: u32, ) -> Weight { 1_000_000_000 } - fn kill_prefix(_p: u32, ) -> Weight { 1_000_000_000 } - fn suicide(_n: u32, ) -> Weight { 1_000_000_000 } + fn suicide() -> Weight; } pub trait Trait: 'static + Eq + Clone { @@ -564,7 +553,7 @@ decl_module! { /// - Base Weight: 0.665 µs, independent of remark length. /// - No DB operations. 
/// # - #[weight = 700_000] + #[weight = T::SystemWeightInfo::remark()] fn remark(origin, _remark: Vec) { ensure_signed(origin)?; } @@ -577,7 +566,7 @@ decl_module! { /// - Base Weight: 1.405 µs /// - 1 write to HEAP_PAGES /// # - #[weight = (T::DbWeight::get().writes(1) + 1_500_000, DispatchClass::Operational)] + #[weight = (T::SystemWeightInfo::set_heap_pages(), DispatchClass::Operational)] fn set_heap_pages(origin, pages: u64) { ensure_root(origin)?; storage::unhashed::put_raw(well_known_keys::HEAP_PAGES, &pages.encode()); @@ -627,7 +616,7 @@ decl_module! { /// - DB Weight: /// - Writes: Changes Trie, System Digest /// # - #[weight = (T::DbWeight::get().writes(2) + 10_000_000, DispatchClass::Operational)] + #[weight = (T::SystemWeightInfo::set_changes_trie_config(), DispatchClass::Operational)] pub fn set_changes_trie_config(origin, changes_trie_config: Option) { ensure_root(origin)?; match changes_trie_config.clone() { @@ -653,8 +642,7 @@ decl_module! { /// - Writes: Number of items /// # #[weight = ( - T::DbWeight::get().writes(items.len() as Weight) - .saturating_add((items.len() as Weight).saturating_mul(600_000)), + T::SystemWeightInfo::set_storage(items.len() as u32), DispatchClass::Operational, )] fn set_storage(origin, items: Vec) { @@ -673,8 +661,7 @@ decl_module! { /// - Writes: Number of items /// # #[weight = ( - T::DbWeight::get().writes(keys.len() as Weight) - .saturating_add((keys.len() as Weight).saturating_mul(400_000)), + T::SystemWeightInfo::kill_storage(keys.len() as u32), DispatchClass::Operational, )] fn kill_storage(origin, keys: Vec) { @@ -696,8 +683,7 @@ decl_module! { /// - Writes: Number of subkeys + 1 /// # #[weight = ( - T::DbWeight::get().writes(Weight::from(*_subkeys) + 1) - .saturating_add((Weight::from(*_subkeys) + 1).saturating_mul(850_000)), + T::SystemWeightInfo::kill_prefix(_subkeys.saturating_add(1)), DispatchClass::Operational, )] fn kill_prefix(origin, prefix: Key, _subkeys: u32) { @@ -715,7 +701,7 @@ decl_module! { /// Base Weight: 8.626 µs /// No DB Read or Write operations because caller is already in overlay /// # - #[weight = (10_000_000, DispatchClass::Operational)] + #[weight = (T::SystemWeightInfo::suicide(), DispatchClass::Operational)] pub fn suicide(origin) { let who = ensure_signed(origin)?; let account = Account::::get(&who); diff --git a/frame/timestamp/src/default_weights.rs b/frame/timestamp/src/default_weights.rs new file mode 100644 index 00000000000..726b3444e25 --- /dev/null +++ b/frame/timestamp/src/default_weights.rs @@ -0,0 +1,35 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 + +#![allow(unused_parens)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +impl crate::WeightInfo for () { + // WARNING! 
Some components were not used: ["t"] + fn set() -> Weight { + (9133000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + // WARNING! Some components were not used: ["t"] + fn on_finalize() -> Weight { + (5915000 as Weight) + } +} diff --git a/frame/timestamp/src/lib.rs b/frame/timestamp/src/lib.rs index 1177165abed..d74a94cb920 100644 --- a/frame/timestamp/src/lib.rs +++ b/frame/timestamp/src/lib.rs @@ -93,6 +93,7 @@ #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; +mod default_weights; use sp_std::{result, cmp}; use sp_inherents::{ProvideInherent, InherentData, InherentIdentifier}; @@ -116,13 +117,8 @@ use sp_timestamp::{ }; pub trait WeightInfo { - fn set(t: u32, ) -> Weight; - fn on_finalize(t: u32, ) -> Weight; -} - -impl WeightInfo for () { - fn set(_t: u32, ) -> Weight { 1_000_000_000 } - fn on_finalize(_t: u32, ) -> Weight { 1_000_000_000 } + fn set() -> Weight; + fn on_finalize() -> Weight; } /// The module configuration trait @@ -166,12 +162,9 @@ decl_module! { /// - `O(T)` where `T` complexity of `on_timestamp_set` /// - 1 storage read and 1 storage mutation (codec `O(1)`). (because of `DidUpdate::take` in `on_finalize`) /// - 1 event handler `on_timestamp_set` `O(T)`. - /// - Benchmark: 7.678 (min squares analysis) - /// - NOTE: This benchmark was done for a runtime with insignificant `on_timestamp_set` handlers. - /// New benchmarking is needed when adding new handlers. /// # #[weight = ( - T::DbWeight::get().reads_writes(2, 1) + 8_000_000, + T::WeightInfo::set(), DispatchClass::Mandatory )] fn set(origin, #[compact] now: T::Moment) { @@ -191,13 +184,12 @@ decl_module! { /// dummy `on_initialize` to return the weight used in `on_finalize`. fn on_initialize() -> Weight { // weight of `on_finalize` - 5_000_000 + T::WeightInfo::on_finalize() } /// # /// - `O(1)` /// - 1 storage deletion (codec `O(1)`). - /// - Benchmark: 4.928 µs (min squares analysis) /// # fn on_finalize() { assert!(::DidUpdate::take(), "Timestamp must be updated once in the block"); diff --git a/frame/utility/src/default_weights.rs b/frame/utility/src/default_weights.rs new file mode 100644 index 00000000000..d023dbddd4f --- /dev/null +++ b/frame/utility/src/default_weights.rs @@ -0,0 +1,34 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +impl crate::WeightInfo for () { + fn batch(c: u32, ) -> Weight { + (16461000 as Weight) + .saturating_add((1982000 as Weight).saturating_mul(c as Weight)) + } + // WARNING! 
Some components were not used: ["u"] + fn as_derivative() -> Weight { + (4086000 as Weight) + } +} diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index cf2ea9119b9..d67fdc85db5 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -69,15 +69,11 @@ use sp_runtime::{DispatchError, DispatchResult, traits::Dispatchable}; mod tests; mod benchmarking; +mod default_weights; pub trait WeightInfo { fn batch(c: u32, ) -> Weight; - fn as_derivative(u: u32, ) -> Weight; -} - -impl WeightInfo for () { - fn batch(_c: u32, ) -> Weight { 1_000_000_000 } - fn as_derivative(_u: u32, ) -> Weight { 1_000_000_000 } + fn as_derivative() -> Weight; } /// Configuration trait. @@ -145,7 +141,8 @@ decl_module! { #[weight = ( calls.iter() .map(|call| call.get_dispatch_info().weight) - .fold(15_000_000, |a: Weight, n| a.saturating_add(n).saturating_add(1_000_000)), + .fold(0, |total: Weight, weight: Weight| total.saturating_add(weight)) + .saturating_add(T::WeightInfo::batch(calls.len() as u32)), { let all_operational = calls.iter() .map(|call| call.get_dispatch_info().class) @@ -186,13 +183,9 @@ decl_module! { /// NOTE: Prior to version *12, this was called `as_limited_sub`. /// /// The dispatch origin for this call must be _Signed_. - /// - /// # - /// - Base weight: 2.861 µs - /// - Plus the weight of the `call` - /// # #[weight = ( - call.get_dispatch_info().weight.saturating_add(3_000_000), + T::WeightInfo::as_derivative() + .saturating_add(call.get_dispatch_info().weight), call.get_dispatch_info().class, )] fn as_derivative(origin, index: u16, call: Box<::Call>) -> DispatchResult { diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index 6de70506e45..611c42907ca 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -54,7 +54,7 @@ impl_outer_dispatch! { pub struct Test; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockWeight: Weight = Weight::max_value(); pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::one(); } @@ -121,6 +121,7 @@ type System = frame_system::Module; type Balances = pallet_balances::Module; type Utility = Module; +use frame_system::Call as SystemCall; use pallet_balances::Call as BalancesCall; use pallet_balances::Error as BalancesError; @@ -236,3 +237,20 @@ fn batch_early_exit_works() { assert_eq!(Balances::free_balance(2), 15); }); } + +#[test] +fn batch_weight_calculation_doesnt_overflow() { + new_test_ext().execute_with(|| { + let big_call = Call::System(SystemCall::fill_block(Perbill::from_percent(50))); + assert_eq!(big_call.get_dispatch_info().weight, Weight::max_value() / 2); + + // 3 * 50% saturates to 100% + let batch_call = Call::Utility(crate::Call::batch(vec![ + big_call.clone(), + big_call.clone(), + big_call.clone(), + ])); + + assert_eq!(batch_call.get_dispatch_info().weight, Weight::max_value()); + }); +} diff --git a/utils/frame/benchmarking-cli/src/writer.rs b/utils/frame/benchmarking-cli/src/writer.rs index 2bc17aa85bd..964c1bf5fc1 100644 --- a/utils/frame/benchmarking-cli/src/writer.rs +++ b/utils/frame/benchmarking-cli/src/writer.rs @@ -107,6 +107,12 @@ pub fn write_results(batches: &[BenchmarkBatch]) -> Result<(), std::io::Error> { VERSION, )?; + // allow statements + write!( + file, + "#![allow(unused_parens)]\n#![allow(unused_imports)]\n\n", + )?; + // general imports write!( file, -- GitLab From f8c83bd5aec65dad638daaaf49ede837165e026c Mon Sep 17 00:00:00 2001 From: Roman Borschel Date: Tue, 18 Aug 2020 07:59:32 +0200 Subject: [PATCH 105/132] Add support for sourced metrics. (#6895) * Add support for sourced metrics. A sourced metric is a metric that obtains its values from an existing source, rather than the values being independently recorded. It thus allows collecting metrics from existing counters or gauges without having to duplicate them in a dedicated prometheus counter or gauge (and hence another atomic value). The first use-case is to feed the bandwidth counters from libp2p directly into prometheus. * Tabs, not spaces. * Tweak bandwidth counter registration. * Add debug assertion for variable labels and values. * Document monotonicity requirement for sourced counters. * CI * Update client/network/src/service.rs Co-authored-by: Max Inden Co-authored-by: Max Inden --- client/network/src/service.rs | 59 +++++++++---- utils/prometheus/src/lib.rs | 3 + utils/prometheus/src/sourced.rs | 143 ++++++++++++++++++++++++++++++++ 3 files changed, 187 insertions(+), 18 deletions(-) create mode 100644 utils/prometheus/src/sourced.rs diff --git a/client/network/src/service.rs b/client/network/src/service.rs index d42af16f1d2..713357772d4 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -53,6 +53,7 @@ use parking_lot::Mutex; use prometheus_endpoint::{ register, Counter, CounterVec, Gauge, GaugeVec, Histogram, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry, U64, + SourcedCounter, MetricSource }; use sc_peerset::PeersetHandle; use sp_consensus::import_queue::{BlockImportError, BlockImportResult, ImportQueue, Link}; @@ -240,12 +241,6 @@ impl NetworkWorker { local_peer_id_legacy ); - // Initialize the metrics. 
- let metrics = match ¶ms.metrics_registry { - Some(registry) => Some(Metrics::register(®istry)?), - None => None - }; - let checker = params.on_demand.as_ref() .map(|od| od.checker().clone()) .unwrap_or_else(|| Arc::new(AlwaysBadChecker)); @@ -353,6 +348,17 @@ impl NetworkWorker { (builder.build(), bandwidth) }; + // Initialize the metrics. + let metrics = match ¶ms.metrics_registry { + Some(registry) => { + // Sourced metrics. + BandwidthCounters::register(registry, bandwidth.clone())?; + // Other (i.e. new) metrics. + Some(Metrics::register(registry)?) + } + None => None + }; + // Listen on multiaddresses. for addr in ¶ms.network_config.listen_addresses { if let Err(err) = Swarm::::listen_on(&mut swarm, addr.clone()) { @@ -1152,9 +1158,6 @@ struct Metrics { kbuckets_num_nodes: GaugeVec, listeners_local_addresses: Gauge, listeners_errors_total: Counter, - // Note: `network_bytes_total` is a monotonic gauge obtained by - // sampling an existing counter. - network_bytes_total: GaugeVec, notifications_sizes: HistogramVec, notifications_streams_closed_total: CounterVec, notifications_streams_opened_total: CounterVec, @@ -1168,6 +1171,35 @@ struct Metrics { requests_out_started_total: CounterVec, } +/// The source for bandwidth metrics. +#[derive(Clone)] +struct BandwidthCounters(Arc); + +impl BandwidthCounters { + fn register(registry: &Registry, sinks: Arc) + -> Result<(), PrometheusError> + { + register(SourcedCounter::new( + &Opts::new( + "sub_libp2p_network_bytes_total", + "Total bandwidth usage" + ).variable_label("direction"), + BandwidthCounters(sinks), + )?, registry)?; + + Ok(()) + } +} + +impl MetricSource for BandwidthCounters { + type N = u64; + + fn collect(&self, mut set: impl FnMut(&[&str], Self::N)) { + set(&[&"in"], self.0.total_inbound()); + set(&[&"out"], self.0.total_outbound()); + } +} + impl Metrics { fn register(registry: &Registry) -> Result { Ok(Self { @@ -1271,13 +1303,6 @@ impl Metrics { "sub_libp2p_listeners_errors_total", "Total number of non-fatal errors reported by a listener" )?, registry)?, - network_bytes_total: register(GaugeVec::new( - Opts::new( - "sub_libp2p_network_bytes_total", - "Total bandwidth usage" - ), - &["direction"] - )?, registry)?, notifications_sizes: register(HistogramVec::new( HistogramOpts { common_opts: Opts::new( @@ -1725,8 +1750,6 @@ impl Future for NetworkWorker { this.is_major_syncing.store(is_major_syncing, Ordering::Relaxed); if let Some(metrics) = this.metrics.as_ref() { - metrics.network_bytes_total.with_label_values(&["in"]).set(this.service.bandwidth.total_inbound()); - metrics.network_bytes_total.with_label_values(&["out"]).set(this.service.bandwidth.total_outbound()); metrics.is_major_syncing.set(is_major_syncing as u64); for (proto, num_entries) in this.network_service.num_kbuckets_entries() { let proto = maybe_utf8_bytes_to_string(proto.as_bytes()); diff --git a/utils/prometheus/src/lib.rs b/utils/prometheus/src/lib.rs index 9030704cb74..be7050a8a07 100644 --- a/utils/prometheus/src/lib.rs +++ b/utils/prometheus/src/lib.rs @@ -31,6 +31,9 @@ use std::net::SocketAddr; #[cfg(not(target_os = "unknown"))] mod networking; +mod sourced; + +pub use sourced::{SourcedCounter, SourcedGauge, MetricSource}; #[cfg(target_os = "unknown")] pub use unknown_os::init_prometheus; diff --git a/utils/prometheus/src/sourced.rs b/utils/prometheus/src/sourced.rs new file mode 100644 index 00000000000..58f60e4969b --- /dev/null +++ b/utils/prometheus/src/sourced.rs @@ -0,0 +1,143 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. 
+// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Metrics that are collected from existing sources. + +use prometheus::core::{Collector, Desc, Describer, Number, Opts}; +use prometheus::proto; +use std::{cmp::Ordering, marker::PhantomData}; + +/// A counter whose values are obtained from an existing source. +/// +/// > **Note*: The counter values provided by the source `S` +/// > must be monotonically increasing. Otherwise use a +/// > [`SourcedGauge`] instead. +pub type SourcedCounter = SourcedMetric; + +/// A gauge whose values are obtained from an existing source. +pub type SourcedGauge = SourcedMetric; + +/// The type of a sourced counter. +#[derive(Copy, Clone)] +pub enum Counter {} + +/// The type of a sourced gauge. +#[derive(Copy, Clone)] +pub enum Gauge {} + +/// A metric whose values are obtained from an existing source, +/// instead of being independently recorded. +#[derive(Debug, Clone)] +pub struct SourcedMetric { + source: S, + desc: Desc, + _type: PhantomData, +} + +/// A source of values for a [`SourcedMetric`]. +pub trait MetricSource: Sync + Send + Clone { + /// The type of the collected values. + type N: Number; + /// Collects the current values of the metrics from the source. + fn collect(&self, set: impl FnMut(&[&str], Self::N)); +} + +impl SourcedMetric { + /// Creates a new metric that obtains its values from the given source. 
+ pub fn new(opts: &Opts, source: S) -> prometheus::Result { + let desc = opts.describe()?; + Ok(Self { source, desc, _type: PhantomData }) + } +} + +impl Collector for SourcedMetric { + fn desc(&self) -> Vec<&Desc> { + vec![&self.desc] + } + + fn collect(&self) -> Vec { + let mut counters = Vec::new(); + + self.source.collect(|label_values, value| { + let mut m = proto::Metric::default(); + + match T::proto() { + proto::MetricType::COUNTER => { + let mut c = proto::Counter::default(); + c.set_value(value.into_f64()); + m.set_counter(c); + } + proto::MetricType::GAUGE => { + let mut g = proto::Gauge::default(); + g.set_value(value.into_f64()); + m.set_gauge(g); + } + t => { + log::error!("Unsupported sourced metric type: {:?}", t); + } + } + + debug_assert_eq!(self.desc.variable_labels.len(), label_values.len()); + match self.desc.variable_labels.len().cmp(&label_values.len()) { + Ordering::Greater => + log::warn!("Missing label values for sourced metric {}", self.desc.fq_name), + Ordering::Less => + log::warn!("Too many label values for sourced metric {}", self.desc.fq_name), + Ordering::Equal => {} + } + + m.set_label(self.desc.variable_labels.iter().zip(label_values) + .map(|(l_name, l_value)| { + let mut l = proto::LabelPair::default(); + l.set_name(l_name.to_string()); + l.set_value(l_value.to_string()); + l + }) + .chain(self.desc.const_label_pairs.iter().cloned()) + .collect::>()); + + counters.push(m); + }); + + let mut m = proto::MetricFamily::default(); + m.set_name(self.desc.fq_name.clone()); + m.set_help(self.desc.help.clone()); + m.set_field_type(T::proto()); + m.set_metric(counters); + + vec![m] + } +} + +/// Types of metrics that can obtain their values from an existing source. +pub trait SourcedType: private::Sealed + Sync + Send { + #[doc(hidden)] + fn proto() -> proto::MetricType; +} + +impl SourcedType for Counter { + fn proto() -> proto::MetricType { proto::MetricType::COUNTER } +} + +impl SourcedType for Gauge { + fn proto() -> proto::MetricType { proto::MetricType::GAUGE } +} + +mod private { + pub trait Sealed {} + impl Sealed for super::Counter {} + impl Sealed for super::Gauge {} +} -- GitLab From 3f49041b474f224ab08b29526364e77333ce62d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 18 Aug 2020 09:36:58 +0200 Subject: [PATCH 106/132] Import `IterableStorage*` traits by `decl_storage!` (#6907) Import `IterableStorageMap` and `IterableStorageDoubleMap` automatically by `decl_storage!` as the other storage traits. 
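For illustration, a minimal pallet fragment showing the intended effect. The pallet, its `Scores` and `TotalScore` storage items, and the `tally` dispatchable are hypothetical, and the sketch assumes the generated `use ... as _` statements land in the pallet module as they do for the other storage traits; none of it is part of this change:

```rust
// Hypothetical pallet sketch: with `IterableStorageMap` imported automatically by
// the `decl_storage!` expansion, `iter()` on a declared map resolves without a
// manual `use frame_support::IterableStorageMap;` in the pallet.
use frame_support::{decl_module, decl_storage};
use frame_system::ensure_signed;

pub trait Trait: frame_system::Trait {}

decl_storage! {
	trait Store for Module<T: Trait> as Example {
		/// Hypothetical per-account scores.
		Scores get(fn scores): map hasher(blake2_128_concat) T::AccountId => u32;
		/// Last computed total of all scores.
		TotalScore get(fn total_score): u32;
	}
}

decl_module! {
	pub struct Module<T: Trait> for enum Call where origin: T::Origin {
		#[weight = 0]
		fn tally(origin) {
			ensure_signed(origin)?;
			// `iter()` comes from `IterableStorageMap`, now brought into scope by
			// `decl_storage!` above instead of an explicit import here.
			let total: u32 = Scores::<T>::iter().map(|(_who, score)| score).sum();
			TotalScore::put(total);
		}
	}
}
```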
--- frame/support/procedural/src/storage/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/frame/support/procedural/src/storage/mod.rs b/frame/support/procedural/src/storage/mod.rs index b42639c30c5..0aa0a3cad7c 100644 --- a/frame/support/procedural/src/storage/mod.rs +++ b/frame/support/procedural/src/storage/mod.rs @@ -416,6 +416,8 @@ pub fn decl_storage_impl(input: proc_macro::TokenStream) -> proc_macro::TokenStr StorageMap as _, StorageDoubleMap as _, StoragePrefixedMap as _, + IterableStorageMap as _, + IterableStorageDoubleMap as _, }; #scrate_decl -- GitLab From 265dd7418306d80da9669f8108ab8475b34f9ad7 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 19 Aug 2020 13:45:30 +0200 Subject: [PATCH 107/132] Distribute the network future polling time more evenly (#6903) * Distribute the network future polling time more evenly * Update client/network/src/service.rs --- client/network/src/service.rs | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 713357772d4..3ca74525935 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -1405,7 +1405,22 @@ impl Future for NetworkWorker { } } + // At the time of writing of this comment, due to a high volume of messages, the network + // worker sometimes takes a long time to process the loop below. When that happens, the + // rest of the polling is frozen. In order to avoid negative side-effects caused by this + // freeze, a limit to the number of iterations is enforced below. If the limit is reached, + // the task is interrupted then scheduled again. + // + // This allows for a more even distribution in the time taken by each sub-part of the + // polling. + let mut num_iterations = 0; loop { + num_iterations += 1; + if num_iterations >= 100 { + cx.waker().wake_by_ref(); + break; + } + // Process the next message coming from the `NetworkService`. let msg = match this.from_service.poll_next_unpin(cx) { Poll::Ready(Some(msg)) => msg, @@ -1445,7 +1460,16 @@ impl Future for NetworkWorker { } } + // `num_iterations` serves the same purpose as in the previous loop. + // See the previous loop for explanations. + let mut num_iterations = 0; loop { + num_iterations += 1; + if num_iterations >= 1000 { + cx.waker().wake_by_ref(); + break; + } + // Process the next action coming from the network. let next_event = this.network_service.next_event(); futures::pin_mut!(next_event); -- GitLab From bf3aefd3fed60ff6316602987b584721fbb91c2c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 19 Aug 2020 17:30:56 +0200 Subject: [PATCH 108/132] Combine default values used at initialization in a trait (#6857) This moves default values used in the Substrate code base when initializing a service into a common trait. Currently this trait only contains listen ports, but this could be extended in the future. Essentially this will make overriding these values much easier for Cumulus, where we have 2 nodes running in one binary. 
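For illustration, a minimal sketch of how a second node in the same binary could carry its own defaults. The `SecondaryNodeDefaults` type, the chosen port numbers, and importing the trait from the `sc_cli` crate root are assumptions made for the example, not part of this change:

```rust
// Sketch only: override a subset of the defaults; methods that are not overridden
// keep the trait's built-in values (30333, 9944, 9933, 9615).
use sc_cli::DefaultConfigurationValues;

/// Hypothetical marker type carrying the alternative defaults.
pub struct SecondaryNodeDefaults;

impl DefaultConfigurationValues for SecondaryNodeDefaults {
	fn p2p_listen_port() -> u16 {
		30334
	}

	fn rpc_ws_listen_port() -> u16 {
		9945
	}

	// `rpc_http_listen_port` and `prometheus_listen_port` fall back to the
	// default method bodies of the trait.
}
```

A command that should pick these values up then implements `CliConfiguration<SecondaryNodeDefaults>` instead of the default `CliConfiguration<()>`, so that its configuration is built with the listen ports supplied by `SecondaryNodeDefaults`.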
--- client/cli/src/commands/mod.rs | 42 +++++++++------ client/cli/src/commands/run_cmd.rs | 17 +++--- client/cli/src/config.rs | 69 +++++++++++++++++++------ client/cli/src/lib.rs | 2 +- client/cli/src/params/network_params.rs | 3 +- 5 files changed, 94 insertions(+), 39 deletions(-) diff --git a/client/cli/src/commands/mod.rs b/client/cli/src/commands/mod.rs index 04cce66bef8..5d4f4fe18db 100644 --- a/client/cli/src/commands/mod.rs +++ b/client/cli/src/commands/mod.rs @@ -64,7 +64,6 @@ pub enum Subcommand { ExportState(ExportStateCmd), } -// TODO: move to config.rs? /// Macro that helps implement CliConfiguration on an enum of subcommand automatically /// /// # Example @@ -189,17 +188,24 @@ macro_rules! substrate_cli_subcommands { fn network_config( &self, - chain_spec: &::std::boxed::Box, + chain_spec: &std::boxed::Box, is_dev: bool, - net_config_dir: ::std::path::PathBuf, + net_config_dir: std::path::PathBuf, client_id: &str, node_name: &str, - node_key: ::sc_service::config::NodeKeyConfig, + node_key: sc_service::config::NodeKeyConfig, + default_listen_port: u16, ) -> $crate::Result<::sc_service::config::NetworkConfiguration> { match self { $( $enum::$variant(cmd) => cmd.network_config( - chain_spec, is_dev, net_config_dir, client_id, node_name, node_key + chain_spec, + is_dev, + net_config_dir, + client_id, + node_name, + node_key, + default_listen_port, ) ),* } @@ -291,15 +297,21 @@ macro_rules! substrate_cli_subcommands { } } - fn rpc_http(&self) -> $crate::Result<::std::option::Option<::std::net::SocketAddr>> { + fn rpc_http( + &self, + default_listen_port: u16, + ) -> $crate::Result> { match self { - $($enum::$variant(cmd) => cmd.rpc_http()),* + $($enum::$variant(cmd) => cmd.rpc_http(default_listen_port)),* } } - fn rpc_ws(&self) -> $crate::Result<::std::option::Option<::std::net::SocketAddr>> { + fn rpc_ws( + &self, + default_listen_port: u16, + ) -> $crate::Result> { match self { - $($enum::$variant(cmd) => cmd.rpc_ws()),* + $($enum::$variant(cmd) => cmd.rpc_ws(default_listen_port)),* } } @@ -316,23 +328,23 @@ macro_rules! 
substrate_cli_subcommands { } fn rpc_cors(&self, is_dev: bool) - -> $crate::Result<::std::option::Option<::std::vec::Vec>> { + -> $crate::Result>> { match self { $($enum::$variant(cmd) => cmd.rpc_cors(is_dev)),* } } - fn prometheus_config(&self) - -> $crate::Result<::std::option::Option<::sc_service::config::PrometheusConfig>> { + fn prometheus_config(&self, default_listen_port: u16) + -> $crate::Result> { match self { - $($enum::$variant(cmd) => cmd.prometheus_config()),* + $($enum::$variant(cmd) => cmd.prometheus_config(default_listen_port)),* } } fn telemetry_endpoints( &self, - chain_spec: &Box, - ) -> $crate::Result<::std::option::Option<::sc_service::config::TelemetryEndpoints>> { + chain_spec: &Box, + ) -> $crate::Result> { match self { $($enum::$variant(cmd) => cmd.telemetry_endpoints(chain_spec)),* } diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index de5589196f2..019b760e5b4 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -382,7 +382,7 @@ impl CliConfiguration for RunCmd { Ok(self.shared_params.dev || self.force_authoring) } - fn prometheus_config(&self) -> Result> { + fn prometheus_config(&self, default_listen_port: u16) -> Result> { Ok(if self.no_prometheus { None } else { @@ -393,7 +393,10 @@ impl CliConfiguration for RunCmd { }; Some(PrometheusConfig::new_with_default_registry( - SocketAddr::new(interface.into(), self.prometheus_port.unwrap_or(9615)) + SocketAddr::new( + interface.into(), + self.prometheus_port.unwrap_or(default_listen_port), + ) )) }) } @@ -427,7 +430,7 @@ impl CliConfiguration for RunCmd { .into()) } - fn rpc_http(&self) -> Result> { + fn rpc_http(&self, default_listen_port: u16) -> Result> { let interface = rpc_interface( self.rpc_external, self.unsafe_rpc_external, @@ -435,22 +438,22 @@ impl CliConfiguration for RunCmd { self.validator )?; - Ok(Some(SocketAddr::new(interface, self.rpc_port.unwrap_or(9933)))) + Ok(Some(SocketAddr::new(interface, self.rpc_port.unwrap_or(default_listen_port)))) } fn rpc_ipc(&self) -> Result> { Ok(self.ipc_path.clone()) } - fn rpc_ws(&self) -> Result> { + fn rpc_ws(&self, default_listen_port: u16) -> Result> { let interface = rpc_interface( self.ws_external, self.unsafe_ws_external, self.rpc_methods, - self.validator + self.validator, )?; - Ok(Some(SocketAddr::new(interface, self.ws_port.unwrap_or(9944)))) + Ok(Some(SocketAddr::new(interface, self.ws_port.unwrap_or(default_listen_port)))) } fn rpc_methods(&self) -> Result { diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index efda45a0eca..ff0222216ce 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -41,8 +41,44 @@ pub(crate) const NODE_NAME_MAX_LENGTH: usize = 64; /// default sub directory to store network config pub(crate) const DEFAULT_NETWORK_CONFIG_PATH: &'static str = "network"; +/// Default configuration values used by Substrate +/// +/// These values will be used by [`CliConfiguritation`] to set +/// default values for e.g. the listen port or the RPC port. +pub trait DefaultConfigurationValues { + /// The port Substrate should listen on for p2p connections. + /// + /// By default this is `30333`. + fn p2p_listen_port() -> u16 { + 30333 + } + + /// The port Substrate should listen on for websocket connections. + /// + /// By default this is `9944`. + fn rpc_ws_listen_port() -> u16 { + 9944 + } + + /// The port Substrate should listen on for http connections. + /// + /// By default this is `9933`. 
+ fn rpc_http_listen_port() -> u16 { + 9933 + } + + /// The port Substrate should listen on for prometheus connections. + /// + /// By default this is `9615`. + fn prometheus_listen_port() -> u16 { + 9615 + } +} + +impl DefaultConfigurationValues for () {} + /// A trait that allows converting an object to a Configuration -pub trait CliConfiguration: Sized { +pub trait CliConfiguration: Sized { /// Get the SharedParams for this object fn shared_params(&self) -> &SharedParams; @@ -122,6 +158,7 @@ pub trait CliConfiguration: Sized { client_id: &str, node_name: &str, node_key: NodeKeyConfig, + default_listen_port: u16, ) -> Result { Ok(if let Some(network_params) = self.network_params() { network_params.network_config( @@ -131,6 +168,7 @@ pub trait CliConfiguration: Sized { client_id, node_name, node_key, + default_listen_port, ) } else { NetworkConfiguration::new( @@ -257,22 +295,22 @@ pub trait CliConfiguration: Sized { /// Get the RPC HTTP address (`None` if disabled). /// /// By default this is `None`. - fn rpc_http(&self) -> Result> { - Ok(Default::default()) + fn rpc_http(&self, _default_listen_port: u16) -> Result> { + Ok(None) } /// Get the RPC IPC path (`None` if disabled). /// /// By default this is `None`. fn rpc_ipc(&self) -> Result> { - Ok(Default::default()) + Ok(None) } /// Get the RPC websocket address (`None` if disabled). /// /// By default this is `None`. - fn rpc_ws(&self) -> Result> { - Ok(Default::default()) + fn rpc_ws(&self, _default_listen_port: u16) -> Result> { + Ok(None) } /// Returns the RPC method set to expose. @@ -287,12 +325,12 @@ pub trait CliConfiguration: Sized { /// /// By default this is `None`. fn rpc_ws_max_connections(&self) -> Result> { - Ok(Default::default()) + Ok(None) } /// Get the RPC cors (`None` if disabled) /// - /// By default this is `None`. + /// By default this is `Some(Vec::new())`. fn rpc_cors(&self, _is_dev: bool) -> Result>> { Ok(Some(Vec::new())) } @@ -300,8 +338,8 @@ pub trait CliConfiguration: Sized { /// Get the prometheus configuration (`None` if disabled) /// /// By default this is `None`. - fn prometheus_config(&self) -> Result> { - Ok(Default::default()) + fn prometheus_config(&self, _default_listen_port: u16) -> Result> { + Ok(None) } /// Get the telemetry endpoints (if any) @@ -318,14 +356,14 @@ pub trait CliConfiguration: Sized { /// /// By default this is `None`. fn telemetry_external_transport(&self) -> Result> { - Ok(Default::default()) + Ok(None) } /// Get the default value for heap pages /// /// By default this is `None`. 
fn default_heap_pages(&self) -> Result> { - Ok(Default::default()) + Ok(None) } /// Returns an offchain worker config wrapped in `Ok(_)` @@ -445,6 +483,7 @@ pub trait CliConfiguration: Sized { client_id.as_str(), self.node_name()?.as_str(), node_key, + DCV::p2p_listen_port(), )?, keystore: self.keystore_config(&config_dir)?, database: self.database_config(&config_dir, database_cache_size, database)?, @@ -453,13 +492,13 @@ pub trait CliConfiguration: Sized { pruning: self.pruning(unsafe_pruning, &role)?, wasm_method: self.wasm_method()?, execution_strategies: self.execution_strategies(is_dev, is_validator)?, - rpc_http: self.rpc_http()?, - rpc_ws: self.rpc_ws()?, + rpc_http: self.rpc_http(DCV::rpc_http_listen_port())?, + rpc_ws: self.rpc_ws(DCV::rpc_ws_listen_port())?, rpc_ipc: self.rpc_ipc()?, rpc_methods: self.rpc_methods()?, rpc_ws_max_connections: self.rpc_ws_max_connections()?, rpc_cors: self.rpc_cors(is_dev)?, - prometheus_config: self.prometheus_config()?, + prometheus_config: self.prometheus_config(DCV::prometheus_listen_port())?, telemetry_endpoints: self.telemetry_endpoints(&chain_spec)?, telemetry_external_transport: self.telemetry_external_transport()?, default_heap_pages: self.default_heap_pages()?, diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index f940ab0b95d..021f349aaf2 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -209,7 +209,7 @@ pub trait SubstrateCli: Sized { } /// Only create a Configuration for the command provided in argument - fn create_configuration( + fn create_configuration, DVC: DefaultConfigurationValues>( &self, command: &T, task_executor: TaskExecutor, diff --git a/client/cli/src/params/network_params.rs b/client/cli/src/params/network_params.rs index 253585544d2..4a33644e893 100644 --- a/client/cli/src/params/network_params.rs +++ b/client/cli/src/params/network_params.rs @@ -114,8 +114,9 @@ impl NetworkParams { client_id: &str, node_name: &str, node_key: NodeKeyConfig, + default_listen_port: u16, ) -> NetworkConfiguration { - let port = self.port.unwrap_or(30333); + let port = self.port.unwrap_or(default_listen_port); let listen_addresses = if self.listen_addr.is_empty() { vec![ -- GitLab From 3c3461d1cbcabf29e783ddcfd99b2eb54d67d887 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Wed, 19 Aug 2020 17:11:14 +0100 Subject: [PATCH 109/132] babe: handle error when checking/reporting equivocations (#6915) --- client/consensus/babe/src/lib.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 9e7c3c9081b..67aca1dd43e 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -994,13 +994,15 @@ where // the header is valid but let's check if there was something else already // proposed at the same slot by the given author. if there was, we will // report the equivocation to the runtime. 
- self.check_and_report_equivocation( + if let Err(err) = self.check_and_report_equivocation( slot_now, slot_number, &header, &verified_info.author, &origin, - )?; + ) { + warn!(target: "babe", "Error checking/reporting BABE equivocation: {:?}", err); + } // if the body is passed through, we need to use the runtime // to check that the internally-set timestamp in the inherents -- GitLab From 368903f7aa9ef652bf8157d476dc13cb36e3affc Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Wed, 19 Aug 2020 18:15:50 +0200 Subject: [PATCH 110/132] Dynamic Benchmarking DB Whitelist (#6815) * Add `get_whitelist` api * add whitelisted caller * Whitelist caller * remove caller 0 * initial piping of origin (not actual value yet) * remove attempt to pass origin around * Add whitelist for `DidUpdate` storage on `pallet_timestamp` * fix traits * only add to whitelist if !contains * PassBy not implemented error * Whitelist read/writes explicitly per key * update docs * reduce trait constraint * copy pasta * Apply suggestions from code review Co-authored-by: Guillaume Thiolliere Co-authored-by: Alexander Popiak * rename functions @apopiak * missed some renaming * enable doc tests * Update docs Co-authored-by: Guillaume Thiolliere Co-authored-by: Alexander Popiak --- Cargo.lock | 4 + bin/node/runtime/src/lib.rs | 18 +- client/db/src/bench.rs | 37 +-- frame/balances/src/benchmarking.rs | 8 +- frame/benchmarking/Cargo.toml | 4 + frame/benchmarking/src/lib.rs | 293 ++++++++++++++-------- frame/benchmarking/src/tests.rs | 12 +- frame/benchmarking/src/utils.rs | 47 +++- frame/collective/src/benchmarking.rs | 14 +- frame/indices/src/benchmarking.rs | 10 +- frame/proxy/src/benchmarking.rs | 18 +- frame/staking/src/benchmarking.rs | 20 +- frame/system/benchmarking/src/lib.rs | 8 +- frame/timestamp/src/benchmarking.rs | 14 +- frame/treasury/src/benchmarking.rs | 22 +- frame/utility/src/benchmarking.rs | 7 +- frame/vesting/src/benchmarking.rs | 12 +- primitives/externalities/src/lib.rs | 11 +- primitives/runtime-interface/Cargo.toml | 1 + primitives/runtime-interface/src/impls.rs | 4 + primitives/state-machine/src/backend.rs | 14 +- primitives/state-machine/src/basic.rs | 8 +- primitives/state-machine/src/ext.rs | 8 +- primitives/state-machine/src/read_only.rs | 8 +- primitives/storage/Cargo.toml | 3 +- primitives/storage/src/lib.rs | 21 ++ 26 files changed, 421 insertions(+), 205 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ec5af8aca4e..c80c0557443 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1559,6 +1559,7 @@ version = "2.0.0-rc5" dependencies = [ "frame-support", "frame-system", + "hex-literal", "linregress", "parity-scale-codec", "paste", @@ -1567,6 +1568,7 @@ dependencies = [ "sp-runtime", "sp-runtime-interface", "sp-std", + "sp-storage", ] [[package]] @@ -8047,6 +8049,7 @@ dependencies = [ "sp-runtime-interface-test-wasm", "sp-state-machine", "sp-std", + "sp-storage", "sp-tracing", "sp-wasm-interface", "static_assertions", @@ -8176,6 +8179,7 @@ name = "sp-storage" version = "2.0.0-rc5" dependencies = [ "impl-serde 0.2.3", + "parity-scale-codec", "ref-cast", "serde", "sp-debug-derive", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index aa0ddfc61a7..9d19f20c5e1 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1130,7 +1130,7 @@ impl_runtime_apis! 
{ repeat: u32, extra: bool, ) -> Result, sp_runtime::RuntimeString> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark}; + use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey}; // Trying to add benchmarks directly to the Session Pallet caused cyclic dependency issues. // To get around that, we separated the Session benchmarks into its own crate, which is why // we need these two lines below. @@ -1142,21 +1142,19 @@ impl_runtime_apis! { impl pallet_offences_benchmarking::Trait for Runtime {} impl frame_system_benchmarking::Trait for Runtime {} - let whitelist: Vec> = vec![ + let whitelist: Vec = vec![ // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec(), + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), // Total Issuance - hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec(), + hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec(), + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec(), + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec(), - // Caller 0 Account - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da946c154ffd9992e395af90b5b13cc6f295c77033fce8a9045824a6690bbf99c6db269502f0a8d1d2a008542d5690a0749").to_vec(), + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), // Treasury Account - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da95ecffd7b6c0f78751baa9d281e0bfa3a6d6f646c70792f74727372790000000000000000000000000000000000000000").to_vec(), + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da95ecffd7b6c0f78751baa9d281e0bfa3a6d6f646c70792f74727372790000000000000000000000000000000000000000").to_vec().into(), ]; let mut batches = Vec::::new(); diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index c3bed3e24f6..93b8048529f 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -24,7 +24,10 @@ use std::collections::HashMap; use hash_db::{Prefix, Hasher}; use sp_trie::{MemoryDB, prefixed_key}; -use sp_core::{storage::ChildInfo, hexdisplay::HexDisplay}; +use sp_core::{ + storage::{ChildInfo, TrackedStorageKey}, + hexdisplay::HexDisplay +}; use sp_runtime::traits::{Block as BlockT, HashFor}; use sp_runtime::Storage; use sp_state_machine::{DBValue, backend::Backend as StateBackend, StorageCollection}; @@ -95,7 +98,7 @@ pub struct BenchmarkingState { shared_cache: SharedCache, // shared cache is always empty key_tracker: RefCell, KeyTracker>>, read_write_tracker: RefCell, - whitelist: RefCell>>, + whitelist: RefCell>, } impl BenchmarkingState { @@ -155,15 +158,14 @@ impl BenchmarkingState { fn add_whitelist_to_tracker(&self) { let mut key_tracker = self.key_tracker.borrow_mut(); - let whitelisted = KeyTracker { - has_been_read: true, - has_been_written: true, - }; - let whitelist = self.whitelist.borrow(); 
whitelist.iter().for_each(|key| { - key_tracker.insert(key.to_vec(), whitelisted); + let whitelisted = KeyTracker { + has_been_read: key.has_been_read, + has_been_written: key.has_been_written, + }; + key_tracker.insert(key.key.clone(), whitelisted); }); } @@ -181,18 +183,21 @@ impl BenchmarkingState { let maybe_tracker = key_tracker.get(key); - let has_been_read = KeyTracker { - has_been_read: true, - has_been_written: false, - }; - match maybe_tracker { None => { + let has_been_read = KeyTracker { + has_been_read: true, + has_been_written: false, + }; key_tracker.insert(key.to_vec(), has_been_read); read_write_tracker.add_read(); }, Some(tracker) => { if !tracker.has_been_read { + let has_been_read = KeyTracker { + has_been_read: true, + has_been_written: tracker.has_been_written, + }; key_tracker.insert(key.to_vec(), has_been_read); read_write_tracker.add_read(); } else { @@ -426,7 +431,11 @@ impl StateBackend> for BenchmarkingState { self.wipe_tracker() } - fn set_whitelist(&self, new: Vec>) { + fn get_whitelist(&self) -> Vec { + self.whitelist.borrow().to_vec() + } + + fn set_whitelist(&self, new: Vec) { *self.whitelist.borrow_mut() = new; } diff --git a/frame/balances/src/benchmarking.rs b/frame/balances/src/benchmarking.rs index 73547fe814a..21f43c7c636 100644 --- a/frame/balances/src/benchmarking.rs +++ b/frame/balances/src/benchmarking.rs @@ -22,7 +22,7 @@ use super::*; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account}; +use frame_benchmarking::{benchmarks, account, whitelisted_caller}; use sp_runtime::traits::Bounded; use crate::Module as Balances; @@ -40,7 +40,7 @@ benchmarks! { // * Transfer will create the recipient account. transfer { let existential_deposit = T::ExistentialDeposit::get(); - let caller = account("caller", 0, SEED); + let caller = whitelisted_caller(); // Give some multiple of the existential deposit + creation fee + transfer fee let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); @@ -60,7 +60,7 @@ benchmarks! { // * Both accounts exist and will continue to exist. #[extra] transfer_best_case { - let caller = account("caller", 0, SEED); + let caller = whitelisted_caller(); let recipient: T::AccountId = account("recipient", 0, SEED); let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); @@ -80,7 +80,7 @@ benchmarks! { // Benchmark `transfer_keep_alive` with the worst possible condition: // * The recipient account is created. 
transfer_keep_alive { - let caller = account("caller", 0, SEED); + let caller = whitelisted_caller(); let recipient: T::AccountId = account("recipient", 0, SEED); let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml index 37dcd85b598..917988a825f 100644 --- a/frame/benchmarking/Cargo.toml +++ b/frame/benchmarking/Cargo.toml @@ -20,9 +20,13 @@ sp-runtime-interface = { version = "2.0.0-rc5", path = "../../primitives/runtime sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime", default-features = false } sp-std = { version = "2.0.0-rc5", path = "../../primitives/std", default-features = false } sp-io = { version = "2.0.0-rc5", path = "../../primitives/io", default-features = false } +sp-storage = { version = "2.0.0-rc5", path = "../../primitives/storage", default-features = false } frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +[dev-dependencies] +hex-literal = "0.2.1" + [features] default = [ "std" ] std = [ diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index 7ef274f25b1..cebdcbcfecd 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -32,6 +32,7 @@ pub use sp_io::storage::root as storage_root; pub use sp_runtime::traits::Zero; pub use frame_support; pub use paste; +pub use sp_storage::TrackedStorageKey; /// Construct pallet benchmarks for weighing dispatchables. /// @@ -418,156 +419,220 @@ macro_rules! benchmarks_iter { #[doc(hidden)] macro_rules! benchmark_backend { // parsing arms - ( { $( $instance:ident )? } $name:ident { - $( $where_clause:tt )* - } { - $( $common:tt )* - } { - $( PRE { $( $pre_parsed:tt )* } )* - } { $eval:block } { + ( + { $( $instance:ident )? } + $name:ident + { $( $where_clause:tt )* } + { $( $common:tt )* } + { $( PRE { $( $pre_parsed:tt )* } )* } + { $eval:block } + { let $pre_id:tt : $pre_ty:ty = $pre_ex:expr; $( $rest:tt )* - } $postcode:block) => { + } + $postcode:block + ) => { $crate::benchmark_backend! { - { $( $instance)? } $name { $( $where_clause )* } { $( $common )* } { + { $( $instance)? } + $name + { $( $where_clause )* } + { $( $common )* } + { $( PRE { $( $pre_parsed )* } )* PRE { $pre_id , $pre_ty , $pre_ex } - } { $eval } { $( $rest )* } $postcode + } + { $eval } + { $( $rest )* } + $postcode } }; - ( { $( $instance:ident )? } $name:ident { - $( $where_clause:tt )* - } { - $( $common:tt )* - } { - $( $parsed:tt )* - } { $eval:block } { - let $param:ident in ( $param_from:expr ) .. $param_to:expr => $param_instancer:expr; - $( $rest:tt )* - } $postcode:block) => { + ( + { $( $instance:ident )? } + $name:ident + { $( $where_clause:tt )* } + { $( $common:tt )* } + { $( $parsed:tt )* } + { $eval:block } + { + let $param:ident in ( $param_from:expr ) .. $param_to:expr => $param_instancer:expr; + $( $rest:tt )* + } + $postcode:block + ) => { $crate::benchmark_backend! { - { $( $instance)? } $name { $( $where_clause )* } { $( $common )* } { + { $( $instance)? } + $name + { $( $where_clause )* } + { $( $common )* } + { $( $parsed )* PARAM { $param , $param_from , $param_to , $param_instancer } - } { $eval } { $( $rest )* } $postcode + } + { $eval } + { $( $rest )* } + $postcode } }; // mutation arm to look after defaulting to a common param - ( { $( $instance:ident )? 
} $name:ident { - $( $where_clause:tt )* - } { - $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* - } { - $( $parsed:tt )* - } { $eval:block } { - let $param:ident in ...; - $( $rest:tt )* - } $postcode:block) => { + ( + { $( $instance:ident )? } + $name:ident + { $( $where_clause:tt )* } + { $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* } + { $( $parsed:tt )* } + { $eval:block } + { + let $param:ident in ...; + $( $rest:tt )* + } + $postcode:block + ) => { $crate::benchmark_backend! { - { $( $instance)? } $name { $( $where_clause )* } { - $( { $common , $common_from , $common_to , $common_instancer } )* - } { - $( $parsed )* - } { $eval } { + { $( $instance)? } + $name + { $( $where_clause )* } + { $( { $common , $common_from , $common_to , $common_instancer } )* } + { $( $parsed )* } + { $eval } + { let $param in ({ $( let $common = $common_from; )* $param }) .. ({ $( let $common = $common_to; )* $param }) => ({ $( let $common = || -> Result<(), &'static str> { $common_instancer ; Ok(()) }; )* $param()? }); $( $rest )* - } $postcode + } + $postcode } }; // mutation arm to look after defaulting only the range to common param - ( { $( $instance:ident )? } $name:ident { - $( $where_clause:tt )* - } { - $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* - } { - $( $parsed:tt )* - } { $eval:block } { - let $param:ident in _ .. _ => $param_instancer:expr ; - $( $rest:tt )* - } $postcode:block) => { + ( + { $( $instance:ident )? } + $name:ident + { $( $where_clause:tt )* } + { $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* } + { $( $parsed:tt )* } + { $eval:block } + { + let $param:ident in _ .. _ => $param_instancer:expr ; + $( $rest:tt )* + } + $postcode:block + ) => { $crate::benchmark_backend! { - { $( $instance)? } $name { $( $where_clause )* } { - $( { $common , $common_from , $common_to , $common_instancer } )* - } { - $( $parsed )* - } { $eval } { + { $( $instance)? } + $name + { $( $where_clause )* } + { $( { $common , $common_from , $common_to , $common_instancer } )* } + { $( $parsed )* } + { $eval } + { let $param in ({ $( let $common = $common_from; )* $param }) .. ({ $( let $common = $common_to; )* $param }) => $param_instancer ; $( $rest )* - } $postcode + } + $postcode } }; // mutation arm to look after a single tt for param_from. - ( { $( $instance:ident )? } $name:ident { - $( $where_clause:tt )* - } { - $( $common:tt )* - } { - $( $parsed:tt )* - } { $eval:block } { - let $param:ident in $param_from:tt .. $param_to:expr => $param_instancer:expr ; - $( $rest:tt )* - } $postcode:block) => { + ( + { $( $instance:ident )? } + $name:ident + { $( $where_clause:tt )* } + { $( $common:tt )* } + { $( $parsed:tt )* } + { $eval:block } + { + let $param:ident in $param_from:tt .. $param_to:expr => $param_instancer:expr ; + $( $rest:tt )* + } + $postcode:block + ) => { $crate::benchmark_backend! { { $( $instance)? } - $name { $( $where_clause )* } { $( $common )* } { $( $parsed )* } { $eval } { + $name + { $( $where_clause )* } + { $( $common )* } + { $( $parsed )* } + { $eval } + { let $param in ( $param_from ) .. $param_to => $param_instancer; $( $rest )* - } $postcode + } + $postcode } }; // mutation arm to look after the default tail of `=> ()` - ( { $( $instance:ident )? } $name:ident { - $( $where_clause:tt )* - } { - $( $common:tt )* - } { - $( $parsed:tt )* - } { $eval:block } { - let $param:ident in $param_from:tt .. 
$param_to:expr; - $( $rest:tt )* - } $postcode:block) => { + ( + { $( $instance:ident )? } + $name:ident + { $( $where_clause:tt )* } + { $( $common:tt )* } + { $( $parsed:tt )* } + { $eval:block } + { + let $param:ident in $param_from:tt .. $param_to:expr; + $( $rest:tt )* + } + $postcode:block + ) => { $crate::benchmark_backend! { { $( $instance)? } - $name { $( $where_clause )* } { $( $common )* } { $( $parsed )* } { $eval } { + $name + { $( $where_clause )* } + { $( $common )* } + { $( $parsed )* } + { $eval } + { let $param in $param_from .. $param_to => (); $( $rest )* - } $postcode + } + $postcode } }; // mutation arm to look after `let _ =` - ( { $( $instance:ident )? } $name:ident { - $( $where_clause:tt )* - } { - $( $common:tt )* - } { - $( $parsed:tt )* - } { $eval:block } { - let $pre_id:tt = $pre_ex:expr; - $( $rest:tt )* - } $postcode:block) => { + ( + { $( $instance:ident )? } + $name:ident + { $( $where_clause:tt )* } + { $( $common:tt )* } + { $( $parsed:tt )* } + { $eval:block } + { + let $pre_id:tt = $pre_ex:expr; + $( $rest:tt )* + } + $postcode:block + ) => { $crate::benchmark_backend! { { $( $instance)? } - $name { $( $where_clause )* } { $( $common )* } { $( $parsed )* } { $eval } { + $name + { $( $where_clause )* } + { $( $common )* } + { $( $parsed )* } + { $eval } + { let $pre_id : _ = $pre_ex; $( $rest )* - } $postcode + } + $postcode } }; // actioning arm - ( { $( $instance:ident )? } $name:ident { - $( $where_clause:tt )* - } { - $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* - } { - $( PRE { $pre_id:tt , $pre_ty:ty , $pre_ex:expr } )* - $( PARAM { $param:ident , $param_from:expr , $param_to:expr , $param_instancer:expr } )* - } { $eval:block } { $( $post:tt )* } $postcode:block) => { + ( + { $( $instance:ident )? } + $name:ident + { $( $where_clause:tt )* } + { $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* } + { + $( PRE { $pre_id:tt , $pre_ty:ty , $pre_ex:expr } )* + $( PARAM { $param:ident , $param_from:expr , $param_to:expr , $param_instancer:expr } )* + } + { $eval:block } + { $( $post:tt )* } + $postcode:block + ) => { #[allow(non_camel_case_types)] struct $name; #[allow(unused_variables)] @@ -726,7 +791,7 @@ macro_rules! impl_benchmark { highest_range_values: &[u32], steps: &[u32], repeat: u32, - whitelist: &[Vec] + whitelist: &[$crate::TrackedStorageKey] ) -> Result, &'static str> { // Map the input to the selected benchmark. let extrinsic = sp_std::str::from_utf8(extrinsic) @@ -736,8 +801,14 @@ macro_rules! impl_benchmark { _ => return Err("Could not find extrinsic."), }; - // Add whitelist to DB - $crate::benchmarking::set_whitelist(whitelist.to_vec()); + // Add whitelist to DB including whitelisted caller + let mut whitelist = whitelist.to_vec(); + let whitelisted_caller_key = + as frame_support::storage::StorageMap<_,_>>::hashed_key_for( + $crate::whitelisted_caller::() + ); + whitelist.push(whitelisted_caller_key.into()); + $crate::benchmarking::set_whitelist(whitelist); // Warm up the DB $crate::benchmarking::commit_db(); @@ -947,19 +1018,25 @@ macro_rules! impl_benchmark_test { /// let params = (&pallet, &benchmark, &lowest_range_values, &highest_range_values, &steps, repeat, &whitelist); /// ``` /// -/// The `whitelist` is a `Vec>` of storage keys that you would like to skip for DB tracking. For example: +/// The `whitelist` is a parameter you pass to control the DB read/write tracking. 
+/// We use a vector of [TrackedStorageKey](./struct.TrackedStorageKey.html), which is a simple struct used to set +/// if a key has been read or written to. /// -/// ```ignore -/// let whitelist: Vec> = vec![ +/// For values that should be skipped entirely, we can just pass `key.into()`. For example: +/// +/// ``` +/// use frame_benchmarking::TrackedStorageKey; +/// let whitelist: Vec = vec![ /// // Block Number -/// hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec(), +/// hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), /// // Total Issuance -/// hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec(), +/// hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), /// // Execution Phase -/// hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec(), +/// hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), /// // Event Count -/// hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec(), +/// hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), /// ]; +/// ``` /// /// Then define a mutable local variable to hold your `BenchmarkBatch` object: /// diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index 6a4dc7eee4e..127645d4305 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ -20,7 +20,6 @@ #![cfg(test)] use super::*; -use codec::Decode; use sp_std::prelude::*; use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::{H256, Header}}; use frame_support::{ @@ -64,12 +63,10 @@ pub trait OtherTrait { type OtherEvent; } -pub trait Trait: OtherTrait where Self::OtherEvent: Into { +pub trait Trait: frame_system::Trait + OtherTrait + where Self::OtherEvent: Into<::Event> +{ type Event; - type BlockNumber; - type AccountId: 'static + Default + Decode; - type Origin: From> + - Into, Self::Origin>>; } #[derive(Clone, Eq, PartialEq)] @@ -105,9 +102,6 @@ impl frame_system::Trait for Test { impl Trait for Test { type Event = (); - type BlockNumber = u32; - type Origin = Origin; - type AccountId = u64; } impl OtherTrait for Test { diff --git a/frame/benchmarking/src/utils.rs b/frame/benchmarking/src/utils.rs index 5a2bd55ff79..8c25f035802 100644 --- a/frame/benchmarking/src/utils.rs +++ b/frame/benchmarking/src/utils.rs @@ -21,6 +21,7 @@ use codec::{Encode, Decode}; use sp_std::{vec::Vec, prelude::Box}; use sp_io::hashing::blake2_256; use sp_runtime::RuntimeString; +use sp_storage::TrackedStorageKey; /// An alphabet of possible parameters to use for benchmarking. #[derive(Encode, Decode, Clone, Copy, PartialEq, Debug)] @@ -101,19 +102,52 @@ pub trait Benchmarking { self.commit() } - /// Get the read/write count + /// Get the read/write count. fn read_write_count(&self) -> (u32, u32, u32, u32) { self.read_write_count() } - /// Reset the read/write count + /// Reset the read/write count. fn reset_read_write_count(&mut self) { self.reset_read_write_count() } - fn set_whitelist(&mut self, new: Vec>) { + /// Get the DB whitelist. + fn get_whitelist(&self) -> Vec { + self.get_whitelist() + } + + /// Set the DB whitelist. + fn set_whitelist(&mut self, new: Vec) { self.set_whitelist(new) } + + // Add a new item to the DB whitelist. 
+ fn add_to_whitelist(&mut self, add: TrackedStorageKey) { + let mut whitelist = self.get_whitelist(); + match whitelist.iter_mut().find(|x| x.key == add.key) { + // If we already have this key in the whitelist, update to be the most constrained value. + Some(item) => { + *item = TrackedStorageKey { + key: add.key, + has_been_read: item.has_been_read || add.has_been_read, + has_been_written: item.has_been_written || add.has_been_written, + } + }, + // If the key does not exist, add it. + None => { + whitelist.push(add); + } + } + self.set_whitelist(whitelist); + } + + // Remove an item from the DB whitelist. + fn remove_from_whitelist(&mut self, remove: Vec) { + let mut whitelist = self.get_whitelist(); + whitelist.retain(|x| x.key != remove); + self.set_whitelist(whitelist); + } } /// The pallet benchmarking trait. @@ -141,7 +175,7 @@ pub trait Benchmarking { highest_range_values: &[u32], steps: &[u32], repeat: u32, - whitelist: &[Vec] + whitelist: &[TrackedStorageKey] ) -> Result, &'static str>; } @@ -165,3 +199,8 @@ pub fn account(name: &'static str, index: u32, seed let entropy = (name, index, seed).using_encoded(blake2_256); AccountId::decode(&mut &entropy[..]).unwrap_or_default() } + +/// This caller account is automatically whitelisted for DB reads/writes by the benchmarking macro. +pub fn whitelisted_caller() -> AccountId { + account::("whitelisted_caller", 0, 0) +} diff --git a/frame/collective/src/benchmarking.rs b/frame/collective/src/benchmarking.rs index b9558d8c8ce..2c777fadc4c 100644 --- a/frame/collective/src/benchmarking.rs +++ b/frame/collective/src/benchmarking.rs @@ -21,7 +21,7 @@ use super::*; use frame_system::RawOrigin as SystemOrigin; use frame_system::EventRecord; -use frame_benchmarking::{benchmarks_instance, account}; +use frame_benchmarking::{benchmarks_instance, account, whitelisted_caller}; use sp_runtime::traits::Bounded; use sp_std::mem::size_of; @@ -123,7 +123,7 @@ benchmarks_instance! { members.push(member); } - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); members.push(caller.clone()); Collective::::set_members(SystemOrigin::Root.into(), members, None, MAX_MEMBERS)?; @@ -153,7 +153,7 @@ benchmarks_instance! { members.push(member); } - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); members.push(caller.clone()); Collective::::set_members(SystemOrigin::Root.into(), members, None, MAX_MEMBERS)?; @@ -184,7 +184,7 @@ benchmarks_instance! { let member = account("member", i, SEED); members.push(member); } - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); members.push(caller.clone()); Collective::::set_members(SystemOrigin::Root.into(), members, None, MAX_MEMBERS)?; @@ -377,7 +377,7 @@ benchmarks_instance! { let member = account("member", i, SEED); members.push(member); } - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); members.push(caller.clone()); Collective::::set_members(SystemOrigin::Root.into(), members.clone(), None, MAX_MEMBERS)?; @@ -458,7 +458,7 @@ benchmarks_instance! { let member = account("member", i, SEED); members.push(member); } - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); members.push(caller.clone()); Collective::::set_members( SystemOrigin::Root.into(), @@ -530,7 +530,7 @@ benchmarks_instance! 
{ let member = account("member", i, SEED); members.push(member); } - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); members.push(caller.clone()); Collective::::set_members( SystemOrigin::Root.into(), diff --git a/frame/indices/src/benchmarking.rs b/frame/indices/src/benchmarking.rs index a6b543bb43f..e8465c44cdc 100644 --- a/frame/indices/src/benchmarking.rs +++ b/frame/indices/src/benchmarking.rs @@ -21,7 +21,7 @@ use super::*; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account}; +use frame_benchmarking::{benchmarks, account, whitelisted_caller}; use sp_runtime::traits::Bounded; use crate::Module as Indices; @@ -35,7 +35,7 @@ benchmarks! { // Index being claimed let i in 0 .. 1000; let account_index = T::AccountIndex::from(i); - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); }: _(RawOrigin::Signed(caller.clone()), account_index) verify { @@ -47,7 +47,7 @@ benchmarks! { let i in 0 .. 1000; let account_index = T::AccountIndex::from(i); // Setup accounts - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let recipient: T::AccountId = account("recipient", i, SEED); T::Currency::make_free_balance_be(&recipient, BalanceOf::::max_value()); @@ -63,7 +63,7 @@ benchmarks! { let i in 0 .. 1000; let account_index = T::AccountIndex::from(i); // Setup accounts - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // Claim the index Indices::::claim(RawOrigin::Signed(caller.clone()).into(), account_index)?; @@ -93,7 +93,7 @@ benchmarks! { let i in 0 .. 1000; let account_index = T::AccountIndex::from(i); // Setup accounts - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // Claim the index Indices::::claim(RawOrigin::Signed(caller.clone()).into(), account_index)?; diff --git a/frame/proxy/src/benchmarking.rs b/frame/proxy/src/benchmarking.rs index 3cbe517dfd7..f68a2c3a4cd 100644 --- a/frame/proxy/src/benchmarking.rs +++ b/frame/proxy/src/benchmarking.rs @@ -21,14 +21,14 @@ use super::*; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account}; +use frame_benchmarking::{benchmarks, account, whitelisted_caller}; use sp_runtime::traits::Bounded; use crate::Module as Proxy; const SEED: u32 = 0; fn add_proxies(n: u32, maybe_who: Option) -> Result<(), &'static str> { - let caller = maybe_who.unwrap_or_else(|| account("caller", 0, SEED)); + let caller = maybe_who.unwrap_or_else(|| whitelisted_caller()); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); for i in 0..n { Proxy::::add_proxy( @@ -50,35 +50,35 @@ benchmarks! { // In this case the caller is the "target" proxy let caller: T::AccountId = account("target", p - 1, SEED); // ... and "real" is the traditional caller. This is not a typo. 
- let real: T::AccountId = account("caller", 0, SEED); + let real: T::AccountId = whitelisted_caller(); let call: ::Call = frame_system::Call::::remark(vec![]).into(); }: _(RawOrigin::Signed(caller), real, Some(T::ProxyType::default()), Box::new(call)) add_proxy { let p in ...; - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); }: _(RawOrigin::Signed(caller), account("target", T::MaxProxies::get().into(), SEED), T::ProxyType::default()) remove_proxy { let p in ...; - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); }: _(RawOrigin::Signed(caller), account("target", 0, SEED), T::ProxyType::default()) remove_proxies { let p in ...; - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); }: _(RawOrigin::Signed(caller)) anonymous { let p in ...; - }: _(RawOrigin::Signed(account("caller", 0, SEED)), T::ProxyType::default(), 0) + }: _(RawOrigin::Signed(whitelisted_caller()), T::ProxyType::default(), 0) kill_anonymous { let p in 0 .. (T::MaxProxies::get() - 2).into(); - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - Module::::anonymous(RawOrigin::Signed(account("caller", 0, SEED)).into(), T::ProxyType::default(), 0)?; + Module::::anonymous(RawOrigin::Signed(whitelisted_caller()).into(), T::ProxyType::default(), 0)?; let height = system::Module::::block_number(); let ext_index = system::Module::::extrinsic_index().unwrap_or(0); let anon = Module::::anonymous_account(&caller, &T::ProxyType::default(), 0, None); diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index aab92ef4ce5..77eecb2ef04 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -23,7 +23,7 @@ use testing_utils::*; use sp_runtime::traits::One; use frame_system::RawOrigin; -pub use frame_benchmarking::{benchmarks, account}; +pub use frame_benchmarking::{benchmarks, account, whitelisted_caller}; const SEED: u32 = 0; const MAX_SPANS: u32 = 100; const MAX_VALIDATORS: u32 = 1000; @@ -280,7 +280,7 @@ benchmarks! { let validator = create_validator_with_nominators::(n, T::MaxNominatorRewardedPerValidator::get() as u32, true)?; let current_era = CurrentEra::get().unwrap(); - let caller = account("caller", 0, SEED); + let caller = whitelisted_caller(); let balance_before = T::Currency::free_balance(&validator); }: _(RawOrigin::Signed(caller), validator.clone(), current_era) verify { @@ -294,7 +294,7 @@ benchmarks! { let validator = create_validator_with_nominators::(n, T::MaxNominatorRewardedPerValidator::get() as u32, false)?; let current_era = CurrentEra::get().unwrap(); - let caller = account("caller", 0, SEED); + let caller = whitelisted_caller(); let balance_before = T::Currency::free_balance(&validator); }: payout_stakers(RawOrigin::Signed(caller), validator.clone(), current_era) verify { @@ -419,7 +419,7 @@ benchmarks! { let total_payout = T::Currency::minimum_balance() * 1000.into(); >::insert(current_era, total_payout); - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); }: { for arg in payout_calls_arg { >::payout_stakers(RawOrigin::Signed(caller.clone()).into(), arg.0, arg.1)?; @@ -471,6 +471,10 @@ benchmarks! 
{ let era = >::current_era().unwrap_or(0); let caller: T::AccountId = account("caller", n, SEED); + + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: { let result = >::submit_election_solution( RawOrigin::Signed(caller.clone()).into(), @@ -532,6 +536,10 @@ benchmarks! { let era = >::current_era().unwrap_or(0); let caller: T::AccountId = account("caller", n, SEED); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); + // submit a very bad solution on-chain { // this is needed to fool the chain to accept this solution. @@ -584,6 +592,10 @@ benchmarks! { let caller: T::AccountId = account("caller", n, SEED); let era = >::current_era().unwrap_or(0); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); + // submit a seq-phragmen with all the good stuff on chain. { let (winners, compact, score, size) = get_seq_phragmen_solution::(true); diff --git a/frame/system/benchmarking/src/lib.rs b/frame/system/benchmarking/src/lib.rs index a3e7797996a..653d9536f17 100644 --- a/frame/system/benchmarking/src/lib.rs +++ b/frame/system/benchmarking/src/lib.rs @@ -24,15 +24,13 @@ use sp_std::vec; use sp_std::prelude::*; use sp_core::{ChangesTrieConfiguration, storage::well_known_keys}; use sp_runtime::traits::Hash; -use frame_benchmarking::{benchmarks, account}; +use frame_benchmarking::{benchmarks, whitelisted_caller}; use frame_support::traits::Get; use frame_support::storage::{self, StorageMap}; use frame_system::{Module as System, Call, RawOrigin, DigestItemOf, AccountInfo}; mod mock; -const SEED: u32 = 0; - pub struct Module(System); pub trait Trait: frame_system::Trait {} @@ -42,7 +40,7 @@ benchmarks! { remark { let b in 0 .. T::MaximumBlockLength::get(); let remark_message = vec![1; b as usize]; - let caller = account("caller", 0, SEED); + let caller = whitelisted_caller(); }: _(RawOrigin::Signed(caller), remark_message) set_heap_pages { @@ -139,7 +137,7 @@ benchmarks! { } suicide { - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); let account_info = AccountInfo:: { nonce: 1337.into(), refcount: 0, diff --git a/frame/timestamp/src/benchmarking.rs b/frame/timestamp/src/benchmarking.rs index 9b1c976229e..1cd0f15ca01 100644 --- a/frame/timestamp/src/benchmarking.rs +++ b/frame/timestamp/src/benchmarking.rs @@ -23,7 +23,7 @@ use super::*; use sp_std::prelude::*; use frame_system::RawOrigin; use frame_support::{ensure, traits::OnFinalize}; -use frame_benchmarking::benchmarks; +use frame_benchmarking::{benchmarks, TrackedStorageKey}; use crate::Module as Timestamp; @@ -34,8 +34,14 @@ benchmarks! { set { let t in 1 .. MAX_TIME; + // Ignore write to `DidUpdate` since it transient. + let did_update_key = crate::DidUpdate::hashed_key().to_vec(); + frame_benchmarking::benchmarking::add_to_whitelist(TrackedStorageKey { + key: did_update_key, + has_been_read: false, + has_been_written: true, + }); }: _(RawOrigin::None, t.into()) - verify { ensure!(Timestamp::::now() == t.into(), "Time was not set."); } @@ -44,8 +50,10 @@ benchmarks! { let t in 1 .. 
MAX_TIME; Timestamp::::set(RawOrigin::None.into(), t.into())?; ensure!(DidUpdate::exists(), "Time was not set."); + // Ignore read/write to `DidUpdate` since it is transient. + let did_update_key = crate::DidUpdate::hashed_key().to_vec(); + frame_benchmarking::benchmarking::add_to_whitelist(did_update_key.into()); }: { Timestamp::::on_finalize(t.into()); } - verify { ensure!(!DidUpdate::exists(), "Time was not removed."); } diff --git a/frame/treasury/src/benchmarking.rs b/frame/treasury/src/benchmarking.rs index 8dddf3581ae..295326e1639 100644 --- a/frame/treasury/src/benchmarking.rs +++ b/frame/treasury/src/benchmarking.rs @@ -22,7 +22,7 @@ use super::*; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account}; +use frame_benchmarking::{benchmarks, account, whitelisted_caller}; use frame_support::traits::OnInitialize; use crate::Module as Treasury; @@ -45,7 +45,7 @@ fn setup_proposal(u: u32) -> ( // Create the pre-requisite information needed to create a `report_awesome`. fn setup_awesome(length: u32) -> (T::AccountId, Vec, T::AccountId) { - let caller = account("caller", 0, SEED); + let caller = whitelisted_caller(); let value = T::TipReportDepositBase::get() + T::TipReportDepositPerByte::get() * length.into() + T::Currency::minimum_balance(); @@ -116,6 +116,9 @@ benchmarks! { propose_spend { let u in 0 .. 1000; let (caller, value, beneficiary_lookup) = setup_proposal::(u); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: _(RawOrigin::Signed(caller), value, beneficiary_lookup) reject_proposal { @@ -143,6 +146,9 @@ benchmarks! { report_awesome { let r in 0 .. MAX_BYTES; let (caller, reason, awesome_person) = setup_awesome::(r); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: _(RawOrigin::Signed(caller), reason, awesome_person) retract_tip { @@ -155,6 +161,9 @@ benchmarks! { )?; let reason_hash = T::Hashing::hash(&reason[..]); let hash = T::Hashing::hash_of(&(&reason_hash, &awesome_person)); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: _(RawOrigin::Signed(caller), hash) tip_new { @@ -162,6 +171,9 @@ benchmarks! { let t in 1 .. MAX_TIPPERS; let (caller, reason, beneficiary, value) = setup_tip::(r, t)?; + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: _(RawOrigin::Signed(caller), reason, beneficiary, value) tip { @@ -179,6 +191,9 @@ benchmarks! { ensure!(Tips::::contains_key(hash), "tip does not exist"); create_tips::(t - 1, hash.clone(), value)?; let caller = account("member", t - 1, SEED); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: _(RawOrigin::Signed(caller), hash, value) close_tip { @@ -206,6 +221,9 @@ benchmarks! { create_tips::(t, hash.clone(), value)?; let caller = account("caller", t, SEED); + // Whitelist caller account from further DB operations. 
+ let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: _(RawOrigin::Signed(caller), hash) on_initialize { diff --git a/frame/utility/src/benchmarking.rs b/frame/utility/src/benchmarking.rs index 155a279807a..8ca0e216f28 100644 --- a/frame/utility/src/benchmarking.rs +++ b/frame/utility/src/benchmarking.rs @@ -21,7 +21,7 @@ use super::*; use frame_system::{RawOrigin, EventRecord}; -use frame_benchmarking::{benchmarks, account}; +use frame_benchmarking::{benchmarks, account, whitelisted_caller}; const SEED: u32 = 0; @@ -43,7 +43,7 @@ benchmarks! { let call = frame_system::Call::remark(vec![]).into(); calls.push(call); } - let caller = account("caller", 0, SEED); + let caller = whitelisted_caller(); }: _(RawOrigin::Signed(caller), calls) verify { assert_last_event::(Event::BatchCompleted.into()) @@ -53,6 +53,9 @@ benchmarks! { let u in 0 .. 1000; let caller = account("caller", u, SEED); let call = Box::new(frame_system::Call::remark(vec![]).into()); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: _(RawOrigin::Signed(caller), u as u16, call) } diff --git a/frame/vesting/src/benchmarking.rs b/frame/vesting/src/benchmarking.rs index 24cdc28c97f..974289aac32 100644 --- a/frame/vesting/src/benchmarking.rs +++ b/frame/vesting/src/benchmarking.rs @@ -22,7 +22,7 @@ use super::*; use frame_system::{RawOrigin, Module as System}; -use frame_benchmarking::{benchmarks, account}; +use frame_benchmarking::{benchmarks, account, whitelisted_caller}; use sp_runtime::traits::Bounded; use crate::Module as Vesting; @@ -64,7 +64,7 @@ benchmarks! { vest_locked { let l in 0 .. MAX_LOCKS; - let caller = account("caller", 0, SEED); + let caller = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); add_locks::(&caller, l as u8); add_vesting_schedule::(&caller)?; @@ -88,7 +88,7 @@ benchmarks! { vest_unlocked { let l in 0 .. MAX_LOCKS; - let caller = account("caller", 0, SEED); + let caller = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); add_locks::(&caller, l as u8); add_vesting_schedule::(&caller)?; @@ -125,7 +125,7 @@ benchmarks! { "Vesting schedule not added", ); - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); }: vest_other(RawOrigin::Signed(caller.clone()), other_lookup) verify { // Nothing happened since everything is still vested. @@ -152,7 +152,7 @@ benchmarks! { "Vesting schedule still active", ); - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); }: vest_other(RawOrigin::Signed(caller.clone()), other_lookup) verify { // Vesting schedule is removed! @@ -166,7 +166,7 @@ benchmarks! { vested_transfer { let l in 0 .. 
MAX_LOCKS; - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let target: T::AccountId = account("target", 0, SEED); let target_lookup: ::Source = T::Lookup::unlookup(target.clone()); diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 8e141867195..01570e0bfad 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -25,7 +25,7 @@ use std::any::{Any, TypeId}; -use sp_storage::ChildInfo; +use sp_storage::{ChildInfo, TrackedStorageKey}; pub use scope_limited::{set_and_run_with_externalities, with_externalities}; pub use extensions::{Extension, Extensions, ExtensionStore}; @@ -248,12 +248,19 @@ pub trait Externalities: ExtensionStore { /// Resets read/write count for the benchmarking process. fn reset_read_write_count(&mut self); + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + /// Benchmarking related functionality and shouldn't be used anywhere else! + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + /// + /// Gets the current DB tracking whitelist. + fn get_whitelist(&self) -> Vec; + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! /// Benchmarking related functionality and shouldn't be used anywhere else! /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! /// /// Adds new storage keys to the DB tracking whitelist. - fn set_whitelist(&mut self, new: Vec>); + fn set_whitelist(&mut self, new: Vec); } /// Extension for the [`Externalities`] trait. diff --git a/primitives/runtime-interface/Cargo.toml b/primitives/runtime-interface/Cargo.toml index 16d5a14e889..f16000bff49 100644 --- a/primitives/runtime-interface/Cargo.toml +++ b/primitives/runtime-interface/Cargo.toml @@ -21,6 +21,7 @@ sp-externalities = { version = "0.8.0-rc5", optional = true, path = "../external codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } static_assertions = "1.0.0" primitive-types = { version = "0.7.0", default-features = false } +sp-storage = { version = "2.0.0-rc5", default-features = false, path = "../storage" } [dev-dependencies] sp-runtime-interface-test-wasm = { version = "2.0.0-rc5", path = "test-wasm" } diff --git a/primitives/runtime-interface/src/impls.rs b/primitives/runtime-interface/src/impls.rs index 259d3517f00..da57cf086be 100644 --- a/primitives/runtime-interface/src/impls.rs +++ b/primitives/runtime-interface/src/impls.rs @@ -537,3 +537,7 @@ impl PassBy for sp_wasm_interface::ValueType { impl PassBy for sp_wasm_interface::Value { type PassBy = Codec; } + +impl PassBy for sp_storage::TrackedStorageKey { + type PassBy = Codec; +} diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 9ec03c4d1e2..cfff2c6fc69 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -19,7 +19,10 @@ use hash_db::Hasher; use codec::{Decode, Encode}; -use sp_core::{traits::RuntimeCode, storage::{ChildInfo, well_known_keys}}; +use sp_core::{ + traits::RuntimeCode, + storage::{ChildInfo, well_known_keys, TrackedStorageKey} +}; use crate::{ trie_backend::TrieBackend, trie_backend_essence::TrieBackendStorage, @@ -226,10 +229,13 @@ pub trait Backend: std::fmt::Debug { unimplemented!() } - /// Update the whitelist for tracking db reads/writes - fn set_whitelist(&self, _: Vec>) { - 
unimplemented!() + /// Get the whitelist for tracking db reads/writes + fn get_whitelist(&self) -> Vec { + Default::default() } + + /// Update the whitelist for tracking db reads/writes + fn set_whitelist(&self, _: Vec) {} } impl<'a, T: Backend, H: Hasher> Backend for &'a T { diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 3ddf79dbd91..3db7a54750a 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -27,7 +27,7 @@ use sp_trie::trie_types::Layout; use sp_core::{ storage::{ well_known_keys::is_child_storage_key, Storage, - ChildInfo, StorageChild, + ChildInfo, StorageChild, TrackedStorageKey, }, traits::Externalities, Blake2Hasher, }; @@ -325,7 +325,11 @@ impl Externalities for BasicExternalities { unimplemented!("reset_read_write_count is not supported in Basic") } - fn set_whitelist(&mut self, _: Vec>) { + fn get_whitelist(&self) -> Vec { + unimplemented!("get_whitelist is not supported in Basic") + } + + fn set_whitelist(&mut self, _: Vec) { unimplemented!("set_whitelist is not supported in Basic") } } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index d7d4bc145eb..e57636b300a 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -26,7 +26,7 @@ use crate::{ use hash_db::Hasher; use sp_core::{ offchain::storage::OffchainOverlayedChanges, - storage::{well_known_keys::is_child_storage_key, ChildInfo}, + storage::{well_known_keys::is_child_storage_key, ChildInfo, TrackedStorageKey}, traits::Externalities, hexdisplay::HexDisplay, }; use sp_trie::{trie_types::Layout, empty_child_trie_root}; @@ -609,7 +609,11 @@ where self.backend.reset_read_write_count() } - fn set_whitelist(&mut self, new: Vec>) { + fn get_whitelist(&self) -> Vec { + self.backend.get_whitelist() + } + + fn set_whitelist(&mut self, new: Vec) { self.backend.set_whitelist(new) } } diff --git a/primitives/state-machine/src/read_only.rs b/primitives/state-machine/src/read_only.rs index b8a35ced1eb..99023ec772e 100644 --- a/primitives/state-machine/src/read_only.rs +++ b/primitives/state-machine/src/read_only.rs @@ -24,7 +24,7 @@ use std::{ use crate::{Backend, StorageKey, StorageValue}; use hash_db::Hasher; use sp_core::{ - storage::ChildInfo, + storage::{ChildInfo, TrackedStorageKey}, traits::Externalities, Blake2Hasher, }; use codec::Encode; @@ -194,7 +194,11 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< unimplemented!("reset_read_write_count is not supported in ReadOnlyExternalities") } - fn set_whitelist(&mut self, _: Vec>) { + fn get_whitelist(&self) -> Vec { + unimplemented!("get_whitelist is not supported in ReadOnlyExternalities") + } + + fn set_whitelist(&mut self, _: Vec) { unimplemented!("set_whitelist is not supported in ReadOnlyExternalities") } } diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index cb7f2daa50e..46d76fd7d28 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -18,7 +18,8 @@ serde = { version = "1.0.101", optional = true, features = ["derive"] } impl-serde = { version = "0.2.3", optional = true } ref-cast = "1.0.0" sp-debug-derive = { version = "2.0.0-rc5", path = "../debug-derive" } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } [features] default = [ "std" ] -std = [ "sp-std/std", "serde", "impl-serde" ] +std = [ "sp-std/std", "serde", "impl-serde", "codec/std" ] diff --git 
a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 073d80291c1..b253733e7b2 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -25,6 +25,7 @@ use sp_debug_derive::RuntimeDebug; use sp_std::{vec::Vec, ops::{Deref, DerefMut}}; use ref_cast::RefCast; +use codec::{Encode, Decode}; /// Storage key. #[derive(PartialEq, Eq, RuntimeDebug)] @@ -34,6 +35,26 @@ pub struct StorageKey( pub Vec, ); +/// Storage key with read/write tracking information. +#[derive(PartialEq, Eq, RuntimeDebug, Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(Hash, PartialOrd, Ord))] +pub struct TrackedStorageKey { + pub key: Vec, + pub has_been_read: bool, + pub has_been_written: bool, +} + +// Easily convert a key to a `TrackedStorageKey` that has been read and written to. +impl From> for TrackedStorageKey { + fn from(key: Vec) -> Self { + Self { + key: key, + has_been_read: true, + has_been_written: true, + } + } +} + /// Storage key of a child trie, it contains the prefix to the key. #[derive(PartialEq, Eq, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, PartialOrd, Ord, Clone))] -- GitLab From 6e098a1078700c6793af055f2be417ecac0e8e41 Mon Sep 17 00:00:00 2001 From: Seun Lanlege Date: Thu, 20 Aug 2020 10:55:03 +0100 Subject: [PATCH 111/132] Merge Subkey into sc-cli (#4954) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * draft * revert * WIP * all that remains is tests * update Cargo.lock * tests WIP * WIP refactor node-template-runtime and node-runtime * implments sc_cli::RuntimeAdapter for node_template_runtime::Runtime * final draft * fix update_config for subcommands * proper AccountId decoding * test-runtime tests * revert * move RuntimeAdapter to cli-utils * use &'static str for TryFrom::<&'a str>::Error for Ss58AddressFormat * tests * add frame-system to sc-cli dev-dependencies * add frame-system to sc-cli dev-dependencies * fix ui test * wip * fixed inspect test * bump impl version * bump impl version, fixx spaces remove todos * pallet-balances-cli, rustc for some reason cannot resolve pallet_balances_cli in node-cli 😩 * wip * Subcommand::run takes &self * can't believe i missed that 🤦🏾‍♂️ * bump wasm-bindgen for some reason * adds key subcommand, rename generate-node-key to generate-node-id * cargo update and crossed fingers 🤞🏽 * update ui test * update more ui tests * should be all good now * revert subkey change * revert subkey change * adds frame-utilities-cli * Apply suggestions from code review Co-authored-by: Benjamin Kampmann * removes frame from sc-cli, fix license * my editor and ci disagrees on line width * bump spec version * turn off default features for parity-scale-codec * enable full_crypto feature for sp-core in cli-utils * merge frame-utilities-cli with pallet-balances-cli * remove full_crypto feature from sp_core in cli-utils * bump Cargo.lock * cli-utils -> frame-utils * rename BlockNumber to GenericNumber, fix spaces * fix spaces * construct additional_signed manually * sign test * remove unused vars * implement subkey with frame-utilities-cli and sc_cli * fix moduleid test * CI and clion disagree on line widths * adds associated Params type to SignedExtensionProvider * Apply suggestions from code review Co-authored-by: Benjamin Kampmann * move some code around * removes unneccesary generic params * moves module_id back to frame_utilities_cli * Apply suggestions from code review Co-authored-by: Benjamin Kampmann Co-authored-by: Bastian Köcher * remove 
print_ext * remove MaybeDisplay from pallet_balances::Trait::Balance * a lot of stuff tbh * adds ExtrasParamsBuilder * remove tests for ModuleIdCmd * address comments from PR * bump Cargo.lock * manually insert key into keystore * remove unnecessary SharedParams * add validation to vanity pattern, remove unused arg * remove SharedParams from Sign, Vanity, Verify * remove SharedParams from ModuleIdCmd, remove expect from Verify, new line to Cargo.toml * remove SharedParams from InsertCmd * 🤦🏾‍♂️ * deleted prometheus.yml * move a few things around * fix vanity test Co-authored-by: Benjamin Kampmann Co-authored-by: Bastian Köcher Co-authored-by: Benjamin Kampmann --- Cargo.lock | 1367 +++++++++++------ Cargo.toml | 1 + bin/node-template/node/src/cli.rs | 2 +- bin/node-template/node/src/command.rs | 4 +- bin/node/cli/Cargo.toml | 2 + bin/node/cli/src/cli.rs | 14 +- bin/node/cli/src/command.rs | 4 + bin/node/runtime/src/lib.rs | 29 +- bin/utils/subkey/Cargo.toml | 29 +- bin/utils/subkey/README.adoc | 4 +- bin/utils/subkey/src/lib.rs | 81 + bin/utils/subkey/src/main.rs | 812 +--------- bin/utils/subkey/src/rpc.rs | 51 - client/cli/Cargo.toml | 8 + client/cli/src/arg_enums.rs | 17 + client/cli/src/commands/export_blocks_cmd.rs | 6 +- client/cli/src/commands/generate.rs | 91 ++ client/cli/src/commands/generate_node_key.rs | 70 + client/cli/src/commands/insert.rs | 94 ++ client/cli/src/commands/inspect.rs | 95 ++ client/cli/src/commands/inspect_node_key.rs | 75 + client/cli/src/commands/key.rs | 61 + client/cli/src/commands/mod.rs | 53 +- client/cli/src/commands/revert_cmd.rs | 4 +- client/cli/src/commands/sign.rs | 98 ++ client/cli/src/commands/utils.rs | 233 +++ .../src => client/cli/src/commands}/vanity.rs | 199 +-- client/cli/src/commands/verify.rs | 104 ++ client/cli/src/error.rs | 5 + client/cli/src/lib.rs | 2 +- client/cli/src/params/keystore_params.rs | 22 +- client/cli/src/params/mod.rs | 67 +- frame/balances/src/lib.rs | 1 - .../ui/impl_incorrect_method_signature.stderr | 2 +- ...reference_in_impl_runtime_apis_call.stderr | 2 +- primitives/core/src/crypto.rs | 27 +- primitives/core/src/lib.rs | 1 + primitives/runtime/src/lib.rs | 2 +- test-utils/runtime/src/lib.rs | 1 + utils/frame/frame-utilities-cli/Cargo.toml | 22 + utils/frame/frame-utilities-cli/src/lib.rs | 23 + .../frame-utilities-cli/src/module_id.rs | 96 ++ 42 files changed, 2347 insertions(+), 1534 deletions(-) create mode 100644 bin/utils/subkey/src/lib.rs delete mode 100644 bin/utils/subkey/src/rpc.rs create mode 100644 client/cli/src/commands/generate.rs create mode 100644 client/cli/src/commands/generate_node_key.rs create mode 100644 client/cli/src/commands/insert.rs create mode 100644 client/cli/src/commands/inspect.rs create mode 100644 client/cli/src/commands/inspect_node_key.rs create mode 100644 client/cli/src/commands/key.rs create mode 100644 client/cli/src/commands/sign.rs create mode 100644 client/cli/src/commands/utils.rs rename {bin/utils/subkey/src => client/cli/src/commands}/vanity.rs (56%) create mode 100644 client/cli/src/commands/verify.rs create mode 100644 utils/frame/frame-utilities-cli/Cargo.toml create mode 100644 utils/frame/frame-utilities-cli/src/lib.rs create mode 100644 utils/frame/frame-utilities-cli/src/module_id.rs diff --git a/Cargo.lock b/Cargo.lock index c80c0557443..1b0291023e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -25,12 +25,6 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" -[[package]] -name = "adler32" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d2e7343e7fc9de883d1b0341e0b13970f764c14101234857d2ddafa1cb1cac2" - [[package]] name = "aead" version = "0.3.2" @@ -73,7 +67,7 @@ dependencies = [ "aes", "block-cipher", "ghash", - "subtle 2.2.2", + "subtle 2.2.3", ] [[package]] @@ -83,7 +77,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfd7e7ae3f9a1fb5c03b389fc6bb9a51400d0c13053f0dca698c832bfd893a0d" dependencies = [ "block-cipher-trait", - "byteorder", + "byteorder 1.3.4", "opaque-debug 0.2.3", ] @@ -94,7 +88,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4925647ee64e5056cf231608957ce7c81e12d6d6e316b9ce1404778cc1d35fa7" dependencies = [ "block-cipher", - "byteorder", + "byteorder 1.3.4", "opaque-debug 0.2.3", ] @@ -136,9 +130,9 @@ checksum = "e8fd72866655d1904d6b0997d0b07ba561047d070fbe29de039031c641b61217" [[package]] name = "aho-corasick" -version = "0.7.10" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8716408b8bc624ed7f65d223ddb9ac2d044c0547b6fa4b0d554f3a9540496ada" +checksum = "043164d8ba5c4c3035fec9bbee8647c0261d788f3474306f93bb65901cae0e86" dependencies = [ "memchr", ] @@ -160,7 +154,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -169,14 +163,14 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] name = "anyhow" -version = "1.0.28" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9a60d744a80c30fcb657dfe2c1b22bcb3e814c1a1e3674f32bf5820b570fbff" +checksum = "85bb70cc08ec97ca5450e6eba421deeea5f172c0fc61f78b5357b2a8e8be195f" [[package]] name = "approx" @@ -189,15 +183,15 @@ dependencies = [ [[package]] name = "arbitrary" -version = "0.4.1" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75153c95fdedd7db9732dfbfc3702324a1627eec91ba56e37cd0ac78314ab2ed" +checksum = "7cb544f1057eaaff4b34f8c4dcf56fc3cd04debd291998405d135017a7c3c0f4" [[package]] name = "arc-swap" -version = "0.4.5" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d663a8e9a99154b5fb793032533f6328da35e23aac63d5c152279aa8ba356825" +checksum = "4d25d88fd6b8041580a654f9d0c581a047baee2b3efee13275f2fc392fc75034" [[package]] name = "arrayref" @@ -258,6 +252,17 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7deb0a829ca7bcfaf5da70b073a8d128619259a7be8216a355e23f00763059e5" +[[package]] +name = "async-channel" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee81ba99bee79f3c8ae114ae4baa7eaa326f63447cf2ec65e4393618b63f8770" +dependencies = [ + "concurrent-queue", + "event-listener", + "futures-core", +] + [[package]] name = "async-std" version = "1.6.2" @@ -274,7 +279,7 @@ dependencies = [ "log", "memchr", "num_cpus", - "once_cell", + "once_cell 1.4.0", "pin-project-lite", "pin-utils", "slab", @@ -300,12 +305,29 @@ dependencies = [ "webpki-roots 0.19.0", ] 
+[[package]] +name = "async-trait" +version = "0.1.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "caae68055714ff28740f310927e04f2eba76ff580b16fb18ed90073ee71646f7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "atomic" version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64f46ca51dca4837f1520754d1c8c36636356b81553d928dc9c177025369a06e" +[[package]] +name = "atomic-waker" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "065374052e7df7ee4047b1160cca5e1467a12351a40b3da123c870ba0b8eda2a" + [[package]] name = "atty" version = "0.2.14" @@ -314,7 +336,7 @@ checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ "hermit-abi", "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -338,7 +360,7 @@ dependencies = [ "addr2line", "cfg-if", "libc", - "miniz_oxide 0.4.0", + "miniz_oxide", "object 0.20.0", "rustc-demangle", ] @@ -363,26 +385,26 @@ checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" [[package]] name = "bincode" -version = "1.2.1" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5753e2a71534719bf3f4e57006c3a4f0d2c672a4b676eec84161f763eca87dbf" +checksum = "f30d3a39baa26f9651f17b375061f3233dde33424a8b72b0dbe93a68a0bc896d" dependencies = [ - "byteorder", + "byteorder 1.3.4", "serde", ] [[package]] name = "bindgen" -version = "0.53.2" +version = "0.53.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bb26d6a69a335b8cb0e7c7e9775cd5666611dc50a37177c3f2cedcfc040e8c8" +checksum = "c72a978d268b1d70b0e963217e60fdabd9523a941457a6c42a7315d15c7e89e5" dependencies = [ "bitflags", "cexpr", "cfg-if", "clang-sys", "clap", - "env_logger 0.7.1", + "env_logger", "lazy_static", "lazycell", "log", @@ -395,6 +417,21 @@ dependencies = [ "which", ] +[[package]] +name = "bip39" +version = "0.6.0-beta.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7059804e226b3ac116519a252d7f5fb985a5ccc0e93255e036a5f7e7283323f4" +dependencies = [ + "failure", + "hashbrown 0.1.8", + "hmac", + "once_cell 0.1.8", + "pbkdf2", + "rand 0.6.5", + "sha2 0.8.2", +] + [[package]] name = "bitflags" version = "1.2.1" @@ -424,7 +461,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84ce5b6108f8e154604bd4eb76a2f726066c3464d5a552a4229262a18c9bb471" dependencies = [ "byte-tools", - "byteorder", + "byteorder 1.3.4", "crypto-mac 0.8.0", "digest 0.9.0", "opaque-debug 0.2.3", @@ -470,7 +507,7 @@ checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" dependencies = [ "block-padding", "byte-tools", - "byteorder", + "byteorder 1.3.4", "generic-array 0.12.3", ] @@ -512,13 +549,14 @@ dependencies = [ [[package]] name = "blocking" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d17efb70ce4421e351d61aafd90c16a20fb5bfe339fcdc32a86816280e62ce0" +checksum = "d2468ff7bf85066b4a3678fede6fe66db31846d753ff0adfbfab2c6a6e81612b" dependencies = [ - "futures-channel", - "futures-util", - "once_cell", + "async-channel", + "atomic-waker", + "futures-lite", + "once_cell 1.4.0", "parking", "waker-fn", ] @@ -531,9 +569,9 @@ checksum = "476e9cd489f9e121e02ffa6014a8ef220ecb15c05ed23fc34cca13925dc283fb" [[package]] name = "bstr" -version = "0.2.12" +version = "0.2.13" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2889e6d50f394968c8bf4240dc3f2a7eb4680844d27308f798229ac9d4725f41" +checksum = "31accafdb70df7871592c058eca3985b71104e15ac32f64706022c58867da931" dependencies = [ "lazy_static", "memchr", @@ -552,9 +590,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.2.1" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ae9db68ad7fac5fe51304d20f016c911539251075a214f8e663babefa35187" +checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" [[package]] name = "byte-slice-cast" @@ -568,6 +606,12 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" +[[package]] +name = "byteorder" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc10e8cc6b2580fda3f36eb6dc5316657f812a3df879a44a66fc9f0fdbc4855" + [[package]] name = "byteorder" version = "1.3.4" @@ -580,16 +624,16 @@ version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c" dependencies = [ - "byteorder", + "byteorder 1.3.4", "either", "iovec", ] [[package]] name = "bytes" -version = "0.5.4" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1" +checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" [[package]] name = "c_linked_list" @@ -599,17 +643,17 @@ checksum = "4964518bd3b4a8190e832886cdc0da9794f12e8e6c1613a9e90ff331c4c8724b" [[package]] name = "cache-padded" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24508e28c677875c380c20f4d28124fab6f8ed4ef929a1397d7b1a31e92f1005" +checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" [[package]] name = "cargo_metadata" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8de60b887edf6d74370fc8eb177040da4847d971d6234c7b13a6da324ef0caf" +checksum = "052dbdd9db69a339d5fa9ac87bfe2e1319f709119f0345988a597af82bb1011c" dependencies = [ - "semver 0.9.0", + "semver 0.10.0", "serde", "serde_derive", "serde_json", @@ -626,9 +670,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.50" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" +checksum = "f9a06fb2e53271d7c279ec1efea6ab691c35a2ae67ec0d91d7acec0caf13b518" dependencies = [ "jobserver", ] @@ -686,9 +730,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.11" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80094f509cf8b5ae86a4966a39b3ff66cd7e2a3e594accec3743ff3fabeab5b2" +checksum = "c74d84029116787153e02106bf53e66828452a4b325cc8652b788b5967c0a0b6" dependencies = [ "js-sys", "num-integer", @@ -710,9 +754,9 @@ dependencies = [ [[package]] name = "clap" -version = "2.33.0" +version = "2.33.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9" +checksum = "bdfa80d47f954d53a35a64987ca1422f495b8d6483c0fe9f7117b36c2a792129" dependencies = [ "ansi_term 0.11.0", "atty", @@ -734,9 +778,9 @@ dependencies = [ 
[[package]] name = "cmake" -version = "0.1.42" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fb25b677f8bf1eb325017cb6bb8452f87969db0fedb4f757b297bee78a7c62" +checksum = "0e56268c17a6248366d66d4a47a3381369d068cce8409bb1716ed77ea32163bb" dependencies = [ "cc", ] @@ -833,7 +877,7 @@ version = "0.66.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d9badfe36176cb653506091693bc2bb1970c9bddfcd6ec7fac404f7eaec6f38" dependencies = [ - "byteorder", + "byteorder 1.3.4", "cranelift-bforest", "cranelift-codegen-meta", "cranelift-codegen-shared", @@ -948,16 +992,16 @@ dependencies = [ [[package]] name = "criterion" -version = "0.3.1" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc755679c12bda8e5523a71e4d654b6bf2e14bd838dfc48cde6559a05caf7d1" +checksum = "70daa7ceec6cf143990669a04c7df13391d55fb27bd4079d252fca774ba244d8" dependencies = [ "atty", "cast", "clap", - "criterion-plot 0.4.1", + "criterion-plot 0.4.3", "csv", - "itertools 0.8.2", + "itertools 0.9.0", "lazy_static", "num-traits", "oorandom", @@ -965,6 +1009,7 @@ dependencies = [ "rayon", "regex", "serde", + "serde_cbor", "serde_derive", "serde_json", "tinytemplate", @@ -977,19 +1022,19 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76f9212ddf2f4a9eb2d401635190600656a1f88a932ef53d06e7fa4c7e02fb8e" dependencies = [ - "byteorder", + "byteorder 1.3.4", "cast", "itertools 0.8.2", ] [[package]] name = "criterion-plot" -version = "0.4.1" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a01e15e0ea58e8234f96146b1f91fa9d0e4dd7a38da93ff7a75d42c0b9d3a545" +checksum = "e022feadec601fba1649cfa83586381a4ad31c6bf3a9ab7d408118b05dd9889d" dependencies = [ "cast", - "itertools 0.8.2", + "itertools 0.9.0", ] [[package]] @@ -1015,17 +1060,18 @@ dependencies = [ "lazy_static", "maybe-uninit", "memoffset", - "scopeguard", + "scopeguard 1.1.0", ] [[package]] name = "crossbeam-queue" -version = "0.2.1" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c695eeca1e7173472a32221542ae469b3e9aac3a4fc81f7696bcad82029493db" +checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" dependencies = [ "cfg-if", "crossbeam-utils", + "maybe-uninit", ] [[package]] @@ -1062,7 +1108,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ "generic-array 0.14.3", - "subtle 2.2.2", + "subtle 2.2.3", ] [[package]] @@ -1098,9 +1144,9 @@ dependencies = [ [[package]] name = "ctor" -version = "0.1.13" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47c5e5ac752e18207b12e16b10631ae5f7f68f8805f335f9b817ead83d9ffce1" +checksum = "39858aa5bac06462d4dd4b9164848eb81ffc4aa5c479746393598fd193afa227" dependencies = [ "quote", "syn", @@ -1116,30 +1162,40 @@ dependencies = [ "stream-cipher 0.3.2", ] +[[package]] +name = "cuckoofilter" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8dd43f7cfaffe0a386636a10baea2ee05cc50df3b77bea4a456c9572a939bf1f" +dependencies = [ + "byteorder 0.5.3", + "rand 0.3.23", +] + [[package]] name = "curve25519-dalek" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"26778518a7f6cffa1d25a44b602b62b979bd88adb9e99ffec546998cf3404839" +checksum = "5d85653f070353a16313d0046f173f70d1aadd5b42600a14de626f0dfb3473a5" dependencies = [ - "byteorder", + "byteorder 1.3.4", "digest 0.8.1", "rand_core 0.5.1", - "subtle 2.2.2", + "subtle 2.2.3", "zeroize", ] [[package]] name = "data-encoding" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11c0346158a19b3627234e15596f5e465c360fcdb97d817bcb255e0510f5a788" +checksum = "72aa14c04dfae8dd7d8a2b1cb7ca2152618cd01336dbfe704b8dcbf8d41dbd69" [[package]] name = "derive_more" -version = "0.99.5" +version = "0.99.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2323f3f47db9a0e77ce7a300605d8d2098597fc451ed1a97bb1f6411bb550a7" +checksum = "298998b1cf6b5b2c8a7b023dfd45821825ce3ba8a8af55c921a0e734e4653f76" dependencies = [ "proc-macro2", "quote", @@ -1182,14 +1238,13 @@ dependencies = [ [[package]] name = "dirs-sys" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afa0b23de8fd801745c471deffa6e12d248f962c9fd4b4c33787b055599bde7b" +checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" dependencies = [ - "cfg-if", "libc", "redox_users", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -1204,7 +1259,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4d33be9473d06f75f58220f71f7a9317aca647dc061dbd3c361b0bef505fbea" dependencies = [ - "byteorder", + "byteorder 1.3.4", "quick-error", ] @@ -1260,7 +1315,7 @@ dependencies = [ "ed25519", "rand 0.7.3", "serde", - "sha2 0.8.1", + "sha2 0.8.2", "zeroize", ] @@ -1272,37 +1327,24 @@ checksum = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3" [[package]] name = "enumflags2" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a80e524ebf194285b57e5e7944018721c7fffc673253f5183f7accd88a2a3b0c" +checksum = "83c8d82922337cd23a15f88b70d8e4ef5f11da38dd7cdb55e84dd5de99695da0" dependencies = [ "enumflags2_derive", ] [[package]] name = "enumflags2_derive" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ed9afacaea0301eefb738c9deea725e6d53938004597cdc518a8cf9a7aa2f03" +checksum = "946ee94e3dbf58fdd324f9ce245c7b238d46a66f00e86a020b71996349e46cce" dependencies = [ "proc-macro2", "quote", "syn", ] -[[package]] -name = "env_logger" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aafcde04e90a5226a6443b7aabdb016ba2f8307c847d524724bd9b346dd1a2d3" -dependencies = [ - "atty", - "humantime", - "log", - "regex", - "termcolor", -] - [[package]] name = "env_logger" version = "0.7.1" @@ -1324,22 +1366,22 @@ checksum = "516aa8d7a71cb00a1c4146f0798549b93d083d4f189b3ced8f3de6b8f11ee6c4" [[package]] name = "erased-serde" -version = "0.3.11" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d88b6d1705e16a4d62e05ea61cc0496c2bd190f4fa8e5c1f11ce747be6bcf3d1" +checksum = "6ca8b296792113e1500fd935ae487be6e00ce318952a6880555554824d6ebf38" dependencies = [ "serde", ] [[package]] name = "errno" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b480f641ccf0faf324e20c1d3e53d81b7484c698b42ea677f6907ae4db195371" +checksum = "6eab5ee3df98a279d9b316b1af6ac95422127b1290317e6d18c1743c99418b01" 
dependencies = [ "errno-dragonfly", "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -1361,7 +1403,7 @@ dependencies = [ "crunchy", "fixed-hash", "impl-rlp", - "impl-serde 0.3.0", + "impl-serde 0.3.1", "tiny-keccak 2.0.2", ] @@ -1374,11 +1416,17 @@ dependencies = [ "ethbloom", "fixed-hash", "impl-rlp", - "impl-serde 0.3.0", + "impl-serde 0.3.1", "primitive-types", "uint", ] +[[package]] +name = "event-listener" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "699d84875f1b72b4da017e6b0f77dfa88c0137f089958a88974d15938cbc2976" + [[package]] name = "evm" version = "0.17.0" @@ -1436,9 +1484,9 @@ dependencies = [ [[package]] name = "failure" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8529c2421efa3066a5cbd8063d2244603824daccb6936b079010bb2aa89464b" +checksum = "d32e9bd16cc02eae7db7ef620b392808b89f6a5e16bb3497d159c6b92a0f4f86" dependencies = [ "backtrace", "failure_derive", @@ -1446,9 +1494,9 @@ dependencies = [ [[package]] name = "failure_derive" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "030a733c8287d6213886dd487564ff5c8f6aae10278b3588ed177f9d18f8d231" +checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" dependencies = [ "proc-macro2", "quote", @@ -1470,9 +1518,9 @@ checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" [[package]] name = "fastrand" -version = "1.3.2" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b90eb1dec02087df472ab9f0db65f27edaa654a746830042688bcc2eaf68090f" +checksum = "36a9cb09840f81cd211e435d00a4e487edd263dc3c8ff815c32dd76ad668ebed" [[package]] name = "fdlimit" @@ -1485,11 +1533,11 @@ dependencies = [ [[package]] name = "file-per-thread-logger" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8505b75b31ef7285168dd237c4a7db3c1f3e0927e7d314e670bc98e854272fe9" +checksum = "8b3937f028664bd0e13df401ba49a4567ccda587420365823242977f06609ed1" dependencies = [ - "env_logger 0.6.2", + "env_logger", "log", ] @@ -1511,11 +1559,11 @@ dependencies = [ [[package]] name = "fixed-hash" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32529fc42e86ec06e5047092082aab9ad459b070c5d2a76b14f4f5ce70bf2e84" +checksum = "11498d382790b7a8f2fd211780bec78619bba81cdad3a283997c0c41f836759c" dependencies = [ - "byteorder", + "byteorder 1.3.4", "rand 0.7.3", "rustc-hex", "static_assertions", @@ -1529,22 +1577,22 @@ checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" [[package]] name = "flate2" -version = "1.0.14" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cfff41391129e0a856d6d822600b8d71179d46879e310417eb9c762eb178b42" +checksum = "68c90b0fc46cf89d227cc78b40e494ff81287a92dd07631e5af0d06fe3cf885e" dependencies = [ "cfg-if", "crc32fast", "libc", "libz-sys", - "miniz_oxide 0.3.6", + "miniz_oxide", ] [[package]] name = "fnv" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" @@ -1628,7 +1676,7 @@ dependencies = [ "frame-system", "impl-trait-for-tuples", "log", 
- "once_cell", + "once_cell 1.4.0", "parity-scale-codec", "parity-util-mem 0.7.0", "paste", @@ -1743,7 +1791,7 @@ dependencies = [ "lazy_static", "libc", "libloading", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -1753,7 +1801,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" dependencies = [ "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -1880,6 +1928,21 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de27142b013a8e869c14957e6d2edeef89e97c289e69d042ee3a49acd8b51789" +[[package]] +name = "futures-lite" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "180d8fc9819eb48a0c976672fbeea13a73e10999e812bdc9e14644c25ad51d60" +dependencies = [ + "fastrand", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", +] + [[package]] name = "futures-macro" version = "0.3.5" @@ -1904,7 +1967,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdb66b5f09e22019b1ab0830f7785bcea8e7a42148683f99214f73f8ec21a626" dependencies = [ - "once_cell", + "once_cell 1.4.0", ] [[package]] @@ -1962,7 +2025,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce54d63f8b0c75023ed920d46fd71d0cbbb830b0ee012726b5b4f506fb6dea5b" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "futures 0.3.5", "memchr", "pin-project", @@ -2097,7 +2160,7 @@ version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a5b34c246847f938a410a03c5458c7fee2274436675e76d8b903c08efc29c462" dependencies = [ - "byteorder", + "byteorder 1.3.4", "bytes 0.4.12", "fnv", "futures 0.1.29", @@ -2111,23 +2174,29 @@ dependencies = [ [[package]] name = "h2" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "377038bf3c89d18d6ca1431e7a5027194fbd724ca10592b9487ede5e8e144f42" +checksum = "993f9e0baeed60001cf565546b0d3dbe6a6ad23f2bd31644a133c641eccf6d53" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "fnv", "futures-core", "futures-sink", "futures-util", "http 0.2.1", "indexmap", - "log", "slab", - "tokio 0.2.21", + "tokio 0.2.22", "tokio-util", + "tracing", ] +[[package]] +name = "half" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d36fab90f82edc3c747f9d438e06cf0a491055896f2a279638bb5beed6c40177" + [[package]] name = "hash-db" version = "0.15.2" @@ -2143,6 +2212,16 @@ dependencies = [ "crunchy", ] +[[package]] +name = "hashbrown" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bae29b6653b3412c2e71e9d486db9f9df5d701941d86683005efb9f2d28e3da" +dependencies = [ + "byteorder 1.3.4", + "scopeguard 0.3.3", +] + [[package]] name = "hashbrown" version = "0.6.3" @@ -2155,9 +2234,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab9b7860757ce258c89fd48d28b68c41713e597a7b09e793f6c6a6e2ea37c827" +checksum = "34f595585f103464d8d2f6e9864682d74c1601fed5e07d62b1c9058dba8246fb" dependencies = [ "ahash 0.3.8", "autocfg 1.0.0", @@ -2174,9 +2253,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.10" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"725cf19794cf90aa94e65050cb4191ff5d8fa87a498383774c47b332e3af952e" +checksum = "3deed196b6e7f9e44a2ae8d94225d80302d81208b1bb673fd21fe634645c85a9" dependencies = [ "libc", ] @@ -2199,9 +2278,9 @@ dependencies = [ [[package]] name = "hex-literal-impl" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d4c5c844e2fee0bf673d54c2c177f1713b3d2af2ff6e666b49cb7572e6cf42d" +checksum = "853f769599eb31de176303197b7ba4973299c38c7a7604a6bc88c3eef05b9b46" dependencies = [ "proc-macro-hack", ] @@ -2255,7 +2334,7 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "fnv", "itoa", ] @@ -2278,7 +2357,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "http 0.2.1", ] @@ -2329,25 +2408,25 @@ dependencies = [ [[package]] name = "hyper" -version = "0.13.4" +version = "0.13.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed6081100e960d9d74734659ffc9cc91daf1c0fc7aceb8eaa94ee1a3f5046f2e" +checksum = "3e68a8dd9716185d9e64ea473ea6ef63529252e3e27623295a0378a19665d5eb" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "futures-channel", "futures-core", "futures-util", - "h2 0.2.4", + "h2 0.2.6", "http 0.2.1", "http-body 0.3.1", "httparse", "itoa", - "log", - "net2", "pin-project", + "socket2", "time", - "tokio 0.2.21", + "tokio 0.2.22", "tower-service", + "tracing", "want 0.3.0", ] @@ -2357,14 +2436,14 @@ version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37743cc83e8ee85eacfce90f2f4102030d9ff0a95244098d781e9bee4a90abb6" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "ct-logs", "futures-util", - "hyper 0.13.4", + "hyper 0.13.7", "log", "rustls", "rustls-native-certs", - "tokio 0.2.21", + "tokio 0.2.22", "tokio-rustls", "webpki", ] @@ -2420,9 +2499,9 @@ dependencies = [ [[package]] name = "impl-serde" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bbe9ea9b182f0fb1cabbd61f4ff9b7b7b9197955e95a7e4c27de5055eb29ff8" +checksum = "b47ca4d2b6931707a55fce5cf66aff80e2178c8b63bbb4ecb5695cbc870ddf6f" dependencies = [ "serde", ] @@ -2440,11 +2519,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.3.2" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "076f042c5b7b98f31d205f1249267e12a6518c1481e9dae9764af19b707d2292" +checksum = "5b88cd59ee5f71fea89a62248fc8f387d44400cefe05ef548466d61ced9029a7" dependencies = [ "autocfg 1.0.0", + "hashbrown 0.8.1", "serde", ] @@ -2505,9 +2585,9 @@ dependencies = [ [[package]] name = "itoa" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e" +checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" [[package]] name = "jobserver" @@ -2520,9 +2600,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.37" +version = "0.3.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a27d435371a2fa5b6d2b028a74bbdb1234f308da363226a2854ca3ff8ba7055" +checksum = "fa5a448de267e7358beaf4a5d849518fe9a0c13fce7afd44b06e68550e5562a7" 
dependencies = [ "wasm-bindgen", ] @@ -2679,9 +2759,9 @@ dependencies = [ [[package]] name = "kv-log-macro" -version = "1.0.4" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c54d9f465d530a752e6ebdc217e081a7a614b48cb200f6f0aee21ba6bc9aabb" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" dependencies = [ "log", ] @@ -2783,9 +2863,9 @@ checksum = "3576a87f2ba00f6f106fdfcd16db1d698d648a26ad8e0573cad8537c3c362d2a" [[package]] name = "libc" -version = "0.2.71" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9457b06509d27052635f90d6466700c65095fdf75409b3fbdd903e988b886f49" +checksum = "bd7d4bd64732af4bf3a67f367c27df8520ad7e230c5817b8ff485864d80242b9" [[package]] name = "libloading" @@ -2794,7 +2874,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2b111a074963af1d37a139918ac6d49ad1d0d5e47f72fd55388619691a7d753" dependencies = [ "cc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -2803,6 +2883,45 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" +[[package]] +name = "libp2p" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0306a49ee6a89468f96089906f36b0eef82c988dcfc8acf3e2dcd6ad1c859f85" +dependencies = [ + "bytes 0.5.6", + "futures 0.3.5", + "lazy_static", + "libp2p-core", + "libp2p-core-derive", + "libp2p-deflate", + "libp2p-dns", + "libp2p-floodsub", + "libp2p-gossipsub", + "libp2p-identify", + "libp2p-kad", + "libp2p-mdns", + "libp2p-mplex", + "libp2p-noise 0.21.0", + "libp2p-ping", + "libp2p-plaintext", + "libp2p-pnet", + "libp2p-request-response", + "libp2p-secio", + "libp2p-swarm", + "libp2p-tcp", + "libp2p-uds", + "libp2p-wasm-ext", + "libp2p-websocket", + "libp2p-yamux", + "multihash", + "parity-multiaddr", + "parking_lot 0.10.2", + "pin-project", + "smallvec 1.4.1", + "wasm-timer", +] + [[package]] name = "libp2p" version = "0.23.0" @@ -2810,7 +2929,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1ebb6c031584a5af181fe3a1e4b074af5d0b1a3b31663200f0251f4bcff6b5c" dependencies = [ "atomic", - "bytes 0.5.4", + "bytes 0.5.6", "futures 0.3.5", "lazy_static", "libp2p-core", @@ -2820,7 +2939,7 @@ dependencies = [ "libp2p-kad", "libp2p-mdns", "libp2p-mplex", - "libp2p-noise", + "libp2p-noise 0.22.0", "libp2p-ping", "libp2p-secio", "libp2p-swarm", @@ -2862,7 +2981,7 @@ dependencies = [ "rand 0.7.3", "ring", "rw-stream-sink", - "sha2 0.8.1", + "sha2 0.8.2", "smallvec 1.4.1", "thiserror", "unsigned-varint 0.4.0", @@ -2880,6 +2999,17 @@ dependencies = [ "syn", ] +[[package]] +name = "libp2p-deflate" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abeff37fa533fead23fc71b14ed0a2aced36c0c65c3d0078aff07821fb71029e" +dependencies = [ + "flate2", + "futures 0.3.5", + "libp2p-core", +] + [[package]] name = "libp2p-dns" version = "0.20.0" @@ -2891,6 +3021,48 @@ dependencies = [ "log", ] +[[package]] +name = "libp2p-floodsub" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02d4f310a02441b681075037ffb41649ee8836619559311b801ef3d5cdbe14cf" +dependencies = [ + "cuckoofilter", + "fnv", + "futures 0.3.5", + "libp2p-core", + "libp2p-swarm", + "prost", + "prost-build", + "rand 0.7.3", + "smallvec 1.4.1", +] + +[[package]] +name = "libp2p-gossipsub" 
+version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a70f76b6c53ae9c97c234498c799802e43f91766bcf4a2a1f94f9339617d713b" +dependencies = [ + "base64 0.11.0", + "byteorder 1.3.4", + "bytes 0.5.6", + "fnv", + "futures 0.3.5", + "futures_codec", + "libp2p-core", + "libp2p-swarm", + "log", + "lru 0.4.3", + "prost", + "prost-build", + "rand 0.7.3", + "sha2 0.8.2", + "smallvec 1.4.1", + "unsigned-varint 0.4.0", + "wasm-timer", +] + [[package]] name = "libp2p-identify" version = "0.20.0" @@ -2914,7 +3086,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44ed3a4c8111c570ab2bffb30c6353178d7603ce3787e3c5f2493c8d3d16d1f0" dependencies = [ "arrayvec 0.5.1", - "bytes 0.5.4", + "bytes 0.5.6", "either", "fnv", "futures 0.3.5", @@ -2926,7 +3098,7 @@ dependencies = [ "prost", "prost-build", "rand 0.7.3", - "sha2 0.8.1", + "sha2 0.8.2", "smallvec 1.4.1", "uint", "unsigned-varint 0.4.0", @@ -2962,7 +3134,7 @@ version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14ae0ffacd30f073f96cd518b2c9cd2cb18ac27c3d136a4b23cf1af99f33e541" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "fnv", "futures 0.3.5", "futures_codec", @@ -2972,40 +3144,108 @@ dependencies = [ "unsigned-varint 0.4.0", ] +[[package]] +name = "libp2p-noise" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f353f8966bbaaf7456535fffd3f366f153148773a0cf04b2ec3860955cb720e" +dependencies = [ + "bytes 0.5.6", + "curve25519-dalek", + "futures 0.3.5", + "lazy_static", + "libp2p-core", + "log", + "prost", + "prost-build", + "rand 0.7.3", + "sha2 0.8.2", + "snow", + "static_assertions", + "x25519-dalek", + "zeroize", +] + [[package]] name = "libp2p-noise" version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e594f2de0c23c2b7ad14802c991a2e68e95315c6a6c7715e53801506f20135d" +checksum = "1e594f2de0c23c2b7ad14802c991a2e68e95315c6a6c7715e53801506f20135d" +dependencies = [ + "bytes 0.5.6", + "curve25519-dalek", + "futures 0.3.5", + "lazy_static", + "libp2p-core", + "log", + "prost", + "prost-build", + "rand 0.7.3", + "sha2 0.8.2", + "snow", + "static_assertions", + "x25519-dalek", + "zeroize", +] + +[[package]] +name = "libp2p-ping" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70130cf130e4ba6dc177366e72dd9f86f9e3588fa1a0c4145247e676f16affad" +dependencies = [ + "futures 0.3.5", + "libp2p-core", + "libp2p-swarm", + "log", + "rand 0.7.3", + "void", + "wasm-timer", +] + +[[package]] +name = "libp2p-plaintext" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53f0308a97f6fdd37a2bc388070e471c3ce9d92aa45c99d75c87c2dc5d5cac96" +dependencies = [ + "bytes 0.5.6", + "futures 0.3.5", + "futures_codec", + "libp2p-core", + "log", + "prost", + "prost-build", + "rw-stream-sink", + "unsigned-varint 0.4.0", + "void", +] + +[[package]] +name = "libp2p-pnet" +version = "0.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37d0db10e139d22d7af0b23ed7949449ec86262798aa0fd01595abdbcb02dc87" dependencies = [ - "bytes 0.5.4", - "curve25519-dalek", "futures 0.3.5", - "lazy_static", - "libp2p-core", "log", - "prost", - "prost-build", + "pin-project", "rand 0.7.3", - "sha2 0.8.1", - "snow", - "static_assertions", - "x25519-dalek", - "zeroize", + "salsa20", + "sha3", ] [[package]] -name = "libp2p-ping" -version = "0.20.0" +name = 
"libp2p-request-response" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70130cf130e4ba6dc177366e72dd9f86f9e3588fa1a0c4145247e676f16affad" +checksum = "6f48682b48a96545a323edd150c1d64fc1e250240bba02866e9f902e3dc032a9" dependencies = [ + "async-trait", "futures 0.3.5", "libp2p-core", "libp2p-swarm", - "log", - "rand 0.7.3", - "void", + "smallvec 1.4.1", "wasm-timer", ] @@ -3031,7 +3271,7 @@ dependencies = [ "rand 0.7.3", "ring", "rw-stream-sink", - "sha2 0.8.1", + "sha2 0.8.2", "static_assertions", "twofish", "wasm-bindgen", @@ -3070,6 +3310,18 @@ dependencies = [ "socket2", ] +[[package]] +name = "libp2p-uds" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9db9fce9e3588c3118475d9ca761c5c133b639a624a7341e2a61e4b28c376b8" +dependencies = [ + "async-std", + "futures 0.3.5", + "libp2p-core", + "log", +] + [[package]] name = "libp2p-wasm-ext" version = "0.20.1" @@ -3140,8 +3392,8 @@ dependencies = [ "digest 0.8.1", "hmac-drbg", "rand 0.7.3", - "sha2 0.8.1", - "subtle 2.2.2", + "sha2 0.8.2", + "subtle 2.2.3", "typenum", ] @@ -3159,15 +3411,15 @@ dependencies = [ [[package]] name = "linked-hash-map" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae91b68aebc4ddb91978b11a1b02ddd8602a05ec19002801c5666000e05e0f83" +checksum = "8dd5a6d5999d9907cda8ed67bbd137d3af8085216c2ac62de5be860bd41f304a" [[package]] name = "linked_hash_set" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c7c91c4c7bbeb4f2f7c4e5be11e6a05bd6830bc37249c47ce1ad86ad453ff9c" +checksum = "47186c6da4d81ca383c7c47c1bfc80f4b95f4720514d860a5407aaf4233f9588" dependencies = [ "linked-hash-map", ] @@ -3185,27 +3437,45 @@ dependencies = [ [[package]] name = "lite-json" -version = "0.1.0" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c73e713a23ac6e12074c9e96ef2dfb770921e0cb9244c093bd38424209e0e523" +dependencies = [ + "lite-parser", +] + +[[package]] +name = "lite-parser" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faa835713bb12ba5204013497da16caf2dd2eee25ca829d0efaa054fb38c4ddd" +checksum = "0c50092e40e0ccd1bf2015a10333fde0502ff95b832b0895dc1ca0d7ac6c52f6" dependencies = [ "paste", ] +[[package]] +name = "lock_api" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62ebf1391f6acad60e5c8b43706dde4582df75c06698ab44511d15016bc2442c" +dependencies = [ + "scopeguard 0.3.3", +] + [[package]] name = "lock_api" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" dependencies = [ - "scopeguard", + "scopeguard 1.1.0", ] [[package]] name = "log" -version = "0.4.8" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" +checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" dependencies = [ "cfg-if", ] @@ -3280,14 +3550,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" dependencies = [ "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] name = "memoffset" -version = "0.5.4" +version = "0.5.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4fc2c02a7e374099d4ee95a193111f72d2110197fe200272371758f6c3643d8" +checksum = "c198b026e1bbf08a937e94c6c60f9ec4a2267f5b0d2eec9c1b21b061ce2be55f" dependencies = [ "autocfg 1.0.0", ] @@ -3299,7 +3569,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36f36ddb0b2cdc25d38babba472108798e3477f02be5165f038c5e393e50c57a" dependencies = [ "hash-db", - "hashbrown 0.8.0", + "hashbrown 0.8.1", "parity-util-mem 0.7.0", ] @@ -3315,21 +3585,12 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6feca46f4fa3443a01769d768727f10c10a20fdb65e52dc16a81f0c8269bb78" dependencies = [ - "byteorder", + "byteorder 1.3.4", "keccak", "rand_core 0.5.1", "zeroize", ] -[[package]] -name = "miniz_oxide" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa679ff6578b1cddee93d7e82e263b94a575e0bfced07284eb0c037c1d2416a5" -dependencies = [ - "adler32", -] - [[package]] name = "miniz_oxide" version = "0.4.0" @@ -3341,9 +3602,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.6.21" +version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "302dec22bcf6bae6dfb69c647187f4b4d0fb6f535521f7bc022430ce8e12008f" +checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430" dependencies = [ "cfg-if", "fuchsia-zircon", @@ -3372,21 +3633,21 @@ dependencies = [ [[package]] name = "mio-named-pipes" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5e374eff525ce1c5b7687c4cef63943e7686524a387933ad27ca7ec43779cb3" +checksum = "0840c1c50fd55e521b247f949c241c9997709f23bd7f023b9762cd561e935656" dependencies = [ "log", "mio", "miow 0.3.5", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] name = "mio-uds" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "966257a94e196b11bb43aca423754d87429960a768de9414f3691d6957abf125" +checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" dependencies = [ "iovec", "libc", @@ -3412,7 +3673,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07b88fb9795d4d36d62a012dfbf49a8f5cf12751f36d31a9dbe66d528e58979e" dependencies = [ "socket2", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -3423,15 +3684,15 @@ checksum = "0debeb9fcf88823ea64d64e4a815ab1643f33127d995978e099942ce38f25238" [[package]] name = "multihash" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae32179a9904ccc6e063de8beee7f5dd55fae85ecb851ca923d55722bc28cf5d" +checksum = "f75db05d738947aa5389863aadafbcf2e509d7ba099dc2ddcdf4fc66bf7a9e03" dependencies = [ "blake2b_simd", "blake2s_simd", "digest 0.8.1", "sha-1", - "sha2 0.8.1", + "sha2 0.8.2", "sha3", "unsigned-varint 0.3.3", ] @@ -3448,7 +3709,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9157e87afbc2ef0d84cc0345423d715f445edde00141c93721c162de35a05e5" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "futures 0.3.5", "log", "pin-project", @@ -3484,13 +3745,13 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.33" +version = "0.2.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88" +checksum = 
"2ba7c918ac76704fb42afcbbb43891e72731f3dcca3bef2a19786297baf14af7" dependencies = [ "cfg-if", "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -3551,7 +3812,7 @@ dependencies = [ "futures 0.3.5", "futures-timer 3.0.2", "jsonrpc-core", - "libp2p", + "libp2p 0.23.0", "node-cli", "sc-rpc-api", "serde", @@ -3631,6 +3892,7 @@ dependencies = [ "structopt", "substrate-browser-utils", "substrate-build-script-utils", + "substrate-frame-cli", "tempfile", "tracing", "wasm-bindgen", @@ -3641,7 +3903,7 @@ dependencies = [ name = "node-executor" version = "2.0.0-rc5" dependencies = [ - "criterion 0.3.1", + "criterion 0.3.3", "frame-benchmarking", "frame-support", "frame-system", @@ -3732,7 +3994,7 @@ dependencies = [ name = "node-rpc-client" version = "2.0.0-rc5" dependencies = [ - "env_logger 0.7.1", + "env_logger", "futures 0.1.29", "hyper 0.12.35", "jsonrpc-core-client", @@ -3880,7 +4142,7 @@ dependencies = [ name = "node-testing" version = "2.0.0-rc5" dependencies = [ - "criterion 0.3.1", + "criterion 0.3.3", "frame-support", "frame-system", "fs_extra", @@ -3936,9 +4198,9 @@ checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" [[package]] name = "nom" -version = "5.1.1" +version = "5.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b471253da97532da4b61552249c521e01e736071f71c1a4f7ebbfbf0a06aad6" +checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af" dependencies = [ "memchr", "version_check", @@ -3967,9 +4229,9 @@ dependencies = [ [[package]] name = "num-integer" -version = "0.1.42" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba" +checksum = "8d59457e662d541ba17869cf51cf177c0b5f0cbf476c66bdc90bf1edac4f875b" dependencies = [ "autocfg 1.0.0", "num-traits", @@ -3989,9 +4251,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096" +checksum = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611" dependencies = [ "autocfg 1.0.0", "libm", @@ -3999,9 +4261,9 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6" +checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" dependencies = [ "hermit-abi", "libc", @@ -4026,18 +4288,27 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.3.1" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1c601810575c99596d4afc46f78a678c80105117c379eb3650cf99b8a21ce5b" +checksum = "532c29a261168a45ce28948f9537ddd7a5dd272cc513b3017b1e82a88f962c37" dependencies = [ - "parking_lot 0.9.0", + "parking_lot 0.7.1", +] + +[[package]] +name = "once_cell" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b631f7e854af39a1739f401cf34a8a013dfe09eac4fa4dba91e9768bd28168d" +dependencies = [ + "parking_lot 0.10.2", ] [[package]] name = "oorandom" -version = "11.1.0" +version = "11.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebcec7c9c2a95cacc7cd0ecb89d8a8454eca13906f6deb55258ffff0adeb9405" +checksum = 
"a170cebd8021a008ea92e4db85a72f80b35df514ec664b296fdcbb654eac0b2c" [[package]] name = "opaque-debug" @@ -4063,7 +4334,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53cdc5b785b7a58c5aad8216b3dfa114df64b0b06ae6e1501cef91df2fbdf8f9" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -4751,7 +5022,7 @@ dependencies = [ name = "pallet-staking" version = "2.0.0-rc5" dependencies = [ - "env_logger 0.7.1", + "env_logger", "frame-benchmarking", "frame-support", "frame-system", @@ -4975,7 +5246,7 @@ checksum = "cc20af3143a62c16e7c9e92ea5c6ae49f7d271d97d4d8fe73afc28f0514a3d0f" dependencies = [ "arrayref", "bs58", - "byteorder", + "byteorder 1.3.4", "data-encoding", "multihash", "percent-encoding 2.1.0", @@ -5032,7 +5303,7 @@ dependencies = [ "tokio 0.1.22", "tokio-named-pipes", "tokio-uds", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5045,7 +5316,7 @@ dependencies = [ "impl-trait-for-tuples", "parity-util-mem-derive", "parking_lot 0.10.2", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5056,14 +5327,14 @@ checksum = "297ff91fa36aec49ce183484b102f6b75b46776822bd81525bfc4cc9b0dd0f5c" dependencies = [ "cfg-if", "ethereum-types", - "hashbrown 0.8.0", + "hashbrown 0.8.1", "impl-trait-for-tuples", "lru 0.5.3", "parity-util-mem-derive", "parking_lot 0.10.2", "primitive-types", "smallvec 1.4.1", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5083,7 +5354,7 @@ version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16ad52817c4d343339b3bc2e26861bd21478eda0b7509acf83505727000512ac" dependencies = [ - "byteorder", + "byteorder 1.3.4", ] [[package]] @@ -5094,9 +5365,19 @@ checksum = "ddfc878dac00da22f8f61e7af3157988424567ab01d9920b962ef7dcbd7cd865" [[package]] name = "parking" -version = "1.0.3" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50d4a6da31f8144a32532fe38fe8fb439a6842e0ec633f0037f0144c14e7f907" + +[[package]] +name = "parking_lot" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4029bc3504a62d92e42f30b9095fdef73b8a0b2a06aa41ce2935143b05a1a06" +checksum = "ab41b4aed082705d1056416ae4468b6ea99d52599ecf3169b00088d43113e337" +dependencies = [ + "lock_api 0.1.5", + "parking_lot_core 0.4.0", +] [[package]] name = "parking_lot" @@ -5104,7 +5385,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" dependencies = [ - "lock_api", + "lock_api 0.3.4", "parking_lot_core 0.6.2", "rustc_version", ] @@ -5115,8 +5396,21 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e" dependencies = [ - "lock_api", - "parking_lot_core 0.7.1", + "lock_api 0.3.4", + "parking_lot_core 0.7.2", +] + +[[package]] +name = "parking_lot_core" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94c8c7923936b28d546dfd14d4472eaf34c99b14e1c973a32b3e6d4eb04298c9" +dependencies = [ + "libc", + "rand 0.6.5", + "rustc_version", + "smallvec 0.6.13", + "winapi 0.3.9", ] [[package]] @@ -5131,28 +5425,28 @@ dependencies = [ "redox_syscall", "rustc_version", "smallvec 0.6.13", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] name = "parking_lot_core" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "0e136c1904604defe99ce5fd71a28d473fa60a12255d511aa78a9ddf11237aeb" +checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3" dependencies = [ "cfg-if", "cloudabi", "libc", "redox_syscall", "smallvec 1.4.1", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] name = "paste" -version = "0.1.10" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab4fb1930692d1b6a9cfabdde3d06ea0a7d186518e2f4d67660d8970e2fa647a" +checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880" dependencies = [ "paste-impl", "proc-macro-hack", @@ -5160,14 +5454,11 @@ dependencies = [ [[package]] name = "paste-impl" -version = "0.1.10" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62486e111e571b1e93b710b61e8f493c0013be39629b714cb166bdb06aa5a8a" +checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6" dependencies = [ "proc-macro-hack", - "proc-macro2", - "quote", - "syn", ] [[package]] @@ -5176,8 +5467,9 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9" dependencies = [ - "byteorder", + "byteorder 1.3.4", "crypto-mac 0.7.0", + "rayon", ] [[package]] @@ -5206,9 +5498,9 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "petgraph" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c127eea4a29ec6c85d153c59dc1213f33ec74cead30fe4730aecc88cc1fd92" +checksum = "467d164a6de56270bd7c4d070df81d07beace25012d5103ced4e9ff08d6afdb7" dependencies = [ "fixedbitset", "indexmap", @@ -5236,9 +5528,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.1.4" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "237844750cfbb86f67afe27eee600dfbbcb6188d734139b534cbfbf4f96792ae" +checksum = "282adbf10f2698a7a77f8e983a74b2d18176c19a7fd32a45446139ae7b02b715" [[package]] name = "pin-utils" @@ -5248,9 +5540,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677" +checksum = "d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33" [[package]] name = "platforms" @@ -5260,9 +5552,9 @@ checksum = "feb3b2b1033b8a60b4da6ee470325f887758c95d5320f52f9ce0df055a55940e" [[package]] name = "plotters" -version = "0.2.12" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e3bb8da247d27ae212529352020f3e5ee16e83c0c258061d27b08ab92675eeb" +checksum = "0d1685fbe7beba33de0330629da9d955ac75bd54f33d7b79f9a895590124f6bb" dependencies = [ "js-sys", "num-traits", @@ -5291,15 +5583,15 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.6" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" +checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" [[package]] name = "predicates" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "347a1b6f0b21e636bc9872fb60b83b8e185f6f5516298b8238699f7f9a531030" +checksum = 
"96bfead12e90dccead362d62bb2c90a5f6fc4584963645bc7f71a735e0b0735a" dependencies = [ "difference", "predicates-core", @@ -5335,31 +5627,31 @@ dependencies = [ [[package]] name = "primitive-types" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5e4b9943a2da369aec5e96f7c10ebc74fcf434d39590d974b0a3460e6f67fbb" +checksum = "c55c21c64d0eaa4d7ed885d959ef2d62d9e488c27c0e02d9aa5ce6c877b7d5f8" dependencies = [ "fixed-hash", "impl-codec", "impl-rlp", - "impl-serde 0.3.0", + "impl-serde 0.3.1", "uint", ] [[package]] name = "proc-macro-crate" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e10d4b51f154c8a7fb96fd6dad097cb74b863943ec010ac94b9fd1be8861fe1e" +checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" dependencies = [ "toml", ] [[package]] name = "proc-macro-error" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98e9e4b82e0ef281812565ea4751049f1bdcdfccda7d3f459f2e138a40c08678" +checksum = "fc175e9777c3116627248584e8f8b3e2987405cabe1c0adf7d1dd28f09dc7880" dependencies = [ "proc-macro-error-attr", "proc-macro2", @@ -5370,9 +5662,9 @@ dependencies = [ [[package]] name = "proc-macro-error-attr" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f5444ead4e9935abd7f27dc51f7e852a0569ac888096d5ec2499470794e2e53" +checksum = "3cc9795ca17eb581285ec44936da7fc2335a3f34f2ddd13118b6f4d515435c50" dependencies = [ "proc-macro2", "quote", @@ -5383,21 +5675,21 @@ dependencies = [ [[package]] name = "proc-macro-hack" -version = "0.5.15" +version = "0.5.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d659fe7c6d27f25e9d80a1a094c223f5246f6a6596453e09d7229bf42750b63" +checksum = "7e0456befd48169b9f13ef0f0ad46d492cf9d2dbb918bcf38e01eed4ce3ec5e4" [[package]] name = "proc-macro-nested" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e946095f9d3ed29ec38de908c22f95d9ac008e424c7bcae54c75a79c527c694" +checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" [[package]] name = "proc-macro2" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "beae6331a816b1f65d04c45b078fd8e6c93e8071771f41b8163255bbd8d7c8fa" +checksum = "04f5f085b5d71e2188cb8271e5da0161ad52c3f227a661a3c135fdf28e258b12" dependencies = [ "unicode-xid", ] @@ -5421,7 +5713,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce49aefe0a6144a45de32927c77bd2859a5f7677b55f220ae5b744e87389c212" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "prost-derive", ] @@ -5431,7 +5723,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02b10678c913ecbd69350e8535c3aef91a8676c0773fc1d7b95cdd196d7f2f26" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "heck", "itertools 0.8.2", "log", @@ -5462,7 +5754,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1834f67c0697c001304b75be76f67add9c89742eda3a085ad8ee0bb38c3417aa" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "prost", ] @@ -5472,7 +5764,7 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f53bc2558e8376358ebdc28301546471d67336584f6438ed4b7c7457a055fd7" 
dependencies = [ - "byteorder", + "byteorder 1.3.4", "log", "parity-wasm 0.41.0", ] @@ -5489,7 +5781,7 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a44883e74aa97ad63db83c4bf8ca490f02b2fc02f92575e720c8551e843c945f" dependencies = [ - "env_logger 0.7.1", + "env_logger", "log", "rand 0.7.3", "rand_core 0.5.1", @@ -5508,9 +5800,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54a21852a652ad6f610c9510194f398ff6f8692e334fd1145fed931f7fbe44ea" +checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" dependencies = [ "proc-macro2", ] @@ -5541,7 +5833,7 @@ dependencies = [ "libc", "rand_core 0.3.1", "rdrand", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5554,7 +5846,7 @@ dependencies = [ "fuchsia-cprng", "libc", "rand_core 0.3.1", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5573,7 +5865,7 @@ dependencies = [ "rand_os", "rand_pcg 0.1.2", "rand_xorshift", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5669,7 +5961,7 @@ checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" dependencies = [ "libc", "rand_core 0.4.2", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5684,7 +5976,7 @@ dependencies = [ "rand_core 0.4.2", "rdrand", "wasm-bindgen", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5721,7 +6013,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03b418169fb9c46533f326efd6eed2576699c44ca92d3052a066214a8d828929" dependencies = [ - "byteorder", + "byteorder 1.3.4", "rand_core 0.3.1", ] @@ -5744,10 +6036,11 @@ checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" [[package]] name = "rayon" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db6ce3297f9c85e16621bb8cca38a06779ffc31bb8184e1be4bed2be4678a098" +checksum = "62f02856753d04e03e26929f820d0a0a337ebe71f849801eea335d464b349080" dependencies = [ + "autocfg 1.0.0", "crossbeam-deque", "either", "rayon-core", @@ -5755,9 +6048,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.7.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08a89b46efaf957e52b18062fb2f4660f8b8a4dde1807ca002690868ef2c85a9" +checksum = "e92e15d89083484e11353891f1af602cc661426deb9564c298b270c726973280" dependencies = [ "crossbeam-deque", "crossbeam-queue", @@ -5777,9 +6070,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.1.56" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" +checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" [[package]] name = "redox_users" @@ -5794,18 +6087,18 @@ dependencies = [ [[package]] name = "ref-cast" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a214c7875e1b63fc1618db7c80efc0954f6156c9ff07699fd9039e255accdd1" +checksum = "745c1787167ddae5569661d5ffb8b25ae5fedbf46717eaa92d652221cec72623" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "602eb59cda66fcb9aec25841fb76bc01d2b34282dcdd705028da297db6f3eec8" +checksum = 
"7d21b475ab879ef0e315ad99067fa25778c3b0377f57f1b00207448dac1a3144" dependencies = [ "proc-macro2", "quote", @@ -5825,9 +6118,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.3.6" +version = "1.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f6946991529684867e47d86474e3a6d0c0ab9b82d5821e314b1ede31fa3a4b3" +checksum = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6" dependencies = [ "aho-corasick", "memchr", @@ -5841,15 +6134,15 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" dependencies = [ - "byteorder", + "byteorder 1.3.4", "regex-syntax", ] [[package]] name = "regex-syntax" -version = "0.6.17" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae" +checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8" [[package]] name = "region" @@ -5860,16 +6153,16 @@ dependencies = [ "bitflags", "libc", "mach", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] name = "remove_dir_all" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5901,17 +6194,17 @@ checksum = "e005d658ad26eacc2b6c506dfde519f4e277e328d0eb3379ca61647d70a8f531" [[package]] name = "ring" -version = "0.16.12" +version = "0.16.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ba5a8ec64ee89a76c98c549af81ff14813df09c3e6dc4766c3856da48597a0c" +checksum = "952cd6b98c85bbc30efa1ba5783b8abf12fec8b3287ffa52605b9432313e34e4" dependencies = [ "cc", - "lazy_static", "libc", + "once_cell 1.4.0", "spin", "untrusted", "web-sys", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5951,7 +6244,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99371657d3c8e4d816fb6221db98fa408242b0b53bac08f8676a41f8554fe99f" dependencies = [ "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -6020,9 +6313,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3bba175698996010c4f6dce5e7f173b6eb781fce25d2cfc45e27091ce0b79f6" +checksum = "b9bdc5e856e51e685846fb6c13a1f5e5432946c2c90501bdc76a1319f19e29da" dependencies = [ "proc-macro2", "quote", @@ -6042,9 +6335,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.3" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "535622e6be132bccd223f4bb2b8ac8d53cda3c7a6394944d3b2b33fb974f9d76" +checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" [[package]] name = "safe-mix" @@ -6055,6 +6348,26 @@ dependencies = [ "rustc_version", ] +[[package]] +name = "salsa20" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2324b0e8c3bb9a586a571fdb3136f70e7e2c748de00a78043f86e0cff91f91fe" +dependencies = [ + "byteorder 1.3.4", + "salsa20-core", + "stream-cipher 0.3.2", +] + +[[package]] +name = "salsa20-core" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2fe6cc1b9f5a5867853ade63099de70f042f7679e408d1ffe52821c9248e6e69" +dependencies = [ + "stream-cipher 0.3.2", +] + [[package]] name = "same-file" version = "1.0.6" @@ -6068,12 +6381,12 @@ dependencies = [ name = "sc-authority-discovery" version = "0.8.0-rc5" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "derive_more", - "env_logger 0.7.1", + "env_logger", "futures 0.3.5", "futures-timer 3.0.2", - "libp2p", + "libp2p 0.23.0", "log", "parity-scale-codec", "prost", @@ -6169,28 +6482,36 @@ version = "0.8.0-rc5" dependencies = [ "ansi_term 0.12.1", "atty", + "bip39", "chrono", "derive_more", - "env_logger 0.7.1", + "env_logger", "fdlimit", "futures 0.3.5", + "hex", "lazy_static", + "libp2p 0.22.0", "log", "names", "nix", + "parity-scale-codec", "parity-util-mem 0.7.0", + "rand 0.7.3", "regex", "rpassword", "sc-client-api", "sc-informant", + "sc-keystore", "sc-network", "sc-service", "sc-telemetry", "sc-tracing", "serde", "serde_json", + "sp-application-crypto", "sp-blockchain", "sp-core", + "sp-io", "sp-keyring", "sp-panic-handler", "sp-runtime", @@ -6201,7 +6522,7 @@ dependencies = [ "substrate-prometheus-endpoint", "tempfile", "time", - "tokio 0.2.21", + "tokio 0.2.22", ] [[package]] @@ -6247,7 +6568,7 @@ name = "sc-client-db" version = "0.8.0-rc5" dependencies = [ "blake2-rfc", - "env_logger 0.7.1", + "env_logger", "hash-db", "kvdb 0.7.0", "kvdb-memorydb 0.7.0", @@ -6291,7 +6612,7 @@ name = "sc-consensus-aura" version = "0.8.0-rc5" dependencies = [ "derive_more", - "env_logger 0.7.1", + "env_logger", "futures 0.3.5", "futures-timer 3.0.2", "log", @@ -6329,7 +6650,7 @@ name = "sc-consensus-babe" version = "0.8.0-rc5" dependencies = [ "derive_more", - "env_logger 0.7.1", + "env_logger", "fork-tree", "futures 0.3.5", "futures-timer 3.0.2", @@ -6423,7 +6744,7 @@ version = "0.8.0-rc5" dependencies = [ "assert_matches", "derive_more", - "env_logger 0.7.1", + "env_logger", "futures 0.3.5", "jsonrpc-core", "jsonrpc-core-client", @@ -6444,7 +6765,7 @@ dependencies = [ "substrate-test-runtime-client", "substrate-test-runtime-transaction-pool", "tempfile", - "tokio 0.2.21", + "tokio 0.2.22", ] [[package]] @@ -6596,7 +6917,7 @@ version = "0.8.0-rc5" dependencies = [ "assert_matches", "derive_more", - "env_logger 0.7.1", + "env_logger", "finality-grandpa", "fork-tree", "futures 0.3.5", @@ -6632,7 +6953,7 @@ dependencies = [ "substrate-prometheus-endpoint", "substrate-test-runtime-client", "tempfile", - "tokio 0.2.21", + "tokio 0.2.22", ] [[package]] @@ -6693,7 +7014,7 @@ dependencies = [ "serde_json", "sp-application-crypto", "sp-core", - "subtle 2.2.2", + "subtle 2.2.3", "tempfile", ] @@ -6723,10 +7044,10 @@ dependencies = [ "async-std", "bitflags", "bs58", - "bytes 0.5.4", + "bytes 0.5.6", "derive_more", "either", - "env_logger 0.7.1", + "env_logger", "erased-serde", "fnv", "fork-tree", @@ -6735,7 +7056,7 @@ dependencies = [ "futures_codec", "hex", "ip_network", - "libp2p", + "libp2p 0.23.0", "linked-hash-map", "linked_hash_set", "log", @@ -6782,7 +7103,7 @@ dependencies = [ "async-std", "futures 0.3.5", "futures-timer 3.0.2", - "libp2p", + "libp2p 0.23.0", "log", "lru 0.4.3", "quickcheck", @@ -6797,10 +7118,10 @@ dependencies = [ name = "sc-network-test" version = "0.8.0-rc5" dependencies = [ - "env_logger 0.7.1", + "env_logger", "futures 0.3.5", "futures-timer 3.0.2", - "libp2p", + "libp2p 0.23.0", "log", "parking_lot 0.10.2", "rand 0.7.3", @@ -6823,12 +7144,12 @@ dependencies = [ name = "sc-offchain" version = "2.0.0-rc5" dependencies = [ - "bytes 0.5.4", - "env_logger 0.7.1", + "bytes 0.5.6", 
+ "env_logger", "fnv", "futures 0.3.5", "futures-timer 3.0.2", - "hyper 0.13.4", + "hyper 0.13.7", "hyper-rustls", "lazy_static", "log", @@ -6849,7 +7170,7 @@ dependencies = [ "sp-utils", "substrate-test-runtime-client", "threadpool", - "tokio 0.2.21", + "tokio 0.2.22", ] [[package]] @@ -6857,7 +7178,7 @@ name = "sc-peerset" version = "2.0.0-rc5" dependencies = [ "futures 0.3.5", - "libp2p", + "libp2p 0.23.0", "log", "rand 0.7.3", "serde_json", @@ -7024,7 +7345,7 @@ dependencies = [ "substrate-prometheus-endpoint", "substrate-test-runtime-client", "tempfile", - "tokio 0.2.21", + "tokio 0.2.22", "tracing", "wasm-timer", ] @@ -7033,7 +7354,7 @@ dependencies = [ name = "sc-service-test" version = "2.0.0-rc5" dependencies = [ - "env_logger 0.7.1", + "env_logger", "fdlimit", "futures 0.1.29", "futures 0.3.5", @@ -7069,7 +7390,7 @@ dependencies = [ name = "sc-state-db" version = "0.8.0-rc5" dependencies = [ - "env_logger 0.7.1", + "env_logger", "log", "parity-scale-codec", "parity-util-mem 0.7.0", @@ -7085,7 +7406,7 @@ version = "2.0.0-rc5" dependencies = [ "futures 0.3.5", "futures-timer 3.0.2", - "libp2p", + "libp2p 0.23.0", "log", "parking_lot 0.10.2", "pin-project", @@ -7121,7 +7442,7 @@ name = "sc-transaction-graph" version = "2.0.0-rc5" dependencies = [ "assert_matches", - "criterion 0.3.1", + "criterion 0.3.3", "derive_more", "futures 0.3.5", "linked-hash-map", @@ -7174,12 +7495,12 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "039c25b130bd8c1321ee2d7de7fde2659fa9c2744e4bb29711cfc852ea53cd19" +checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" dependencies = [ "lazy_static", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -7195,8 +7516,8 @@ dependencies = [ "merlin", "rand 0.7.3", "rand_core 0.5.1", - "sha2 0.8.1", - "subtle 2.2.2", + "sha2 0.8.2", + "subtle 2.2.3", "zeroize", ] @@ -7206,6 +7527,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" +[[package]] +name = "scopeguard" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94258f53601af11e6a49f722422f6e3425c52b06245a5cf9bc09908b174f5e27" + [[package]] name = "scopeguard" version = "1.1.0" @@ -7223,9 +7550,9 @@ dependencies = [ [[package]] name = "scroll_derive" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8584eea9b9ff42825b46faf46a8c24d2cff13ec152fa2a50df788b87c07ee28" +checksum = "e367622f934864ffa1c704ba2b82280aab856e3d8213c84c5720257eb34b15b9" dependencies = [ "proc-macro2", "quote", @@ -7288,6 +7615,15 @@ name = "semver" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "394cec28fa623e00903caf7ba4fa6fb9a0e260280bb8cdbbba029611108a0190" dependencies = [ "semver-parser", "serde", @@ -7326,6 +7662,16 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde_cbor" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e18acfa2f90e8b735b2836ab8d538de304cbb6729a7360729ea5a895d15a622" +dependencies = [ + "half", + "serde", +] 
+ [[package]] name = "serde_derive" version = "1.0.114" @@ -7362,9 +7708,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27044adfd2e1f077f649f59deb9490d3941d674002f7d062870a60ebe9bd47a0" +checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" dependencies = [ "block-buffer 0.7.3", "digest 0.8.1", @@ -7507,12 +7853,12 @@ dependencies = [ "futures-io", "futures-util", "libc", - "once_cell", + "once_cell 1.4.0", "scoped-tls", "slab", "socket2", "wepoll-sys-stjepang", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -7529,7 +7875,7 @@ dependencies = [ "ring", "rustc_version", "sha2 0.9.1", - "subtle 2.2.2", + "subtle 2.2.3", "x25519-dalek", ] @@ -7542,7 +7888,7 @@ dependencies = [ "cfg-if", "libc", "redox_syscall", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -7552,7 +7898,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85457366ae0c6ce56bf05a958aef14cd38513c236568618edbcd9a8c52cb80b0" dependencies = [ "base64 0.12.3", - "bytes 0.5.4", + "bytes 0.5.6", "flate2", "futures 0.3.5", "httparse", @@ -7602,7 +7948,7 @@ dependencies = [ name = "sp-api-test" version = "2.0.0-rc5" dependencies = [ - "criterion 0.3.1", + "criterion 0.3.3", "parity-scale-codec", "rustversion", "sc-block-builder", @@ -7643,7 +7989,7 @@ dependencies = [ name = "sp-arithmetic" version = "2.0.0-rc5" dependencies = [ - "criterion 0.3.1", + "criterion 0.3.3", "integer-sqrt", "num-traits", "parity-scale-codec", @@ -7729,7 +8075,7 @@ dependencies = [ "derive_more", "futures 0.3.5", "futures-timer 3.0.2", - "libp2p", + "libp2p 0.23.0", "log", "parity-scale-codec", "parking_lot 0.10.2", @@ -7815,7 +8161,7 @@ version = "2.0.0-rc5" dependencies = [ "base58", "blake2-rfc", - "byteorder", + "byteorder 1.3.4", "criterion 0.2.11", "derive_more", "dyn-clonable", @@ -7825,7 +8171,7 @@ dependencies = [ "hash256-std-hasher", "hex", "hex-literal", - "impl-serde 0.3.0", + "impl-serde 0.3.1", "lazy_static", "libsecp256k1", "log", @@ -7843,7 +8189,7 @@ dependencies = [ "secrecy", "serde", "serde_json", - "sha2 0.8.1", + "sha2 0.8.2", "sp-debug-derive", "sp-externalities", "sp-runtime-interface", @@ -8292,9 +8638,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "stable_deref_trait" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dba1a27d3efae4351c8051072d619e3ade2820635c3958d826bfea39d59b54c8" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "static_assertions" @@ -8346,9 +8692,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] name = "structopt" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff6da2e8d107dfd7b74df5ef4d205c6aebee0706c647f6bc6a2d5789905c00fb" +checksum = "de2f5e239ee807089b62adce73e48c625e0ed80df02c7ab3f068f5db5281065c" dependencies = [ "clap", "lazy_static", @@ -8357,9 +8703,9 @@ dependencies = [ [[package]] name = "structopt-derive" -version = "0.4.6" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a489c87c08fbaf12e386665109dd13470dcc9c4583ea3e10dd2b4523e5ebd9ac" +checksum = "510413f9de616762a4fbeab62509bf15c729603b72d7cd71280fbca431b1c118" dependencies = [ "heck", "proc-macro-error", @@ -8393,30 +8739,13 @@ dependencies 
= [ name = "subkey" version = "2.0.0-rc5" dependencies = [ - "clap", - "derive_more", "frame-system", - "futures 0.1.29", - "hex", - "hex-literal", - "hyper 0.12.35", - "itertools 0.8.2", - "jsonrpc-core-client", - "libp2p", "node-primitives", "node-runtime", - "pallet-balances", - "pallet-grandpa", - "pallet-transaction-payment", - "parity-scale-codec", - "rand 0.7.3", - "rpassword", - "sc-rpc", - "serde_json", + "sc-cli", "sp-core", - "sp-runtime", - "substrate-bip39", - "tiny-bip39", + "structopt", + "substrate-frame-cli", ] [[package]] @@ -8428,7 +8757,7 @@ dependencies = [ "hmac", "pbkdf2", "schnorrkel", - "sha2 0.8.1", + "sha2 0.8.2", ] [[package]] @@ -8463,6 +8792,17 @@ dependencies = [ "platforms", ] +[[package]] +name = "substrate-frame-cli" +version = "2.0.0-rc5" +dependencies = [ + "frame-system", + "sc-cli", + "sp-core", + "sp-runtime", + "structopt", +] + [[package]] name = "substrate-frame-rpc-support" version = "2.0.0-rc5" @@ -8476,14 +8816,14 @@ dependencies = [ "sc-rpc-api", "serde", "sp-storage", - "tokio 0.2.21", + "tokio 0.2.22", ] [[package]] name = "substrate-frame-rpc-system" version = "2.0.0-rc5" dependencies = [ - "env_logger 0.7.1", + "env_logger", "frame-system-rpc-runtime-api", "futures 0.3.5", "jsonrpc-core", @@ -8511,10 +8851,10 @@ dependencies = [ "async-std", "derive_more", "futures-util", - "hyper 0.13.4", + "hyper 0.13.7", "log", "prometheus", - "tokio 0.2.21", + "tokio 0.2.22", ] [[package]] @@ -8627,7 +8967,7 @@ dependencies = [ "futures 0.3.5", "sc-service", "substrate-test-utils-derive", - "tokio 0.2.21", + "tokio 0.2.22", "trybuild", ] @@ -8646,7 +8986,7 @@ version = "0.1.0" dependencies = [ "sc-service", "substrate-test-utils", - "tokio 0.2.21", + "tokio 0.2.22", ] [[package]] @@ -8676,15 +9016,15 @@ checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" [[package]] name = "subtle" -version = "2.2.2" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c65d530b10ccaeac294f349038a597e435b18fb456aadd0840a623f83b9e941" +checksum = "502d53007c02d7605a05df1c1a73ee436952781653da5d0bf57ad608f66932c1" [[package]] name = "syn" -version = "1.0.33" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8d5d96e8cbb005d6959f119f773bfaebb5684296108fb32600c00cde305b2cd" +checksum = "fb7f4c519df8c117855e19dd8cc851e89eb746fe7a73f0157e0d95fdec5369b0" dependencies = [ "proc-macro2", "quote", @@ -8704,9 +9044,9 @@ dependencies = [ [[package]] name = "synstructure" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" +checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" dependencies = [ "proc-macro2", "quote", @@ -8737,7 +9077,7 @@ dependencies = [ "rand 0.7.3", "redox_syscall", "remove_dir_all", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -8773,18 +9113,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.15" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54b3d3d2ff68104100ab257bb6bb0cb26c901abe4bd4ba15961f3bf867924012" +checksum = "7dfdd070ccd8ccb78f4ad66bf1982dc37f620ef696c6b5028fe2ed83dd3d0d08" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.15" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ca972988113b7715266f91250ddb98070d033c62a011fa0fcc57434a649310dd" +checksum = "bd80fc12f73063ac132ac92aceea36734f04a1d93c1240c6944e23a3b8841793" dependencies = [ "proc-macro2", "quote", @@ -8802,22 +9142,21 @@ dependencies = [ [[package]] name = "threadpool" -version = "1.7.1" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2f0c90a5f3459330ac8bc0d2f879c693bb7a2f59689c1083fc4ef83834da865" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" dependencies = [ "num_cpus", ] [[package]] name = "time" -version = "0.1.42" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f" +checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" dependencies = [ "libc", - "redox_syscall", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -8828,11 +9167,11 @@ checksum = "b0165e045cc2ae1660270ca65e1676dbaab60feb0f91b10f7d0665e9b47e31f2" dependencies = [ "failure", "hmac", - "once_cell", + "once_cell 1.4.0", "pbkdf2", "rand 0.7.3", "rustc-hash", - "sha2 0.8.1", + "sha2 0.8.2", "unicode-normalization", ] @@ -8856,14 +9195,20 @@ dependencies = [ [[package]] name = "tinytemplate" -version = "1.0.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a3c6667d3e65eb1bc3aed6fd14011c6cbc3a0665218ab7f5daf040b9ec371a" +checksum = "6d3dc76004a03cec1c5932bca4cdc2e39aaa798e3f82363dd94f9adf6098c12f" dependencies = [ "serde", "serde_json", ] +[[package]] +name = "tinyvec" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53953d2d3a5ad81d9f844a32f14ebb121f50b650cd59d0ee2a07cf13c617efed" + [[package]] name = "tokio" version = "0.1.22" @@ -8890,11 +9235,11 @@ dependencies = [ [[package]] name = "tokio" -version = "0.2.21" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d099fa27b9702bed751524694adbe393e18b36b204da91eb1cbbbbb4a5ee2d58" +checksum = "5d34ca54d84bf2b5b4d7d31e901a8464f7b60ac145a284fba25ceb801f2ddccd" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "fnv", "futures-core", "iovec", @@ -8908,7 +9253,7 @@ dependencies = [ "signal-hook-registry", "slab", "tokio-macros", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -9037,7 +9382,7 @@ checksum = "228139ddd4fea3fa345a29233009635235833e52807af7ea6448ead03890d6a9" dependencies = [ "futures-core", "rustls", - "tokio 0.2.21", + "tokio 0.2.22", "webpki", ] @@ -9131,9 +9476,9 @@ dependencies = [ [[package]] name = "tokio-uds" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5076db410d6fdc6523df7595447629099a1fdc47b3d9f896220780fa48faf798" +checksum = "ab57a4ac4111c8c9dbcf70779f6fc8bc35ae4b2454809febac840ad19bd7e4e0" dependencies = [ "bytes 0.4.12", "futures 0.1.29", @@ -9153,12 +9498,12 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "futures-core", "futures-sink", "log", "pin-project-lite", - "tokio 0.2.21", + "tokio 0.2.22", ] [[package]] @@ -9183,6 +9528,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0aae59226cf195d8e74d4b34beae1859257efb4e5fed3f147d2dc2c7d372178" dependencies = [ "cfg-if", + "log", "tracing-attributes", "tracing-core", ] @@ -9277,7 
+9623,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39f1a9a9252d38c5337cf0c5392988821a5cf1b2103245016968f2ab41de9e38" dependencies = [ "hash-db", - "hashbrown 0.8.0", + "hashbrown 0.8.1", "log", "rustc-hex", "smallvec 1.4.1", @@ -9304,15 +9650,15 @@ dependencies = [ [[package]] name = "try-lock" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" +checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "trybuild" -version = "1.0.25" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "459186ab1afd6d93bd23c2269125f4f7694f8771fe0e64434b4bdc212b94034d" +checksum = "bbe777c4e2060f44d83892be1189f96200be8ed3d99569d5c2d5ee26e62c0ea9" dependencies = [ "dissimilar", "glob 0.3.0", @@ -9330,7 +9676,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712d261e83e727c8e2dbb75dacac67c36e35db36a958ee504f2164fc052434e1" dependencies = [ "block-cipher-trait", - "byteorder", + "byteorder 1.3.4", "opaque-debug 0.2.3", ] @@ -9351,11 +9697,11 @@ checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" [[package]] name = "uint" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e75a4cdd7b87b28840dba13c483b9a88ee6bbf16ba5c951ee1ecfcf723078e0d" +checksum = "173cd16430c206dc1a430af8a89a0e9c076cf15cb42b4aedb10e8cc8fee73681" dependencies = [ - "byteorder", + "byteorder 1.3.4", "crunchy", "rustc-hex", "static_assertions", @@ -9381,11 +9727,11 @@ dependencies = [ [[package]] name = "unicode-normalization" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5479532badd04e128284890390c1e876ef7a993d0570b3597ae43dfa1d59afa4" +checksum = "6fb19cf769fa8c6a80a162df694621ebeb4dafb606470b2b2fce0be40a98a977" dependencies = [ - "smallvec 1.4.1", + "tinyvec", ] [[package]] @@ -9396,15 +9742,15 @@ checksum = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" [[package]] name = "unicode-width" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caaa9d531767d1ff2150b9332433f32a24622147e5ebb1f26409d5da67afd479" +checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" [[package]] name = "unicode-xid" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" +checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" [[package]] name = "universal-hash" @@ -9413,7 +9759,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" dependencies = [ "generic-array 0.14.3", - "subtle 2.2.2", + "subtle 2.2.3", ] [[package]] @@ -9428,7 +9774,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "669d776983b692a906c881fcd0cfb34271a48e197e4d6cb8df32b05bfc3d3fa5" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "futures-io", "futures-util", "futures_codec", @@ -9436,9 +9782,9 @@ dependencies = [ [[package]] name = "untrusted" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"60369ef7a31de49bcb3f6ca728d4ba7300d9a1658f94c727d4cab8c8d9f4aece" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" @@ -9464,21 +9810,21 @@ dependencies = [ [[package]] name = "vcpkg" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fc439f2794e98976c88a2a2dafce96b930fe8010b0a256b3c2199a773933168" +checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" [[package]] name = "vec_map" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a" +checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "version_check" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "078775d0255232fb988e6fccf26ddc9d1ac274299aaedcedce21c6f72cc533ce" +checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" [[package]] name = "void" @@ -9500,9 +9846,9 @@ dependencies = [ [[package]] name = "wabt-sys" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d7043ebb3e5d96fad7a8d3ca22ee9880748ff8c3e18092cfb2a49d3b8f9084" +checksum = "01c695f98f7eb81fd4e2f6b65301ccc916a950dc2265eeefc4d376b34ce666df" dependencies = [ "cc", "cmake", @@ -9531,7 +9877,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d" dependencies = [ "same-file", - "winapi 0.3.8", + "winapi 0.3.9", "winapi-util", ] @@ -9591,9 +9937,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.10" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7add542ea1ac7fdaa9dc25e031a6af33b7d63376292bd24140c637d00d1c312a" +checksum = "8a369c5e1dfb7569e14d62af4da642a3cbc2f9a3652fe586e26ac22222aa4b04" dependencies = [ "cfg-if", "js-sys", @@ -9632,9 +9978,9 @@ checksum = "93b162580e34310e5931c4b792560108b10fd14d64915d7fff8ff00180e70092" [[package]] name = "wasm-bindgen-test" -version = "0.3.10" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "648da3460c6d2aa04b715a936329e2e311180efe650b2127d6267f4193ccac14" +checksum = "fd8e9dad8040e378f0696b017570c6bc929aac373180e06b3d67ac5059c52da3" dependencies = [ "console_error_panic_hook", "js-sys", @@ -9646,9 +9992,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.10" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf2f86cd78a2aa7b1fb4bb6ed854eccb7f9263089c79542dca1576a1518a8467" +checksum = "c358c8d2507c1bae25efa069e62ea907aa28700b25c8c33dafb0b15ba4603627" dependencies = [ "proc-macro2", "quote", @@ -9739,7 +10085,7 @@ dependencies = [ "wasmtime-profiling", "wasmtime-runtime", "wat", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -9781,11 +10127,11 @@ dependencies = [ "more-asserts", "rayon", "serde", - "sha2 0.8.1", + "sha2 0.8.2", "thiserror", "toml", "wasmparser 0.59.0", - "winapi 0.3.8", + "winapi 0.3.9", "zstd", ] @@ -9815,7 +10161,7 @@ dependencies = [ "wasmtime-obj", "wasmtime-profiling", "wasmtime-runtime", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -9869,7 +10215,7 @@ dependencies = [ "region", "thiserror", "wasmtime-environ", - "winapi 0.3.8", + "winapi 0.3.9", ] 
[[package]] @@ -9892,9 +10238,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.37" +version = "0.3.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d6f51648d8c56c366144378a33290049eafdd784071077f6fe37dae64c1c4cb" +checksum = "8bc359e5dd3b46cb9687a051d50a2fdd228e4ba7cf6fcf861a5365c3d671a642" dependencies = [ "js-sys", "wasm-bindgen", @@ -9902,9 +10248,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.21.2" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1f50e1972865d6b1adb54167d1c8ed48606004c2c9d0ea5f1eeb34d95e863ef" +checksum = "ab146130f5f790d45f82aeeb09e55a256573373ec64409fc19a6fb82fb1032ae" dependencies = [ "ring", "untrusted", @@ -9954,9 +10300,9 @@ checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" [[package]] name = "winapi" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", @@ -9976,11 +10322,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa515c5163a99cc82bab70fd3bfdd36d827be85de63737b40fcef2ce084a436e" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -9995,7 +10341,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c51a2c47b5798ccc774ffb93ff536aec7c4275d722fd9c740c83cdd1af1f2d94" dependencies = [ - "byteorder", + "byteorder 1.3.4", "bytes 0.4.12", "httparse", "log", @@ -10065,18 +10411,18 @@ dependencies = [ [[package]] name = "zstd" -version = "0.5.1+zstd.1.4.4" +version = "0.5.3+zstd.1.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5d978b793ae64375b80baf652919b148f6a496ac8802922d9999f5a553194f" +checksum = "01b32eaf771efa709e8308605bbf9319bf485dc1503179ec0469b611937c0cd8" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "2.0.3+zstd.1.4.4" +version = "2.0.5+zstd.1.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bee25eac9753cfedd48133fa1736cbd23b774e253d89badbeac7d12b23848d3f" +checksum = "1cfb642e0d27f64729a639c52db457e0ae906e7bc6f5fe8f5c453230400f1055" dependencies = [ "libc", "zstd-sys", @@ -10084,11 +10430,12 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "1.4.15+zstd.1.4.4" +version = "1.4.17+zstd.1.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89719b034dc22d240d5b407fb0a3fe6d29952c181cff9a9f95c0bd40b4f8f7d8" +checksum = "b89249644df056b522696b1bb9e7c18c87e8ffa3e2f0dc3b0155875d6498f01b" dependencies = [ "cc", "glob 0.3.0", + "itertools 0.9.0", "libc", ] diff --git a/Cargo.toml b/Cargo.toml index f22e3427a70..7589e8d7741 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -180,6 +180,7 @@ members = [ "utils/build-script-utils", "utils/fork-tree", "utils/frame/benchmarking-cli", + "utils/frame/frame-utilities-cli", "utils/frame/rpc/support", "utils/frame/rpc/system", "utils/wasm-builder", diff --git a/bin/node-template/node/src/cli.rs b/bin/node-template/node/src/cli.rs index 
0091ef7d759..46ab9bc3daf 100644 --- a/bin/node-template/node/src/cli.rs +++ b/bin/node-template/node/src/cli.rs @@ -1,5 +1,5 @@ -use sc_cli::{RunCmd, Subcommand}; use structopt::StructOpt; +use sc_cli::{RunCmd, Subcommand}; #[derive(Debug, StructOpt)] pub struct Cli { diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index b3f1cfaf11f..9cd2248d654 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -66,8 +66,8 @@ impl SubstrateCli for Cli { pub fn run() -> sc_cli::Result<()> { let cli = Cli::from_args(); - match &cli.subcommand { - Some(subcommand) => { + match cli.subcommand { + Some(ref subcommand) => { let runner = cli.create_runner(subcommand)?; runner.run_subcommand(subcommand, |config| { let PartialComponents { client, backend, task_manager, import_queue, .. } diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 16ab9bbe806..d8ed12f296b 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -131,6 +131,7 @@ structopt = { version = "0.3.8", optional = true } node-inspect = { version = "0.8.0-rc5", optional = true, path = "../inspect" } frame-benchmarking-cli = { version = "2.0.0-rc5", optional = true, path = "../../../utils/frame/benchmarking-cli" } substrate-build-script-utils = { version = "2.0.0-rc5", optional = true, path = "../../../utils/build-script-utils" } +substrate-frame-cli = { version = "2.0.0-rc5", optional = true, path = "../../../utils/frame/frame-utilities-cli" } [build-dependencies.sc-cli] version = "0.8.0-rc5" @@ -150,6 +151,7 @@ cli = [ "node-inspect", "sc-cli", "frame-benchmarking-cli", + "substrate-frame-cli", "sc-service/db", "structopt", "substrate-build-script-utils", diff --git a/bin/node/cli/src/cli.rs b/bin/node/cli/src/cli.rs index 29e916fe018..42a13fcb390 100644 --- a/bin/node/cli/src/cli.rs +++ b/bin/node/cli/src/cli.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use sc_cli::RunCmd; +use sc_cli::{RunCmd, KeySubcommand, SignCmd, VanityCmd, VerifyCmd}; use structopt::StructOpt; /// An overarching CLI command definition. @@ -37,6 +37,9 @@ pub enum Subcommand { #[structopt(flatten)] Base(sc_cli::Subcommand), + /// Key management cli utilities + Key(KeySubcommand), + /// The custom inspect subcommmand for decoding blocks and extrinsics. #[structopt( name = "inspect", @@ -47,4 +50,13 @@ pub enum Subcommand { /// The custom benchmark subcommmand benchmarking runtime pallets. #[structopt(name = "benchmark", about = "Benchmark runtime pallets.")] Benchmark(frame_benchmarking_cli::BenchmarkCmd), + + /// Verify a signature for a message, provided on STDIN, with a given (public or secret) key. + Verify(VerifyCmd), + + /// Generate a seed that provides a vanity address. + Vanity(VanityCmd), + + /// Sign a message, with a given (secret) key. 
+ Sign(SignCmd), } diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index 69d9a029865..10e9413702b 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -93,6 +93,10 @@ pub fn run() -> Result<()> { Ok(()) } } + Some(Subcommand::Key(cmd)) => cmd.run(), + Some(Subcommand::Sign(cmd)) => cmd.run(), + Some(Subcommand::Verify(cmd)) => cmd.run(), + Some(Subcommand::Vanity(cmd)) => cmd.run(), Some(Subcommand::Base(subcommand)) => { let runner = cli.create_runner(subcommand)?; runner.run_subcommand(subcommand, |config| { diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 9d19f20c5e1..9595ef424d8 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -83,6 +83,7 @@ use impls::{CurrencyToVoteHandler, Author}; /// Constant values used within the runtime. pub mod constants; use constants::{time::*, currency::*}; +use sp_runtime::generic::Era; /// Weights for pallets used in the runtime. mod weights; @@ -654,9 +655,9 @@ parameter_types! { pub const StakingUnsignedPriority: TransactionPriority = TransactionPriority::max_value() / 2; } - -impl frame_system::offchain::CreateSignedTransaction for Runtime where - Call: From, +impl frame_system::offchain::CreateSignedTransaction for Runtime + where + Call: From, { fn create_transaction>( call: Call, @@ -664,6 +665,7 @@ impl frame_system::offchain::CreateSignedTransaction for R account: AccountId, nonce: Index, ) -> Option<(Call, ::SignaturePayload)> { + let tip = 0; // take the biggest period possible. let period = BlockHashCount::get() .checked_next_power_of_two() @@ -674,22 +676,25 @@ impl frame_system::offchain::CreateSignedTransaction for R // The `System::block_number` is initialized with `n+1`, // so the actual block number is `n`. 
.saturating_sub(1); - let tip = 0; - let extra: SignedExtra = ( + let era = Era::mortal(period, current_block); + let extra = ( frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(generic::Era::mortal(period, current_block)), + frame_system::CheckEra::::from(era), frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), ); - let raw_payload = SignedPayload::new(call, extra).map_err(|e| { - debug::warn!("Unable to create signed payload: {:?}", e); - }).ok()?; - let signature = raw_payload.using_encoded(|payload| { - C::sign(payload, public) - })?; + let raw_payload = SignedPayload::new(call, extra) + .map_err(|e| { + debug::warn!("Unable to create signed payload: {:?}", e); + }) + .ok()?; + let signature = raw_payload + .using_encoded(|payload| { + C::sign(payload, public) + })?; let address = Indices::unlookup(account); let (call, extra, _) = raw_payload.deconstruct(); Some((call, (address, signature.into(), extra))) diff --git a/bin/utils/subkey/Cargo.toml b/bin/utils/subkey/Cargo.toml index 459df884d68..1b0288faeed 100644 --- a/bin/utils/subkey/Cargo.toml +++ b/bin/utils/subkey/Cargo.toml @@ -7,34 +7,21 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +[[bin]] +path = "src/main.rs" +name = "subkey" + [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = "0.1.29" -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } node-runtime = { version = "2.0.0-rc5", path = "../../node/runtime" } node-primitives = { version = "2.0.0-rc5", path = "../../node/primitives" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -rand = "0.7.2" -clap = "2.33.0" -tiny-bip39 = "0.7" -substrate-bip39 = "0.4.1" -hex = "0.4.0" -hex-literal = "0.2.1" -codec = { package = "parity-scale-codec", version = "1.3.4" } +sc-cli = { version = "0.8.0-rc5", path = "../../../client/cli" } +substrate-frame-cli = { version = "2.0.0-rc5", path = "../../../utils/frame/frame-utilities-cli" } +structopt = "0.3.14" frame-system = { version = "2.0.0-rc5", path = "../../../frame/system" } -pallet-balances = { version = "2.0.0-rc5", path = "../../../frame/balances" } -pallet-transaction-payment = { version = "2.0.0-rc5", path = "../../../frame/transaction-payment" } -pallet-grandpa = { version = "2.0.0-rc5", path = "../../../frame/grandpa" } -rpassword = "4.0.1" -itertools = "0.8.2" -derive_more = { version = "0.99.2" } -sc-rpc = { version = "2.0.0-rc5", path = "../../../client/rpc" } -jsonrpc-core-client = { version = "14.2.0", features = ["http"] } -hyper = "0.12.35" -libp2p = { version = "0.23.0", default-features = false } -serde_json = "1.0" +sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } [features] bench = [] diff --git a/bin/utils/subkey/README.adoc b/bin/utils/subkey/README.adoc index 1fa0753312f..5ce0d2d3244 100644 --- a/bin/utils/subkey/README.adoc +++ b/bin/utils/subkey/README.adoc @@ -31,7 +31,7 @@ OUTPUT: `subkey` expects a message to come in on STDIN, one way to sign a message would look like this: ```bash -echo -n | subkey sign +echo -n | subkey sign --suri OUTPUT: a69da4a6ccbf81dbbbfad235fa12cf8528c18012b991ae89214de8d20d29c1280576ced6eb38b7406d1b7e03231df6dd4a5257546ddad13259356e1c3adfb509 @@ -72,7 +72,7 @@ Will output a signed and 
encoded `UncheckedMortalCompactExtrinsic` as hex. === Inspecting a module ID ```bash -subkey --network kusama moduleid "py/trsry" +subkey module-id "py/trsry" --network kusama OUTPUT: Public Key URI `F3opxRbN5ZbjJNU511Kj2TLuzFcDq9BGduA9TgiECafpg29` is account: diff --git a/bin/utils/subkey/src/lib.rs b/bin/utils/subkey/src/lib.rs new file mode 100644 index 00000000000..2e4c7a350fe --- /dev/null +++ b/bin/utils/subkey/src/lib.rs @@ -0,0 +1,81 @@ +// This file is part of Substrate. + +// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use structopt::StructOpt; +use sc_cli::{ + Error, VanityCmd, SignCmd, VerifyCmd, InsertCmd, + GenerateNodeKeyCmd, GenerateCmd, InspectCmd, InspectNodeKeyCmd +}; +use substrate_frame_cli::ModuleIdCmd; +use sp_core::crypto::Ss58Codec; + +#[derive(Debug, StructOpt)] +#[structopt( + name = "subkey", + author = "Parity Team ", + about = "Utility for generating and restoring with Substrate keys", +)] +pub enum Subkey { + /// Generate a random node libp2p key, save it to file and print its peer ID + GenerateNodeKey(GenerateNodeKeyCmd), + + /// Generate a random account + Generate(GenerateCmd), + + /// Gets a public key and a SS58 address from the provided Secret URI + InspectKey(InspectCmd), + + /// Print the peer ID corresponding to the node key in the given file + InspectNodeKey(InspectNodeKeyCmd), + + /// Insert a key to the keystore of a node. + Insert(InsertCmd), + + /// Inspect a module ID address + ModuleId(ModuleIdCmd), + + /// Sign a message, with a given (secret) key. + Sign(SignCmd), + + /// Generate a seed that provides a vanity address. + Vanity(VanityCmd), + + /// Verify a signature for a message, provided on STDIN, with a given (public or secret) key. + Verify(VerifyCmd), +} + +/// Run the subkey command, given the apropriate runtime. +pub fn run() -> Result<(), Error> + where + R: frame_system::Trait, + R::AccountId: Ss58Codec +{ + match Subkey::from_args() { + Subkey::GenerateNodeKey(cmd) => cmd.run()?, + Subkey::Generate(cmd) => cmd.run()?, + Subkey::InspectKey(cmd) => cmd.run()?, + Subkey::InspectNodeKey(cmd) => cmd.run()?, + Subkey::Insert(cmd) => cmd.run()?, + Subkey::ModuleId(cmd) => cmd.run::()?, + Subkey::Vanity(cmd) => cmd.run()?, + Subkey::Verify(cmd) => cmd.run()?, + Subkey::Sign(cmd) => cmd.run()?, + }; + + Ok(()) +} diff --git a/bin/utils/subkey/src/main.rs b/bin/utils/subkey/src/main.rs index 9455e08175a..dd14425130b 100644 --- a/bin/utils/subkey/src/main.rs +++ b/bin/utils/subkey/src/main.rs @@ -16,814 +16,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![cfg_attr(feature = "bench", feature(test))] -#[cfg(feature = "bench")] -extern crate test; +//! Subkey utility, based on node_runtime. 
-use bip39::{Language, Mnemonic, MnemonicType}; -use clap::{App, ArgMatches, SubCommand}; -use codec::{Decode, Encode}; -use hex_literal::hex; -use itertools::Itertools; -use libp2p::identity::{ed25519 as libp2p_ed25519, PublicKey}; -use node_primitives::{Balance, Hash, Index, AccountId, Signature}; -use node_runtime::{BalancesCall, Call, Runtime, SignedPayload, UncheckedExtrinsic, VERSION}; -use serde_json::json; -use sp_core::{ - crypto::{set_default_ss58_version, Ss58AddressFormat, Ss58Codec}, - ed25519, sr25519, ecdsa, Pair, Public, H256, hexdisplay::HexDisplay, -}; -use sp_runtime::{traits::{AccountIdConversion, IdentifyAccount, Verify}, generic::Era, ModuleId}; -use std::{ - convert::{TryInto, TryFrom}, io::{stdin, Read}, str::FromStr, path::PathBuf, fs, fmt, -}; +use node_runtime::Runtime; -mod rpc; -mod vanity; - -enum OutputType { - Json, - Text, -} - -impl<'a> TryFrom<&'a str> for OutputType { - type Error = (); - - fn try_from(s: &'a str) -> Result { - match s { - "json" => Ok(OutputType::Json), - "text" => Ok(OutputType::Text), - _ => Err(()), - } - } - -} - -trait Crypto: Sized { - type Pair: Pair; - type Public: Public + Ss58Codec + AsRef<[u8]> + std::hash::Hash; - fn pair_from_suri(suri: &str, password: Option<&str>) -> Self::Pair { - Self::Pair::from_string(suri, password).expect("Invalid phrase") - } - fn ss58_from_pair(pair: &Self::Pair) -> String where - ::Public: PublicT, - { - pair.public().into_runtime().into_account().to_ss58check() - } - fn public_from_pair(pair: &Self::Pair) -> Self::Public { - pair.public() - } - fn print_from_uri( - uri: &str, - password: Option<&str>, - network_override: Option, - output: OutputType, - ) where - ::Public: PublicT, - { - let v = network_override.unwrap_or_default(); - if let Ok((pair, seed)) = Self::Pair::from_phrase(uri, password) { - let public_key = Self::public_from_pair(&pair); - - match output { - OutputType::Json => { - let json = json!({ - "secretPhrase": uri, - "networkId": String::from(v), - "secretSeed": format_seed::(seed), - "publicKey": format_public_key::(public_key.clone()), - "accountId": format_account_id::(public_key), - "ss58Address": Self::ss58_from_pair(&pair), - }); - println!("{}", serde_json::to_string_pretty(&json).expect("Json pretty print failed")); - }, - OutputType::Text => { - println!("Secret phrase `{}` is account:\n \ - Network ID/version: {}\n \ - Secret seed: {}\n \ - Public key (hex): {}\n \ - Account ID: {}\n \ - SS58 Address: {}", - uri, - String::from(v), - format_seed::(seed), - format_public_key::(public_key.clone()), - format_account_id::(public_key), - Self::ss58_from_pair(&pair), - ); - }, - } - } else if let Ok((pair, seed)) = Self::Pair::from_string_with_seed(uri, password) { - let public_key = Self::public_from_pair(&pair); - - match output { - OutputType::Json => { - let json = json!({ - "secretKeyUri": uri, - "networkId": String::from(v), - "secretSeed": if let Some(seed) = seed { format_seed::(seed) } else { "n/a".into() }, - "publicKey": format_public_key::(public_key.clone()), - "accountId": format_account_id::(public_key), - "ss58Address": Self::ss58_from_pair(&pair), - }); - println!("{}", serde_json::to_string_pretty(&json).expect("Json pretty print failed")); - }, - OutputType::Text => { - println!("Secret Key URI `{}` is account:\n \ - Network ID/version: {}\n \ - Secret seed: {}\n \ - Public key (hex): {}\n \ - Account ID: {}\n \ - SS58 Address: {}", - uri, - String::from(v), - if let Some(seed) = seed { format_seed::(seed) } else { "n/a".into() }, - 
format_public_key::(public_key.clone()), - format_account_id::(public_key), - Self::ss58_from_pair(&pair), - ); - }, - } - } else if let Ok((public_key, v)) = - ::Public::from_string_with_version(uri) - { - let v = network_override.unwrap_or(v); - - match output { - OutputType::Json => { - let json = json!({ - "publicKeyUri": uri, - "networkId": String::from(v), - "publicKey": format_public_key::(public_key.clone()), - "accountId": format_account_id::(public_key.clone()), - "ss58Address": public_key.to_ss58check_with_version(v), - }); - println!("{}", serde_json::to_string_pretty(&json).expect("Json pretty print failed")); - }, - OutputType::Text => { - println!("Public Key URI `{}` is account:\n \ - Network ID/version: {}\n \ - Public key (hex): {}\n \ - Account ID: {}\n \ - SS58 Address: {}", - uri, - String::from(v), - format_public_key::(public_key.clone()), - format_account_id::(public_key.clone()), - public_key.to_ss58check_with_version(v), - ); - }, - } - } else { - eprintln!("Invalid phrase/URI given"); - } - } -} - -struct Ed25519; - -impl Crypto for Ed25519 { - type Pair = ed25519::Pair; - type Public = ed25519::Public; - - fn pair_from_suri(suri: &str, password_override: Option<&str>) -> Self::Pair { - ed25519::Pair::from_legacy_string(suri, password_override) - } -} - -struct Sr25519; - -impl Crypto for Sr25519 { - type Pair = sr25519::Pair; - type Public = sr25519::Public; -} - -struct Ecdsa; - -impl Crypto for Ecdsa { - type Pair = ecdsa::Pair; - type Public = ecdsa::Public; -} - -type SignatureOf = <::Pair as Pair>::Signature; -type PublicOf = <::Pair as Pair>::Public; -type SeedOf = <::Pair as Pair>::Seed; -type AccountPublic = ::Signer; - -trait SignatureT: AsRef<[u8]> + AsMut<[u8]> + Default { - /// Converts the signature into a runtime account signature, if possible. If not possible, bombs out. - fn into_runtime(self) -> Signature { - panic!("This cryptography isn't supported for this runtime.") - } -} -trait PublicT: Sized + AsRef<[u8]> + Ss58Codec { - /// Converts the public key into a runtime account public key, if possible. If not possible, bombs out. - fn into_runtime(self) -> AccountPublic { - panic!("This cryptography isn't supported for this runtime.") - } -} - -impl SignatureT for sr25519::Signature { fn into_runtime(self) -> Signature { self.into() } } -impl SignatureT for ed25519::Signature { fn into_runtime(self) -> Signature { self.into() } } -impl SignatureT for ecdsa::Signature { fn into_runtime(self) -> Signature { self.into() } } -impl PublicT for sr25519::Public { fn into_runtime(self) -> AccountPublic { self.into() } } -impl PublicT for ed25519::Public { fn into_runtime(self) -> AccountPublic { self.into() } } -impl PublicT for ecdsa::Public { fn into_runtime(self) -> AccountPublic { self.into() } } - -fn get_usage() -> String { - let networks = Ss58AddressFormat::all().iter().cloned().map(String::from).join("/"); - let default_network = String::from(Ss58AddressFormat::default()); - format!(" - -e, --ed25519 'Use Ed25519/BIP39 cryptography' - -k, --secp256k1 'Use SECP256k1/ECDSA/BIP39 cryptography' - -s, --sr25519 'Use Schnorr/Ristretto x25519/BIP39 cryptography' - [network] -n, --network 'Specify a network. One of {}. Default is {}' - [password] -p, --password 'The password for the key' - --password-interactive 'You will be prompted for the password for the key.' - [output] -o, --output 'Specify an output format. One of text, json. Default is text.' 
- ", networks, default_network) -} - -fn get_app<'a, 'b>(usage: &'a str) -> App<'a, 'b> { - App::new("subkey") - .author("Parity Team ") - .about("Utility for generating and restoring with Substrate keys") - .version(env!("CARGO_PKG_VERSION")) - .args_from_usage(usage) - .subcommands(vec![ - SubCommand::with_name("generate") - .about("Generate a random account") - .args_from_usage("[words] -w, --words \ - 'The number of words in the phrase to generate. One of 12 \ - (default), 15, 18, 21 and 24.' - "), - SubCommand::with_name("generate-node-key") - .about("Generate a random node libp2p key, save it to file and print its peer ID") - .args_from_usage("[file] 'Name of file to save secret key to'"), - SubCommand::with_name("inspect") - .about("Gets a public key and a SS58 address from the provided Secret URI") - .args_from_usage("[uri] 'A Key URI to be inspected. May be a secret seed, \ - secret URI (with derivation paths and password), SS58 or public URI. \ - If the value is a file, the file content is used as URI. \ - If not given, you will be prompted for the URI.' - "), - SubCommand::with_name("inspect-node-key") - .about("Print the peer ID corresponding to the node key in the given file") - .args_from_usage("[file] 'Name of file to read the secret key from'"), - SubCommand::with_name("sign") - .about("Sign a message, provided on STDIN, with a given (secret) key") - .args_from_usage(" - -h, --hex 'The message on STDIN is hex-encoded data' - 'The secret key URI. \ - If the value is a file, the file content is used as URI. \ - If not given, you will be prompted for the URI.' - "), - SubCommand::with_name("sign-transaction") - .about("Sign transaction from encoded Call. Returns a signed and encoded \ - UncheckedMortalCompactExtrinsic as hex.") - .args_from_usage(" - -c, --call 'The call, hex-encoded.' - -n, --nonce 'The nonce.' - -p, --password 'The password for the key.' - -h, --prior-block-hash 'The prior block hash, hex-encoded.' - -s, --suri 'The secret key URI.' - "), - SubCommand::with_name("transfer") - .about("Author and sign a Node pallet_balances::Transfer transaction with a given (secret) key") - .args_from_usage(" - -g, --genesis 'The genesis hash or a recognized \ - chain identifier (dev, elm, alex).' - 'The signing secret key URI.' - 'The destination account public key URI.' - 'The number of units to transfer.' - 'The signing account's transaction index.' - "), - SubCommand::with_name("vanity") - .about("Generate a seed that provides a vanity address") - .args_from_usage(" - -n, --number 'Number of keys to generate' - 'Desired pattern' - "), - SubCommand::with_name("verify") - .about("Verify a signature for a message, provided on STDIN, with a given \ - (public or secret) key") - .args_from_usage(" - -h, --hex 'The message on STDIN is hex-encoded data' - 'Signature, hex-encoded.' - 'The public or secret key URI. \ - If the value is a file, the file content is used as URI. \ - If not given, you will be prompted for the URI.' - "), - SubCommand::with_name("insert") - .about("Insert a key to the keystore of a node") - .args_from_usage(" - 'The secret key URI. \ - If the value is a file, the file content is used as URI. \ - If not given, you will be prompted for the URI.' 
- 'Key type, examples: \"gran\", or \"imon\" ' - [node-url] 'Node JSON-RPC endpoint, default \"http:://localhost:9933\"' - "), - SubCommand::with_name("moduleid") - .about("Inspect a module ID address") - .args_from_usage(" - 'The module ID used to derive the account' - ") - ]) -} - -fn main() -> Result<(), Error> { - let usage = get_usage(); - let matches = get_app(&usage).get_matches(); - - if matches.is_present("ed25519") { - return execute::(matches); - } - if matches.is_present("secp256k1") { - return execute::(matches) - } - return execute::(matches) -} - -/// Get `URI` from CLI or prompt the user. -/// -/// `URI` is extracted from `matches` by using `match_name`. -/// -/// If the `URI` given as CLI argument is a file, the file content is taken as `URI`. -/// If no `URI` is given to the CLI, the user is prompted for it. -fn get_uri(match_name: &str, matches: &ArgMatches) -> Result { - let uri = if let Some(uri) = matches.value_of(match_name) { - let file = PathBuf::from(uri); - if file.is_file() { - fs::read_to_string(uri)? - .trim_end() - .into() - } else { - uri.into() - } - } else { - rpassword::read_password_from_tty(Some("URI: "))? - }; - - Ok(uri) -} - -#[derive(derive_more::Display, derive_more::From)] -enum Error { - Static(&'static str), - Io(std::io::Error), - Formatted(String), -} - -impl fmt::Debug for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self, f) - } -} - -fn static_err(msg: &'static str) -> Result<(), Error> { - Err(Error::Static(msg)) -} - -fn execute(matches: ArgMatches) -> Result<(), Error> -where - SignatureOf: SignatureT, - PublicOf: PublicT, -{ - let password_interactive = matches.is_present("password-interactive"); - let password = matches.value_of("password"); - - let password = if password.is_some() && password_interactive { - return static_err("`--password` given and `--password-interactive` selected!"); - } else if password_interactive { - Some( - rpassword::read_password_from_tty(Some("Key password: "))? - ) - } else { - password.map(Into::into) - }; - let password = password.as_ref().map(String::as_str); - - let maybe_network: Option = match matches.value_of("network").map(|network| { - network - .try_into() - .map_err(|_| Error::Static("Invalid network name. See --help for available networks.")) - }) { - Some(Err(e)) => return Err(e), - Some(Ok(v)) => Some(v), - None => None, - }; - - if let Some(network) = maybe_network { - set_default_ss58_version(network); - } - - let output: OutputType = match matches.value_of("output").map(TryInto::try_into) { - Some(Err(_)) => return Err(Error::Static("Invalid output name. 
See --help for available outputs.")), - Some(Ok(v)) => v, - None => OutputType::Text, - }; - - match matches.subcommand() { - ("generate", Some(matches)) => { - let mnemonic = generate_mnemonic(matches)?; - C::print_from_uri(mnemonic.phrase(), password, maybe_network, output); - } - ("generate-node-key", Some(matches)) => { - let file = matches.value_of("file").ok_or(Error::Static("Output file name is required"))?; - - let keypair = libp2p_ed25519::Keypair::generate(); - let secret = keypair.secret(); - let peer_id = PublicKey::Ed25519(keypair.public()).into_peer_id(); - - fs::write(file, secret.as_ref())?; - - println!("{}", peer_id); - } - ("inspect", Some(matches)) => { - C::print_from_uri(&get_uri("uri", &matches)?, password, maybe_network, output); - } - ("inspect-node-key", Some(matches)) => { - let file = matches.value_of("file").ok_or(Error::Static("Input file name is required"))?; - - let mut file_content = fs::read(file)?; - let secret = libp2p_ed25519::SecretKey::from_bytes(&mut file_content) - .map_err(|_| Error::Static("Bad node key file"))?; - let keypair = libp2p_ed25519::Keypair::from(secret); - let peer_id = PublicKey::Ed25519(keypair.public()).into_peer_id(); - - println!("{}", peer_id); - } - ("sign", Some(matches)) => { - let suri = get_uri("suri", &matches)?; - let should_decode = matches.is_present("hex"); - - let message = read_message_from_stdin(should_decode)?; - let signature = do_sign::(&suri, message, password)?; - println!("{}", signature); - } - ("verify", Some(matches)) => { - let uri = get_uri("uri", &matches)?; - let should_decode = matches.is_present("hex"); - - let message = read_message_from_stdin(should_decode)?; - let is_valid_signature = do_verify::(matches, &uri, message)?; - if is_valid_signature { - println!("Signature verifies correctly."); - } else { - return static_err("Signature invalid."); - } - } - ("vanity", Some(matches)) => { - let desired: String = matches - .value_of("pattern") - .map(str::to_string) - .unwrap_or_default(); - let result = vanity::generate_key::(&desired)?; - let formated_seed = format_seed::(result.seed); - C::print_from_uri(&formated_seed, None, maybe_network, output); - } - ("transfer", Some(matches)) => { - let signer = read_pair::(matches.value_of("from"), password)?; - let index = read_required_parameter::(matches, "index")?; - let genesis_hash = read_genesis_hash(matches)?; - - let to: AccountId = read_account_id(matches.value_of("to")); - let amount = read_required_parameter::(matches, "amount")?; - let function = Call::Balances(BalancesCall::transfer(to.into(), amount)); - - let extrinsic = create_extrinsic::(function, index, signer, genesis_hash); - - print_extrinsic(extrinsic); - } - ("sign-transaction", Some(matches)) => { - let signer = read_pair::(matches.value_of("suri"), password)?; - let index = read_required_parameter::(matches, "nonce")?; - let genesis_hash = read_genesis_hash(matches)?; - - let call = matches.value_of("call").expect("call is required; qed"); - let function: Call = hex::decode(&call) - .ok() - .and_then(|x| Decode::decode(&mut &x[..]).ok()) - .unwrap(); - - let extrinsic = create_extrinsic::(function, index, signer, genesis_hash); - - print_extrinsic(extrinsic); - } - ("insert", Some(matches)) => { - let suri = get_uri("suri", &matches)?; - let pair = read_pair::(Some(&suri), password)?; - let node_url = matches.value_of("node-url").unwrap_or("http://localhost:9933"); - let key_type = matches.value_of("key-type").ok_or(Error::Static("Key type id is required"))?; - - // Just checking - 
let _key_type_id = sp_core::crypto::KeyTypeId::try_from(key_type) - .map_err(|_| Error::Static("Cannot convert argument to keytype: argument should be 4-character string"))?; - - let rpc = rpc::RpcClient::new(node_url.to_string()); - - rpc.insert_key( - key_type.to_string(), - suri, - sp_core::Bytes(pair.public().as_ref().to_vec()), - ); - } - ("moduleid", Some(matches)) => { - let id = get_uri("id", &matches)?; - if id.len() != 8 { - Err("a module id must be a string of 8 characters")? - } - - let id_fixed_array: [u8; 8] = id.as_bytes().try_into() - .map_err(|_| Error::Static("Cannot convert argument to moduleid: argument should be 8-character string"))?; - - let account_id: AccountId = ModuleId(id_fixed_array).into_account(); - let v = maybe_network.unwrap_or(Ss58AddressFormat::SubstrateAccount); - - C::print_from_uri(&account_id.to_ss58check_with_version(v), password, maybe_network, output); - } - _ => print_usage(&matches), - } - - Ok(()) -} - -/// Creates a new randomly generated mnemonic phrase. -fn generate_mnemonic(matches: &ArgMatches) -> Result { - let words = match matches.value_of("words") { - Some(words) => { - let num = usize::from_str(words).map_err(|_| Error::Static("Invalid number given for --words"))?; - MnemonicType::for_word_count(num) - .map_err(|_| Error::Static("Invalid number of words given for phrase: must be 12/15/18/21/24"))? - }, - None => MnemonicType::Words12, - }; - Ok(Mnemonic::new(words, Language::English)) -} - -fn do_sign(suri: &str, message: Vec, password: Option<&str>) -> Result -where - SignatureOf: SignatureT, - PublicOf: PublicT, -{ - let pair = read_pair::(Some(suri), password)?; - let signature = pair.sign(&message); - Ok(format_signature::(&signature)) -} - -fn do_verify(matches: &ArgMatches, uri: &str, message: Vec) -> Result -where - SignatureOf: SignatureT, - PublicOf: PublicT, -{ - - let signature = read_signature::(matches)?; - let pubkey = read_public_key::(Some(uri)); - Ok(<::Pair as Pair>::verify(&signature, &message, &pubkey)) -} - -fn decode_hex>(message: T) -> Result, Error> { - hex::decode(message).map_err(|e| Error::Formatted(format!("Invalid hex ({})", e))) -} - -fn read_message_from_stdin(should_decode: bool) -> Result, Error> { - let mut message = vec![]; - stdin() - .lock() - .read_to_end(&mut message)?; - if should_decode { - message = decode_hex(&message)?; - } - Ok(message) -} - -fn read_required_parameter(matches: &ArgMatches, name: &str) -> Result where - ::Err: std::fmt::Debug, -{ - let str_value = matches - .value_of(name) - .expect("parameter is required; thus it can't be None; qed"); - str::parse::(str_value).map_err(|_| - Error::Formatted(format!("Invalid `{}' parameter; expecting an integer.", name)) - ) -} - -fn read_genesis_hash(matches: &ArgMatches) -> Result { - let genesis_hash: Hash = match matches.value_of("genesis").unwrap_or("alex") { - "elm" => hex!["10c08714a10c7da78f40a60f6f732cf0dba97acfb5e2035445b032386157d5c3"].into(), - "alex" => hex!["dcd1346701ca8396496e52aa2785b1748deb6db09551b72159dcb3e08991025b"].into(), - h => Decode::decode(&mut &decode_hex(h)?[..]) - .expect("Invalid genesis hash or unrecognized chain identifier"), - }; - println!( - "Using a genesis hash of {}", - HexDisplay::from(&genesis_hash.as_ref()) - ); - Ok(genesis_hash) -} - -fn read_signature(matches: &ArgMatches) -> Result, Error> -where - SignatureOf: SignatureT, - PublicOf: PublicT, -{ - let sig_data = matches - .value_of("sig") - .expect("signature parameter is required; thus it can't be None; qed"); - let mut signature = <::Pair as 
Pair>::Signature::default(); - let sig_data = decode_hex(sig_data)?; - if sig_data.len() != signature.as_ref().len() { - return Err(Error::Formatted(format!( - "signature has an invalid length. read {} bytes, expected {} bytes", - sig_data.len(), - signature.as_ref().len(), - ))); - } - signature.as_mut().copy_from_slice(&sig_data); - Ok(signature) -} - -fn read_public_key(matched_uri: Option<&str>) -> PublicOf -where - PublicOf: PublicT, -{ - let uri = matched_uri.expect("parameter is required; thus it can't be None; qed"); - let uri = if uri.starts_with("0x") { - &uri[2..] - } else { - uri - }; - if let Ok(pubkey_vec) = hex::decode(uri) { - ::Public::from_slice(pubkey_vec.as_slice()) - } else { - ::Public::from_string(uri) - .ok() - .expect("Invalid URI; expecting either a secret URI or a public URI.") - } -} - -fn read_account_id(matched_uri: Option<&str>) -> AccountId { - let uri = matched_uri.expect("parameter is required; thus it can't be None; qed"); - let uri = if uri.starts_with("0x") { - &uri[2..] - } else { - uri - }; - if let Ok(data_vec) = hex::decode(uri) { - AccountId::try_from(data_vec.as_slice()) - .expect("Invalid hex length for account ID; should be 32 bytes") - } else { - AccountId::from_ss58check(uri).ok() - .expect("Invalid SS58-check address given for account ID.") - } -} - -fn read_pair( - matched_suri: Option<&str>, - password: Option<&str>, -) -> Result<::Pair, Error> where - SignatureOf: SignatureT, - PublicOf: PublicT, -{ - let suri = matched_suri.ok_or(Error::Static("parameter is required; thus it can't be None; qed"))?; - Ok(C::pair_from_suri(suri, password)) -} - -fn format_signature(signature: &SignatureOf) -> String { - format!("{}", HexDisplay::from(&signature.as_ref())) -} - -fn format_seed(seed: SeedOf) -> String { - format!("0x{}", HexDisplay::from(&seed.as_ref())) -} - -fn format_public_key(public_key: PublicOf) -> String { - format!("0x{}", HexDisplay::from(&public_key.as_ref())) -} - -fn format_account_id(public_key: PublicOf) -> String where - PublicOf: PublicT, -{ - format!("0x{}", HexDisplay::from(&public_key.into_runtime().into_account().as_ref())) -} - -fn create_extrinsic( - function: Call, - index: Index, - signer: C::Pair, - genesis_hash: H256, -) -> UncheckedExtrinsic where - PublicOf: PublicT, - SignatureOf: SignatureT, -{ - let extra = |i: Index, f: Balance| { - ( - frame_system::CheckSpecVersion::::new(), - frame_system::CheckTxVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(Era::Immortal), - frame_system::CheckNonce::::from(i), - frame_system::CheckWeight::::new(), - pallet_transaction_payment::ChargeTransactionPayment::::from(f), - ) - }; - let raw_payload = SignedPayload::from_raw( - function, - extra(index, 0), - ( - VERSION.spec_version, - VERSION.transaction_version, - genesis_hash, - genesis_hash, - (), - (), - (), - ), - ); - let signature = raw_payload.using_encoded(|payload| signer.sign(payload)).into_runtime(); - let signer = signer.public().into_runtime(); - let (function, extra, _) = raw_payload.deconstruct(); - - UncheckedExtrinsic::new_signed( - function, - signer.into_account().into(), - signature, - extra, - ) -} - -fn print_extrinsic(extrinsic: UncheckedExtrinsic) { - println!("0x{}", HexDisplay::from(&extrinsic.encode())); -} - -fn print_usage(matches: &ArgMatches) { - println!("{}", matches.usage()); -} - -#[cfg(test)] -mod tests { - use super::*; - - fn test_generate_sign_verify() - where - SignatureOf: SignatureT, - PublicOf: PublicT, - { - let usage = get_usage(); - let app 
= get_app(&usage); - let password = None; - - // Generate public key and seed. - let arg_vec = vec!["subkey", "generate"]; - - let matches = app.clone().get_matches_from(arg_vec); - let matches = matches.subcommand().1.unwrap(); - let mnemonic = generate_mnemonic(matches).expect("generate failed"); - - let (pair, seed) = - <::Pair as Pair>::from_phrase(mnemonic.phrase(), password) - .unwrap(); - let public_key = CryptoType::public_from_pair(&pair); - let public_key = format_public_key::(public_key); - let seed = format_seed::(seed); - let message = "Blah Blah\n".as_bytes().to_vec(); - - let signature = do_sign::(&seed, message.clone(), password).expect("signing failed"); - - // Verify the previous signature. - let arg_vec = vec!["subkey", "verify", &signature[..], &public_key[..]]; - - let matches = get_app(&usage).get_matches_from(arg_vec); - let matches = matches.subcommand().1.unwrap(); - - assert!(do_verify::(matches, &public_key, message).expect("verify failed")); - } - - #[test] - fn generate_sign_verify_should_work_for_ed25519() { - test_generate_sign_verify::(); - } - - #[test] - fn generate_sign_verify_should_work_for_sr25519() { - test_generate_sign_verify::(); - } - - #[test] - fn should_work() { - let s = "0123456789012345678901234567890123456789012345678901234567890123"; - - let d1: Hash = hex::decode(s) - .ok() - .and_then(|x| Decode::decode(&mut &x[..]).ok()) - .unwrap(); - - let d2: Hash = { - let mut gh: [u8; 32] = Default::default(); - gh.copy_from_slice(hex::decode(s).unwrap().as_ref()); - Hash::from(gh) - }; - - assert_eq!(d1, d2); - } +fn main() -> Result<(), sc_cli::Error> { + subkey::run::() } diff --git a/bin/utils/subkey/src/rpc.rs b/bin/utils/subkey/src/rpc.rs deleted file mode 100644 index e24cf50dc45..00000000000 --- a/bin/utils/subkey/src/rpc.rs +++ /dev/null @@ -1,51 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! 
Helper to run commands against current node RPC - -use futures::Future; -use hyper::rt; -use node_primitives::Hash; -use sc_rpc::author::AuthorClient; -use jsonrpc_core_client::transports::http; -use sp_core::Bytes; - -pub struct RpcClient { url: String } - -impl RpcClient { - pub fn new(url: String) -> Self { Self { url } } - - pub fn insert_key( - &self, - key_type: String, - suri: String, - public: Bytes, - ) { - let url = self.url.clone(); - - rt::run( - http::connect(&url) - .and_then(|client: AuthorClient| { - client.insert_key(key_type, suri, public).map(|_| ()) - }) - .map_err(|e| { - eprintln!("Error inserting key: {:?}", e); - }) - ); - } -} diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 85a1eb0fe0a..2fe1b30ab14 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -23,7 +23,13 @@ lazy_static = "1.4.0" tokio = { version = "0.2.21", features = [ "signal", "rt-core", "rt-threaded", "blocking" ] } futures = "0.3.4" fdlimit = "0.1.4" +libp2p = "0.22" +parity-scale-codec = "1.3.0" +hex = "0.4.2" +rand = "0.7.3" +bip39 = "0.6.0-beta.1" serde_json = "1.0.41" +sc-keystore = { version = "2.0.0-rc5", path = "../keystore" } sc-informant = { version = "0.8.0-rc5", path = "../informant" } sp-panic-handler = { version = "2.0.0-rc5", path = "../../primitives/panic-handler" } sc-client-api = { version = "2.0.0-rc5", path = "../api" } @@ -53,6 +59,8 @@ nix = "0.17.0" [dev-dependencies] tempfile = "3.1.0" +sp-io = { version = "2.0.0-rc3", path = "../../primitives/io" } +sp-application-crypto = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/application-crypto" } [features] wasmtime = [ diff --git a/client/cli/src/arg_enums.rs b/client/cli/src/arg_enums.rs index db13fff7614..4ba76d7a063 100644 --- a/client/cli/src/arg_enums.rs +++ b/client/cli/src/arg_enums.rs @@ -85,6 +85,23 @@ arg_enum! { } } +arg_enum! { + #[derive(Debug, Copy, Clone, PartialEq, Eq)] + pub enum CryptoScheme { + Ed25519, + Sr25519, + Ecdsa, + } +} + +arg_enum! { + #[derive(Debug, Copy, Clone, PartialEq, Eq)] + pub enum OutputType { + Json, + Text, + } +} + arg_enum! { /// How to execute blocks #[derive(Debug, Clone, Copy, PartialEq, Eq)] diff --git a/client/cli/src/commands/export_blocks_cmd.rs b/client/cli/src/commands/export_blocks_cmd.rs index 118832a79d2..e175d498941 100644 --- a/client/cli/src/commands/export_blocks_cmd.rs +++ b/client/cli/src/commands/export_blocks_cmd.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . use crate::error; -use crate::params::{BlockNumber, DatabaseParams, PruningParams, SharedParams}; +use crate::params::{GenericNumber, DatabaseParams, PruningParams, SharedParams}; use crate::CliConfiguration; use log::info; use sc_service::{ @@ -44,13 +44,13 @@ pub struct ExportBlocksCmd { /// /// Default is 1. #[structopt(long = "from", value_name = "BLOCK")] - pub from: Option, + pub from: Option, /// Specify last block number. /// /// Default is best block. #[structopt(long = "to", value_name = "BLOCK")] - pub to: Option, + pub to: Option, /// Use binary output rather than JSON. #[structopt(long)] diff --git a/client/cli/src/commands/generate.rs b/client/cli/src/commands/generate.rs new file mode 100644 index 00000000000..9eeca55a2ee --- /dev/null +++ b/client/cli/src/commands/generate.rs @@ -0,0 +1,91 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation of the `generate` subcommand +use bip39::{MnemonicType, Mnemonic, Language}; +use structopt::StructOpt; +use crate::{ + utils::print_from_uri, KeystoreParams, Error, + with_crypto_scheme, NetworkSchemeFlag, OutputTypeFlag, CryptoSchemeFlag, +}; + +/// The `generate` command +#[derive(Debug, StructOpt)] +#[structopt(name = "generate", about = "Generate a random account")] +pub struct GenerateCmd { + /// The number of words in the phrase to generate. One of 12 (default), 15, 18, 21 and 24. + #[structopt(long, short = "w", value_name = "WORDS")] + words: Option, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub keystore_params: KeystoreParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub network_scheme: NetworkSchemeFlag, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub output_scheme: OutputTypeFlag, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub crypto_scheme: CryptoSchemeFlag, +} + +impl GenerateCmd { + /// Run the command + pub fn run(&self) -> Result<(), Error> { + let words = match self.words { + Some(words) => { + MnemonicType::for_word_count(words) + .map_err(|_| { + Error::Input("Invalid number of words given for phrase: must be 12/15/18/21/24".into()) + })? + }, + None => MnemonicType::Words12, + }; + let mnemonic = Mnemonic::new(words, Language::English); + let password = self.keystore_params.read_password()?; + let maybe_network = self.network_scheme.network.clone(); + let output = self.output_scheme.output_type.clone(); + + with_crypto_scheme!( + self.crypto_scheme.scheme, + print_from_uri( + mnemonic.phrase(), + password, + maybe_network, + output + ) + ); + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::GenerateCmd; + use structopt::StructOpt; + + #[test] + fn generate() { + let generate = GenerateCmd::from_iter(&["generate", "--password", "12345"]); + assert!(generate.run().is_ok()) + } +} diff --git a/client/cli/src/commands/generate_node_key.rs b/client/cli/src/commands/generate_node_key.rs new file mode 100644 index 00000000000..9ee04d23e34 --- /dev/null +++ b/client/cli/src/commands/generate_node_key.rs @@ -0,0 +1,70 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Implementation of the `generate-node-key` subcommand + +use crate::Error; +use structopt::StructOpt; +use std::{path::PathBuf, fs}; +use libp2p::identity::{ed25519 as libp2p_ed25519, PublicKey}; + +/// The `generate-node-key` command +#[derive(Debug, StructOpt)] +#[structopt( + name = "generate-node-key", + about = "Generate a random node libp2p key, save it to file and print its peer ID" +)] +pub struct GenerateNodeKeyCmd { + /// Name of file to save secret key to. + #[structopt(long)] + file: PathBuf, +} + +impl GenerateNodeKeyCmd { + /// Run the command + pub fn run(&self) -> Result<(), Error> { + let file = &self.file; + + let keypair = libp2p_ed25519::Keypair::generate(); + let secret = keypair.secret(); + let peer_id = PublicKey::Ed25519(keypair.public()).into_peer_id(); + + fs::write(file, hex::encode(secret.as_ref()))?; + + println!("{}", peer_id); + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::Builder; + use std::io::Read; + + #[test] + fn generate_node_key() { + let mut file = Builder::new().prefix("keyfile").tempfile().unwrap(); + let generate = + GenerateNodeKeyCmd::from_iter(&["generate-node-key", "--file", "/tmp/keyfile"]); + assert!(generate.run().is_ok()); + let mut buf = String::new(); + assert!(file.read_to_string(&mut buf).is_ok()); + assert!(hex::decode(buf).is_ok()); + } +} diff --git a/client/cli/src/commands/insert.rs b/client/cli/src/commands/insert.rs new file mode 100644 index 00000000000..5e4a0d42ffe --- /dev/null +++ b/client/cli/src/commands/insert.rs @@ -0,0 +1,94 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation of the `insert` subcommand + +use crate::{Error, KeystoreParams, CryptoSchemeFlag, SharedParams, utils, with_crypto_scheme}; +use structopt::StructOpt; +use sp_core::{crypto::KeyTypeId, traits::BareCryptoStore}; +use std::convert::TryFrom; +use sc_service::config::KeystoreConfig; +use sc_keystore::Store as KeyStore; +use sp_core::crypto::SecretString; + +/// The `insert` command +#[derive(Debug, StructOpt)] +#[structopt( + name = "insert", + about = "Insert a key to the keystore of a node." +)] +pub struct InsertCmd { + /// The secret key URI. + /// If the value is a file, the file content is used as URI. + /// If not given, you will be prompted for the URI. 
+ #[structopt(long)] + suri: Option, + + /// Key type, examples: "gran", or "imon" + #[structopt(long)] + key_type: String, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub keystore_params: KeystoreParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub crypto_scheme: CryptoSchemeFlag, +} + +impl InsertCmd { + /// Run the command + pub fn run(&self) -> Result<(), Error> { + let suri = utils::read_uri(self.suri.as_ref())?; + let base_path = self.shared_params.base_path.as_ref() + .ok_or_else(|| Error::Other("please supply base path".into()))?; + + let (keystore, public) = match self.keystore_params.keystore_config(base_path)? { + KeystoreConfig::Path { path, password } => { + let public = with_crypto_scheme!( + self.crypto_scheme.scheme, + to_vec(&suri, password.clone()) + )?; + let keystore = KeyStore::open(path, password) + .map_err(|e| format!("{}", e))?; + (keystore, public) + }, + _ => unreachable!("keystore_config always returns path and password; qed") + }; + + let key_type = KeyTypeId::try_from(self.key_type.as_str()) + .map_err(|_| { + Error::Other("Cannot convert argument to keytype: argument should be 4-character string".into()) + })?; + + keystore.write() + .insert_unknown(key_type, &suri, &public[..]) + .map_err(|e| Error::Other(format!("{:?}", e)))?; + + Ok(()) + } +} + +fn to_vec(uri: &str, pass: Option) -> Result, Error> { + let p = utils::pair_from_suri::
<P>
(uri, pass)?; + Ok(p.public().as_ref().to_vec()) +} diff --git a/client/cli/src/commands/inspect.rs b/client/cli/src/commands/inspect.rs new file mode 100644 index 00000000000..3356d7ca07a --- /dev/null +++ b/client/cli/src/commands/inspect.rs @@ -0,0 +1,95 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation of the `inspect` subcommand + +use crate::{ + utils, KeystoreParams, with_crypto_scheme, NetworkSchemeFlag, + OutputTypeFlag, CryptoSchemeFlag, Error, +}; +use structopt::StructOpt; +/// The `inspect` command +#[derive(Debug, StructOpt)] +#[structopt( + name = "inspect-key", + about = "Gets a public key and a SS58 address from the provided Secret URI" +)] +pub struct InspectCmd { + /// A Key URI to be inspected. May be a secret seed, secret URI + /// (with derivation paths and password), SS58 or public URI. + /// If the value is a file, the file content is used as URI. + /// If not given, you will be prompted for the URI. + #[structopt(long)] + uri: Option, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub keystore_params: KeystoreParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub network_scheme: NetworkSchemeFlag, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub output_scheme: OutputTypeFlag, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub crypto_scheme: CryptoSchemeFlag, +} + +impl InspectCmd { + /// Run the command + pub fn run(&self) -> Result<(), Error> { + let uri = utils::read_uri(self.uri.as_ref())?; + let password = self.keystore_params.read_password()?; + + use utils::print_from_uri; + with_crypto_scheme!( + self.crypto_scheme.scheme, + print_from_uri( + &uri, + password, + self.network_scheme.network.clone(), + self.output_scheme.output_type.clone() + ) + ); + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::InspectCmd; + use structopt::StructOpt; + + #[test] + fn inspect() { + let words = + "remember fiber forum demise paper uniform squirrel feel access exclude casual effort"; + let seed = "0xad1fb77243b536b90cfe5f0d351ab1b1ac40e3890b41dc64f766ee56340cfca5"; + + let inspect = + InspectCmd::from_iter(&["inspect-key", "--uri", words, "--password", "12345"]); + assert!(inspect.run().is_ok()); + + let inspect = InspectCmd::from_iter(&["inspect-key", "--uri", seed]); + assert!(inspect.run().is_ok()); + } +} diff --git a/client/cli/src/commands/inspect_node_key.rs b/client/cli/src/commands/inspect_node_key.rs new file mode 100644 index 00000000000..be0b88589d5 --- /dev/null +++ b/client/cli/src/commands/inspect_node_key.rs @@ -0,0 +1,75 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation of the `inspect-node-key` subcommand + +use crate::{Error, NetworkSchemeFlag}; +use std::fs; +use libp2p::identity::{PublicKey, ed25519}; +use std::path::PathBuf; +use structopt::StructOpt; + +/// The `inspect-node-key` command +#[derive(Debug, StructOpt)] +#[structopt( + name = "inspect-node-key", + about = "Print the peer ID corresponding to the node key in the given file." +)] +pub struct InspectNodeKeyCmd { + /// Name of file to read the secret key from. + #[structopt(long)] + file: PathBuf, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub network_scheme: NetworkSchemeFlag, +} + +impl InspectNodeKeyCmd { + /// runs the command + pub fn run(&self) -> Result<(), Error> { + let mut file_content = hex::decode(fs::read(&self.file)?) + .map_err(|_| "failed to decode secret as hex")?; + let secret = ed25519::SecretKey::from_bytes(&mut file_content) + .map_err(|_| "Bad node key file")?; + + let keypair = ed25519::Keypair::from(secret); + let peer_id = PublicKey::Ed25519(keypair.public()).into_peer_id(); + + println!("{}", peer_id); + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use super::super::GenerateNodeKeyCmd; + + #[test] + fn inspect_node_key() { + let path = tempfile::tempdir().unwrap().into_path().join("node-id").into_os_string(); + let path = path.to_str().unwrap(); + let cmd = GenerateNodeKeyCmd::from_iter(&["generate-node-key", "--file", path.clone()]); + + assert!(cmd.run().is_ok()); + + let cmd = InspectNodeKeyCmd::from_iter(&["inspect-node-key", "--file", path]); + assert!(cmd.run().is_ok()); + } +} diff --git a/client/cli/src/commands/key.rs b/client/cli/src/commands/key.rs new file mode 100644 index 00000000000..61145eace10 --- /dev/null +++ b/client/cli/src/commands/key.rs @@ -0,0 +1,61 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Key related CLI utilities + +use crate::Error; +use structopt::StructOpt; + +use super::{ + insert::InsertCmd, + inspect::InspectCmd, + generate::GenerateCmd, + inspect_node_key::InspectNodeKeyCmd, + generate_node_key::GenerateNodeKeyCmd, +}; + +/// key utilities for the cli. 
+#[derive(Debug, StructOpt)] +pub enum KeySubcommand { + /// Generate a random node libp2p key, save it to file and print its peer ID + GenerateNodeKey(GenerateNodeKeyCmd), + + /// Generate a random account + Generate(GenerateCmd), + + /// Gets a public key and a SS58 address from the provided Secret URI + InspectKey(InspectCmd), + + /// Print the peer ID corresponding to the node key in the given file + InspectNodeKey(InspectNodeKeyCmd), + + /// Insert a key to the keystore of a node. + Insert(InsertCmd), +} + +impl KeySubcommand { + /// run the key subcommands + pub fn run(&self) -> Result<(), Error> { + match self { + KeySubcommand::GenerateNodeKey(cmd) => cmd.run(), + KeySubcommand::Generate(cmd) => cmd.run(), + KeySubcommand::InspectKey(cmd) => cmd.run(), + KeySubcommand::Insert(cmd) => cmd.run(), + KeySubcommand::InspectNodeKey(cmd) => cmd.run(), + } + } +} diff --git a/client/cli/src/commands/mod.rs b/client/cli/src/commands/mod.rs index 5d4f4fe18db..33472b29a5e 100644 --- a/client/cli/src/commands/mod.rs +++ b/client/cli/src/commands/mod.rs @@ -21,20 +21,42 @@ mod export_blocks_cmd; mod export_state_cmd; mod import_blocks_cmd; mod purge_chain_cmd; +mod sign; +mod verify; +mod vanity; mod revert_cmd; mod run_cmd; +mod generate_node_key; +mod generate; +mod insert; +mod inspect_node_key; +mod inspect; +mod key; +pub mod utils; -pub use self::build_spec_cmd::BuildSpecCmd; -pub use self::check_block_cmd::CheckBlockCmd; -pub use self::export_blocks_cmd::ExportBlocksCmd; -pub use self::export_state_cmd::ExportStateCmd; -pub use self::import_blocks_cmd::ImportBlocksCmd; -pub use self::purge_chain_cmd::PurgeChainCmd; -pub use self::revert_cmd::RevertCmd; -pub use self::run_cmd::RunCmd; use std::fmt::Debug; use structopt::StructOpt; +pub use self::{ + build_spec_cmd::BuildSpecCmd, + check_block_cmd::CheckBlockCmd, + export_blocks_cmd::ExportBlocksCmd, + export_state_cmd::ExportStateCmd, + import_blocks_cmd::ImportBlocksCmd, + purge_chain_cmd::PurgeChainCmd, + sign::SignCmd, + generate::GenerateCmd, + insert::InsertCmd, + inspect::InspectCmd, + generate_node_key::GenerateNodeKeyCmd, + inspect_node_key::InspectNodeKeyCmd, + key::KeySubcommand, + vanity::VanityCmd, + verify::VerifyCmd, + revert_cmd::RevertCmd, + run_cmd::RunCmd, +}; + /// All core commands that are provided by default. /// /// The core commands are split into multiple subcommands and `Run` is the default subcommand. From @@ -54,14 +76,14 @@ pub enum Subcommand { /// Validate a single block. CheckBlock(CheckBlockCmd), + /// Export state as raw chain spec. + ExportState(ExportStateCmd), + /// Revert chain to the previous state. Revert(RevertCmd), /// Remove the whole chain data. PurgeChain(PurgeChainCmd), - - /// Export state as raw chain spec. - ExportState(ExportStateCmd), } /// Macro that helps implement CliConfiguration on an enum of subcommand automatically @@ -425,5 +447,12 @@ macro_rules! substrate_cli_subcommands { } substrate_cli_subcommands!( - Subcommand => BuildSpec, ExportBlocks, ImportBlocks, CheckBlock, Revert, PurgeChain, ExportState + Subcommand => + BuildSpec, + ExportBlocks, + ExportState, + ImportBlocks, + CheckBlock, + Revert, + PurgeChain ); diff --git a/client/cli/src/commands/revert_cmd.rs b/client/cli/src/commands/revert_cmd.rs index bbfb0d2ff99..b2e3c1bf8e2 100644 --- a/client/cli/src/commands/revert_cmd.rs +++ b/client/cli/src/commands/revert_cmd.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . 
use crate::error; -use crate::params::{BlockNumber, PruningParams, SharedParams}; +use crate::params::{GenericNumber, PruningParams, SharedParams}; use crate::CliConfiguration; use sc_service::chain_ops::revert_chain; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; @@ -32,7 +32,7 @@ use sc_client_api::{Backend, UsageProvider}; pub struct RevertCmd { /// Number of blocks to revert. #[structopt(default_value = "256")] - pub num: BlockNumber, + pub num: GenericNumber, #[allow(missing_docs)] #[structopt(flatten)] diff --git a/client/cli/src/commands/sign.rs b/client/cli/src/commands/sign.rs new file mode 100644 index 00000000000..605fd5b1231 --- /dev/null +++ b/client/cli/src/commands/sign.rs @@ -0,0 +1,98 @@ +// This file is part of Substrate. + +// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Implementation of the `sign` subcommand +use crate::{error, utils, with_crypto_scheme, CryptoSchemeFlag, KeystoreParams}; +use structopt::StructOpt; +use sp_core::crypto::SecretString; + +/// The `sign` command +#[derive(Debug, StructOpt)] +#[structopt( + name = "sign", + about = "Sign a message, with a given (secret) key" +)] +pub struct SignCmd { + /// The secret key URI. + /// If the value is a file, the file content is used as URI. + /// If not given, you will be prompted for the URI. + #[structopt(long)] + suri: Option, + + /// Message to sign, if not provided you will be prompted to + /// pass the message via STDIN + #[structopt(long)] + message: Option, + + /// The message on STDIN is hex-encoded data + #[structopt(long)] + hex: bool, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub keystore_params: KeystoreParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub crypto_scheme: CryptoSchemeFlag, +} + + +impl SignCmd { + /// Run the command + pub fn run(&self) -> error::Result<()> { + let message = utils::read_message(self.message.as_ref(), self.hex)?; + let suri = utils::read_uri(self.suri.as_ref())?; + let password = self.keystore_params.read_password()?; + + let signature = with_crypto_scheme!( + self.crypto_scheme.scheme, + sign(&suri, password, message) + )?; + + println!("{}", signature); + Ok(()) + } +} + +fn sign(suri: &str, password: Option, message: Vec) -> error::Result { + let pair = utils::pair_from_suri::
<P>
(suri, password)?; + Ok(format!("{}", hex::encode(pair.sign(&message)))) +} + +#[cfg(test)] +mod test { + use super::SignCmd; + use structopt::StructOpt; + + #[test] + fn sign() { + let seed = "0xad1fb77243b536b90cfe5f0d351ab1b1ac40e3890b41dc64f766ee56340cfca5"; + + let sign = SignCmd::from_iter(&[ + "sign", + "--suri", + seed, + "--message", + &seed[2..], + "--password", + "12345" + ]); + assert!(sign.run().is_ok()); + } +} diff --git a/client/cli/src/commands/utils.rs b/client/cli/src/commands/utils.rs new file mode 100644 index 00000000000..96b6128057a --- /dev/null +++ b/client/cli/src/commands/utils.rs @@ -0,0 +1,233 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! subcommand utilities +use std::{io::Read, path::PathBuf}; +use sp_core::{ + Pair, hexdisplay::HexDisplay, + crypto::{Ss58Codec, Ss58AddressFormat}, +}; +use sp_runtime::{MultiSigner, traits::IdentifyAccount}; +use crate::{OutputType, error::{self, Error}}; +use serde_json::json; +use sp_core::crypto::{SecretString, Zeroize, ExposeSecret}; + +/// Public key type for Runtime +pub type PublicFor
<P> = <P as sp_core::Pair>
::Public; +/// Seed type for Runtime +pub type SeedFor
<P> = <P as sp_core::Pair>
::Seed; + +/// helper method to fetch uri from `Option` either as a file or read from stdin +pub fn read_uri(uri: Option<&String>) -> error::Result { + let uri = if let Some(uri) = uri { + let file = PathBuf::from(&uri); + if file.is_file() { + std::fs::read_to_string(uri)? + .trim_end() + .to_owned() + } else { + uri.into() + } + } else { + rpassword::read_password_from_tty(Some("URI: "))? + }; + + Ok(uri) +} + +/// print formatted pair from uri +pub fn print_from_uri( + uri: &str, + password: Option, + network_override: Ss58AddressFormat, + output: OutputType, +) + where + Pair: sp_core::Pair, + Pair::Public: Into, +{ + let password = password.as_ref().map(|s| s.expose_secret().as_str()); + if let Ok((pair, seed)) = Pair::from_phrase(uri, password.clone()) { + let public_key = pair.public(); + + match output { + OutputType::Json => { + let json = json!({ + "secretPhrase": uri, + "secretSeed": format_seed::(seed), + "publicKey": format_public_key::(public_key.clone()), + "accountId": format_account_id::(public_key), + "ss58Address": pair.public().into().into_account().to_ss58check(), + }); + println!("{}", serde_json::to_string_pretty(&json).expect("Json pretty print failed")); + }, + OutputType::Text => { + println!("Secret phrase `{}` is account:\n \ + Secret seed: {}\n \ + Public key (hex): {}\n \ + Account ID: {}\n \ + SS58 Address: {}", + uri, + format_seed::(seed), + format_public_key::(public_key.clone()), + format_account_id::(public_key), + pair.public().into().into_account().to_ss58check(), + ); + }, + } + } else if let Ok((pair, seed)) = Pair::from_string_with_seed(uri, password.clone()) { + let public_key = pair.public(); + + match output { + OutputType::Json => { + let json = json!({ + "secretKeyUri": uri, + "secretSeed": if let Some(seed) = seed { format_seed::(seed) } else { "n/a".into() }, + "publicKey": format_public_key::(public_key.clone()), + "accountId": format_account_id::(public_key), + "ss58Address": pair.public().into().into_account().to_ss58check(), + }); + println!("{}", serde_json::to_string_pretty(&json).expect("Json pretty print failed")); + }, + OutputType::Text => { + println!("Secret Key URI `{}` is account:\n \ + Secret seed: {}\n \ + Public key (hex): {}\n \ + Account ID: {}\n \ + SS58 Address: {}", + uri, + if let Some(seed) = seed { format_seed::(seed) } else { "n/a".into() }, + format_public_key::(public_key.clone()), + format_account_id::(public_key), + pair.public().into().into_account().to_ss58check(), + ); + }, + } + } else if let Ok((public_key, _v)) = Pair::Public::from_string_with_version(uri) { + let v = network_override; + + match output { + OutputType::Json => { + let json = json!({ + "publicKeyUri": uri, + "networkId": String::from(v), + "publicKey": format_public_key::(public_key.clone()), + "accountId": format_account_id::(public_key.clone()), + "ss58Address": public_key.to_ss58check_with_version(v), + }); + println!("{}", serde_json::to_string_pretty(&json).expect("Json pretty print failed")); + }, + OutputType::Text => { + println!("Public Key URI `{}` is account:\n \ + Network ID/version: {}\n \ + Public key (hex): {}\n \ + Account ID: {}\n \ + SS58 Address: {}", + uri, + String::from(v), + format_public_key::(public_key.clone()), + format_account_id::(public_key.clone()), + public_key.to_ss58check_with_version(v), + ); + }, + } + } else { + println!("Invalid phrase/URI given"); + } +} + +/// generate a pair from suri +pub fn pair_from_suri(suri: &str, password: Option) -> Result { + let result = if let Some(pass) = password { + let 
mut pass_str = pass.expose_secret().clone(); + let pair = P::from_string(suri, Some(&pass_str)); + pass_str.zeroize(); + pair + } else { + P::from_string(suri, None) + }; + + Ok(result.map_err(|err| format!("Invalid phrase {:?}", err))?) +} + +/// formats seed as hex +pub fn format_seed(seed: SeedFor
<P>
) -> String { + format!("0x{}", HexDisplay::from(&seed.as_ref())) +} + +/// formats public key as hex +fn format_public_key(public_key: PublicFor
<P>
) -> String { + format!("0x{}", HexDisplay::from(&public_key.as_ref())) +} + +/// formats public key as accountId as hex +fn format_account_id(public_key: PublicFor
<P>
) -> String + where + PublicFor
<P>
: Into, +{ + format!("0x{}", HexDisplay::from(&public_key.into().into_account().as_ref())) +} + +/// helper method for decoding hex +pub fn decode_hex>(message: T) -> Result, Error> { + let mut message = message.as_ref(); + if message[..2] == [b'0', b'x'] { + message = &message[2..] + } + hex::decode(message) + .map_err(|e| Error::Other(format!("Invalid hex ({})", e))) +} + +/// checks if message is Some, otherwise reads message from stdin and optionally decodes hex +pub fn read_message(msg: Option<&String>, should_decode: bool) -> Result, Error> { + let mut message = vec![]; + match msg { + Some(m) => { + message = decode_hex(m)?; + }, + None => { + std::io::stdin().lock().read_to_end(&mut message)?; + if should_decode { + message = decode_hex(&message)?; + } + } + } + Ok(message) +} + + +/// Allows for calling $method with appropriate crypto impl. +#[macro_export] +macro_rules! with_crypto_scheme { + ($scheme:expr, $method:ident($($params:expr),*)) => { + with_crypto_scheme!($scheme, $method<>($($params),*)) + }; + ($scheme:expr, $method:ident<$($generics:ty),*>($($params:expr),*)) => { + match $scheme { + $crate::CryptoScheme::Ecdsa => { + $method::($($params),*) + } + $crate::CryptoScheme::Sr25519 => { + $method::($($params),*) + } + $crate::CryptoScheme::Ed25519 => { + $method::($($params),*) + } + } + }; +} diff --git a/bin/utils/subkey/src/vanity.rs b/client/cli/src/commands/vanity.rs similarity index 56% rename from bin/utils/subkey/src/vanity.rs rename to client/cli/src/commands/vanity.rs index d09aeeef25a..e6f923f73c4 100644 --- a/bin/utils/subkey/src/vanity.rs +++ b/client/cli/src/commands/vanity.rs @@ -5,7 +5,7 @@ // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or +// the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // This program is distributed in the hope that it will be useful, @@ -16,9 +16,97 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use super::{PublicOf, PublicT, Crypto}; -use sp_core::Pair; +//! 
implementation of the `vanity` subcommand + +use crate::{ + error, utils, with_crypto_scheme, + CryptoSchemeFlag, NetworkSchemeFlag, OutputTypeFlag, +}; +use sp_core::crypto::Ss58Codec; +use structopt::StructOpt; use rand::{rngs::OsRng, RngCore}; +use sp_runtime::traits::IdentifyAccount; + +/// The `vanity` command +#[derive(Debug, StructOpt)] +#[structopt( + name = "vanity", + about = "Generate a seed that provides a vanity address" +)] +pub struct VanityCmd { + /// Desired pattern + #[structopt(long, parse(try_from_str = assert_non_empty_string))] + pattern: String, + + #[allow(missing_docs)] + #[structopt(flatten)] + network_scheme: NetworkSchemeFlag, + + #[allow(missing_docs)] + #[structopt(flatten)] + output_scheme: OutputTypeFlag, + + #[allow(missing_docs)] + #[structopt(flatten)] + crypto_scheme: CryptoSchemeFlag, +} + +impl VanityCmd { + /// Run the command + pub fn run(&self) -> error::Result<()> { + let formated_seed = with_crypto_scheme!(self.crypto_scheme.scheme, generate_key(&self.pattern))?; + use utils::print_from_uri; + with_crypto_scheme!( + self.crypto_scheme.scheme, + print_from_uri( + &formated_seed, + None, + self.network_scheme.network.clone(), + self.output_scheme.output_type.clone() + ) + ); + Ok(()) + } +} + +/// genertae a key based on given pattern +fn generate_key(desired: &str) -> Result + where + Pair: sp_core::Pair, + Pair::Public: IdentifyAccount, + ::AccountId: Ss58Codec, +{ + println!("Generating key containing pattern '{}'", desired); + + let top = 45 + (desired.len() * 48); + let mut best = 0; + let mut seed = Pair::Seed::default(); + let mut done = 0; + + loop { + if done % 100000 == 0 { + OsRng.fill_bytes(seed.as_mut()); + } else { + next_seed(seed.as_mut()); + } + + let p = Pair::from_seed(&seed); + let ss58 = p.public().into_account().to_ss58check(); + let score = calculate_score(&desired, &ss58); + if score > best || desired.len() < 2 { + best = score; + if best >= top { + println!("best: {} == top: {}", best, top); + return Ok(utils::format_seed::(seed.clone())); + } + } + done += 1; + + if done % good_waypoint(done) == 0 { + println!("{} keys searched; best is {}/{} complete", done, best, top); + } + } +} fn good_waypoint(done: u64) -> u64 { match done { @@ -43,14 +131,6 @@ fn next_seed(seed: &mut [u8]) { } } -/// A structure used to carry both Pair and seed. -/// This should usually NOT been used. If unsure, use Pair. -pub(super) struct KeyPair { - pub pair: C::Pair, - pub seed: ::Seed, - pub score: usize, -} - /// Calculate the score of a key based on the desired /// input. fn calculate_score(_desired: &str, key: &str) -> usize { @@ -64,77 +144,40 @@ fn calculate_score(_desired: &str, key: &str) -> usize { 0 } -/// Validate whether the char is allowed to be used in base58. -/// num 0, lower l, upper I and O are not allowed. 
-fn validate_base58(c :char) -> bool { - c.is_alphanumeric() && !"0lIO".contains(c) -} - -pub(super) fn generate_key(desired: &str) -> Result, &'static str> where - PublicOf: PublicT, -{ - if desired.is_empty() { - return Err("Pattern must not be empty"); - } - - if !desired.chars().all(validate_base58) { - return Err("Pattern can only contains valid characters in base58 \ - (all alphanumeric except for 0, l, I and O)"); - } - - eprintln!("Generating key containing pattern '{}'", desired); - - let top = 45 + (desired.len() * 48); - let mut best = 0; - let mut seed = ::Seed::default(); - let mut done = 0; - - loop { - if done % 100000 == 0 { - OsRng.fill_bytes(seed.as_mut()); - } else { - next_seed(seed.as_mut()); - } - - let p = C::Pair::from_seed(&seed); - let ss58 = C::ss58_from_pair(&p); - let score = calculate_score(&desired, &ss58); - if score > best || desired.len() < 2 { - best = score; - let keypair = KeyPair { - pair: p, - seed: seed.clone(), - score: score, - }; - if best >= top { - eprintln!("best: {} == top: {}", best, top); - return Ok(keypair); - } - } - done += 1; - - if done % good_waypoint(done) == 0 { - eprintln!("{} keys searched; best is {}/{} complete", done, best, top); - } +/// checks that `pattern` is non-empty +fn assert_non_empty_string(pattern: &str) -> Result { + if pattern.is_empty() { + Err("Pattern must not be empty") + } else { + Ok(pattern.to_string()) } } + #[cfg(test)] mod tests { - use super::super::Ed25519; use super::*; use sp_core::{crypto::Ss58Codec, Pair}; + use sp_core::sr25519; #[cfg(feature = "bench")] use test::Bencher; + use structopt::StructOpt; + + #[test] + fn vanity() { + let vanity = VanityCmd::from_iter(&["vanity", "--pattern", "j"]); + assert!(vanity.run().is_ok()); + } #[test] fn test_generation_with_single_char() { - assert!(generate_key::("j") - .unwrap() - .pair - .public() - .to_ss58check() - .contains("j")); + let seed = generate_key::("j").unwrap(); + assert!( + sr25519::Pair::from_seed_slice(&hex::decode(&seed[2..]).unwrap()) + .unwrap() + .public() + .to_ss58check() + .contains("j")); } #[test] @@ -175,22 +218,6 @@ mod tests { ); } - #[test] - fn test_invalid_pattern() { - assert!(generate_key::("").is_err()); - assert!(generate_key::("0").is_err()); - assert!(generate_key::("l").is_err()); - assert!(generate_key::("I").is_err()); - assert!(generate_key::("O").is_err()); - assert!(generate_key::("!").is_err()); - } - - #[test] - fn test_valid_pattern() { - assert!(generate_key::("o").is_ok()); - assert!(generate_key::("L").is_ok()); - } - #[cfg(feature = "bench")] #[bench] fn bench_paranoiac(b: &mut Bencher) { diff --git a/client/cli/src/commands/verify.rs b/client/cli/src/commands/verify.rs new file mode 100644 index 00000000000..ad16c11d5e4 --- /dev/null +++ b/client/cli/src/commands/verify.rs @@ -0,0 +1,104 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! implementation of the `verify` subcommand + +use crate::{error, utils, with_crypto_scheme, CryptoSchemeFlag}; +use sp_core::{Public, crypto::Ss58Codec}; +use structopt::StructOpt; + +/// The `verify` command +#[derive(Debug, StructOpt)] +#[structopt( + name = "verify", + about = "Verify a signature for a message, provided on STDIN, with a given (public or secret) key" +)] +pub struct VerifyCmd { + /// Signature, hex-encoded. + sig: String, + + /// The public or secret key URI. + /// If the value is a file, the file content is used as URI. + /// If not given, you will be prompted for the URI. + uri: Option, + + /// Message to verify, if not provided you will be prompted to + /// pass the message via STDIN + #[structopt(long)] + message: Option, + + /// The message on STDIN is hex-encoded data + #[structopt(long)] + hex: bool, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub crypto_scheme: CryptoSchemeFlag, +} + +impl VerifyCmd { + /// Run the command + pub fn run(&self) -> error::Result<()> { + let message = utils::read_message(self.message.as_ref(), self.hex)?; + let sig_data = utils::decode_hex(&self.sig)?; + let uri = utils::read_uri(self.uri.as_ref())?; + let uri = if uri.starts_with("0x") { + &uri[2..] + } else { + &uri + }; + + with_crypto_scheme!( + self.crypto_scheme.scheme, + verify(sig_data, message, uri) + ) + } +} + +fn verify(sig_data: Vec, message: Vec, uri: &str) -> error::Result<()> + where + Pair: sp_core::Pair, + Pair::Signature: Default + AsMut<[u8]>, +{ + let mut signature = Pair::Signature::default(); + if sig_data.len() != signature.as_ref().len() { + return Err(error::Error::Other(format!( + "signature has an invalid length. read {} bytes, expected {} bytes", + sig_data.len(), + signature.as_ref().len(), + ))); + } + signature.as_mut().copy_from_slice(&sig_data); + + let pubkey = if let Ok(pubkey_vec) = hex::decode(uri) { + Pair::Public::from_slice(pubkey_vec.as_slice()) + } else { + Pair::Public::from_string(uri) + .map_err(|_| { + error::Error::Other(format!("Invalid URI; expecting either a secret URI or a public URI.")) + })? + }; + + if Pair::verify(&signature, &message, &pubkey) { + println!("Signature verifies correctly."); + } else { + return Err(error::Error::Other("Signature invalid.".into())) + } + + Ok(()) +} diff --git a/client/cli/src/error.rs b/client/cli/src/error.rs index f091354be15..7404d31fcf7 100644 --- a/client/cli/src/error.rs +++ b/client/cli/src/error.rs @@ -18,6 +18,8 @@ //! Initialization errors. + + /// Result type alias for the CLI. 
pub type Result = std::result::Result; @@ -32,6 +34,8 @@ pub enum Error { Service(sc_service::Error), /// Client error Client(sp_blockchain::Error), + /// scale codec error + Codec(parity_scale_codec::Error), /// Input error #[from(ignore)] Input(String), @@ -65,6 +69,7 @@ impl std::error::Error for Error { Error::Cli(ref err) => Some(err), Error::Service(ref err) => Some(err), Error::Client(ref err) => Some(err), + Error::Codec(ref err) => Some(err), Error::Input(_) => None, Error::InvalidListenMultiaddress => None, Error::Other(_) => None, diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index 021f349aaf2..1de74f087f8 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -21,7 +21,7 @@ #![warn(missing_docs)] #![warn(unused_extern_crates)] -mod arg_enums; +pub mod arg_enums; mod commands; mod config; mod error; diff --git a/client/cli/src/params/keystore_params.rs b/client/cli/src/params/keystore_params.rs index a6eb438cc07..3c04d631445 100644 --- a/client/cli/src/params/keystore_params.rs +++ b/client/cli/src/params/keystore_params.rs @@ -21,7 +21,9 @@ use sc_service::config::KeystoreConfig; use std::fs; use std::path::PathBuf; use structopt::StructOpt; -use sp_core::crypto::SecretString; +use crate::error; +use sp_core::crypto::{SecretString, Zeroize}; +use std::str::FromStr; /// default sub directory for the key store const DEFAULT_KEYSTORE_CONFIG_PATH: &'static str = "keystore"; @@ -73,7 +75,6 @@ impl KeystoreParams { let mut password = input_keystore_password()?; let secret = std::str::FromStr::from_str(password.as_str()) .map_err(|()| "Error reading password")?; - use sp_core::crypto::Zeroize; password.zeroize(); Some(secret) } @@ -84,7 +85,6 @@ impl KeystoreParams { .map_err(|e| format!("{}", e))?; let secret = std::str::FromStr::from_str(password.as_str()) .map_err(|()| "Error reading password")?; - use sp_core::crypto::Zeroize; password.zeroize(); Some(secret) } else { @@ -98,6 +98,22 @@ impl KeystoreParams { Ok(KeystoreConfig::Path { path, password }) } + + /// helper method to fetch password from `KeyParams` or read from stdin + pub fn read_password(&self) -> error::Result> { + let (password_interactive, password) = (self.password_interactive, self.password.clone()); + + let pass = if password_interactive { + let mut password = rpassword::read_password_from_tty(Some("Key password: "))?; + let pass = Some(FromStr::from_str(&password).map_err(|()| "Error reading password")?); + password.zeroize(); + pass + } else { + password + }; + + Ok(pass) + } } #[cfg(not(target_os = "unknown"))] diff --git a/client/cli/src/params/mod.rs b/client/cli/src/params/mod.rs index f648337ed0a..5245c1220fb 100644 --- a/client/cli/src/params/mod.rs +++ b/client/cli/src/params/mod.rs @@ -25,8 +25,11 @@ mod pruning_params; mod shared_params; mod transaction_pool_params; -use std::{fmt::Debug, str::FromStr}; +use std::{fmt::Debug, str::FromStr, convert::TryFrom}; use sp_runtime::{generic::BlockId, traits::{Block as BlockT, NumberFor}}; +use sp_core::crypto::Ss58AddressFormat; +use crate::arg_enums::{OutputType, CryptoScheme}; +use structopt::StructOpt; pub use crate::params::database_params::*; pub use crate::params::import_params::*; @@ -39,10 +42,10 @@ pub use crate::params::shared_params::*; pub use crate::params::transaction_pool_params::*; /// Wrapper type of `String` that holds an unsigned integer of arbitrary size, formatted as a decimal. 
-#[derive(Debug)] -pub struct BlockNumber(String); +#[derive(Debug, Clone)] +pub struct GenericNumber(String); -impl FromStr for BlockNumber { +impl FromStr for GenericNumber { type Err = String; fn from_str(block_number: &str) -> Result { @@ -57,15 +60,15 @@ impl FromStr for BlockNumber { } } -impl BlockNumber { +impl GenericNumber { /// Wrapper on top of `std::str::parse` but with `Error` as a `String` /// /// See `https://doc.rust-lang.org/std/primitive.str.html#method.parse` for more elaborate /// documentation. pub fn parse(&self) -> Result - where - N: FromStr, - N::Err: std::fmt::Debug, + where + N: FromStr, + N::Err: std::fmt::Debug, { FromStr::from_str(&self.0).map_err(|e| format!("Failed to parse block number: {:?}", e)) } @@ -89,7 +92,7 @@ impl FromStr for BlockNumberOrHash { Ok(Self(block_number.into())) } } else { - BlockNumber::from_str(block_number).map(|v| Self(v.0)) + GenericNumber::from_str(block_number).map(|v| Self(v.0)) } } } @@ -109,11 +112,55 @@ impl BlockNumberOrHash { .map_err(|e| format!("Failed to parse block hash: {:?}", e))? )) } else { - BlockNumber(self.0.clone()).parse().map(BlockId::Number) + GenericNumber(self.0.clone()).parse().map(BlockId::Number) } } } + +/// Optional flag for specifying crypto algorithm +#[derive(Debug, StructOpt)] +pub struct CryptoSchemeFlag { + /// cryptography scheme + #[structopt( + long, + value_name = "SCHEME", + possible_values = &CryptoScheme::variants(), + case_insensitive = true, + default_value = "Sr25519" + )] + pub scheme: CryptoScheme, +} + +/// Optional flag for specifying output type +#[derive(Debug, StructOpt)] +pub struct OutputTypeFlag { + /// output format + #[structopt( + long, + value_name = "FORMAT", + possible_values = &OutputType::variants(), + case_insensitive = true, + default_value = "Text" + )] + pub output_type: OutputType, +} + +/// Optional flag for specifying network scheme +#[derive(Debug, StructOpt)] +pub struct NetworkSchemeFlag { + /// network address format + #[structopt( + long, + value_name = "NETWORK", + possible_values = &Ss58AddressFormat::all_names()[..], + parse(try_from_str = Ss58AddressFormat::try_from), + case_insensitive = true, + default_value = "polkadot" + )] + pub network: Ss58AddressFormat, +} + #[cfg(test)] mod tests { use super::*; diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index c6b43677f2e..f65ed6b99a6 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -177,7 +177,6 @@ use sp_runtime::{ }, }; use frame_system::{self as system, ensure_signed, ensure_root}; - pub use self::imbalances::{PositiveImbalance, NegativeImbalance}; pub trait WeightInfo { diff --git a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr index 46f138fccd5..851d2b8a4b6 100644 --- a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr +++ b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr @@ -19,7 +19,7 @@ error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for tr 14 | | } 15 | | } | |_- type in trait -16 | +16 | 17 | sp_api::impl_runtime_apis! 
{ | ^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `u64`, found struct `std::string::String` | diff --git a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr index cc2a5f05cd5..c3e48500360 100644 --- a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr +++ b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr @@ -19,7 +19,7 @@ error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for tr 14 | | } 15 | | } | |_- type in trait -16 | +16 | 17 | sp_api::impl_runtime_apis! { | ^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `u64`, found `&u64` | diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index efacf0b2e76..d6f0850d9ed 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -317,8 +317,7 @@ lazy_static::lazy_static! { macro_rules! ss58_address_format { ( $( $identifier:tt => ($number:expr, $name:expr, $desc:tt) )* ) => ( /// A known address (sub)format/network ID for SS58. - #[derive(Copy, Clone, PartialEq, Eq)] - #[cfg_attr(feature = "std", derive(Debug))] + #[derive(Copy, Clone, PartialEq, Eq, crate::RuntimeDebug)] pub enum Ss58AddressFormat { $(#[doc = $desc] $identifier),*, /// Use a manually provided numeric value. @@ -337,6 +336,12 @@ macro_rules! ss58_address_format { ]; impl Ss58AddressFormat { + /// names of all address formats + pub fn all_names() -> &'static [&'static str] { + &[ + $($name),*, + ] + } /// All known address formats. pub fn all() -> &'static [Ss58AddressFormat] { &ALL_SS58_ADDRESS_FORMATS @@ -380,17 +385,29 @@ macro_rules! ss58_address_format { } } + /// Error encountered while parsing `Ss58AddressFormat` from &'_ str + /// unit struct for now. 
+ #[derive(Copy, Clone, PartialEq, Eq, crate::RuntimeDebug)] + pub struct ParseError; + impl<'a> TryFrom<&'a str> for Ss58AddressFormat { - type Error = (); + type Error = ParseError; - fn try_from(x: &'a str) -> Result { + fn try_from(x: &'a str) -> Result { match x { $($name => Ok(Ss58AddressFormat::$identifier)),*, - a => a.parse::().map_err(|_| ()).and_then(TryFrom::try_from), + a => a.parse::().map(Ss58AddressFormat::Custom).map_err(|_| ParseError), } } } + #[cfg(feature = "std")] + impl std::fmt::Display for ParseError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "failed to parse network value as u8") + } + } + #[cfg(feature = "std")] impl Default for Ss58AddressFormat { fn default() -> Self { diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index c8a289639d4..2a40972166e 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -50,6 +50,7 @@ pub use impl_serde::serialize as bytes; #[cfg(feature = "full_crypto")] pub mod hashing; + #[cfg(feature = "full_crypto")] pub use hashing::{blake2_128, blake2_256, twox_64, twox_128, twox_256, keccak_256}; pub mod hexdisplay; diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 5d65c13c664..eb8bbb38a6f 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -159,7 +159,7 @@ impl BuildStorage for () { fn assimilate_storage( &self, _: &mut sp_core::storage::Storage, - )-> Result<(), String> { + ) -> Result<(), String> { Err("`assimilate_storage` not implemented for `()`".into()) } } diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index fedbff5a109..f5e30de838a 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -59,6 +59,7 @@ use cfg_if::cfg_if; // Ensure Babe and Aura use the same crypto to simplify things a bit. pub use sp_consensus_babe::{AuthorityId, SlotNumber, AllowedSlots}; + pub type AuraId = sp_consensus_aura::sr25519::AuthorityId; // Include the WASM binary diff --git a/utils/frame/frame-utilities-cli/Cargo.toml b/utils/frame/frame-utilities-cli/Cargo.toml new file mode 100644 index 00000000000..913297819c0 --- /dev/null +++ b/utils/frame/frame-utilities-cli/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "substrate-frame-cli" +version = "2.0.0-rc5" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "cli interface for FRAME" +documentation = "https://docs.rs/substrate-frame-cli" + +[dependencies] +sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } +sc-cli = { version = "0.8.0-rc5", path = "../../../client/cli" } +sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } +structopt = "0.3.8" +frame-system = { version = "2.0.0-rc5", path = "../../../frame/system" } + +[dev-dependencies] + +[features] +default = [] diff --git a/utils/frame/frame-utilities-cli/src/lib.rs b/utils/frame/frame-utilities-cli/src/lib.rs new file mode 100644 index 00000000000..872cfc99a63 --- /dev/null +++ b/utils/frame/frame-utilities-cli/src/lib.rs @@ -0,0 +1,23 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! frame-system CLI utilities + +mod module_id; + +pub use module_id::ModuleIdCmd; + diff --git a/utils/frame/frame-utilities-cli/src/module_id.rs b/utils/frame/frame-utilities-cli/src/module_id.rs new file mode 100644 index 00000000000..3739d668e3d --- /dev/null +++ b/utils/frame/frame-utilities-cli/src/module_id.rs @@ -0,0 +1,96 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation of the `moduleid` subcommand + +use sc_cli::{ + Error, utils::print_from_uri, CryptoSchemeFlag, + OutputTypeFlag, KeystoreParams, with_crypto_scheme, +}; +use sp_runtime::ModuleId; +use sp_runtime::traits::AccountIdConversion; +use sp_core::crypto::{Ss58Codec, Ss58AddressFormat}; +use std::convert::{TryInto, TryFrom}; +use structopt::StructOpt; + +/// The `moduleid` command +#[derive(Debug, StructOpt)] +#[structopt( + name = "moduleid", + about = "Inspect a module ID address" +)] +pub struct ModuleIdCmd { + /// The module ID used to derive the account + id: String, + + /// network address format + #[structopt( + long, + value_name = "NETWORK", + possible_values = &Ss58AddressFormat::all_names()[..], + parse(try_from_str = Ss58AddressFormat::try_from), + case_insensitive = true, + default_value = "substrate" + )] + pub network: Ss58AddressFormat, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub output_scheme: OutputTypeFlag, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub crypto_scheme: CryptoSchemeFlag, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub keystore_params: KeystoreParams, +} + +impl ModuleIdCmd { + /// runs the command + pub fn run(&self) -> Result<(), Error> + where + R: frame_system::Trait, + R::AccountId: Ss58Codec, + { + if self.id.len() != 8 { + Err("a module id must be a string of 8 characters")? 
+ } + let password = self.keystore_params.read_password()?; + + let id_fixed_array: [u8; 8] = self.id.as_bytes() + .try_into() + .map_err(|_| "Cannot convert argument to moduleid: argument should be 8-character string")?; + + let account_id: R::AccountId = ModuleId(id_fixed_array).into_account(); + let network = self.network; + + with_crypto_scheme!( + self.crypto_scheme.scheme, + print_from_uri( + &account_id.to_ss58check_with_version(network), + password, + network, + self.output_scheme.output_type.clone() + ) + ); + + Ok(()) + } +} + -- GitLab From 9c679b37c129bdbe7e5be05ae585697041ad4a4c Mon Sep 17 00:00:00 2001 From: Cecile Tonglet Date: Thu, 20 Aug 2020 16:38:00 +0200 Subject: [PATCH 112/132] Clean shutdown for subcommands (#6909) --- client/cli/src/runner.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/client/cli/src/runner.rs b/client/cli/src/runner.rs index bdbf55eb832..f2558b1bb60 100644 --- a/client/cli/src/runner.rs +++ b/client/cli/src/runner.rs @@ -96,7 +96,7 @@ pub fn build_runtime() -> std::result::Result( mut tokio_runtime: tokio::runtime::Runtime, future: FUT, - mut task_manager: TaskManager, + task_manager: TaskManager, ) -> Result<()> where FUT: Future> + future::Future, @@ -106,9 +106,7 @@ where pin_mut!(f); tokio_runtime.block_on(main(f)).map_err(|e| e.to_string())?; - - task_manager.terminate(); - drop(tokio_runtime); + tokio_runtime.block_on(task_manager.clean_shutdown()); Ok(()) } -- GitLab From be8bb186d87b9d2b47a2907c9b51ae1e252362c3 Mon Sep 17 00:00:00 2001 From: Benjamin Kampmann Date: Thu, 20 Aug 2020 17:04:42 +0200 Subject: [PATCH 113/132] prepping for releasing rc6 (#6922) * Bump version * update test-utils crates to be ready for publishing * adding changelog * Adding automaticly generated READMEs * fixing versions * another version mishap --- Cargo.lock | 370 +++++++++--------- bin/node-template/node/Cargo.toml | 52 +-- bin/node-template/pallets/template/Cargo.toml | 12 +- bin/node-template/pallets/template/README.md | 1 + bin/node-template/runtime/Cargo.toml | 50 +-- bin/node/bench/Cargo.toml | 34 +- bin/node/browser-testing/Cargo.toml | 6 +- bin/node/cli/Cargo.toml | 122 +++--- bin/node/executor/Cargo.toml | 50 +-- bin/node/inspect/Cargo.toml | 14 +- bin/node/primitives/Cargo.toml | 12 +- bin/node/rpc-client/Cargo.toml | 6 +- bin/node/rpc/Cargo.toml | 42 +- bin/node/runtime/Cargo.toml | 116 +++--- bin/node/testing/Cargo.toml | 68 ++-- bin/utils/chain-spec-builder/Cargo.toml | 10 +- bin/utils/chain-spec-builder/README.md | 1 + bin/utils/subkey/Cargo.toml | 14 +- bin/utils/subkey/README.md | 1 + client/api/Cargo.toml | 44 +-- client/api/README.md | 3 + client/authority-discovery/Cargo.toml | 24 +- client/authority-discovery/README.md | 9 + client/basic-authorship/Cargo.toml | 30 +- client/basic-authorship/README.md | 32 ++ client/block-builder/Cargo.toml | 22 +- client/block-builder/README.md | 9 + client/chain-spec/Cargo.toml | 14 +- client/chain-spec/README.md | 92 +++++ client/chain-spec/derive/Cargo.toml | 2 +- client/cli/Cargo.toml | 34 +- client/cli/README.md | 3 + client/consensus/aura/Cargo.toml | 50 +-- client/consensus/aura/README.md | 15 + client/consensus/babe/Cargo.toml | 60 +-- client/consensus/babe/README.md | 48 +++ client/consensus/babe/rpc/Cargo.toml | 30 +- client/consensus/babe/rpc/README.md | 3 + client/consensus/common/Cargo.toml | 10 +- client/consensus/common/README.md | 3 + client/consensus/epochs/Cargo.toml | 10 +- client/consensus/epochs/README.md | 3 + client/consensus/manual-seal/Cargo.toml | 26 +- 
client/consensus/manual-seal/README.md | 4 + client/consensus/pow/Cargo.toml | 24 +- client/consensus/pow/README.md | 16 + client/consensus/slots/Cargo.toml | 26 +- client/consensus/slots/README.md | 7 + client/consensus/uncles/Cargo.toml | 14 +- client/consensus/uncles/README.md | 3 + client/db/Cargo.toml | 30 +- client/db/README.md | 11 + client/executor/Cargo.toml | 40 +- client/executor/README.md | 13 + client/executor/common/Cargo.toml | 12 +- client/executor/common/README.md | 3 + client/executor/runtime-test/Cargo.toml | 14 +- client/executor/wasmi/Cargo.toml | 12 +- client/executor/wasmi/README.md | 3 + client/executor/wasmtime/Cargo.toml | 12 +- client/executor/wasmtime/README.md | 1 + client/finality-grandpa/Cargo.toml | 56 +-- client/finality-grandpa/README.md | 39 ++ client/finality-grandpa/rpc/Cargo.toml | 26 +- client/finality-grandpa/rpc/README.md | 3 + client/informant/Cargo.toml | 10 +- client/informant/README.md | 3 + client/keystore/Cargo.toml | 6 +- client/keystore/README.md | 3 + client/light/Cargo.toml | 2 +- client/light/README.md | 3 + client/network-gossip/Cargo.toml | 8 +- client/network-gossip/README.md | 41 ++ client/network/Cargo.toml | 32 +- client/network/README.md | 226 +++++++++++ client/network/test/Cargo.toml | 26 +- client/offchain/Cargo.toml | 26 +- client/offchain/README.md | 18 + client/peerset/Cargo.toml | 4 +- client/peerset/README.md | 4 + client/proposer-metrics/Cargo.toml | 4 +- client/proposer-metrics/README.md | 3 + client/rpc-api/Cargo.toml | 14 +- client/rpc-api/README.md | 5 + client/rpc-servers/Cargo.toml | 4 +- client/rpc-servers/README.md | 3 + client/rpc/Cargo.toml | 44 +-- client/rpc/README.md | 5 + client/service/Cargo.toml | 72 ++-- client/service/README.md | 4 + client/service/test/Cargo.toml | 42 +- client/state-db/Cargo.toml | 6 +- client/state-db/README.md | 16 + client/telemetry/Cargo.toml | 2 +- client/telemetry/README.md | 45 +++ client/tracing/Cargo.toml | 4 +- client/tracing/README.md | 11 + client/transaction-pool/Cargo.toml | 32 +- client/transaction-pool/README.md | 3 + client/transaction-pool/graph/Cargo.toml | 14 +- client/transaction-pool/graph/README.md | 8 + docs/CHANGELOG.md | 37 ++ frame/assets/Cargo.toml | 14 +- frame/assets/README.md | 116 ++++++ frame/atomic-swap/Cargo.toml | 16 +- frame/atomic-swap/README.md | 23 ++ frame/aura/Cargo.toml | 26 +- frame/aura/README.md | 28 ++ frame/authority-discovery/Cargo.toml | 22 +- frame/authority-discovery/README.md | 6 + frame/authorship/Cargo.toml | 18 +- frame/authorship/README.md | 5 + frame/babe/Cargo.toml | 46 +-- frame/babe/README.md | 4 + frame/balances/Cargo.toml | 18 +- frame/balances/README.md | 122 ++++++ frame/benchmark/Cargo.toml | 14 +- frame/benchmark/README.md | 5 + frame/benchmarking/Cargo.toml | 18 +- frame/benchmarking/README.md | 3 + frame/collective/Cargo.toml | 18 +- frame/collective/README.md | 22 ++ frame/contracts/Cargo.toml | 26 +- frame/contracts/README.md | 64 +++ frame/contracts/common/Cargo.toml | 6 +- frame/contracts/common/README.md | 3 + frame/contracts/rpc/Cargo.toml | 16 +- frame/contracts/rpc/README.md | 3 + frame/contracts/rpc/runtime-api/Cargo.toml | 10 +- frame/contracts/rpc/runtime-api/README.md | 7 + frame/democracy/Cargo.toml | 24 +- frame/democracy/README.md | 135 +++++++ frame/elections-phragmen/Cargo.toml | 22 +- frame/elections-phragmen/README.md | 67 ++++ frame/elections/Cargo.toml | 16 +- frame/elections/README.md | 7 + frame/evm/Cargo.toml | 18 +- frame/evm/README.md | 3 + frame/example-offchain-worker/Cargo.toml | 14 +- 
frame/example-offchain-worker/README.md | 26 ++ frame/example/Cargo.toml | 18 +- frame/example/README.md | 237 +++++++++++ frame/executive/Cargo.toml | 26 +- frame/executive/README.md | 61 +++ frame/finality-tracker/Cargo.toml | 18 +- frame/finality-tracker/README.md | 3 + frame/generic-asset/Cargo.toml | 14 +- frame/generic-asset/README.md | 131 +++++++ frame/grandpa/Cargo.toml | 44 +-- frame/grandpa/README.md | 12 + frame/identity/Cargo.toml | 18 +- frame/identity/README.md | 56 +++ frame/im-online/Cargo.toml | 24 +- frame/im-online/README.md | 51 +++ frame/indices/Cargo.toml | 20 +- frame/indices/README.md | 4 + frame/membership/Cargo.toml | 14 +- frame/membership/README.md | 6 + frame/metadata/Cargo.toml | 6 +- frame/metadata/README.md | 7 + frame/multisig/Cargo.toml | 20 +- frame/multisig/README.md | 29 ++ frame/nicks/Cargo.toml | 16 +- frame/nicks/README.md | 23 ++ frame/offences/Cargo.toml | 18 +- frame/offences/README.md | 5 + frame/offences/benchmarking/Cargo.toml | 36 +- frame/offences/benchmarking/README.md | 3 + frame/proxy/Cargo.toml | 22 +- frame/proxy/README.md | 17 + frame/randomness-collective-flip/Cargo.toml | 14 +- frame/randomness-collective-flip/README.md | 38 ++ frame/recovery/Cargo.toml | 16 +- frame/recovery/README.md | 134 +++++++ frame/scheduler/Cargo.toml | 18 +- frame/scheduler/README.md | 34 ++ frame/scored-pool/Cargo.toml | 16 +- frame/scored-pool/README.md | 66 ++++ frame/session/Cargo.toml | 24 +- frame/session/README.md | 83 ++++ frame/session/benchmarking/Cargo.toml | 28 +- frame/session/benchmarking/README.md | 3 + frame/society/Cargo.toml | 16 +- frame/society/README.md | 228 +++++++++++ frame/staking/Cargo.toml | 38 +- frame/staking/README.md | 249 ++++++++++++ frame/staking/fuzzer/Cargo.toml | 26 +- frame/staking/reward-curve/Cargo.toml | 4 +- frame/sudo/Cargo.toml | 14 +- frame/sudo/README.md | 70 ++++ frame/support/Cargo.toml | 24 +- frame/support/README.md | 3 + frame/support/procedural/Cargo.toml | 4 +- frame/support/procedural/tools/Cargo.toml | 4 +- .../procedural/tools/derive/Cargo.toml | 2 +- frame/support/test/Cargo.toml | 16 +- frame/system/Cargo.toml | 18 +- frame/system/README.md | 75 ++++ frame/system/benchmarking/Cargo.toml | 16 +- frame/system/benchmarking/README.md | 1 + frame/system/rpc/runtime-api/Cargo.toml | 4 +- frame/system/rpc/runtime-api/README.md | 7 + frame/timestamp/Cargo.toml | 22 +- frame/timestamp/README.md | 74 ++++ frame/transaction-payment/Cargo.toml | 20 +- frame/transaction-payment/README.md | 16 + frame/transaction-payment/rpc/Cargo.toml | 14 +- frame/transaction-payment/rpc/README.md | 3 + .../rpc/runtime-api/Cargo.toml | 10 +- .../rpc/runtime-api/README.md | 3 + frame/treasury/Cargo.toml | 20 +- frame/treasury/README.md | 72 ++++ frame/utility/Cargo.toml | 20 +- frame/utility/README.md | 38 ++ frame/vesting/Cargo.toml | 20 +- frame/vesting/README.md | 31 ++ primitives/allocator/Cargo.toml | 8 +- primitives/allocator/README.md | 6 + primitives/api/Cargo.toml | 16 +- primitives/api/README.md | 17 + primitives/api/proc-macro/Cargo.toml | 2 +- primitives/api/test/Cargo.toml | 22 +- primitives/application-crypto/Cargo.toml | 8 +- primitives/application-crypto/README.md | 3 + primitives/application-crypto/test/Cargo.toml | 12 +- primitives/arithmetic/Cargo.toml | 6 +- primitives/arithmetic/README.md | 3 + primitives/arithmetic/fuzzer/Cargo.toml | 4 +- primitives/authority-discovery/Cargo.toml | 10 +- primitives/authority-discovery/README.md | 3 + primitives/authorship/Cargo.toml | 8 +- primitives/authorship/README.md | 3 
+ primitives/block-builder/Cargo.toml | 10 +- primitives/block-builder/README.md | 3 + primitives/blockchain/Cargo.toml | 12 +- primitives/blockchain/README.md | 3 + primitives/chain-spec/Cargo.toml | 2 +- primitives/chain-spec/README.md | 3 + primitives/consensus/aura/Cargo.toml | 14 +- primitives/consensus/aura/README.md | 3 + primitives/consensus/babe/Cargo.toml | 22 +- primitives/consensus/babe/README.md | 3 + primitives/consensus/common/Cargo.toml | 24 +- primitives/consensus/common/README.md | 7 + primitives/consensus/pow/Cargo.toml | 10 +- primitives/consensus/pow/README.md | 3 + primitives/consensus/slots/Cargo.toml | 2 +- primitives/consensus/slots/README.md | 3 + primitives/consensus/vrf/Cargo.toml | 8 +- primitives/consensus/vrf/README.md | 3 + primitives/core/Cargo.toml | 14 +- primitives/database/Cargo.toml | 2 +- primitives/database/README.md | 3 + primitives/debug-derive/Cargo.toml | 2 +- primitives/externalities/Cargo.toml | 6 +- primitives/externalities/README.md | 9 + primitives/finality-grandpa/Cargo.toml | 12 +- primitives/finality-grandpa/README.md | 3 + primitives/finality-tracker/Cargo.toml | 6 +- primitives/finality-tracker/README.md | 3 + primitives/inherents/Cargo.toml | 6 +- primitives/inherents/README.md | 17 + primitives/io/Cargo.toml | 18 +- primitives/io/README.md | 3 + primitives/keyring/Cargo.toml | 6 +- primitives/keyring/README.md | 3 + primitives/npos-elections/Cargo.toml | 12 +- primitives/npos-elections/README.md | 12 + primitives/npos-elections/compact/Cargo.toml | 2 +- primitives/npos-elections/fuzzer/Cargo.toml | 6 +- primitives/offchain/Cargo.toml | 10 +- primitives/offchain/README.md | 3 + primitives/panic-handler/Cargo.toml | 2 +- primitives/panic-handler/README.md | 10 + primitives/rpc/Cargo.toml | 4 +- primitives/rpc/README.md | 3 + primitives/runtime-interface/Cargo.toml | 22 +- primitives/runtime-interface/README.md | 88 +++++ .../runtime-interface/proc-macro/Cargo.toml | 2 +- .../test-wasm-deprecated/Cargo.toml | 10 +- .../runtime-interface/test-wasm/Cargo.toml | 10 +- primitives/runtime-interface/test/Cargo.toml | 18 +- primitives/runtime/Cargo.toml | 16 +- primitives/runtime/README.md | 3 + primitives/sandbox/Cargo.toml | 10 +- primitives/sandbox/README.md | 21 + primitives/serializer/Cargo.toml | 2 +- primitives/serializer/README.md | 6 + primitives/session/Cargo.toml | 12 +- primitives/session/README.md | 3 + primitives/staking/Cargo.toml | 6 +- primitives/staking/README.md | 4 + primitives/state-machine/Cargo.toml | 12 +- primitives/state-machine/README.md | 3 + primitives/std/Cargo.toml | 2 +- primitives/std/README.md | 4 + primitives/storage/Cargo.toml | 6 +- primitives/storage/README.md | 3 + primitives/test-primitives/Cargo.toml | 8 +- primitives/timestamp/Cargo.toml | 10 +- primitives/timestamp/README.md | 3 + primitives/tracing/Cargo.toml | 2 +- primitives/tracing/README.md | 15 + primitives/transaction-pool/Cargo.toml | 8 +- primitives/transaction-pool/README.md | 3 + primitives/trie/Cargo.toml | 8 +- primitives/trie/README.md | 3 + primitives/utils/Cargo.toml | 2 +- primitives/utils/README.md | 3 + primitives/version/Cargo.toml | 6 +- primitives/version/README.md | 3 + primitives/wasm-interface/Cargo.toml | 4 +- primitives/wasm-interface/README.md | 3 + test-utils/Cargo.toml | 7 +- test-utils/client/Cargo.toml | 26 +- test-utils/derive/Cargo.toml | 3 +- test-utils/runtime/Cargo.toml | 60 +-- test-utils/runtime/client/Cargo.toml | 26 +- .../runtime/transaction-pool/Cargo.toml | 12 +- test-utils/test-crate/Cargo.toml | 5 +- 
utils/browser/Cargo.toml | 12 +- utils/browser/README.md | 1 + utils/build-script-utils/Cargo.toml | 2 +- utils/build-script-utils/README.md | 3 + utils/fork-tree/Cargo.toml | 2 +- utils/fork-tree/README.md | 4 + utils/frame/benchmarking-cli/Cargo.toml | 20 +- utils/frame/benchmarking-cli/README.md | 1 + utils/frame/frame-utilities-cli/Cargo.toml | 10 +- utils/frame/frame-utilities-cli/README.md | 3 + utils/frame/rpc/support/Cargo.toml | 10 +- utils/frame/rpc/support/README.md | 4 + utils/frame/rpc/system/Cargo.toml | 24 +- utils/frame/rpc/system/README.md | 3 + utils/prometheus/Cargo.toml | 2 +- 335 files changed, 5490 insertions(+), 1879 deletions(-) create mode 100644 bin/node-template/pallets/template/README.md create mode 100644 bin/utils/chain-spec-builder/README.md create mode 100644 bin/utils/subkey/README.md create mode 100644 client/api/README.md create mode 100644 client/authority-discovery/README.md create mode 100644 client/basic-authorship/README.md create mode 100644 client/block-builder/README.md create mode 100644 client/chain-spec/README.md create mode 100644 client/cli/README.md create mode 100644 client/consensus/aura/README.md create mode 100644 client/consensus/babe/README.md create mode 100644 client/consensus/babe/rpc/README.md create mode 100644 client/consensus/common/README.md create mode 100644 client/consensus/epochs/README.md create mode 100644 client/consensus/manual-seal/README.md create mode 100644 client/consensus/pow/README.md create mode 100644 client/consensus/slots/README.md create mode 100644 client/consensus/uncles/README.md create mode 100644 client/db/README.md create mode 100644 client/executor/README.md create mode 100644 client/executor/common/README.md create mode 100644 client/executor/wasmi/README.md create mode 100644 client/executor/wasmtime/README.md create mode 100644 client/finality-grandpa/README.md create mode 100644 client/finality-grandpa/rpc/README.md create mode 100644 client/informant/README.md create mode 100644 client/keystore/README.md create mode 100644 client/light/README.md create mode 100644 client/network-gossip/README.md create mode 100644 client/network/README.md create mode 100644 client/offchain/README.md create mode 100644 client/peerset/README.md create mode 100644 client/proposer-metrics/README.md create mode 100644 client/rpc-api/README.md create mode 100644 client/rpc-servers/README.md create mode 100644 client/rpc/README.md create mode 100644 client/service/README.md create mode 100644 client/state-db/README.md create mode 100644 client/telemetry/README.md create mode 100644 client/tracing/README.md create mode 100644 client/transaction-pool/README.md create mode 100644 client/transaction-pool/graph/README.md create mode 100644 frame/assets/README.md create mode 100644 frame/atomic-swap/README.md create mode 100644 frame/aura/README.md create mode 100644 frame/authority-discovery/README.md create mode 100644 frame/authorship/README.md create mode 100644 frame/babe/README.md create mode 100644 frame/balances/README.md create mode 100644 frame/benchmark/README.md create mode 100644 frame/benchmarking/README.md create mode 100644 frame/collective/README.md create mode 100644 frame/contracts/README.md create mode 100644 frame/contracts/common/README.md create mode 100644 frame/contracts/rpc/README.md create mode 100644 frame/contracts/rpc/runtime-api/README.md create mode 100644 frame/democracy/README.md create mode 100644 frame/elections-phragmen/README.md create mode 100644 frame/elections/README.md create mode 
100644 frame/evm/README.md create mode 100644 frame/example-offchain-worker/README.md create mode 100644 frame/example/README.md create mode 100644 frame/executive/README.md create mode 100644 frame/finality-tracker/README.md create mode 100644 frame/generic-asset/README.md create mode 100644 frame/grandpa/README.md create mode 100644 frame/identity/README.md create mode 100644 frame/im-online/README.md create mode 100644 frame/indices/README.md create mode 100644 frame/membership/README.md create mode 100644 frame/metadata/README.md create mode 100644 frame/multisig/README.md create mode 100644 frame/nicks/README.md create mode 100644 frame/offences/README.md create mode 100644 frame/offences/benchmarking/README.md create mode 100644 frame/proxy/README.md create mode 100644 frame/randomness-collective-flip/README.md create mode 100644 frame/recovery/README.md create mode 100644 frame/scheduler/README.md create mode 100644 frame/scored-pool/README.md create mode 100644 frame/session/README.md create mode 100644 frame/session/benchmarking/README.md create mode 100644 frame/society/README.md create mode 100644 frame/staking/README.md create mode 100644 frame/sudo/README.md create mode 100644 frame/support/README.md create mode 100644 frame/system/README.md create mode 100644 frame/system/benchmarking/README.md create mode 100644 frame/system/rpc/runtime-api/README.md create mode 100644 frame/timestamp/README.md create mode 100644 frame/transaction-payment/README.md create mode 100644 frame/transaction-payment/rpc/README.md create mode 100644 frame/transaction-payment/rpc/runtime-api/README.md create mode 100644 frame/treasury/README.md create mode 100644 frame/utility/README.md create mode 100644 frame/vesting/README.md create mode 100644 primitives/allocator/README.md create mode 100644 primitives/api/README.md create mode 100644 primitives/application-crypto/README.md create mode 100644 primitives/arithmetic/README.md create mode 100644 primitives/authority-discovery/README.md create mode 100644 primitives/authorship/README.md create mode 100644 primitives/block-builder/README.md create mode 100644 primitives/blockchain/README.md create mode 100644 primitives/chain-spec/README.md create mode 100644 primitives/consensus/aura/README.md create mode 100644 primitives/consensus/babe/README.md create mode 100644 primitives/consensus/common/README.md create mode 100644 primitives/consensus/pow/README.md create mode 100644 primitives/consensus/slots/README.md create mode 100644 primitives/consensus/vrf/README.md create mode 100644 primitives/database/README.md create mode 100644 primitives/externalities/README.md create mode 100644 primitives/finality-grandpa/README.md create mode 100644 primitives/finality-tracker/README.md create mode 100644 primitives/inherents/README.md create mode 100644 primitives/io/README.md create mode 100644 primitives/keyring/README.md create mode 100644 primitives/npos-elections/README.md create mode 100644 primitives/offchain/README.md create mode 100644 primitives/panic-handler/README.md create mode 100644 primitives/rpc/README.md create mode 100644 primitives/runtime-interface/README.md create mode 100644 primitives/runtime/README.md create mode 100644 primitives/sandbox/README.md create mode 100644 primitives/serializer/README.md create mode 100644 primitives/session/README.md create mode 100644 primitives/staking/README.md create mode 100644 primitives/state-machine/README.md create mode 100644 primitives/std/README.md create mode 100644 
primitives/storage/README.md create mode 100644 primitives/timestamp/README.md create mode 100644 primitives/tracing/README.md create mode 100644 primitives/transaction-pool/README.md create mode 100644 primitives/trie/README.md create mode 100644 primitives/utils/README.md create mode 100644 primitives/version/README.md create mode 100644 primitives/wasm-interface/README.md create mode 100644 utils/browser/README.md create mode 100644 utils/build-script-utils/README.md create mode 100644 utils/fork-tree/README.md create mode 100644 utils/frame/benchmarking-cli/README.md create mode 100644 utils/frame/frame-utilities-cli/README.md create mode 100644 utils/frame/rpc/support/README.md create mode 100644 utils/frame/rpc/system/README.md diff --git a/Cargo.lock b/Cargo.lock index 1b0291023e8..70b3581bb90 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -717,7 +717,7 @@ dependencies = [ [[package]] name = "chain-spec-builder" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "ansi_term 0.12.1", "node-cli", @@ -1596,14 +1596,14 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "parity-scale-codec", ] [[package]] name = "frame-benchmarking" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support", "frame-system", @@ -1621,7 +1621,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-benchmarking", "parity-scale-codec", @@ -1638,7 +1638,7 @@ dependencies = [ [[package]] name = "frame-executive" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support", "frame-system", @@ -1658,7 +1658,7 @@ dependencies = [ [[package]] name = "frame-metadata" -version = "11.0.0-rc5" +version = "11.0.0-rc6" dependencies = [ "parity-scale-codec", "serde", @@ -1668,7 +1668,7 @@ dependencies = [ [[package]] name = "frame-support" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "bitmask", "frame-metadata", @@ -1695,7 +1695,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support-procedural-tools", "proc-macro2", @@ -1705,7 +1705,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", @@ -1716,7 +1716,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "proc-macro2", "quote", @@ -1725,7 +1725,7 @@ dependencies = [ [[package]] name = "frame-support-test" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support", "parity-scale-codec", @@ -1743,7 +1743,7 @@ dependencies = [ [[package]] name = "frame-system" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "criterion 0.2.11", "frame-support", @@ -1761,7 +1761,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-benchmarking", "frame-support", @@ -1776,7 +1776,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "parity-scale-codec", "sp-api", @@ -3769,7 +3769,7 @@ dependencies = [ [[package]] name = "node-bench" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "derive_more", 
"fs_extra", @@ -3807,7 +3807,7 @@ dependencies = [ [[package]] name = "node-browser-testing" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "futures 0.3.5", "futures-timer 3.0.2", @@ -3824,7 +3824,7 @@ dependencies = [ [[package]] name = "node-cli" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "assert_cmd", "frame-benchmarking-cli", @@ -3901,7 +3901,7 @@ dependencies = [ [[package]] name = "node-executor" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "criterion 0.3.3", "frame-benchmarking", @@ -3935,7 +3935,7 @@ dependencies = [ [[package]] name = "node-inspect" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "derive_more", "log", @@ -3951,7 +3951,7 @@ dependencies = [ [[package]] name = "node-primitives" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-system", "parity-scale-codec", @@ -3964,7 +3964,7 @@ dependencies = [ [[package]] name = "node-rpc" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "jsonrpc-core", "jsonrpc-pubsub", @@ -3992,7 +3992,7 @@ dependencies = [ [[package]] name = "node-rpc-client" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "env_logger", "futures 0.1.29", @@ -4005,7 +4005,7 @@ dependencies = [ [[package]] name = "node-runtime" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-benchmarking", "frame-executive", @@ -4074,7 +4074,7 @@ dependencies = [ [[package]] name = "node-template" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "jsonrpc-core", "node-template-runtime", @@ -4107,7 +4107,7 @@ dependencies = [ [[package]] name = "node-template-runtime" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-executive", "frame-support", @@ -4140,7 +4140,7 @@ dependencies = [ [[package]] name = "node-testing" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "criterion 0.3.3", "frame-support", @@ -4348,7 +4348,7 @@ dependencies = [ [[package]] name = "pallet-assets" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support", "frame-system", @@ -4362,7 +4362,7 @@ dependencies = [ [[package]] name = "pallet-atomic-swap" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support", "frame-system", @@ -4377,7 +4377,7 @@ dependencies = [ [[package]] name = "pallet-aura" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support", "frame-system", @@ -4399,7 +4399,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support", "frame-system", @@ -4417,7 +4417,7 @@ dependencies = [ [[package]] name = "pallet-authorship" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support", "frame-system", @@ -4433,7 +4433,7 @@ dependencies = [ [[package]] name = "pallet-babe" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-benchmarking", "frame-support", @@ -4462,7 +4462,7 @@ dependencies = [ [[package]] name = "pallet-balances" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-benchmarking", "frame-support", @@ -4478,7 +4478,7 @@ dependencies = [ [[package]] name = "pallet-benchmark" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-benchmarking", "frame-support", @@ -4492,7 +4492,7 @@ dependencies = [ [[package]] name = "pallet-collective" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-benchmarking", "frame-support", @@ -4509,7 +4509,7 @@ dependencies = [ 
[[package]] name = "pallet-contracts" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "assert_matches", "bitflags", @@ -4537,7 +4537,7 @@ dependencies = [ [[package]] name = "pallet-contracts-primitives" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "parity-scale-codec", "sp-runtime", @@ -4546,7 +4546,7 @@ dependencies = [ [[package]] name = "pallet-contracts-rpc" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", @@ -4565,7 +4565,7 @@ dependencies = [ [[package]] name = "pallet-contracts-rpc-runtime-api" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "pallet-contracts-primitives", "parity-scale-codec", @@ -4576,7 +4576,7 @@ dependencies = [ [[package]] name = "pallet-democracy" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-benchmarking", "frame-support", @@ -4596,7 +4596,7 @@ dependencies = [ [[package]] name = "pallet-elections" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support", "frame-system", @@ -4612,7 +4612,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-benchmarking", "frame-support", @@ -4631,7 +4631,7 @@ dependencies = [ [[package]] name = "pallet-evm" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "evm", "frame-support", @@ -4653,7 +4653,7 @@ dependencies = [ [[package]] name = "pallet-example" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-benchmarking", "frame-support", @@ -4669,7 +4669,7 @@ dependencies = [ [[package]] name = "pallet-example-offchain-worker" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support", "frame-system", @@ -4684,7 +4684,7 @@ dependencies = [ [[package]] name = "pallet-finality-tracker" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support", "frame-system", @@ -4701,7 +4701,7 @@ dependencies = [ [[package]] name = "pallet-generic-asset" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support", "frame-system", @@ -4715,7 +4715,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "finality-grandpa", "frame-benchmarking", @@ -4744,7 +4744,7 @@ dependencies = [ [[package]] name = "pallet-identity" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "enumflags2", "frame-benchmarking", @@ -4761,7 +4761,7 @@ dependencies = [ [[package]] name = "pallet-im-online" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-benchmarking", "frame-support", @@ -4780,7 +4780,7 @@ dependencies = [ [[package]] name = "pallet-indices" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-benchmarking", "frame-support", @@ -4797,7 +4797,7 @@ dependencies = [ [[package]] name = "pallet-membership" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support", "frame-system", @@ -4811,7 +4811,7 @@ dependencies = [ [[package]] name = "pallet-multisig" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-benchmarking", "frame-support", @@ -4827,7 +4827,7 @@ dependencies = [ [[package]] name = "pallet-nicks" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support", "frame-system", @@ -4842,7 +4842,7 @@ dependencies = [ [[package]] name = "pallet-offences" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support", "frame-system", @@ -4858,7 +4858,7 @@ 
dependencies = [ [[package]] name = "pallet-offences-benchmarking" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-benchmarking", "frame-support", @@ -4883,7 +4883,7 @@ dependencies = [ [[package]] name = "pallet-proxy" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-benchmarking", "frame-support", @@ -4900,7 +4900,7 @@ dependencies = [ [[package]] name = "pallet-randomness-collective-flip" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support", "frame-system", @@ -4914,7 +4914,7 @@ dependencies = [ [[package]] name = "pallet-recovery" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "enumflags2", "frame-support", @@ -4930,7 +4930,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-benchmarking", "frame-support", @@ -4946,7 +4946,7 @@ dependencies = [ [[package]] name = "pallet-scored-pool" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support", "frame-system", @@ -4961,7 +4961,7 @@ dependencies = [ [[package]] name = "pallet-session" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support", "frame-system", @@ -4982,7 +4982,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-benchmarking", "frame-support", @@ -5004,7 +5004,7 @@ dependencies = [ [[package]] name = "pallet-society" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support", "frame-system", @@ -5020,7 +5020,7 @@ dependencies = [ [[package]] name = "pallet-staking" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "env_logger", "frame-benchmarking", @@ -5071,7 +5071,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -5082,7 +5082,7 @@ dependencies = [ [[package]] name = "pallet-sudo" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support", "frame-system", @@ -5096,7 +5096,7 @@ dependencies = [ [[package]] name = "pallet-template" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support", "frame-system", @@ -5108,7 +5108,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-benchmarking", "frame-support", @@ -5126,7 +5126,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support", "frame-system", @@ -5144,7 +5144,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", @@ -5161,7 +5161,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support", "parity-scale-codec", @@ -5174,7 +5174,7 @@ dependencies = [ [[package]] name = "pallet-treasury" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-benchmarking", "frame-support", @@ -5191,7 +5191,7 @@ dependencies = [ [[package]] name = "pallet-utility" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-benchmarking", "frame-support", @@ -5207,7 +5207,7 @@ dependencies = [ [[package]] name = "pallet-vesting" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ 
"enumflags2", "frame-benchmarking", @@ -6379,7 +6379,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "bytes 0.5.6", "derive_more", @@ -6409,7 +6409,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "futures 0.3.5", "futures-timer 3.0.2", @@ -6435,7 +6435,7 @@ dependencies = [ [[package]] name = "sc-block-builder" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -6453,7 +6453,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "impl-trait-for-tuples", "sc-chain-spec-derive", @@ -6468,7 +6468,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -6478,7 +6478,7 @@ dependencies = [ [[package]] name = "sc-cli" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "ansi_term 0.12.1", "atty", @@ -6527,7 +6527,7 @@ dependencies = [ [[package]] name = "sc-client-api" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "derive_more", "fnv", @@ -6565,7 +6565,7 @@ dependencies = [ [[package]] name = "sc-client-db" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "blake2-rfc", "env_logger", @@ -6599,7 +6599,7 @@ dependencies = [ [[package]] name = "sc-consensus" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "sc-client-api", "sp-blockchain", @@ -6609,7 +6609,7 @@ dependencies = [ [[package]] name = "sc-consensus-aura" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "derive_more", "env_logger", @@ -6647,7 +6647,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "derive_more", "env_logger", @@ -6700,7 +6700,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "derive_more", "futures 0.3.5", @@ -6728,7 +6728,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "fork-tree", "parity-scale-codec", @@ -6740,7 +6740,7 @@ dependencies = [ [[package]] name = "sc-consensus-manual-seal" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "assert_matches", "derive_more", @@ -6770,7 +6770,7 @@ dependencies = [ [[package]] name = "sc-consensus-pow" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "derive_more", "futures 0.3.5", @@ -6791,7 +6791,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "futures 0.3.5", "futures-timer 3.0.2", @@ -6814,7 +6814,7 @@ dependencies = [ [[package]] name = "sc-consensus-uncles" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "log", "sc-client-api", @@ -6827,7 +6827,7 @@ dependencies = [ [[package]] name = "sc-executor" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "assert_matches", "derive_more", @@ -6865,7 +6865,7 @@ dependencies = [ [[package]] name = "sc-executor-common" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "derive_more", "log", @@ -6881,7 +6881,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmi" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "log", "parity-scale-codec", @@ -6895,7 +6895,7 @@ dependencies = [ [[package]] name = 
"sc-executor-wasmtime" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "assert_matches", "log", @@ -6913,7 +6913,7 @@ dependencies = [ [[package]] name = "sc-finality-grandpa" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "assert_matches", "derive_more", @@ -6958,7 +6958,7 @@ dependencies = [ [[package]] name = "sc-finality-grandpa-rpc" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "derive_more", "finality-grandpa", @@ -6987,7 +6987,7 @@ dependencies = [ [[package]] name = "sc-informant" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "ansi_term 0.12.1", "futures 0.3.5", @@ -7004,7 +7004,7 @@ dependencies = [ [[package]] name = "sc-keystore" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "derive_more", "hex", @@ -7020,7 +7020,7 @@ dependencies = [ [[package]] name = "sc-light" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "hash-db", "lazy_static", @@ -7038,7 +7038,7 @@ dependencies = [ [[package]] name = "sc-network" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "assert_matches", "async-std", @@ -7098,7 +7098,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "async-std", "futures 0.3.5", @@ -7116,7 +7116,7 @@ dependencies = [ [[package]] name = "sc-network-test" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "env_logger", "futures 0.3.5", @@ -7142,7 +7142,7 @@ dependencies = [ [[package]] name = "sc-offchain" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "bytes 0.5.6", "env_logger", @@ -7175,7 +7175,7 @@ dependencies = [ [[package]] name = "sc-peerset" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "futures 0.3.5", "libp2p 0.23.0", @@ -7188,7 +7188,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -7196,7 +7196,7 @@ dependencies = [ [[package]] name = "sc-rpc" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "assert_matches", "futures 0.1.29", @@ -7235,7 +7235,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "derive_more", "futures 0.3.5", @@ -7258,7 +7258,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "jsonrpc-core", "jsonrpc-http-server", @@ -7273,7 +7273,7 @@ dependencies = [ [[package]] name = "sc-runtime-test" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "sp-allocator", "sp-core", @@ -7286,7 +7286,7 @@ dependencies = [ [[package]] name = "sc-service" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "async-std", "derive_more", @@ -7352,7 +7352,7 @@ dependencies = [ [[package]] name = "sc-service-test" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "env_logger", "fdlimit", @@ -7388,7 +7388,7 @@ dependencies = [ [[package]] name = "sc-state-db" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "env_logger", "log", @@ -7402,7 +7402,7 @@ dependencies = [ [[package]] name = "sc-telemetry" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "futures 0.3.5", "futures-timer 3.0.2", @@ -7422,7 +7422,7 @@ dependencies = [ [[package]] name = "sc-tracing" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "erased-serde", "log", @@ -7439,7 +7439,7 @@ dependencies = [ [[package]] name = "sc-transaction-graph" 
-version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "assert_matches", "criterion 0.3.3", @@ -7463,7 +7463,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "assert_matches", "derive_more", @@ -7909,7 +7909,7 @@ dependencies = [ [[package]] name = "sp-allocator" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "derive_more", "log", @@ -7920,7 +7920,7 @@ dependencies = [ [[package]] name = "sp-api" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "hash-db", "parity-scale-codec", @@ -7935,7 +7935,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "blake2-rfc", "proc-macro-crate", @@ -7946,7 +7946,7 @@ dependencies = [ [[package]] name = "sp-api-test" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "criterion 0.3.3", "parity-scale-codec", @@ -7965,7 +7965,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "parity-scale-codec", "serde", @@ -7976,7 +7976,7 @@ dependencies = [ [[package]] name = "sp-application-crypto-test" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "sp-api", "sp-application-crypto", @@ -7987,7 +7987,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "criterion 0.3.3", "integer-sqrt", @@ -8003,7 +8003,7 @@ dependencies = [ [[package]] name = "sp-arithmetic-fuzzer" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "honggfuzz", "num-bigint", @@ -8014,7 +8014,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "parity-scale-codec", "sp-api", @@ -8025,7 +8025,7 @@ dependencies = [ [[package]] name = "sp-authorship" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "parity-scale-codec", "sp-inherents", @@ -8035,7 +8035,7 @@ dependencies = [ [[package]] name = "sp-block-builder" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "parity-scale-codec", "sp-api", @@ -8046,7 +8046,7 @@ dependencies = [ [[package]] name = "sp-blockchain" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "derive_more", "log", @@ -8062,7 +8062,7 @@ dependencies = [ [[package]] name = "sp-chain-spec" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "serde", "serde_json", @@ -8070,7 +8070,7 @@ dependencies = [ [[package]] name = "sp-consensus" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "derive_more", "futures 0.3.5", @@ -8096,7 +8096,7 @@ dependencies = [ [[package]] name = "sp-consensus-aura" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "parity-scale-codec", "sp-api", @@ -8109,7 +8109,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "merlin", "parity-scale-codec", @@ -8127,7 +8127,7 @@ dependencies = [ [[package]] name = "sp-consensus-pow" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "parity-scale-codec", "sp-api", @@ -8138,7 +8138,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "parity-scale-codec", "sp-runtime", @@ -8146,7 +8146,7 @@ dependencies = [ [[package]] name = "sp-consensus-vrf" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "parity-scale-codec", "schnorrkel", @@ -8157,7 +8157,7 
@@ dependencies = [ [[package]] name = "sp-core" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "base58", "blake2-rfc", @@ -8206,7 +8206,7 @@ dependencies = [ [[package]] name = "sp-database" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "kvdb 0.7.0", "parking_lot 0.10.2", @@ -8214,7 +8214,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "proc-macro2", "quote", @@ -8223,7 +8223,7 @@ dependencies = [ [[package]] name = "sp-externalities" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "environmental", "parity-scale-codec", @@ -8233,7 +8233,7 @@ dependencies = [ [[package]] name = "sp-finality-grandpa" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "finality-grandpa", "log", @@ -8248,7 +8248,7 @@ dependencies = [ [[package]] name = "sp-finality-tracker" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "parity-scale-codec", "sp-inherents", @@ -8257,7 +8257,7 @@ dependencies = [ [[package]] name = "sp-inherents" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "derive_more", "parity-scale-codec", @@ -8268,7 +8268,7 @@ dependencies = [ [[package]] name = "sp-io" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "futures 0.3.5", "hash-db", @@ -8288,7 +8288,7 @@ dependencies = [ [[package]] name = "sp-keyring" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "lazy_static", "sp-core", @@ -8298,7 +8298,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "parity-scale-codec", "rand 0.7.3", @@ -8312,7 +8312,7 @@ dependencies = [ [[package]] name = "sp-npos-elections-compact" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -8333,7 +8333,7 @@ dependencies = [ [[package]] name = "sp-offchain" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "sp-api", "sp-core", @@ -8343,7 +8343,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "backtrace", "log", @@ -8351,7 +8351,7 @@ dependencies = [ [[package]] name = "sp-rpc" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "serde", "serde_json", @@ -8360,7 +8360,7 @@ dependencies = [ [[package]] name = "sp-runtime" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "either", "hash256-std-hasher", @@ -8383,7 +8383,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "parity-scale-codec", "primitive-types", @@ -8404,7 +8404,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "Inflector", "proc-macro-crate", @@ -8415,7 +8415,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-test" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "sc-executor", "sp-core", @@ -8430,7 +8430,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-test-wasm" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "sp-core", "sp-io", @@ -8441,7 +8441,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-test-wasm-deprecated" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "sp-core", "sp-io", @@ -8452,7 +8452,7 @@ dependencies = [ [[package]] name = "sp-sandbox" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "assert_matches", 
"parity-scale-codec", @@ -8466,7 +8466,7 @@ dependencies = [ [[package]] name = "sp-serializer" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "serde", "serde_json", @@ -8474,7 +8474,7 @@ dependencies = [ [[package]] name = "sp-session" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "parity-scale-codec", "sp-api", @@ -8486,7 +8486,7 @@ dependencies = [ [[package]] name = "sp-staking" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "parity-scale-codec", "sp-runtime", @@ -8495,7 +8495,7 @@ dependencies = [ [[package]] name = "sp-state-machine" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "hash-db", "hex-literal", @@ -8518,11 +8518,11 @@ dependencies = [ [[package]] name = "sp-std" -version = "2.0.0-rc5" +version = "2.0.0-rc6" [[package]] name = "sp-storage" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "impl-serde 0.2.3", "parity-scale-codec", @@ -8534,7 +8534,7 @@ dependencies = [ [[package]] name = "sp-test-primitives" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "parity-scale-codec", "parity-util-mem 0.7.0", @@ -8546,7 +8546,7 @@ dependencies = [ [[package]] name = "sp-timestamp" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -8559,7 +8559,7 @@ dependencies = [ [[package]] name = "sp-tracing" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "log", "rental", @@ -8568,7 +8568,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "derive_more", "futures 0.3.5", @@ -8582,7 +8582,7 @@ dependencies = [ [[package]] name = "sp-trie" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "criterion 0.2.11", "hash-db", @@ -8600,7 +8600,7 @@ dependencies = [ [[package]] name = "sp-utils" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "futures 0.3.5", "futures-core", @@ -8611,7 +8611,7 @@ dependencies = [ [[package]] name = "sp-version" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "impl-serde 0.2.3", "parity-scale-codec", @@ -8622,7 +8622,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -8737,7 +8737,7 @@ dependencies = [ [[package]] name = "subkey" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-system", "node-primitives", @@ -8762,7 +8762,7 @@ dependencies = [ [[package]] name = "substrate-browser-utils" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "chrono", "console_error_panic_hook", @@ -8787,14 +8787,14 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "platforms", ] [[package]] name = "substrate-frame-cli" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-system", "sc-cli", @@ -8805,7 +8805,7 @@ dependencies = [ [[package]] name = "substrate-frame-rpc-support" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "frame-support", "frame-system", @@ -8821,7 +8821,7 @@ dependencies = [ [[package]] name = "substrate-frame-rpc-system" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "env_logger", "frame-system-rpc-runtime-api", @@ -8846,7 +8846,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "async-std", "derive_more", @@ -8859,7 +8859,7 
@@ dependencies = [ [[package]] name = "substrate-test-client" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "futures 0.1.29", "futures 0.3.5", @@ -8884,7 +8884,7 @@ dependencies = [ [[package]] name = "substrate-test-runtime" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "cfg-if", "frame-executive", @@ -8927,7 +8927,7 @@ dependencies = [ [[package]] name = "substrate-test-runtime-client" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "futures 0.3.5", "parity-scale-codec", @@ -8947,7 +8947,7 @@ dependencies = [ [[package]] name = "substrate-test-runtime-transaction-pool" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "derive_more", "futures 0.3.5", @@ -8962,7 +8962,7 @@ dependencies = [ [[package]] name = "substrate-test-utils" -version = "2.0.0-rc5" +version = "2.0.0-rc6" dependencies = [ "futures 0.3.5", "sc-service", @@ -8973,7 +8973,7 @@ dependencies = [ [[package]] name = "substrate-test-utils-derive" -version = "0.8.0-rc5" +version = "0.8.0-rc6" dependencies = [ "proc-macro-crate", "quote", diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index 0c988ebd1a2..d8cc9478bbd 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-template" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Anonymous"] description = "A new FRAME-based Substrate node, ready for hacking." edition = "2018" @@ -18,34 +18,34 @@ name = "node-template" [dependencies] structopt = "0.3.8" -sc-cli = { version = "0.8.0-rc5", path = "../../../client/cli", features = ["wasmtime"] } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sc-executor = { version = "0.8.0-rc5", path = "../../../client/executor", features = ["wasmtime"] } -sc-service = { version = "0.8.0-rc5", path = "../../../client/service", features = ["wasmtime"] } -sp-inherents = { version = "2.0.0-rc5", path = "../../../primitives/inherents" } -sc-transaction-pool = { version = "2.0.0-rc5", path = "../../../client/transaction-pool" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../../primitives/transaction-pool" } -sc-consensus-aura = { version = "0.8.0-rc5", path = "../../../client/consensus/aura" } -sp-consensus-aura = { version = "0.8.0-rc5", path = "../../../primitives/consensus/aura" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -sc-consensus = { version = "0.8.0-rc5", path = "../../../client/consensus/common" } -sc-finality-grandpa = { version = "0.8.0-rc5", path = "../../../client/finality-grandpa" } -sp-finality-grandpa = { version = "2.0.0-rc5", path = "../../../primitives/finality-grandpa" } -sc-client-api = { version = "2.0.0-rc5", path = "../../../client/api" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } +sc-cli = { version = "0.8.0-rc6", path = "../../../client/cli", features = ["wasmtime"] } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } +sc-executor = { version = "0.8.0-rc6", path = "../../../client/executor", features = ["wasmtime"] } +sc-service = { version = "0.8.0-rc6", path = "../../../client/service", features = ["wasmtime"] } +sp-inherents = { version = "2.0.0-rc6", path = "../../../primitives/inherents" } +sc-transaction-pool = { version = "2.0.0-rc6", path = "../../../client/transaction-pool" } +sp-transaction-pool = { version = "2.0.0-rc6", path = "../../../primitives/transaction-pool" } +sc-consensus-aura = { version = 
"0.8.0-rc6", path = "../../../client/consensus/aura" } +sp-consensus-aura = { version = "0.8.0-rc6", path = "../../../primitives/consensus/aura" } +sp-consensus = { version = "0.8.0-rc6", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.8.0-rc6", path = "../../../client/consensus/common" } +sc-finality-grandpa = { version = "0.8.0-rc6", path = "../../../client/finality-grandpa" } +sp-finality-grandpa = { version = "2.0.0-rc6", path = "../../../primitives/finality-grandpa" } +sc-client-api = { version = "2.0.0-rc6", path = "../../../client/api" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../primitives/runtime" } # These dependencies are used for the node template's RPCs jsonrpc-core = "14.0.3" -sc-rpc = { version = "2.0.0-rc5", path = "../../../client/rpc" } -sp-api = { version = "2.0.0-rc5", path = "../../../primitives/api" } -sc-rpc-api = { version = "0.8.0-rc5", path = "../../../client/rpc-api" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-block-builder = { version = "2.0.0-rc5", path = "../../../primitives/block-builder" } -sc-basic-authorship = { version = "0.8.0-rc5", path = "../../../client/basic-authorship" } -substrate-frame-rpc-system = { version = "2.0.0-rc5", path = "../../../utils/frame/rpc/system" } -pallet-transaction-payment-rpc = { version = "2.0.0-rc5", path = "../../../frame/transaction-payment/rpc/" } +sc-rpc = { version = "2.0.0-rc6", path = "../../../client/rpc" } +sp-api = { version = "2.0.0-rc6", path = "../../../primitives/api" } +sc-rpc-api = { version = "0.8.0-rc6", path = "../../../client/rpc-api" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../../primitives/blockchain" } +sp-block-builder = { version = "2.0.0-rc6", path = "../../../primitives/block-builder" } +sc-basic-authorship = { version = "0.8.0-rc6", path = "../../../client/basic-authorship" } +substrate-frame-rpc-system = { version = "2.0.0-rc6", path = "../../../utils/frame/rpc/system" } +pallet-transaction-payment-rpc = { version = "2.0.0-rc6", path = "../../../frame/transaction-payment/rpc/" } -node-template-runtime = { version = "2.0.0-rc5", path = "../runtime" } +node-template-runtime = { version = "2.0.0-rc6", path = "../runtime" } [build-dependencies] -substrate-build-script-utils = { version = "2.0.0-rc5", path = "../../../utils/build-script-utils" } +substrate-build-script-utils = { version = "2.0.0-rc6", path = "../../../utils/build-script-utils" } diff --git a/bin/node-template/pallets/template/Cargo.toml b/bin/node-template/pallets/template/Cargo.toml index 6d8868386e3..106e4af37a8 100644 --- a/bin/node-template/pallets/template/Cargo.toml +++ b/bin/node-template/pallets/template/Cargo.toml @@ -2,7 +2,7 @@ authors = ['Anonymous'] edition = '2018' name = 'pallet-template' -version = "2.0.0-rc5" +version = "2.0.0-rc6" license = "Unlicense" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" @@ -16,27 +16,27 @@ codec = { package = "parity-scale-codec", version = "1.3.4", default-features = [dependencies.frame-support] default-features = false -version = "2.0.0-rc5" +version = "2.0.0-rc6" path = "../../../../frame/support" [dependencies.frame-system] default-features = false -version = "2.0.0-rc5" +version = "2.0.0-rc6" path = "../../../../frame/system" [dev-dependencies.sp-core] default-features = false -version = "2.0.0-rc5" +version = "2.0.0-rc6" path = "../../../../primitives/core" [dev-dependencies.sp-io] default-features = false -version = "2.0.0-rc5" +version 
= "2.0.0-rc6" path = "../../../../primitives/io" [dev-dependencies.sp-runtime] default-features = false -version = "2.0.0-rc5" +version = "2.0.0-rc6" path = "../../../../primitives/runtime" diff --git a/bin/node-template/pallets/template/README.md b/bin/node-template/pallets/template/README.md new file mode 100644 index 00000000000..8d751a42207 --- /dev/null +++ b/bin/node-template/pallets/template/README.md @@ -0,0 +1 @@ +License: Unlicense \ No newline at end of file diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index f4e8697a47d..3cb0754089d 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-template-runtime" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Anonymous"] edition = "2018" license = "Unlicense" @@ -13,34 +13,34 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -pallet-aura = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/aura" } -pallet-balances = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/balances" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/support" } -pallet-grandpa = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/grandpa" } -pallet-randomness-collective-flip = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/randomness-collective-flip" } -pallet-sudo = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/sudo" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/system" } -pallet-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/timestamp" } -pallet-transaction-payment = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/transaction-payment" } -frame-executive = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/executive" } +pallet-aura = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/aura" } +pallet-balances = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/balances" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/support" } +pallet-grandpa = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/grandpa" } +pallet-randomness-collective-flip = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/randomness-collective-flip" } +pallet-sudo = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/sudo" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/system" } +pallet-timestamp = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/timestamp" } +pallet-transaction-payment = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/transaction-payment" } +frame-executive = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/executive" } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/api" } -sp-block-builder = { path = "../../../primitives/block-builder", default-features = false, version = "2.0.0-rc5"} -sp-consensus-aura = { version = "0.8.0-rc5", default-features 
= false, path = "../../../primitives/consensus/aura" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/core" } -sp-inherents = { path = "../../../primitives/inherents", default-features = false, version = "2.0.0-rc5"} -sp-offchain = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/offchain" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/runtime" } -sp-session = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/session" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/std" } -sp-transaction-pool = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/transaction-pool" } -sp-version = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/version" } +sp-api = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/api" } +sp-block-builder = { path = "../../../primitives/block-builder", default-features = false, version = "2.0.0-rc6"} +sp-consensus-aura = { version = "0.8.0-rc6", default-features = false, path = "../../../primitives/consensus/aura" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/core" } +sp-inherents = { path = "../../../primitives/inherents", default-features = false, version = "2.0.0-rc6"} +sp-offchain = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/offchain" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/runtime" } +sp-session = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/session" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/std" } +sp-transaction-pool = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/transaction-pool" } +sp-version = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/version" } # Used for the node template's RPCs -frame-system-rpc-runtime-api = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } -pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } +frame-system-rpc-runtime-api = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } +pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } -template = { version = "2.0.0-rc5", default-features = false, path = "../pallets/template", package = "pallet-template" } +template = { version = "2.0.0-rc6", default-features = false, path = "../pallets/template", package = "pallet-template" } [build-dependencies] wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } diff --git a/bin/node/bench/Cargo.toml b/bin/node/bench/Cargo.toml index 0f93039c3c1..adefbd07082 100644 --- a/bin/node/bench/Cargo.toml +++ b/bin/node/bench/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-bench" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] description = "Substrate node integration benchmarks." 
edition = "2018" @@ -10,27 +10,27 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] log = "0.4.8" -node-primitives = { version = "2.0.0-rc5", path = "../primitives" } -node-testing = { version = "2.0.0-rc5", path = "../testing" } -node-runtime = { version = "2.0.0-rc5", path = "../runtime" } -sc-cli = { version = "0.8.0-rc5", path = "../../../client/cli" } -sc-client-api = { version = "2.0.0-rc5", path = "../../../client/api/" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../../primitives/state-machine" } +node-primitives = { version = "2.0.0-rc6", path = "../primitives" } +node-testing = { version = "2.0.0-rc6", path = "../testing" } +node-runtime = { version = "2.0.0-rc6", path = "../runtime" } +sc-cli = { version = "0.8.0-rc6", path = "../../../client/cli" } +sc-client-api = { version = "2.0.0-rc6", path = "../../../client/api/" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../primitives/runtime" } +sp-state-machine = { version = "0.8.0-rc6", path = "../../../primitives/state-machine" } serde = "1.0.101" serde_json = "1.0.41" structopt = "0.3" derive_more = "0.99.2" kvdb = "0.7" kvdb-rocksdb = "0.9" -sp-trie = { version = "2.0.0-rc5", path = "../../../primitives/trie" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../../primitives/transaction-pool" } -sc-basic-authorship = { version = "0.8.0-rc5", path = "../../../client/basic-authorship" } -sp-inherents = { version = "2.0.0-rc5", path = "../../../primitives/inherents" } -sp-finality-tracker = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/finality-tracker" } -sp-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/timestamp" } +sp-trie = { version = "2.0.0-rc6", path = "../../../primitives/trie" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } +sp-consensus = { version = "0.8.0-rc6", path = "../../../primitives/consensus/common" } +sp-transaction-pool = { version = "2.0.0-rc6", path = "../../../primitives/transaction-pool" } +sc-basic-authorship = { version = "0.8.0-rc6", path = "../../../client/basic-authorship" } +sp-inherents = { version = "2.0.0-rc6", path = "../../../primitives/inherents" } +sp-finality-tracker = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/finality-tracker" } +sp-timestamp = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/timestamp" } hash-db = "0.15.2" tempfile = "3.1.0" fs_extra = "1" @@ -39,5 +39,5 @@ rand = { version = "0.7.2", features = ["small_rng"] } lazy_static = "1.4.0" parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } parity-db = { version = "0.1.2" } -sc-transaction-pool = { version = "2.0.0-rc5", path = "../../../client/transaction-pool" } +sc-transaction-pool = { version = "2.0.0-rc6", path = "../../../client/transaction-pool" } futures = { version = "0.3.4", features = ["thread-pool"] } diff --git a/bin/node/browser-testing/Cargo.toml b/bin/node/browser-testing/Cargo.toml index 1f5db1053d7..977a602e1da 100644 --- a/bin/node/browser-testing/Cargo.toml +++ b/bin/node/browser-testing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-browser-testing" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors 
= ["Parity Technologies "] description = "Tests for the in-browser light client." edition = "2018" @@ -17,5 +17,5 @@ wasm-bindgen-futures = "0.4.10" wasm-bindgen-test = "0.3.10" futures = "0.3.4" -node-cli = { path = "../cli", default-features = false, features = ["browser"] , version = "2.0.0-rc5"} -sc-rpc-api = { path = "../../../client/rpc-api" , version = "0.8.0-rc5"} +node-cli = { path = "../cli", default-features = false, features = ["browser"] , version = "2.0.0-rc6"} +sc-rpc-api = { path = "../../../client/rpc-api" , version = "0.8.0-rc6"} diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index d8ed12f296b..09ed51616d5 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-cli" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] description = "Generic Substrate node implementation in Rust." build = "build.rs" @@ -47,77 +47,77 @@ tracing = "0.1.18" parking_lot = "0.10.0" # primitives -sp-authority-discovery = { version = "2.0.0-rc5", path = "../../../primitives/authority-discovery" } -sp-consensus-babe = { version = "0.8.0-rc5", path = "../../../primitives/consensus/babe" } -grandpa-primitives = { version = "2.0.0-rc5", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/timestamp" } -sp-finality-tracker = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/finality-tracker" } -sp-inherents = { version = "2.0.0-rc5", path = "../../../primitives/inherents" } -sp-keyring = { version = "2.0.0-rc5", path = "../../../primitives/keyring" } -sp-io = { version = "2.0.0-rc5", path = "../../../primitives/io" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../../primitives/transaction-pool" } +sp-authority-discovery = { version = "2.0.0-rc6", path = "../../../primitives/authority-discovery" } +sp-consensus-babe = { version = "0.8.0-rc6", path = "../../../primitives/consensus/babe" } +grandpa-primitives = { version = "2.0.0-rc6", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../primitives/runtime" } +sp-timestamp = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/timestamp" } +sp-finality-tracker = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/finality-tracker" } +sp-inherents = { version = "2.0.0-rc6", path = "../../../primitives/inherents" } +sp-keyring = { version = "2.0.0-rc6", path = "../../../primitives/keyring" } +sp-io = { version = "2.0.0-rc6", path = "../../../primitives/io" } +sp-consensus = { version = "0.8.0-rc6", path = "../../../primitives/consensus/common" } +sp-transaction-pool = { version = "2.0.0-rc6", path = "../../../primitives/transaction-pool" } # client dependencies -sc-client-api = { version = "2.0.0-rc5", path = "../../../client/api" } -sc-chain-spec = { version = "2.0.0-rc5", path = "../../../client/chain-spec" } -sc-consensus = { version = "0.8.0-rc5", path = "../../../client/consensus/common" } -sc-transaction-pool = { version = "2.0.0-rc5", path = 
"../../../client/transaction-pool" } -sc-network = { version = "0.8.0-rc5", path = "../../../client/network" } -sc-consensus-babe = { version = "0.8.0-rc5", path = "../../../client/consensus/babe" } -grandpa = { version = "0.8.0-rc5", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } -sc-client-db = { version = "0.8.0-rc5", default-features = false, path = "../../../client/db" } -sc-offchain = { version = "2.0.0-rc5", path = "../../../client/offchain" } -sc-rpc = { version = "2.0.0-rc5", path = "../../../client/rpc" } -sc-basic-authorship = { version = "0.8.0-rc5", path = "../../../client/basic-authorship" } -sc-service = { version = "0.8.0-rc5", default-features = false, path = "../../../client/service" } -sc-tracing = { version = "2.0.0-rc5", path = "../../../client/tracing" } -sc-telemetry = { version = "2.0.0-rc5", path = "../../../client/telemetry" } -sc-authority-discovery = { version = "0.8.0-rc5", path = "../../../client/authority-discovery" } +sc-client-api = { version = "2.0.0-rc6", path = "../../../client/api" } +sc-chain-spec = { version = "2.0.0-rc6", path = "../../../client/chain-spec" } +sc-consensus = { version = "0.8.0-rc6", path = "../../../client/consensus/common" } +sc-transaction-pool = { version = "2.0.0-rc6", path = "../../../client/transaction-pool" } +sc-network = { version = "0.8.0-rc6", path = "../../../client/network" } +sc-consensus-babe = { version = "0.8.0-rc6", path = "../../../client/consensus/babe" } +grandpa = { version = "0.8.0-rc6", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } +sc-client-db = { version = "0.8.0-rc6", default-features = false, path = "../../../client/db" } +sc-offchain = { version = "2.0.0-rc6", path = "../../../client/offchain" } +sc-rpc = { version = "2.0.0-rc6", path = "../../../client/rpc" } +sc-basic-authorship = { version = "0.8.0-rc6", path = "../../../client/basic-authorship" } +sc-service = { version = "0.8.0-rc6", default-features = false, path = "../../../client/service" } +sc-tracing = { version = "2.0.0-rc6", path = "../../../client/tracing" } +sc-telemetry = { version = "2.0.0-rc6", path = "../../../client/telemetry" } +sc-authority-discovery = { version = "0.8.0-rc6", path = "../../../client/authority-discovery" } # frame dependencies -pallet-indices = { version = "2.0.0-rc5", path = "../../../frame/indices" } -pallet-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/timestamp" } -pallet-contracts = { version = "2.0.0-rc5", path = "../../../frame/contracts" } -frame-system = { version = "2.0.0-rc5", path = "../../../frame/system" } -pallet-balances = { version = "2.0.0-rc5", path = "../../../frame/balances" } -pallet-transaction-payment = { version = "2.0.0-rc5", path = "../../../frame/transaction-payment" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/support" } -pallet-im-online = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/im-online" } -pallet-authority-discovery = { version = "2.0.0-rc5", path = "../../../frame/authority-discovery" } -pallet-staking = { version = "2.0.0-rc5", path = "../../../frame/staking" } -pallet-grandpa = { version = "2.0.0-rc5", path = "../../../frame/grandpa" } +pallet-indices = { version = "2.0.0-rc6", path = "../../../frame/indices" } +pallet-timestamp = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/timestamp" } +pallet-contracts = { version = "2.0.0-rc6", path = "../../../frame/contracts" } 
+frame-system = { version = "2.0.0-rc6", path = "../../../frame/system" } +pallet-balances = { version = "2.0.0-rc6", path = "../../../frame/balances" } +pallet-transaction-payment = { version = "2.0.0-rc6", path = "../../../frame/transaction-payment" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/support" } +pallet-im-online = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/im-online" } +pallet-authority-discovery = { version = "2.0.0-rc6", path = "../../../frame/authority-discovery" } +pallet-staking = { version = "2.0.0-rc6", path = "../../../frame/staking" } +pallet-grandpa = { version = "2.0.0-rc6", path = "../../../frame/grandpa" } # node-specific dependencies -node-runtime = { version = "2.0.0-rc5", path = "../runtime" } -node-rpc = { version = "2.0.0-rc5", path = "../rpc" } -node-primitives = { version = "2.0.0-rc5", path = "../primitives" } -node-executor = { version = "2.0.0-rc5", path = "../executor" } +node-runtime = { version = "2.0.0-rc6", path = "../runtime" } +node-rpc = { version = "2.0.0-rc6", path = "../rpc" } +node-primitives = { version = "2.0.0-rc6", path = "../primitives" } +node-executor = { version = "2.0.0-rc6", path = "../executor" } # CLI-specific dependencies -sc-cli = { version = "0.8.0-rc5", optional = true, path = "../../../client/cli" } -frame-benchmarking-cli = { version = "2.0.0-rc5", optional = true, path = "../../../utils/frame/benchmarking-cli" } -node-inspect = { version = "0.8.0-rc5", optional = true, path = "../inspect" } +sc-cli = { version = "0.8.0-rc6", optional = true, path = "../../../client/cli" } +frame-benchmarking-cli = { version = "2.0.0-rc6", optional = true, path = "../../../utils/frame/benchmarking-cli" } +node-inspect = { version = "0.8.0-rc6", optional = true, path = "../inspect" } # WASM-specific dependencies wasm-bindgen = { version = "0.2.57", optional = true } wasm-bindgen-futures = { version = "0.4.7", optional = true } -browser-utils = { package = "substrate-browser-utils", path = "../../../utils/browser", optional = true, version = "0.8.0-rc5"} +browser-utils = { package = "substrate-browser-utils", path = "../../../utils/browser", optional = true, version = "0.8.0-rc6"} [target.'cfg(target_arch="x86_64")'.dependencies] -node-executor = { version = "2.0.0-rc5", path = "../executor", features = [ "wasmtime" ] } -sc-cli = { version = "0.8.0-rc5", optional = true, path = "../../../client/cli", features = [ "wasmtime" ] } -sc-service = { version = "0.8.0-rc5", default-features = false, path = "../../../client/service", features = [ "wasmtime" ] } -sp-trie = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/trie", features = ["memory-tracker"] } +node-executor = { version = "2.0.0-rc6", path = "../executor", features = [ "wasmtime" ] } +sc-cli = { version = "0.8.0-rc6", optional = true, path = "../../../client/cli", features = [ "wasmtime" ] } +sc-service = { version = "0.8.0-rc6", default-features = false, path = "../../../client/service", features = [ "wasmtime" ] } +sp-trie = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/trie", features = ["memory-tracker"] } [dev-dependencies] -sc-keystore = { version = "2.0.0-rc5", path = "../../../client/keystore" } -sc-consensus = { version = "0.8.0-rc5", path = "../../../client/consensus/common" } -sc-consensus-babe = { version = "0.8.0-rc5", features = ["test-helpers"], path = "../../../client/consensus/babe" } -sc-consensus-epochs = { version = "0.8.0-rc5", 
path = "../../../client/consensus/epochs" } -sc-service-test = { version = "2.0.0-rc5", path = "../../../client/service/test" } +sc-keystore = { version = "2.0.0-rc6", path = "../../../client/keystore" } +sc-consensus = { version = "0.8.0-rc6", path = "../../../client/consensus/common" } +sc-consensus-babe = { version = "0.8.0-rc6", features = ["test-helpers"], path = "../../../client/consensus/babe" } +sc-consensus-epochs = { version = "0.8.0-rc6", path = "../../../client/consensus/epochs" } +sc-service-test = { version = "2.0.0-rc6", path = "../../../client/service/test" } futures = "0.3.4" tempfile = "3.1.0" assert_cmd = "1.0" @@ -128,13 +128,13 @@ platforms = "0.2.1" [build-dependencies] structopt = { version = "0.3.8", optional = true } -node-inspect = { version = "0.8.0-rc5", optional = true, path = "../inspect" } -frame-benchmarking-cli = { version = "2.0.0-rc5", optional = true, path = "../../../utils/frame/benchmarking-cli" } -substrate-build-script-utils = { version = "2.0.0-rc5", optional = true, path = "../../../utils/build-script-utils" } -substrate-frame-cli = { version = "2.0.0-rc5", optional = true, path = "../../../utils/frame/frame-utilities-cli" } +node-inspect = { version = "0.8.0-rc6", optional = true, path = "../inspect" } +frame-benchmarking-cli = { version = "2.0.0-rc6", optional = true, path = "../../../utils/frame/benchmarking-cli" } +substrate-build-script-utils = { version = "2.0.0-rc6", optional = true, path = "../../../utils/build-script-utils" } +substrate-frame-cli = { version = "2.0.0-rc6", optional = true, path = "../../../utils/frame/frame-utilities-cli" } [build-dependencies.sc-cli] -version = "0.8.0-rc5" +version = "0.8.0-rc6" package = "sc-cli" path = "../../../client/cli" optional = true diff --git a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml index 6c6920d62be..d8fb2e4078b 100644 --- a/bin/node/executor/Cargo.toml +++ b/bin/node/executor/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-executor" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] description = "Substrate node implementation in Rust." 
edition = "2018" @@ -13,34 +13,34 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4" } -node-primitives = { version = "2.0.0-rc5", path = "../primitives" } -node-runtime = { version = "2.0.0-rc5", path = "../runtime" } -sc-executor = { version = "0.8.0-rc5", path = "../../../client/executor" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-io = { version = "2.0.0-rc5", path = "../../../primitives/io" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../../primitives/state-machine" } -sp-trie = { version = "2.0.0-rc5", path = "../../../primitives/trie" } +node-primitives = { version = "2.0.0-rc6", path = "../primitives" } +node-runtime = { version = "2.0.0-rc6", path = "../runtime" } +sc-executor = { version = "0.8.0-rc6", path = "../../../client/executor" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } +sp-io = { version = "2.0.0-rc6", path = "../../../primitives/io" } +sp-state-machine = { version = "0.8.0-rc6", path = "../../../primitives/state-machine" } +sp-trie = { version = "2.0.0-rc6", path = "../../../primitives/trie" } trie-root = "0.16.0" -frame-benchmarking = { version = "2.0.0-rc5", path = "../../../frame/benchmarking" } +frame-benchmarking = { version = "2.0.0-rc6", path = "../../../frame/benchmarking" } [dev-dependencies] criterion = "0.3.0" -frame-support = { version = "2.0.0-rc5", path = "../../../frame/support" } -frame-system = { version = "2.0.0-rc5", path = "../../../frame/system" } -node-testing = { version = "2.0.0-rc5", path = "../testing" } -pallet-balances = { version = "2.0.0-rc5", path = "../../../frame/balances" } -pallet-contracts = { version = "2.0.0-rc5", path = "../../../frame/contracts" } -pallet-grandpa = { version = "2.0.0-rc5", path = "../../../frame/grandpa" } -pallet-im-online = { version = "2.0.0-rc5", path = "../../../frame/im-online" } -pallet-indices = { version = "2.0.0-rc5", path = "../../../frame/indices" } -pallet-session = { version = "2.0.0-rc5", path = "../../../frame/session" } -pallet-timestamp = { version = "2.0.0-rc5", path = "../../../frame/timestamp" } -pallet-transaction-payment = { version = "2.0.0-rc5", path = "../../../frame/transaction-payment" } -pallet-treasury = { version = "2.0.0-rc5", path = "../../../frame/treasury" } -sp-application-crypto = { version = "2.0.0-rc5", path = "../../../primitives/application-crypto" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-externalities = { version = "0.8.0-rc5", path = "../../../primitives/externalities" } -substrate-test-client = { version = "2.0.0-rc5", path = "../../../test-utils/client" } +frame-support = { version = "2.0.0-rc6", path = "../../../frame/support" } +frame-system = { version = "2.0.0-rc6", path = "../../../frame/system" } +node-testing = { version = "2.0.0-rc6", path = "../testing" } +pallet-balances = { version = "2.0.0-rc6", path = "../../../frame/balances" } +pallet-contracts = { version = "2.0.0-rc6", path = "../../../frame/contracts" } +pallet-grandpa = { version = "2.0.0-rc6", path = "../../../frame/grandpa" } +pallet-im-online = { version = "2.0.0-rc6", path = "../../../frame/im-online" } +pallet-indices = { version = "2.0.0-rc6", path = "../../../frame/indices" } +pallet-session = { version = "2.0.0-rc6", path = "../../../frame/session" } +pallet-timestamp = { version = "2.0.0-rc6", path = "../../../frame/timestamp" } +pallet-transaction-payment = { version = "2.0.0-rc6", path = 
"../../../frame/transaction-payment" } +pallet-treasury = { version = "2.0.0-rc6", path = "../../../frame/treasury" } +sp-application-crypto = { version = "2.0.0-rc6", path = "../../../primitives/application-crypto" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../primitives/runtime" } +sp-externalities = { version = "0.8.0-rc6", path = "../../../primitives/externalities" } +substrate-test-client = { version = "2.0.0-rc6", path = "../../../test-utils/client" } wabt = "0.9.2" [features] diff --git a/bin/node/inspect/Cargo.toml b/bin/node/inspect/Cargo.toml index b7f828a5f1e..f8dc32f1e05 100644 --- a/bin/node/inspect/Cargo.toml +++ b/bin/node/inspect/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-inspect" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -14,10 +14,10 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "1.3.4" } derive_more = "0.99" log = "0.4.8" -sc-cli = { version = "0.8.0-rc5", path = "../../../client/cli" } -sc-client-api = { version = "2.0.0-rc5", path = "../../../client/api" } -sc-service = { version = "0.8.0-rc5", default-features = false, path = "../../../client/service" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } +sc-cli = { version = "0.8.0-rc6", path = "../../../client/cli" } +sc-client-api = { version = "2.0.0-rc6", path = "../../../client/api" } +sc-service = { version = "0.8.0-rc6", default-features = false, path = "../../../client/service" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../../primitives/blockchain" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../primitives/runtime" } structopt = "0.3.8" diff --git a/bin/node/primitives/Cargo.toml b/bin/node/primitives/Cargo.toml index 6ff8a05d614..15fc493289f 100644 --- a/bin/node/primitives/Cargo.toml +++ b/bin/node/primitives/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-primitives" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,13 +12,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/system" } -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/application-crypto" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/runtime" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/system" } +sp-application-crypto = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/application-crypto" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/runtime" } [dev-dependencies] -sp-serializer = { version = "2.0.0-rc5", path = "../../../primitives/serializer" } +sp-serializer = { version = "2.0.0-rc6", path 
= "../../../primitives/serializer" } pretty_assertions = "0.6.1" [features] diff --git a/bin/node/rpc-client/Cargo.toml b/bin/node/rpc-client/Cargo.toml index d1a76f2ab37..698aa8f08ae 100644 --- a/bin/node/rpc-client/Cargo.toml +++ b/bin/node/rpc-client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-rpc-client" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -16,5 +16,5 @@ futures = "0.1.29" hyper = "0.12.35" jsonrpc-core-client = { version = "14.2.0", default-features = false, features = ["http"] } log = "0.4.8" -node-primitives = { version = "2.0.0-rc5", path = "../primitives" } -sc-rpc = { version = "2.0.0-rc5", path = "../../../client/rpc" } +node-primitives = { version = "2.0.0-rc6", path = "../primitives" } +sc-rpc = { version = "2.0.0-rc6", path = "../../../client/rpc" } diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index ee11ff4ac8c..9ed8c22fbe3 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-rpc" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,23 +13,23 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpc-core = "14.2.0" jsonrpc-pubsub = "14.2.0" -node-primitives = { version = "2.0.0-rc5", path = "../primitives" } -node-runtime = { version = "2.0.0-rc5", path = "../runtime" } -pallet-contracts-rpc = { version = "0.8.0-rc5", path = "../../../frame/contracts/rpc/" } -pallet-transaction-payment-rpc = { version = "2.0.0-rc5", path = "../../../frame/transaction-payment/rpc/" } -sc-client-api = { version = "2.0.0-rc5", path = "../../../client/api" } -sc-consensus-babe = { version = "0.8.0-rc5", path = "../../../client/consensus/babe" } -sc-consensus-babe-rpc = { version = "0.8.0-rc5", path = "../../../client/consensus/babe/rpc" } -sc-consensus-epochs = { version = "0.8.0-rc5", path = "../../../client/consensus/epochs" } -sc-finality-grandpa = { version = "0.8.0-rc5", path = "../../../client/finality-grandpa" } -sc-finality-grandpa-rpc = { version = "0.8.0-rc5", path = "../../../client/finality-grandpa/rpc" } -sc-keystore = { version = "2.0.0-rc5", path = "../../../client/keystore" } -sc-rpc-api = { version = "0.8.0-rc5", path = "../../../client/rpc-api" } -sc-rpc = { version = "2.0.0-rc5", path = "../../../client/rpc" } -sp-api = { version = "2.0.0-rc5", path = "../../../primitives/api" } -sp-block-builder = { version = "2.0.0-rc5", path = "../../../primitives/block-builder" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -sp-consensus-babe = { version = "0.8.0-rc5", path = "../../../primitives/consensus/babe" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../../primitives/transaction-pool" } -substrate-frame-rpc-system = { version = "2.0.0-rc5", path = "../../../utils/frame/rpc/system" } +node-primitives = { version = "2.0.0-rc6", path = "../primitives" } +node-runtime = { version = "2.0.0-rc6", path = "../runtime" } +pallet-contracts-rpc = { version = "0.8.0-rc6", path = "../../../frame/contracts/rpc/" } +pallet-transaction-payment-rpc = { version = "2.0.0-rc6", path = "../../../frame/transaction-payment/rpc/" } +sc-client-api = { version = "2.0.0-rc6", path = "../../../client/api" } +sc-consensus-babe = { version = "0.8.0-rc6", path = "../../../client/consensus/babe" } +sc-consensus-babe-rpc = { version 
= "0.8.0-rc6", path = "../../../client/consensus/babe/rpc" } +sc-consensus-epochs = { version = "0.8.0-rc6", path = "../../../client/consensus/epochs" } +sc-finality-grandpa = { version = "0.8.0-rc6", path = "../../../client/finality-grandpa" } +sc-finality-grandpa-rpc = { version = "0.8.0-rc6", path = "../../../client/finality-grandpa/rpc" } +sc-keystore = { version = "2.0.0-rc6", path = "../../../client/keystore" } +sc-rpc-api = { version = "0.8.0-rc6", path = "../../../client/rpc-api" } +sc-rpc = { version = "2.0.0-rc6", path = "../../../client/rpc" } +sp-api = { version = "2.0.0-rc6", path = "../../../primitives/api" } +sp-block-builder = { version = "2.0.0-rc6", path = "../../../primitives/block-builder" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../../primitives/blockchain" } +sp-consensus = { version = "0.8.0-rc6", path = "../../../primitives/consensus/common" } +sp-consensus-babe = { version = "0.8.0-rc6", path = "../../../primitives/consensus/babe" } +sp-transaction-pool = { version = "2.0.0-rc6", path = "../../../primitives/transaction-pool" } +substrate-frame-rpc-system = { version = "2.0.0-rc6", path = "../../../utils/frame/rpc/system" } diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 35ed7400459..303db4c2d2e 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-runtime" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -21,70 +21,70 @@ static_assertions = "1.1.0" hex-literal = { version = "0.2.1", optional = true } # primitives -sp-authority-discovery = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/authority-discovery" } -sp-consensus-babe = { version = "0.8.0-rc5", default-features = false, path = "../../../primitives/consensus/babe" } -sp-block-builder = { path = "../../../primitives/block-builder", default-features = false, version = "2.0.0-rc5"} -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/inherents" } -node-primitives = { version = "2.0.0-rc5", default-features = false, path = "../primitives" } -sp-offchain = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/offchain" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/core" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/std" } -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/api" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/runtime" } -sp-staking = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/staking" } -sp-keyring = { version = "2.0.0-rc5", optional = true, path = "../../../primitives/keyring" } -sp-session = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/session" } -sp-transaction-pool = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/transaction-pool" } -sp-version = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/version" } +sp-authority-discovery = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/authority-discovery" } +sp-consensus-babe = { version = "0.8.0-rc6", default-features = false, path = "../../../primitives/consensus/babe" } +sp-block-builder = { path = "../../../primitives/block-builder", 
default-features = false, version = "2.0.0-rc6"} +sp-inherents = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/inherents" } +node-primitives = { version = "2.0.0-rc6", default-features = false, path = "../primitives" } +sp-offchain = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/offchain" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/core" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/std" } +sp-api = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/api" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/runtime" } +sp-staking = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/staking" } +sp-keyring = { version = "2.0.0-rc6", optional = true, path = "../../../primitives/keyring" } +sp-session = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/session" } +sp-transaction-pool = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/transaction-pool" } +sp-version = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/version" } # frame dependencies -frame-executive = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/executive" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/benchmarking", optional = true } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/system" } -frame-system-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/system/benchmarking", optional = true } -frame-system-rpc-runtime-api = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } -pallet-authority-discovery = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/authority-discovery" } -pallet-authorship = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/authorship" } -pallet-babe = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/babe" } -pallet-balances = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/balances" } -pallet-collective = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/collective" } -pallet-contracts = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/contracts" } -pallet-contracts-primitives = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/contracts/common/" } -pallet-contracts-rpc-runtime-api = { version = "0.8.0-rc5", default-features = false, path = "../../../frame/contracts/rpc/runtime-api/" } -pallet-democracy = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/democracy" } -pallet-elections-phragmen = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/elections-phragmen" } -pallet-finality-tracker = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/finality-tracker" } -pallet-grandpa = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/grandpa" } -pallet-im-online = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/im-online" } -pallet-indices = { version = "2.0.0-rc5", 
default-features = false, path = "../../../frame/indices" } -pallet-identity = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/identity" } -pallet-membership = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/membership" } -pallet-multisig = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/multisig" } -pallet-offences = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/offences" } -pallet-offences-benchmarking = { version = "2.0.0-rc5", path = "../../../frame/offences/benchmarking", default-features = false, optional = true } -pallet-proxy = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/proxy" } -pallet-randomness-collective-flip = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/randomness-collective-flip" } -pallet-recovery = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/recovery" } -pallet-session = { version = "2.0.0-rc5", features = ["historical"], path = "../../../frame/session", default-features = false } -pallet-session-benchmarking = { version = "2.0.0-rc5", path = "../../../frame/session/benchmarking", default-features = false, optional = true } -pallet-staking = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/staking" } -pallet-staking-reward-curve = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/staking/reward-curve" } -pallet-scheduler = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/scheduler" } -pallet-society = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/society" } -pallet-sudo = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/sudo" } -pallet-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/timestamp" } -pallet-treasury = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/treasury" } -pallet-utility = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/utility" } -pallet-transaction-payment = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/transaction-payment" } -pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } -pallet-vesting = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/vesting" } +frame-executive = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/executive" } +frame-benchmarking = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/benchmarking", optional = true } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/system" } +frame-system-benchmarking = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/system/benchmarking", optional = true } +frame-system-rpc-runtime-api = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } +pallet-authority-discovery = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/authority-discovery" } +pallet-authorship = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/authorship" } +pallet-babe = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/babe" } 
+pallet-balances = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/balances" } +pallet-collective = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/collective" } +pallet-contracts = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/contracts" } +pallet-contracts-primitives = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/contracts/common/" } +pallet-contracts-rpc-runtime-api = { version = "0.8.0-rc6", default-features = false, path = "../../../frame/contracts/rpc/runtime-api/" } +pallet-democracy = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/democracy" } +pallet-elections-phragmen = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/elections-phragmen" } +pallet-finality-tracker = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/finality-tracker" } +pallet-grandpa = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/grandpa" } +pallet-im-online = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/im-online" } +pallet-indices = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/indices" } +pallet-identity = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/identity" } +pallet-membership = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/membership" } +pallet-multisig = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/multisig" } +pallet-offences = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/offences" } +pallet-offences-benchmarking = { version = "2.0.0-rc6", path = "../../../frame/offences/benchmarking", default-features = false, optional = true } +pallet-proxy = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/proxy" } +pallet-randomness-collective-flip = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/randomness-collective-flip" } +pallet-recovery = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/recovery" } +pallet-session = { version = "2.0.0-rc6", features = ["historical"], path = "../../../frame/session", default-features = false } +pallet-session-benchmarking = { version = "2.0.0-rc6", path = "../../../frame/session/benchmarking", default-features = false, optional = true } +pallet-staking = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/staking" } +pallet-staking-reward-curve = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/staking/reward-curve" } +pallet-scheduler = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/scheduler" } +pallet-society = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/society" } +pallet-sudo = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/sudo" } +pallet-timestamp = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/timestamp" } +pallet-treasury = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/treasury" } +pallet-utility = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/utility" } +pallet-transaction-payment = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/transaction-payment" } +pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-rc6", default-features = false, path = 
"../../../frame/transaction-payment/rpc/runtime-api/" } +pallet-vesting = { version = "2.0.0-rc6", default-features = false, path = "../../../frame/vesting" } [build-dependencies] wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } [dev-dependencies] -sp-io = { version = "2.0.0-rc5", path = "../../../primitives/io" } +sp-io = { version = "2.0.0-rc6", path = "../../../primitives/io" } [features] default = ["std"] diff --git a/bin/node/testing/Cargo.toml b/bin/node/testing/Cargo.toml index a61a344ccee..23bf10336dc 100644 --- a/bin/node/testing/Cargo.toml +++ b/bin/node/testing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-testing" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] description = "Test utilities for Substrate node." edition = "2018" @@ -13,40 +13,40 @@ publish = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -pallet-balances = { version = "2.0.0-rc5", path = "../../../frame/balances" } -sc-service = { version = "0.8.0-rc5", features = ["test-helpers", "db"], path = "../../../client/service" } -sc-client-db = { version = "0.8.0-rc5", path = "../../../client/db/", features = ["kvdb-rocksdb", "parity-db"] } -sc-client-api = { version = "2.0.0-rc5", path = "../../../client/api/" } +pallet-balances = { version = "2.0.0-rc6", path = "../../../frame/balances" } +sc-service = { version = "0.8.0-rc6", features = ["test-helpers", "db"], path = "../../../client/service" } +sc-client-db = { version = "0.8.0-rc6", path = "../../../client/db/", features = ["kvdb-rocksdb", "parity-db"] } +sc-client-api = { version = "2.0.0-rc6", path = "../../../client/api/" } codec = { package = "parity-scale-codec", version = "1.3.4" } -pallet-contracts = { version = "2.0.0-rc5", path = "../../../frame/contracts" } -pallet-grandpa = { version = "2.0.0-rc5", path = "../../../frame/grandpa" } -pallet-indices = { version = "2.0.0-rc5", path = "../../../frame/indices" } -sp-keyring = { version = "2.0.0-rc5", path = "../../../primitives/keyring" } -node-executor = { version = "2.0.0-rc5", path = "../executor" } -node-primitives = { version = "2.0.0-rc5", path = "../primitives" } -node-runtime = { version = "2.0.0-rc5", path = "../runtime" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-io = { version = "2.0.0-rc5", path = "../../../primitives/io" } -frame-support = { version = "2.0.0-rc5", path = "../../../frame/support" } -pallet-session = { version = "2.0.0-rc5", path = "../../../frame/session" } -pallet-society = { version = "2.0.0-rc5", path = "../../../frame/society" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -pallet-staking = { version = "2.0.0-rc5", path = "../../../frame/staking" } -sc-executor = { version = "0.8.0-rc5", path = "../../../client/executor", features = ["wasmtime"] } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -frame-system = { version = "2.0.0-rc5", path = "../../../frame/system" } -substrate-test-client = { version = "2.0.0-rc5", path = "../../../test-utils/client" } -pallet-timestamp = { version = "2.0.0-rc5", path = "../../../frame/timestamp" } -pallet-transaction-payment = { version = "2.0.0-rc5", path = "../../../frame/transaction-payment" } -pallet-treasury = { version = "2.0.0-rc5", path = "../../../frame/treasury" } +pallet-contracts = { version = "2.0.0-rc6", path = "../../../frame/contracts" } +pallet-grandpa = { version = "2.0.0-rc6", path = 
"../../../frame/grandpa" } +pallet-indices = { version = "2.0.0-rc6", path = "../../../frame/indices" } +sp-keyring = { version = "2.0.0-rc6", path = "../../../primitives/keyring" } +node-executor = { version = "2.0.0-rc6", path = "../executor" } +node-primitives = { version = "2.0.0-rc6", path = "../primitives" } +node-runtime = { version = "2.0.0-rc6", path = "../runtime" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } +sp-io = { version = "2.0.0-rc6", path = "../../../primitives/io" } +frame-support = { version = "2.0.0-rc6", path = "../../../frame/support" } +pallet-session = { version = "2.0.0-rc6", path = "../../../frame/session" } +pallet-society = { version = "2.0.0-rc6", path = "../../../frame/society" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../primitives/runtime" } +pallet-staking = { version = "2.0.0-rc6", path = "../../../frame/staking" } +sc-executor = { version = "0.8.0-rc6", path = "../../../client/executor", features = ["wasmtime"] } +sp-consensus = { version = "0.8.0-rc6", path = "../../../primitives/consensus/common" } +frame-system = { version = "2.0.0-rc6", path = "../../../frame/system" } +substrate-test-client = { version = "2.0.0-rc6", path = "../../../test-utils/client" } +pallet-timestamp = { version = "2.0.0-rc6", path = "../../../frame/timestamp" } +pallet-transaction-payment = { version = "2.0.0-rc6", path = "../../../frame/transaction-payment" } +pallet-treasury = { version = "2.0.0-rc6", path = "../../../frame/treasury" } wabt = "0.9.2" -sp-api = { version = "2.0.0-rc5", path = "../../../primitives/api" } -sp-finality-tracker = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/finality-tracker" } -sp-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/timestamp" } -sp-block-builder = { version = "2.0.0-rc5", path = "../../../primitives/block-builder" } -sc-block-builder = { version = "0.8.0-rc5", path = "../../../client/block-builder" } -sp-inherents = { version = "2.0.0-rc5", path = "../../../primitives/inherents" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } +sp-api = { version = "2.0.0-rc6", path = "../../../primitives/api" } +sp-finality-tracker = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/finality-tracker" } +sp-timestamp = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/timestamp" } +sp-block-builder = { version = "2.0.0-rc6", path = "../../../primitives/block-builder" } +sc-block-builder = { version = "0.8.0-rc6", path = "../../../client/block-builder" } +sp-inherents = { version = "2.0.0-rc6", path = "../../../primitives/inherents" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../../primitives/blockchain" } log = "0.4.8" tempfile = "3.1.0" fs_extra = "1" @@ -54,4 +54,4 @@ futures = "0.3.1" [dev-dependencies] criterion = "0.3.0" -sc-cli = { version = "0.8.0-rc5", path = "../../../client/cli" } +sc-cli = { version = "0.8.0-rc6", path = "../../../client/cli" } diff --git a/bin/utils/chain-spec-builder/Cargo.toml b/bin/utils/chain-spec-builder/Cargo.toml index e90ef12f681..f6d03d4f3d1 100644 --- a/bin/utils/chain-spec-builder/Cargo.toml +++ b/bin/utils/chain-spec-builder/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "chain-spec-builder" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -13,9 +13,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] ansi_term = 
"0.12.1" -sc-keystore = { version = "2.0.0-rc5", path = "../../../client/keystore" } -sc-chain-spec = { version = "2.0.0-rc5", path = "../../../client/chain-spec" } -node-cli = { version = "2.0.0-rc5", path = "../../node/cli" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } +sc-keystore = { version = "2.0.0-rc6", path = "../../../client/keystore" } +sc-chain-spec = { version = "2.0.0-rc6", path = "../../../client/chain-spec" } +node-cli = { version = "2.0.0-rc6", path = "../../node/cli" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } rand = "0.7.2" structopt = "0.3.8" diff --git a/bin/utils/chain-spec-builder/README.md b/bin/utils/chain-spec-builder/README.md new file mode 100644 index 00000000000..3e9ac0bddbd --- /dev/null +++ b/bin/utils/chain-spec-builder/README.md @@ -0,0 +1 @@ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/bin/utils/subkey/Cargo.toml b/bin/utils/subkey/Cargo.toml index 1b0288faeed..0dc1a1b5970 100644 --- a/bin/utils/subkey/Cargo.toml +++ b/bin/utils/subkey/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "subkey" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -15,13 +15,13 @@ name = "subkey" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -node-runtime = { version = "2.0.0-rc5", path = "../../node/runtime" } -node-primitives = { version = "2.0.0-rc5", path = "../../node/primitives" } -sc-cli = { version = "0.8.0-rc5", path = "../../../client/cli" } -substrate-frame-cli = { version = "2.0.0-rc5", path = "../../../utils/frame/frame-utilities-cli" } +node-runtime = { version = "2.0.0-rc6", path = "../../node/runtime" } +node-primitives = { version = "2.0.0-rc6", path = "../../node/primitives" } +sc-cli = { version = "0.8.0-rc6", path = "../../../client/cli" } +substrate-frame-cli = { version = "2.0.0-rc6", path = "../../../utils/frame/frame-utilities-cli" } structopt = "0.3.14" -frame-system = { version = "2.0.0-rc5", path = "../../../frame/system" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } +frame-system = { version = "2.0.0-rc6", path = "../../../frame/system" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } [features] bench = [] diff --git a/bin/utils/subkey/README.md b/bin/utils/subkey/README.md new file mode 100644 index 00000000000..3e9ac0bddbd --- /dev/null +++ b/bin/utils/subkey/README.md @@ -0,0 +1 @@ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml index 45601771a40..e84c3642bcf 100644 --- a/client/api/Cargo.toml +++ b/client/api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-client-api" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -14,36 +14,36 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-consensus = { version = "0.8.0-rc5", path = "../../primitives/consensus/common" } +sp-consensus = { version = "0.8.0-rc6", path = "../../primitives/consensus/common" } derive_more = { version = "0.99.2" } -sc-executor = { version = "0.8.0-rc5", path = "../executor" } -sp-externalities = { version = "0.8.0-rc5", path = "../../primitives/externalities" } +sc-executor = { version = 
"0.8.0-rc6", path = "../executor" } +sp-externalities = { version = "0.8.0-rc6", path = "../../primitives/externalities" } fnv = { version = "1.0.6" } futures = { version = "0.3.1" } hash-db = { version = "0.15.2", default-features = false } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../primitives/blockchain" } hex-literal = { version = "0.2.1" } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/inherents" } -sp-keyring = { version = "2.0.0-rc5", path = "../../primitives/keyring" } +sp-inherents = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/inherents" } +sp-keyring = { version = "2.0.0-rc6", path = "../../primitives/keyring" } kvdb = "0.7.0" log = { version = "0.4.8" } parking_lot = "0.10.0" lazy_static = "1.4.0" -sp-database = { version = "2.0.0-rc5", path = "../../primitives/database" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-version = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/version" } -sp-api = { version = "2.0.0-rc5", path = "../../primitives/api" } -sp-utils = { version = "2.0.0-rc5", path = "../../primitives/utils" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } -sc-telemetry = { version = "2.0.0-rc5", path = "../telemetry" } -sp-trie = { version = "2.0.0-rc5", path = "../../primitives/trie" } -sp-storage = { version = "2.0.0-rc5", path = "../../primitives/storage" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../primitives/transaction-pool" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-rc5", path = "../../utils/prometheus" } +sp-database = { version = "2.0.0-rc6", path = "../../primitives/database" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-version = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/version" } +sp-api = { version = "2.0.0-rc6", path = "../../primitives/api" } +sp-utils = { version = "2.0.0-rc6", path = "../../primitives/utils" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +sp-state-machine = { version = "0.8.0-rc6", path = "../../primitives/state-machine" } +sc-telemetry = { version = "2.0.0-rc6", path = "../telemetry" } +sp-trie = { version = "2.0.0-rc6", path = "../../primitives/trie" } +sp-storage = { version = "2.0.0-rc6", path = "../../primitives/storage" } +sp-transaction-pool = { version = "2.0.0-rc6", path = "../../primitives/transaction-pool" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-rc6", path = "../../utils/prometheus" } [dev-dependencies] kvdb-memorydb = "0.6.0" -sp-test-primitives = { version = "2.0.0-rc5", path = "../../primitives/test-primitives" } -substrate-test-runtime = { version = "2.0.0-rc5", path = "../../test-utils/runtime" } +sp-test-primitives = { version = "2.0.0-rc6", path = "../../primitives/test-primitives" } +substrate-test-runtime = { version = "2.0.0-rc6", path = "../../test-utils/runtime" } diff --git 
a/client/api/README.md b/client/api/README.md new file mode 100644 index 00000000000..142f5b32dd9 --- /dev/null +++ b/client/api/README.md @@ -0,0 +1,3 @@ +Substrate client interfaces. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index 3f3e5b55894..b651dbbbc94 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-authority-discovery" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -23,21 +23,21 @@ futures = "0.3.4" futures-timer = "3.0.1" libp2p = { version = "0.23.0", default-features = false, features = ["kad"] } log = "0.4.8" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc5"} +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc6"} prost = "0.6.1" rand = "0.7.2" -sc-client-api = { version = "2.0.0-rc5", path = "../api" } -sc-keystore = { version = "2.0.0-rc5", path = "../keystore" } -sc-network = { version = "0.8.0-rc5", path = "../network" } +sc-client-api = { version = "2.0.0-rc6", path = "../api" } +sc-keystore = { version = "2.0.0-rc6", path = "../keystore" } +sc-network = { version = "0.8.0-rc6", path = "../network" } serde_json = "1.0.41" -sp-authority-discovery = { version = "2.0.0-rc5", path = "../../primitives/authority-discovery" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-api = { version = "2.0.0-rc5", path = "../../primitives/api" } +sp-authority-discovery = { version = "2.0.0-rc6", path = "../../primitives/authority-discovery" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../primitives/blockchain" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-rc6", path = "../../primitives/runtime" } +sp-api = { version = "2.0.0-rc6", path = "../../primitives/api" } [dev-dependencies] env_logger = "0.7.0" quickcheck = "0.9.0" -sc-peerset = { version = "2.0.0-rc5", path = "../peerset" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../test-utils/runtime/client"} +sc-peerset = { version = "2.0.0-rc6", path = "../peerset" } +substrate-test-runtime-client = { version = "2.0.0-rc6", path = "../../test-utils/runtime/client"} diff --git a/client/authority-discovery/README.md b/client/authority-discovery/README.md new file mode 100644 index 00000000000..54c51d5ba04 --- /dev/null +++ b/client/authority-discovery/README.md @@ -0,0 +1,9 @@ +Substrate authority discovery. + +This crate enables Substrate authorities to discover and directly connect to +other authorities. It is split into two components, the [`Worker`] and the +[`Service`]. + +See [`Worker`] and [`Service`] for more documentation.
+ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/basic-authorship/Cargo.toml b/client/basic-authorship/Cargo.toml index 6160a41fdef..6c9da3f3d8a 100644 --- a/client/basic-authorship/Cargo.toml +++ b/client/basic-authorship/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-basic-authorship" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -16,21 +16,21 @@ codec = { package = "parity-scale-codec", version = "1.3.4" } futures = "0.3.4" futures-timer = "3.0.1" log = "0.4.8" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc5"} -sp-api = { version = "2.0.0-rc5", path = "../../primitives/api" } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } -sc-client-api = { version = "2.0.0-rc5", path = "../api" } -sp-consensus = { version = "0.8.0-rc5", path = "../../primitives/consensus/common" } -sp-inherents = { version = "2.0.0-rc5", path = "../../primitives/inherents" } -sc-telemetry = { version = "2.0.0-rc5", path = "../telemetry" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../primitives/transaction-pool" } -sc-block-builder = { version = "0.8.0-rc5", path = "../block-builder" } -sc-proposer-metrics = { version = "0.8.0-rc5", path = "../proposer-metrics" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc6"} +sp-api = { version = "2.0.0-rc6", path = "../../primitives/api" } +sp-runtime = { version = "2.0.0-rc6", path = "../../primitives/runtime" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../primitives/blockchain" } +sc-client-api = { version = "2.0.0-rc6", path = "../api" } +sp-consensus = { version = "0.8.0-rc6", path = "../../primitives/consensus/common" } +sp-inherents = { version = "2.0.0-rc6", path = "../../primitives/inherents" } +sc-telemetry = { version = "2.0.0-rc6", path = "../telemetry" } +sp-transaction-pool = { version = "2.0.0-rc6", path = "../../primitives/transaction-pool" } +sc-block-builder = { version = "0.8.0-rc6", path = "../block-builder" } +sc-proposer-metrics = { version = "0.8.0-rc6", path = "../proposer-metrics" } tokio-executor = { version = "0.2.0-alpha.6", features = ["blocking"] } [dev-dependencies] -sc-transaction-pool = { version = "2.0.0-rc5", path = "../../client/transaction-pool" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../test-utils/runtime/client" } +sc-transaction-pool = { version = "2.0.0-rc6", path = "../../client/transaction-pool" } +substrate-test-runtime-client = { version = "2.0.0-rc6", path = "../../test-utils/runtime/client" } parking_lot = "0.10.0" diff --git a/client/basic-authorship/README.md b/client/basic-authorship/README.md new file mode 100644 index 00000000000..1a20593c09e --- /dev/null +++ b/client/basic-authorship/README.md @@ -0,0 +1,32 @@ +Basic implementation of block-authoring logic. + +# Example + +```rust +// The first step is to create a `ProposerFactory`. +let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone(), None); + +// From this factory, we create a `Proposer`. 
+let proposer = proposer_factory.init( + &client.header(&BlockId::number(0)).unwrap().unwrap(), +); + +// The proposer is created asynchronously. +let proposer = futures::executor::block_on(proposer).unwrap(); + +// This `Proposer` allows us to create a block proposition. +// The proposer will grab transactions from the transaction pool, and put them into the block. +let future = proposer.propose( + Default::default(), + Default::default(), + Duration::from_secs(2), + RecordProof::Yes, +); + +// We wait until the proposition is performed. +let block = futures::executor::block_on(future).unwrap(); +println!("Generated block: {:?}", block.block); +``` + + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/block-builder/Cargo.toml b/client/block-builder/Cargo.toml index ac1d0265fd9..94d6b70eeeb 100644 --- a/client/block-builder/Cargo.toml +++ b/client/block-builder/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-block-builder" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -13,17 +13,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-api = { version = "2.0.0-rc5", path = "../../primitives/api" } -sp-consensus = { version = "0.8.0-rc5", path = "../../primitives/consensus/common" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-block-builder = { version = "2.0.0-rc5", path = "../../primitives/block-builder" } -sp-inherents = { version = "2.0.0-rc5", path = "../../primitives/inherents" } -sc-client-api = { version = "2.0.0-rc5", path = "../api" } +sp-state-machine = { version = "0.8.0-rc6", path = "../../primitives/state-machine" } +sp-runtime = { version = "2.0.0-rc6", path = "../../primitives/runtime" } +sp-api = { version = "2.0.0-rc6", path = "../../primitives/api" } +sp-consensus = { version = "0.8.0-rc6", path = "../../primitives/consensus/common" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../primitives/blockchain" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +sp-block-builder = { version = "2.0.0-rc6", path = "../../primitives/block-builder" } +sp-inherents = { version = "2.0.0-rc6", path = "../../primitives/inherents" } +sc-client-api = { version = "2.0.0-rc6", path = "../api" } codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } [dev-dependencies] substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } -sp-trie = { version = "2.0.0-rc5", path = "../../primitives/trie" } +sp-trie = { version = "2.0.0-rc6", path = "../../primitives/trie" } diff --git a/client/block-builder/README.md b/client/block-builder/README.md new file mode 100644 index 00000000000..c691f6692ab --- /dev/null +++ b/client/block-builder/README.md @@ -0,0 +1,9 @@ +Substrate block builder + +This crate provides the [`BlockBuilder`] utility and the corresponding runtime api +[`BlockBuilder`](sp_block_builder::BlockBuilder). + +The block builder utility is used in the node as an abstraction over the runtime api to +initialize a block, to push extrinsics and to finalize a block.
+ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/chain-spec/Cargo.toml b/client/chain-spec/Cargo.toml index 5a4759cbf17..fb0addf461a 100644 --- a/client/chain-spec/Cargo.toml +++ b/client/chain-spec/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-chain-spec" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -12,12 +12,12 @@ description = "Substrate chain configurations." targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-chain-spec-derive = { version = "2.0.0-rc5", path = "./derive" } +sc-chain-spec-derive = { version = "2.0.0-rc6", path = "./derive" } impl-trait-for-tuples = "0.1.3" -sc-network = { version = "0.8.0-rc5", path = "../network" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } +sc-network = { version = "0.8.0-rc6", path = "../network" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } serde = { version = "1.0.101", features = ["derive"] } serde_json = "1.0.41" -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-chain-spec = { version = "2.0.0-rc5", path = "../../primitives/chain-spec" } -sc-telemetry = { version = "2.0.0-rc5", path = "../telemetry" } +sp-runtime = { version = "2.0.0-rc6", path = "../../primitives/runtime" } +sp-chain-spec = { version = "2.0.0-rc6", path = "../../primitives/chain-spec" } +sc-telemetry = { version = "2.0.0-rc6", path = "../telemetry" } diff --git a/client/chain-spec/README.md b/client/chain-spec/README.md new file mode 100644 index 00000000000..6475c811045 --- /dev/null +++ b/client/chain-spec/README.md @@ -0,0 +1,92 @@ +Substrate chain configurations. + +This crate contains structs and utilities to declare +a runtime-specific configuration file (a.k.a. chain spec). + +The basic chain spec type containing all required parameters is +[`ChainSpec`](./struct.ChainSpec.html). It can be extended with +additional options that contain configuration specific to your chain. +Usually the extension is going to be an amalgamation of types exposed +by Substrate core modules. To allow the core modules to retrieve +their configuration from your extension, you should use the `ChainSpecExtension` +macro exposed by this crate. + +```rust +use std::collections::HashMap; +use sc_chain_spec::{GenericChainSpec, ChainSpecExtension}; + +#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, ChainSpecExtension)] +pub struct MyExtension { + pub known_blocks: HashMap, +} + +pub type MyChainSpec = GenericChainSpec; +``` + +Some parameters may require different values depending on the +current blockchain height (a.k.a. forks). You can use the `ChainSpecGroup` +macro and the provided [`Forks`](./struct.Forks.html) structure to put +such parameters into your chain spec. +This allows you to override a single parameter starting at a specific +block number.
+ +```rust +use sc_chain_spec::{Forks, ChainSpecGroup, ChainSpecExtension, GenericChainSpec}; + +#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, ChainSpecGroup)] +pub struct ClientParams { + max_block_size: usize, + max_extrinsic_size: usize, +} + +#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, ChainSpecGroup)] +pub struct PoolParams { + max_transaction_size: usize, +} + +#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, ChainSpecGroup, ChainSpecExtension)] +pub struct Extension { + pub client: ClientParams, + pub pool: PoolParams, +} + +pub type BlockNumber = u64; + +/// A chain spec supporting forkable `ClientParams`. +pub type MyChainSpec1 = GenericChainSpec>; + +/// A chain spec supporting forkable `Extension`. +pub type MyChainSpec2 = GenericChainSpec>; +``` + +It's also possible to have a set of parameters that is allowed to change +with block numbers (i.e. is forkable), and another set that is not subject to changes. +You can achieve this by declaring an extension that contains `Forks` within it. + + +```rust +use serde::{Serialize, Deserialize}; +use sc_chain_spec::{Forks, GenericChainSpec, ChainSpecGroup, ChainSpecExtension}; + +#[derive(Clone, Debug, Serialize, Deserialize, ChainSpecGroup)] +pub struct ClientParams { + max_block_size: usize, + max_extrinsic_size: usize, +} + +#[derive(Clone, Debug, Serialize, Deserialize, ChainSpecGroup)] +pub struct PoolParams { + max_transaction_size: usize, +} + +#[derive(Clone, Debug, Serialize, Deserialize, ChainSpecExtension)] +pub struct Extension { + pub client: ClientParams, + #[forks] + pub pool: Forks, +} + +pub type MyChainSpec = GenericChainSpec; +``` + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/chain-spec/derive/Cargo.toml b/client/chain-spec/derive/Cargo.toml index b7eb03d7fbb..a3112e10fac 100644 --- a/client/chain-spec/derive/Cargo.toml +++ b/client/chain-spec/derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-chain-spec-derive" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 2fe1b30ab14..57bc622deb4 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-cli" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] description = "Substrate CLI interface."
edition = "2018" @@ -29,24 +29,24 @@ hex = "0.4.2" rand = "0.7.3" bip39 = "0.6.0-beta.1" serde_json = "1.0.41" -sc-keystore = { version = "2.0.0-rc5", path = "../keystore" } -sc-informant = { version = "0.8.0-rc5", path = "../informant" } -sp-panic-handler = { version = "2.0.0-rc5", path = "../../primitives/panic-handler" } -sc-client-api = { version = "2.0.0-rc5", path = "../api" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } -sc-network = { version = "0.8.0-rc5", path = "../network" } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0-rc5", path = "../../primitives/utils" } -sp-version = { version = "2.0.0-rc5", path = "../../primitives/version" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sc-service = { version = "0.8.0-rc5", default-features = false, path = "../service" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } -sc-telemetry = { version = "2.0.0-rc5", path = "../telemetry" } -substrate-prometheus-endpoint = { path = "../../utils/prometheus" , version = "0.8.0-rc5"} -sp-keyring = { version = "2.0.0-rc5", path = "../../primitives/keyring" } +sc-keystore = { version = "2.0.0-rc6", path = "../keystore" } +sc-informant = { version = "0.8.0-rc6", path = "../informant" } +sp-panic-handler = { version = "2.0.0-rc6", path = "../../primitives/panic-handler" } +sc-client-api = { version = "2.0.0-rc6", path = "../api" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../primitives/blockchain" } +sc-network = { version = "0.8.0-rc6", path = "../network" } +sp-runtime = { version = "2.0.0-rc6", path = "../../primitives/runtime" } +sp-utils = { version = "2.0.0-rc6", path = "../../primitives/utils" } +sp-version = { version = "2.0.0-rc6", path = "../../primitives/version" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +sc-service = { version = "0.8.0-rc6", default-features = false, path = "../service" } +sp-state-machine = { version = "0.8.0-rc6", path = "../../primitives/state-machine" } +sc-telemetry = { version = "2.0.0-rc6", path = "../telemetry" } +substrate-prometheus-endpoint = { path = "../../utils/prometheus" , version = "0.8.0-rc6"} +sp-keyring = { version = "2.0.0-rc6", path = "../../primitives/keyring" } names = "0.11.0" structopt = "0.3.8" -sc-tracing = { version = "2.0.0-rc5", path = "../tracing" } +sc-tracing = { version = "2.0.0-rc6", path = "../tracing" } chrono = "0.4.10" parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } serde = "1.0.111" diff --git a/client/cli/README.md b/client/cli/README.md new file mode 100644 index 00000000000..2504dbb0c03 --- /dev/null +++ b/client/cli/README.md @@ -0,0 +1,3 @@ +Substrate CLI library. 
+ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index 6bf60335b7b..b107499daf4 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-aura" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] description = "Aura consensus algorithm for substrate" edition = "2018" @@ -12,37 +12,37 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc5", path = "../../../primitives/application-crypto" } -sp-consensus-aura = { version = "0.8.0-rc5", path = "../../../primitives/consensus/aura" } -sp-block-builder = { version = "2.0.0-rc5", path = "../../../primitives/block-builder" } -sc-block-builder = { version = "0.8.0-rc5", path = "../../../client/block-builder" } -sc-client-api = { version = "2.0.0-rc5", path = "../../api" } +sp-application-crypto = { version = "2.0.0-rc6", path = "../../../primitives/application-crypto" } +sp-consensus-aura = { version = "0.8.0-rc6", path = "../../../primitives/consensus/aura" } +sp-block-builder = { version = "2.0.0-rc6", path = "../../../primitives/block-builder" } +sc-block-builder = { version = "0.8.0-rc6", path = "../../../client/block-builder" } +sc-client-api = { version = "2.0.0-rc6", path = "../../api" } codec = { package = "parity-scale-codec", version = "1.3.4" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } +sp-consensus = { version = "0.8.0-rc6", path = "../../../primitives/consensus/common" } derive_more = "0.99.2" futures = "0.3.4" futures-timer = "3.0.1" -sp-inherents = { version = "2.0.0-rc5", path = "../../../primitives/inherents" } -sc-keystore = { version = "2.0.0-rc5", path = "../../keystore" } +sp-inherents = { version = "2.0.0-rc6", path = "../../../primitives/inherents" } +sc-keystore = { version = "2.0.0-rc6", path = "../../keystore" } log = "0.4.8" parking_lot = "0.10.0" -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-io = { version = "2.0.0-rc5", path = "../../../primitives/io" } -sp-version = { version = "2.0.0-rc5", path = "../../../primitives/version" } -sc-consensus-slots = { version = "0.8.0-rc5", path = "../slots" } -sp-api = { version = "2.0.0-rc5", path = "../../../primitives/api" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-timestamp = { version = "2.0.0-rc5", path = "../../../primitives/timestamp" } -sc-telemetry = { version = "2.0.0-rc5", path = "../../telemetry" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc5"} +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../../primitives/blockchain" } +sp-io = { version = "2.0.0-rc6", path = "../../../primitives/io" } +sp-version = { version = "2.0.0-rc6", path = "../../../primitives/version" } +sc-consensus-slots = { version = "0.8.0-rc6", path = "../slots" } +sp-api = { version = "2.0.0-rc6", path = "../../../primitives/api" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../primitives/runtime" } +sp-timestamp = { version = "2.0.0-rc6", path = "../../../primitives/timestamp" } +sc-telemetry = { version = "2.0.0-rc6", 
path = "../../telemetry" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc6"} [dev-dependencies] -sp-keyring = { version = "2.0.0-rc5", path = "../../../primitives/keyring" } -sc-executor = { version = "0.8.0-rc5", path = "../../executor" } -sc-network = { version = "0.8.0-rc5", path = "../../network" } -sc-network-test = { version = "0.8.0-rc5", path = "../../network/test" } -sc-service = { version = "0.8.0-rc5", default-features = false, path = "../../service" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../../test-utils/runtime/client" } +sp-keyring = { version = "2.0.0-rc6", path = "../../../primitives/keyring" } +sc-executor = { version = "0.8.0-rc6", path = "../../executor" } +sc-network = { version = "0.8.0-rc6", path = "../../network" } +sc-network-test = { version = "0.8.0-rc6", path = "../../network/test" } +sc-service = { version = "0.8.0-rc6", default-features = false, path = "../../service" } +substrate-test-runtime-client = { version = "2.0.0-rc6", path = "../../../test-utils/runtime/client" } env_logger = "0.7.0" tempfile = "3.1.0" diff --git a/client/consensus/aura/README.md b/client/consensus/aura/README.md new file mode 100644 index 00000000000..85d82cd7dfd --- /dev/null +++ b/client/consensus/aura/README.md @@ -0,0 +1,15 @@ +Aura (Authority-round) consensus in substrate. + +Aura works by having a list of authorities A who are expected to roughly +agree on the current time. Time is divided up into discrete slots of t +seconds each. For each slot s, the author of that slot is A[s % |A|]. + +The author is allowed to issue one block but not more during that slot, +and it will be built upon the longest valid chain that has been seen. + +Blocks from future steps will be either deferred or rejected depending on how +far in the future they are. + +NOTE: Aura itself is designed to be generic over the crypto used. 
+ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index 1b6b705139c..58385670967 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-babe" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] description = "BABE consensus algorithm for substrate" edition = "2018" @@ -14,32 +14,32 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } -sp-consensus-babe = { version = "0.8.0-rc5", path = "../../../primitives/consensus/babe" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-application-crypto = { version = "2.0.0-rc5", path = "../../../primitives/application-crypto" } +sp-consensus-babe = { version = "0.8.0-rc6", path = "../../../primitives/consensus/babe" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } +sp-application-crypto = { version = "2.0.0-rc6", path = "../../../primitives/application-crypto" } num-bigint = "0.2.3" num-rational = "0.2.2" num-traits = "0.2.8" serde = { version = "1.0.104", features = ["derive"] } -sp-version = { version = "2.0.0-rc5", path = "../../../primitives/version" } -sp-io = { version = "2.0.0-rc5", path = "../../../primitives/io" } -sp-inherents = { version = "2.0.0-rc5", path = "../../../primitives/inherents" } -sp-timestamp = { version = "2.0.0-rc5", path = "../../../primitives/timestamp" } -sc-telemetry = { version = "2.0.0-rc5", path = "../../telemetry" } -sc-keystore = { version = "2.0.0-rc5", path = "../../keystore" } -sc-client-api = { version = "2.0.0-rc5", path = "../../api" } -sc-consensus-epochs = { version = "0.8.0-rc5", path = "../epochs" } -sp-api = { version = "2.0.0-rc5", path = "../../../primitives/api" } -sp-block-builder = { version = "2.0.0-rc5", path = "../../../primitives/block-builder" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -sp-consensus-vrf = { version = "0.8.0-rc5", path = "../../../primitives/consensus/vrf" } -sc-consensus-uncles = { version = "0.8.0-rc5", path = "../uncles" } -sc-consensus-slots = { version = "0.8.0-rc5", path = "../slots" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-utils = { version = "2.0.0-rc5", path = "../../../primitives/utils" } -fork-tree = { version = "2.0.0-rc5", path = "../../../utils/fork-tree" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc5"} +sp-version = { version = "2.0.0-rc6", path = "../../../primitives/version" } +sp-io = { version = "2.0.0-rc6", path = "../../../primitives/io" } +sp-inherents = { version = "2.0.0-rc6", path = "../../../primitives/inherents" } +sp-timestamp = { version = "2.0.0-rc6", path = "../../../primitives/timestamp" } +sc-telemetry = { version = "2.0.0-rc6", path = "../../telemetry" } +sc-keystore = { version = "2.0.0-rc6", path = "../../keystore" } +sc-client-api = { version = "2.0.0-rc6", path = "../../api" } +sc-consensus-epochs = { version = "0.8.0-rc6", path = "../epochs" } +sp-api = { version = "2.0.0-rc6", path = "../../../primitives/api" } +sp-block-builder = { version = "2.0.0-rc6", path = "../../../primitives/block-builder" } +sp-blockchain = { version = 
"2.0.0-rc6", path = "../../../primitives/blockchain" } +sp-consensus = { version = "0.8.0-rc6", path = "../../../primitives/consensus/common" } +sp-consensus-vrf = { version = "0.8.0-rc6", path = "../../../primitives/consensus/vrf" } +sc-consensus-uncles = { version = "0.8.0-rc6", path = "../uncles" } +sc-consensus-slots = { version = "0.8.0-rc6", path = "../slots" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../primitives/runtime" } +sp-utils = { version = "2.0.0-rc6", path = "../../../primitives/utils" } +fork-tree = { version = "2.0.0-rc6", path = "../../../utils/fork-tree" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc6"} futures = "0.3.4" futures-timer = "3.0.1" parking_lot = "0.10.0" @@ -52,13 +52,13 @@ derive_more = "0.99.2" retain_mut = "0.1.1" [dev-dependencies] -sp-keyring = { version = "2.0.0-rc5", path = "../../../primitives/keyring" } -sc-executor = { version = "0.8.0-rc5", path = "../../executor" } -sc-network = { version = "0.8.0-rc5", path = "../../network" } -sc-network-test = { version = "0.8.0-rc5", path = "../../network/test" } -sc-service = { version = "0.8.0-rc5", default-features = false, path = "../../service" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../../test-utils/runtime/client" } -sc-block-builder = { version = "0.8.0-rc5", path = "../../block-builder" } +sp-keyring = { version = "2.0.0-rc6", path = "../../../primitives/keyring" } +sc-executor = { version = "0.8.0-rc6", path = "../../executor" } +sc-network = { version = "0.8.0-rc6", path = "../../network" } +sc-network-test = { version = "0.8.0-rc6", path = "../../network/test" } +sc-service = { version = "0.8.0-rc6", default-features = false, path = "../../service" } +substrate-test-runtime-client = { version = "2.0.0-rc6", path = "../../../test-utils/runtime/client" } +sc-block-builder = { version = "0.8.0-rc6", path = "../../block-builder" } env_logger = "0.7.0" rand_chacha = "0.2.2" tempfile = "3.1.0" diff --git a/client/consensus/babe/README.md b/client/consensus/babe/README.md new file mode 100644 index 00000000000..faba3948ed7 --- /dev/null +++ b/client/consensus/babe/README.md @@ -0,0 +1,48 @@ +# BABE (Blind Assignment for Blockchain Extension) + +BABE is a slot-based block production mechanism which uses a VRF PRNG to +randomly perform the slot allocation. On every slot, all the authorities +generate a new random number with the VRF function and if it is lower than a +given threshold (which is proportional to their weight/stake) they have a +right to produce a block. The proof of the VRF function execution will be +used by other peer to validate the legitimacy of the slot claim. + +The engine is also responsible for collecting entropy on-chain which will be +used to seed the given VRF PRNG. An epoch is a contiguous number of slots +under which we will be using the same authority set. During an epoch all VRF +outputs produced as a result of block production will be collected on an +on-chain randomness pool. Epoch changes are announced one epoch in advance, +i.e. when ending epoch N, we announce the parameters (randomness, +authorities, etc.) for epoch N+2. + +Since the slot assignment is randomized, it is possible that a slot is +assigned to multiple validators in which case we will have a temporary fork, +or that a slot is assigned to no validator in which case no block is +produced. Which means that block times are not deterministic. 
+ +The protocol has a parameter `c` [0, 1] for which `1 - c` is the probability +of a slot being empty. The choice of this parameter affects the security of +the protocol relating to maximum tolerable network delays. + +In addition to the VRF-based slot assignment described above, which we will +call primary slots, the engine also supports a deterministic secondary slot +assignment. Primary slots take precedence over secondary slots, when +authoring the node starts by trying to claim a primary slot and falls back +to a secondary slot claim attempt. The secondary slot assignment is done +by picking the authority at index: + +`blake2_256(epoch_randomness ++ slot_number) % authorities_len`. + +The secondary slots supports either a `SecondaryPlain` or `SecondaryVRF` +variant. Comparing with `SecondaryPlain` variant, the `SecondaryVRF` variant +generates an additional VRF output. The output is not included in beacon +randomness, but can be consumed by parachains. + +The fork choice rule is weight-based, where weight equals the number of +primary blocks in the chain. We will pick the heaviest chain (more primary +blocks) and will go with the longest one in case of a tie. + +An in-depth description and analysis of the protocol can be found here: + + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/consensus/babe/rpc/Cargo.toml b/client/consensus/babe/rpc/Cargo.toml index 280b38f4d6c..4d2e89af3b0 100644 --- a/client/consensus/babe/rpc/Cargo.toml +++ b/client/consensus/babe/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-babe-rpc" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] description = "RPC extensions for the BABE consensus algorithm" edition = "2018" @@ -12,27 +12,27 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-consensus-babe = { version = "0.8.0-rc5", path = "../" } -sc-rpc-api = { version = "0.8.0-rc5", path = "../../../rpc-api" } +sc-consensus-babe = { version = "0.8.0-rc6", path = "../" } +sc-rpc-api = { version = "0.8.0-rc6", path = "../../../rpc-api" } jsonrpc-core = "14.2.0" jsonrpc-core-client = "14.2.0" jsonrpc-derive = "14.2.1" -sp-consensus-babe = { version = "0.8.0-rc5", path = "../../../../primitives/consensus/babe" } +sp-consensus-babe = { version = "0.8.0-rc6", path = "../../../../primitives/consensus/babe" } serde = { version = "1.0.104", features=["derive"] } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../../primitives/runtime" } -sc-consensus-epochs = { version = "0.8.0-rc5", path = "../../epochs" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../../primitives/runtime" } +sc-consensus-epochs = { version = "0.8.0-rc6", path = "../../epochs" } futures = { version = "0.3.4", features = ["compat"] } derive_more = "0.99.2" -sp-api = { version = "2.0.0-rc5", path = "../../../../primitives/api" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../../primitives/consensus/common" } -sp-core = { version = "2.0.0-rc5", path = "../../../../primitives/core" } -sp-application-crypto = { version = "2.0.0-rc5", path = "../../../../primitives/application-crypto" } -sc-keystore = { version = "2.0.0-rc5", path = "../../../keystore" } +sp-api = { version = "2.0.0-rc6", path = "../../../../primitives/api" } +sp-consensus = { 
version = "0.8.0-rc6", path = "../../../../primitives/consensus/common" } +sp-core = { version = "2.0.0-rc6", path = "../../../../primitives/core" } +sp-application-crypto = { version = "2.0.0-rc6", path = "../../../../primitives/application-crypto" } +sc-keystore = { version = "2.0.0-rc6", path = "../../../keystore" } [dev-dependencies] -sc-consensus = { version = "0.8.0-rc5", path = "../../../consensus/common" } +sc-consensus = { version = "0.8.0-rc6", path = "../../../consensus/common" } serde_json = "1.0.50" -sp-keyring = { version = "2.0.0-rc5", path = "../../../../primitives/keyring" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../../../test-utils/runtime/client" } +sp-keyring = { version = "2.0.0-rc6", path = "../../../../primitives/keyring" } +substrate-test-runtime-client = { version = "2.0.0-rc6", path = "../../../../test-utils/runtime/client" } tempfile = "3.1.0" diff --git a/client/consensus/babe/rpc/README.md b/client/consensus/babe/rpc/README.md new file mode 100644 index 00000000000..e76dd3dc67f --- /dev/null +++ b/client/consensus/babe/rpc/README.md @@ -0,0 +1,3 @@ +RPC api for babe. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml index a617cf77af3..69d5eae8516 100644 --- a/client/consensus/common/Cargo.toml +++ b/client/consensus/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -12,7 +12,7 @@ description = "Collection of common consensus specific imlementations for Substr targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-client-api = { version = "2.0.0-rc5", path = "../../api" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } +sc-client-api = { version = "2.0.0-rc6", path = "../../api" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../primitives/runtime" } +sp-consensus = { version = "0.8.0-rc6", path = "../../../primitives/consensus/common" } diff --git a/client/consensus/common/README.md b/client/consensus/common/README.md new file mode 100644 index 00000000000..a6717a1d7a6 --- /dev/null +++ b/client/consensus/common/README.md @@ -0,0 +1,3 @@ +Collection of common consensus specific implementations + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/consensus/epochs/Cargo.toml b/client/consensus/epochs/Cargo.toml index 4a26611a758..7bcc30e3cff 100644 --- a/client/consensus/epochs/Cargo.toml +++ b/client/consensus/epochs/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-epochs" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] description = "Generic epochs-based utilities for consensus" edition = "2018" @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } parking_lot = "0.10.0" -fork-tree = { version = "2.0.0-rc5", path = "../../../utils/fork-tree" } -sp-runtime = { path = "../../../primitives/runtime" , version = "2.0.0-rc5"} -sp-blockchain = { version = 
"2.0.0-rc5", path = "../../../primitives/blockchain" } -sc-client-api = { path = "../../api" , version = "2.0.0-rc5"} +fork-tree = { version = "2.0.0-rc6", path = "../../../utils/fork-tree" } +sp-runtime = { path = "../../../primitives/runtime" , version = "2.0.0-rc6"} +sp-blockchain = { version = "2.0.0-rc6", path = "../../../primitives/blockchain" } +sc-client-api = { path = "../../api" , version = "2.0.0-rc6"} diff --git a/client/consensus/epochs/README.md b/client/consensus/epochs/README.md new file mode 100644 index 00000000000..1e74e04172c --- /dev/null +++ b/client/consensus/epochs/README.md @@ -0,0 +1,3 @@ +Generic utilities for epoch-based consensus engines. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/consensus/manual-seal/Cargo.toml b/client/consensus/manual-seal/Cargo.toml index ab77f355bfa..b557f171c35 100644 --- a/client/consensus/manual-seal/Cargo.toml +++ b/client/consensus/manual-seal/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-manual-seal" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] description = "Manual sealing engine for Substrate" edition = "2018" @@ -22,20 +22,20 @@ parking_lot = "0.10.0" serde = { version = "1.0", features=["derive"] } assert_matches = "1.3.0" -sc-client-api = { path = "../../../client/api", version = "2.0.0-rc5" } -sc-transaction-pool = { path = "../../transaction-pool", version = "2.0.0-rc5" } -sp-blockchain = { path = "../../../primitives/blockchain", version = "2.0.0-rc5" } -sp-consensus = { package = "sp-consensus", path = "../../../primitives/consensus/common", version = "0.8.0-rc5" } -sp-inherents = { path = "../../../primitives/inherents", version = "2.0.0-rc5" } -sp-runtime = { path = "../../../primitives/runtime", version = "2.0.0-rc5" } -sp-core = { path = "../../../primitives/core", version = "2.0.0-rc5" } -sp-transaction-pool = { path = "../../../primitives/transaction-pool", version = "2.0.0-rc5" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc5" } +sc-client-api = { path = "../../../client/api", version = "2.0.0-rc6" } +sc-transaction-pool = { path = "../../transaction-pool", version = "2.0.0-rc6" } +sp-blockchain = { path = "../../../primitives/blockchain", version = "2.0.0-rc6" } +sp-consensus = { package = "sp-consensus", path = "../../../primitives/consensus/common", version = "0.8.0-rc6" } +sp-inherents = { path = "../../../primitives/inherents", version = "2.0.0-rc6" } +sp-runtime = { path = "../../../primitives/runtime", version = "2.0.0-rc6" } +sp-core = { path = "../../../primitives/core", version = "2.0.0-rc6" } +sp-transaction-pool = { path = "../../../primitives/transaction-pool", version = "2.0.0-rc6" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc6" } [dev-dependencies] -sc-basic-authorship = { path = "../../basic-authorship", version = "0.8.0-rc5" } -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client", version = "2.0.0-rc5" } -substrate-test-runtime-transaction-pool = { path = "../../../test-utils/runtime/transaction-pool", version = "2.0.0-rc5" } +sc-basic-authorship = { path = "../../basic-authorship", version = "0.8.0-rc6" } +substrate-test-runtime-client = { path = "../../../test-utils/runtime/client", version = "2.0.0-rc6" } +substrate-test-runtime-transaction-pool = { path = "../../../test-utils/runtime/transaction-pool", 
version = "2.0.0-rc6" } tokio = { version = "0.2", features = ["rt-core", "macros"] } env_logger = "0.7.0" tempfile = "3.1.0" diff --git a/client/consensus/manual-seal/README.md b/client/consensus/manual-seal/README.md new file mode 100644 index 00000000000..b355f8b7318 --- /dev/null +++ b/client/consensus/manual-seal/README.md @@ -0,0 +1,4 @@ +A manual sealing engine: the engine listens for rpc calls to seal blocks and create forks. +This is suitable for a testing environment. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/consensus/pow/Cargo.toml b/client/consensus/pow/Cargo.toml index b72166f9ce9..993502972f2 100644 --- a/client/consensus/pow/Cargo.toml +++ b/client/consensus/pow/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-pow" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] description = "PoW consensus algorithm for substrate" edition = "2018" @@ -13,17 +13,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0-rc5", path = "../../../primitives/api" } -sc-client-api = { version = "2.0.0-rc5", path = "../../api" } -sp-block-builder = { version = "2.0.0-rc5", path = "../../../primitives/block-builder" } -sp-inherents = { version = "2.0.0-rc5", path = "../../../primitives/inherents" } -sp-consensus-pow = { version = "0.8.0-rc5", path = "../../../primitives/consensus/pow" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../primitives/runtime" } +sp-api = { version = "2.0.0-rc6", path = "../../../primitives/api" } +sc-client-api = { version = "2.0.0-rc6", path = "../../api" } +sp-block-builder = { version = "2.0.0-rc6", path = "../../../primitives/block-builder" } +sp-inherents = { version = "2.0.0-rc6", path = "../../../primitives/inherents" } +sp-consensus-pow = { version = "0.8.0-rc6", path = "../../../primitives/consensus/pow" } +sp-consensus = { version = "0.8.0-rc6", path = "../../../primitives/consensus/common" } log = "0.4.8" futures = { version = "0.3.1", features = ["compat"] } -sp-timestamp = { version = "2.0.0-rc5", path = "../../../primitives/timestamp" } +sp-timestamp = { version = "2.0.0-rc6", path = "../../../primitives/timestamp" } derive_more = "0.99.2" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc5"} +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc6"} diff --git a/client/consensus/pow/README.md b/client/consensus/pow/README.md new file mode 100644 index 00000000000..a335ec36704 --- /dev/null +++ b/client/consensus/pow/README.md @@ -0,0 +1,16 @@ +Proof of work consensus for Substrate. + +To use this engine, you can need to have a struct that implements +`PowAlgorithm`. After that, pass an instance of the struct, along +with other necessary client references to `import_queue` to setup +the queue. 
Use the `start_mine` function for basic CPU mining. + +The auxiliary storage for PoW engine only stores the total difficulty. +For other storage requirements for particular PoW algorithm (such as +the actual difficulty for each particular blocks), you can take a client +reference in your `PowAlgorithm` implementation, and use a separate prefix +for the auxiliary storage. It is also possible to just use the runtime +as the storage, but it is not recommended as it won't work well with light +clients. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index 9fe82d85053..1ba015b0801 100644 --- a/client/consensus/slots/Cargo.toml +++ b/client/consensus/slots/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-slots" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] description = "Generic slots-based utilities for consensus" edition = "2018" @@ -14,21 +14,21 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4" } -sc-client-api = { version = "2.0.0-rc5", path = "../../api" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-application-crypto = { version = "2.0.0-rc5", path = "../../../primitives/application-crypto" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-consensus-slots = { version = "0.8.0-rc5", path = "../../../primitives/consensus/slots" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../../primitives/state-machine" } -sp-api = { version = "2.0.0-rc5", path = "../../../primitives/api" } -sc-telemetry = { version = "2.0.0-rc5", path = "../../telemetry" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -sp-inherents = { version = "2.0.0-rc5", path = "../../../primitives/inherents" } +sc-client-api = { version = "2.0.0-rc6", path = "../../api" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } +sp-application-crypto = { version = "2.0.0-rc6", path = "../../../primitives/application-crypto" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../../primitives/blockchain" } +sp-consensus-slots = { version = "0.8.0-rc6", path = "../../../primitives/consensus/slots" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../primitives/runtime" } +sp-state-machine = { version = "0.8.0-rc6", path = "../../../primitives/state-machine" } +sp-api = { version = "2.0.0-rc6", path = "../../../primitives/api" } +sc-telemetry = { version = "2.0.0-rc6", path = "../../telemetry" } +sp-consensus = { version = "0.8.0-rc6", path = "../../../primitives/consensus/common" } +sp-inherents = { version = "2.0.0-rc6", path = "../../../primitives/inherents" } futures = "0.3.4" futures-timer = "3.0.1" parking_lot = "0.10.0" log = "0.4.8" [dev-dependencies] -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../../test-utils/runtime/client" } +substrate-test-runtime-client = { version = "2.0.0-rc6", path = "../../../test-utils/runtime/client" } diff --git a/client/consensus/slots/README.md b/client/consensus/slots/README.md new file mode 100644 index 00000000000..9ab3c3742f3 --- /dev/null +++ b/client/consensus/slots/README.md @@ -0,0 +1,7 @@ +Slots functionality for Substrate. 
+ +Some consensus algorithms have a concept of *slots*, which are intervals in +time during which certain events can and/or must occur. This crate +provides generic functionality for slots. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/consensus/uncles/Cargo.toml b/client/consensus/uncles/Cargo.toml index 757daeea551..106fb57b6e6 100644 --- a/client/consensus/uncles/Cargo.toml +++ b/client/consensus/uncles/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-uncles" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] description = "Generic uncle inclusion utilities for consensus" edition = "2018" @@ -12,10 +12,10 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-client-api = { version = "2.0.0-rc5", path = "../../api" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-authorship = { version = "2.0.0-rc5", path = "../../../primitives/authorship" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -sp-inherents = { version = "2.0.0-rc5", path = "../../../primitives/inherents" } +sc-client-api = { version = "2.0.0-rc6", path = "../../api" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../primitives/runtime" } +sp-authorship = { version = "2.0.0-rc6", path = "../../../primitives/authorship" } +sp-consensus = { version = "0.8.0-rc6", path = "../../../primitives/consensus/common" } +sp-inherents = { version = "2.0.0-rc6", path = "../../../primitives/inherents" } log = "0.4.8" diff --git a/client/consensus/uncles/README.md b/client/consensus/uncles/README.md new file mode 100644 index 00000000000..1b6fed5b977 --- /dev/null +++ b/client/consensus/uncles/README.md @@ -0,0 +1,3 @@ +Uncles functionality for Substrate. 
+ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 50e14fcaae6..28ef90cf231 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-client-db" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -23,23 +23,23 @@ parity-util-mem = { version = "0.7.0", default-features = false, features = ["st codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } blake2-rfc = "0.2.18" -sc-client-api = { version = "2.0.0-rc5", path = "../api" } -sp-arithmetic = { version = "2.0.0-rc5", path = "../../primitives/arithmetic" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } -sc-executor = { version = "0.8.0-rc5", path = "../executor" } -sc-state-db = { version = "0.8.0-rc5", path = "../state-db" } -sp-trie = { version = "2.0.0-rc5", path = "../../primitives/trie" } -sp-consensus = { version = "0.8.0-rc5", path = "../../primitives/consensus/common" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } -sp-database = { version = "2.0.0-rc5", path = "../../primitives/database" } +sc-client-api = { version = "2.0.0-rc6", path = "../api" } +sp-arithmetic = { version = "2.0.0-rc6", path = "../../primitives/arithmetic" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-rc6", path = "../../primitives/runtime" } +sp-state-machine = { version = "0.8.0-rc6", path = "../../primitives/state-machine" } +sc-executor = { version = "0.8.0-rc6", path = "../executor" } +sc-state-db = { version = "0.8.0-rc6", path = "../state-db" } +sp-trie = { version = "2.0.0-rc6", path = "../../primitives/trie" } +sp-consensus = { version = "0.8.0-rc6", path = "../../primitives/consensus/common" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../primitives/blockchain" } +sp-database = { version = "2.0.0-rc6", path = "../../primitives/database" } parity-db = { version = "0.1.2", optional = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-rc5", path = "../../utils/prometheus" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-rc6", path = "../../utils/prometheus" } [dev-dependencies] -sp-keyring = { version = "2.0.0-rc5", path = "../../primitives/keyring" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../test-utils/runtime/client" } +sp-keyring = { version = "2.0.0-rc6", path = "../../primitives/keyring" } +substrate-test-runtime-client = { version = "2.0.0-rc6", path = "../../test-utils/runtime/client" } env_logger = "0.7.0" quickcheck = "0.9" kvdb-rocksdb = "0.9" diff --git a/client/db/README.md b/client/db/README.md new file mode 100644 index 00000000000..e5fb3fce1d9 --- /dev/null +++ b/client/db/README.md @@ -0,0 +1,11 @@ +Client backend that is backed by a database. + +# Canonicality vs. Finality + +Finality indicates that a block will not be reverted, according to the consensus algorithm, +while canonicality indicates that the block may be reverted, but we will be unable to do so, +having discarded heavy state that will allow a chain reorganization. + +Finality implies canonicality but not vice-versa. 
+ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index f59c89a9d70..c25c9479c82 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-executor" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -15,22 +15,22 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" codec = { package = "parity-scale-codec", version = "1.3.4" } -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-trie = { version = "2.0.0-rc5", path = "../../primitives/trie" } -sp-serializer = { version = "2.0.0-rc5", path = "../../primitives/serializer" } -sp-version = { version = "2.0.0-rc5", path = "../../primitives/version" } -sp-panic-handler = { version = "2.0.0-rc5", path = "../../primitives/panic-handler" } +sp-io = { version = "2.0.0-rc6", path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +sp-trie = { version = "2.0.0-rc6", path = "../../primitives/trie" } +sp-serializer = { version = "2.0.0-rc6", path = "../../primitives/serializer" } +sp-version = { version = "2.0.0-rc6", path = "../../primitives/version" } +sp-panic-handler = { version = "2.0.0-rc6", path = "../../primitives/panic-handler" } wasmi = "0.6.2" parity-wasm = "0.41.0" lazy_static = "1.4.0" -sp-api = { version = "2.0.0-rc5", path = "../../primitives/api" } -sp-wasm-interface = { version = "2.0.0-rc5", path = "../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0-rc5", path = "../../primitives/runtime-interface" } -sp-externalities = { version = "0.8.0-rc5", path = "../../primitives/externalities" } -sc-executor-common = { version = "0.8.0-rc5", path = "common" } -sc-executor-wasmi = { version = "0.8.0-rc5", path = "wasmi" } -sc-executor-wasmtime = { version = "0.8.0-rc5", path = "wasmtime", optional = true } +sp-api = { version = "2.0.0-rc6", path = "../../primitives/api" } +sp-wasm-interface = { version = "2.0.0-rc6", path = "../../primitives/wasm-interface" } +sp-runtime-interface = { version = "2.0.0-rc6", path = "../../primitives/runtime-interface" } +sp-externalities = { version = "0.8.0-rc6", path = "../../primitives/externalities" } +sc-executor-common = { version = "0.8.0-rc6", path = "common" } +sc-executor-wasmi = { version = "0.8.0-rc6", path = "wasmi" } +sc-executor-wasmtime = { version = "0.8.0-rc6", path = "wasmtime", optional = true } parking_lot = "0.10.0" log = "0.4.8" libsecp256k1 = "0.3.4" @@ -39,13 +39,13 @@ libsecp256k1 = "0.3.4" assert_matches = "1.3.0" wabt = "0.9.2" hex-literal = "0.2.1" -sc-runtime-test = { version = "2.0.0-rc5", path = "runtime-test" } -substrate-test-runtime = { version = "2.0.0-rc5", path = "../../test-utils/runtime" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } +sc-runtime-test = { version = "2.0.0-rc6", path = "runtime-test" } +substrate-test-runtime = { version = "2.0.0-rc6", path = "../../test-utils/runtime" } +sp-state-machine = { version = "0.8.0-rc6", path = "../../primitives/state-machine" } test-case = "0.3.3" -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-tracing = { version = "2.0.0-rc5", path = "../../primitives/tracing" } -sc-tracing = { version = "2.0.0-rc5", path = 
"../tracing" } +sp-runtime = { version = "2.0.0-rc6", path = "../../primitives/runtime" } +sp-tracing = { version = "2.0.0-rc6", path = "../../primitives/tracing" } +sc-tracing = { version = "2.0.0-rc6", path = "../tracing" } tracing = "0.1.18" [features] diff --git a/client/executor/README.md b/client/executor/README.md new file mode 100644 index 00000000000..ab7b3d45206 --- /dev/null +++ b/client/executor/README.md @@ -0,0 +1,13 @@ +A crate that provides means of executing/dispatching calls into the runtime. + +There are a few responsibilities of this crate at the moment: + +- It provides an implementation of a common entrypoint for calling into the runtime, both +wasm and compiled. +- It defines the environment for the wasm execution, namely the host functions that are to be +provided into the wasm runtime module. +- It also provides the required infrastructure for executing the current wasm runtime (specified +by the current value of `:code` in the provided externalities), i.e. interfacing with +wasm engine used, instance cache. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml index 2189d89b12d..bdbc5071323 100644 --- a/client/executor/common/Cargo.toml +++ b/client/executor/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-executor-common" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -18,11 +18,11 @@ derive_more = "0.99.2" parity-wasm = "0.41.0" codec = { package = "parity-scale-codec", version = "1.3.4" } wasmi = "0.6.2" -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-allocator = { version = "2.0.0-rc5", path = "../../../primitives/allocator" } -sp-wasm-interface = { version = "2.0.0-rc5", path = "../../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0-rc5", path = "../../../primitives/runtime-interface" } -sp-serializer = { version = "2.0.0-rc5", path = "../../../primitives/serializer" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } +sp-allocator = { version = "2.0.0-rc6", path = "../../../primitives/allocator" } +sp-wasm-interface = { version = "2.0.0-rc6", path = "../../../primitives/wasm-interface" } +sp-runtime-interface = { version = "2.0.0-rc6", path = "../../../primitives/runtime-interface" } +sp-serializer = { version = "2.0.0-rc6", path = "../../../primitives/serializer" } [features] default = [] diff --git a/client/executor/common/README.md b/client/executor/common/README.md new file mode 100644 index 00000000000..0c0d3bf08bc --- /dev/null +++ b/client/executor/common/README.md @@ -0,0 +1,3 @@ +A set of common definitions that are needed for defining execution engines. 
+ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/executor/runtime-test/Cargo.toml b/client/executor/runtime-test/Cargo.toml index 9645dd90694..037359ac9ee 100644 --- a/client/executor/runtime-test/Cargo.toml +++ b/client/executor/runtime-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-runtime-test" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -13,12 +13,12 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/io" } -sp-sandbox = { version = "0.8.0-rc5", default-features = false, path = "../../../primitives/sandbox" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/runtime" } -sp-allocator = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/allocator" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/std" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/io" } +sp-sandbox = { version = "0.8.0-rc6", default-features = false, path = "../../../primitives/sandbox" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/runtime" } +sp-allocator = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/allocator" } [build-dependencies] wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } diff --git a/client/executor/wasmi/Cargo.toml b/client/executor/wasmi/Cargo.toml index 74456f06671..14468e71fd6 100644 --- a/client/executor/wasmi/Cargo.toml +++ b/client/executor/wasmi/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-executor-wasmi" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -16,8 +16,8 @@ targets = ["x86_64-unknown-linux-gnu"] log = "0.4.8" wasmi = "0.6.2" codec = { package = "parity-scale-codec", version = "1.3.4" } -sc-executor-common = { version = "0.8.0-rc5", path = "../common" } -sp-wasm-interface = { version = "2.0.0-rc5", path = "../../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0-rc5", path = "../../../primitives/runtime-interface" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-allocator = { version = "2.0.0-rc5", path = "../../../primitives/allocator" } +sc-executor-common = { version = "0.8.0-rc6", path = "../common" } +sp-wasm-interface = { version = "2.0.0-rc6", path = "../../../primitives/wasm-interface" } +sp-runtime-interface = { version = "2.0.0-rc6", path = "../../../primitives/runtime-interface" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } +sp-allocator = { version = "2.0.0-rc6", path = "../../../primitives/allocator" } diff --git a/client/executor/wasmi/README.md b/client/executor/wasmi/README.md new file mode 100644 index 00000000000..ad613aa1245 --- /dev/null +++ b/client/executor/wasmi/README.md @@ -0,0 +1,3 @@ +This 
crate provides an implementation of `WasmModule` that is baked by wasmi. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index 6eea4e6b14a..9618a659f52 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-executor-wasmtime" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -16,11 +16,11 @@ log = "0.4.8" scoped-tls = "1.0" parity-wasm = "0.41.0" codec = { package = "parity-scale-codec", version = "1.3.4" } -sc-executor-common = { version = "0.8.0-rc5", path = "../common" } -sp-wasm-interface = { version = "2.0.0-rc5", path = "../../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0-rc5", path = "../../../primitives/runtime-interface" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-allocator = { version = "2.0.0-rc5", path = "../../../primitives/allocator" } +sc-executor-common = { version = "0.8.0-rc6", path = "../common" } +sp-wasm-interface = { version = "2.0.0-rc6", path = "../../../primitives/wasm-interface" } +sp-runtime-interface = { version = "2.0.0-rc6", path = "../../../primitives/runtime-interface" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } +sp-allocator = { version = "2.0.0-rc6", path = "../../../primitives/allocator" } wasmtime = "0.19" pwasm-utils = "0.14.0" diff --git a/client/executor/wasmtime/README.md b/client/executor/wasmtime/README.md new file mode 100644 index 00000000000..3e9ac0bddbd --- /dev/null +++ b/client/executor/wasmtime/README.md @@ -0,0 +1 @@ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index 7cd3548a762..b73fbbd8d17 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-finality-grandpa" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -15,46 +15,46 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" -fork-tree = { version = "2.0.0-rc5", path = "../../utils/fork-tree" } +fork-tree = { version = "2.0.0-rc6", path = "../../utils/fork-tree" } futures = "0.3.4" futures-timer = "3.0.1" log = "0.4.8" parking_lot = "0.10.0" rand = "0.7.2" parity-scale-codec = { version = "1.3.4", features = ["derive"] } -sp-application-crypto = { version = "2.0.0-rc5", path = "../../primitives/application-crypto" } -sp-arithmetic = { version = "2.0.0-rc5", path = "../../primitives/arithmetic" } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0-rc5", path = "../../primitives/utils" } -sp-consensus = { version = "0.8.0-rc5", path = "../../primitives/consensus/common" } -sc-consensus = { version = "0.8.0-rc5", path = "../../client/consensus/common" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-api = { version = "2.0.0-rc5", path = "../../primitives/api" } -sc-telemetry = { version = "2.0.0-rc5", path = "../telemetry" } -sc-keystore = { version = "2.0.0-rc5", path = "../keystore" } +sp-application-crypto = { version = "2.0.0-rc6", path = "../../primitives/application-crypto" } 
+sp-arithmetic = { version = "2.0.0-rc6", path = "../../primitives/arithmetic" } +sp-runtime = { version = "2.0.0-rc6", path = "../../primitives/runtime" } +sp-utils = { version = "2.0.0-rc6", path = "../../primitives/utils" } +sp-consensus = { version = "0.8.0-rc6", path = "../../primitives/consensus/common" } +sc-consensus = { version = "0.8.0-rc6", path = "../../client/consensus/common" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +sp-api = { version = "2.0.0-rc6", path = "../../primitives/api" } +sc-telemetry = { version = "2.0.0-rc6", path = "../telemetry" } +sc-keystore = { version = "2.0.0-rc6", path = "../keystore" } serde_json = "1.0.41" -sc-client-api = { version = "2.0.0-rc5", path = "../api" } -sp-inherents = { version = "2.0.0-rc5", path = "../../primitives/inherents" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } -sc-network = { version = "0.8.0-rc5", path = "../network" } -sc-network-gossip = { version = "0.8.0-rc5", path = "../network-gossip" } -sp-finality-tracker = { version = "2.0.0-rc5", path = "../../primitives/finality-tracker" } -sp-finality-grandpa = { version = "2.0.0-rc5", path = "../../primitives/finality-grandpa" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc5"} -sc-block-builder = { version = "0.8.0-rc5", path = "../block-builder" } +sc-client-api = { version = "2.0.0-rc6", path = "../api" } +sp-inherents = { version = "2.0.0-rc6", path = "../../primitives/inherents" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../primitives/blockchain" } +sc-network = { version = "0.8.0-rc6", path = "../network" } +sc-network-gossip = { version = "0.8.0-rc6", path = "../network-gossip" } +sp-finality-tracker = { version = "2.0.0-rc6", path = "../../primitives/finality-tracker" } +sp-finality-grandpa = { version = "2.0.0-rc6", path = "../../primitives/finality-grandpa" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc6"} +sc-block-builder = { version = "0.8.0-rc6", path = "../block-builder" } finality-grandpa = { version = "0.12.3", features = ["derive-codec"] } pin-project = "0.4.6" [dev-dependencies] assert_matches = "1.3.0" finality-grandpa = { version = "0.12.3", features = ["derive-codec", "test-helpers"] } -sc-network = { version = "0.8.0-rc5", path = "../network" } -sc-network-test = { version = "0.8.0-rc5", path = "../network/test" } -sp-keyring = { version = "2.0.0-rc5", path = "../../primitives/keyring" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../test-utils/runtime/client" } -sp-consensus-babe = { version = "0.8.0-rc5", path = "../../primitives/consensus/babe" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } +sc-network = { version = "0.8.0-rc6", path = "../network" } +sc-network-test = { version = "0.8.0-rc6", path = "../network/test" } +sp-keyring = { version = "2.0.0-rc6", path = "../../primitives/keyring" } +substrate-test-runtime-client = { version = "2.0.0-rc6", path = "../../test-utils/runtime/client" } +sp-consensus-babe = { version = "0.8.0-rc6", path = "../../primitives/consensus/babe" } +sp-state-machine = { version = "0.8.0-rc6", path = "../../primitives/state-machine" } env_logger = "0.7.0" tokio = { version = "0.2", features = ["rt-core"] } tempfile = "3.1.0" -sp-api = { version = "2.0.0-rc5", path = "../../primitives/api" } +sp-api = { version = "2.0.0-rc6", path = 
"../../primitives/api" } diff --git a/client/finality-grandpa/README.md b/client/finality-grandpa/README.md new file mode 100644 index 00000000000..64a7e70bc6a --- /dev/null +++ b/client/finality-grandpa/README.md @@ -0,0 +1,39 @@ +Integration of the GRANDPA finality gadget into substrate. + +This crate is unstable and the API and usage may change. + +This crate provides a long-running future that produces finality notifications. + +# Usage + +First, create a block-import wrapper with the `block_import` function. The +GRANDPA worker needs to be linked together with this block import object, so +a `LinkHalf` is returned as well. All blocks imported (from network or +consensus or otherwise) must pass through this wrapper, otherwise consensus +is likely to break in unexpected ways. + +Next, use the `LinkHalf` and a local configuration to `run_grandpa_voter`. +This requires a `Network` implementation. The returned future should be +driven to completion and will finalize blocks in the background. + +# Changing authority sets + +The rough idea behind changing authority sets in GRANDPA is that at some point, +we obtain agreement for some maximum block height that the current set can +finalize, and once a block with that height is finalized the next set will +pick up finalization from there. + +Technically speaking, this would be implemented as a voting rule which says, +"if there is a signal for a change in N blocks in block B, only vote on +chains with length NUM(B) + N if they contain B". This conditional-inclusion +logic is complex to compute because it requires looking arbitrarily far +back in the chain. + +Instead, we keep track of a list of all signals we've seen so far (across +all forks), sorted ascending by the block number they would be applied at. +We never vote on chains with number higher than the earliest handoff block +number (this is num(signal) + N). When finalizing a block, we either apply +or prune any signaled changes based on whether the signaling block is +included in the newly-finalized chain. 
+ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/finality-grandpa/rpc/Cargo.toml b/client/finality-grandpa/rpc/Cargo.toml index ca405eaec9d..28197405c8d 100644 --- a/client/finality-grandpa/rpc/Cargo.toml +++ b/client/finality-grandpa/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-finality-grandpa-rpc" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] description = "RPC extensions for the GRANDPA finality gadget" repository = "https://github.com/paritytech/substrate/" @@ -8,9 +8,9 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -sc-rpc = { version = "2.0.0-rc5", path = "../../rpc" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sc-finality-grandpa = { version = "0.8.0-rc5", path = "../" } +sc-rpc = { version = "2.0.0-rc6", path = "../../rpc" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../primitives/runtime" } +sc-finality-grandpa = { version = "0.8.0-rc6", path = "../" } finality-grandpa = { version = "0.12.3", features = ["derive-codec"] } jsonrpc-core = "14.2.0" jsonrpc-core-client = "14.2.0" @@ -24,13 +24,13 @@ derive_more = "0.99.2" parity-scale-codec = { version = "1.3.0", features = ["derive"] } [dev-dependencies] -sc-block-builder = { version = "0.8.0-rc5", path = "../../block-builder" } -sc-network-test = { version = "0.8.0-rc5", path = "../../network/test" } -sc-rpc = { version = "2.0.0-rc5", path = "../../rpc", features = ["test-helpers"] } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-finality-grandpa = { version = "2.0.0-rc5", path = "../../../primitives/finality-grandpa" } -sp-keyring = { version = "2.0.0-rc5", path = "../../../primitives/keyring" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../../test-utils/runtime/client" } +sc-block-builder = { version = "0.8.0-rc6", path = "../../block-builder" } +sc-network-test = { version = "0.8.0-rc6", path = "../../network/test" } +sc-rpc = { version = "2.0.0-rc6", path = "../../rpc", features = ["test-helpers"] } +sp-blockchain = { version = "2.0.0-rc6", path = "../../../primitives/blockchain" } +sp-consensus = { version = "0.8.0-rc6", path = "../../../primitives/consensus/common" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } +sp-finality-grandpa = { version = "2.0.0-rc6", path = "../../../primitives/finality-grandpa" } +sp-keyring = { version = "2.0.0-rc6", path = "../../../primitives/keyring" } +substrate-test-runtime-client = { version = "2.0.0-rc6", path = "../../../test-utils/runtime/client" } lazy_static = "1.4" diff --git a/client/finality-grandpa/rpc/README.md b/client/finality-grandpa/rpc/README.md new file mode 100644 index 00000000000..0007f55dbd4 --- /dev/null +++ b/client/finality-grandpa/rpc/README.md @@ -0,0 +1,3 @@ +RPC API for GRANDPA. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/informant/Cargo.toml b/client/informant/Cargo.toml index 917052041ba..6e6dc01f91e 100644 --- a/client/informant/Cargo.toml +++ b/client/informant/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-informant" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] description = "Substrate informant." 
edition = "2018" @@ -16,10 +16,10 @@ ansi_term = "0.12.1" futures = "0.3.4" log = "0.4.8" parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } -sc-client-api = { version = "2.0.0-rc5", path = "../api" } -sc-network = { version = "0.8.0-rc5", path = "../network" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } +sc-client-api = { version = "2.0.0-rc6", path = "../api" } +sc-network = { version = "0.8.0-rc6", path = "../network" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../primitives/blockchain" } +sp-runtime = { version = "2.0.0-rc6", path = "../../primitives/runtime" } sp-utils = { version = "2.0.0-rc2", path = "../../primitives/utils" } sp-transaction-pool = { version = "2.0.0-rc2", path = "../../primitives/transaction-pool" } wasm-timer = "0.2" diff --git a/client/informant/README.md b/client/informant/README.md new file mode 100644 index 00000000000..b494042590a --- /dev/null +++ b/client/informant/README.md @@ -0,0 +1,3 @@ +Console informant. Prints sync progress and block events. Runs on the calling thread. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/keystore/Cargo.toml b/client/keystore/Cargo.toml index 0fe4ab5a9ba..004d829bbfa 100644 --- a/client/keystore/Cargo.toml +++ b/client/keystore/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-keystore" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -15,8 +15,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-application-crypto = { version = "2.0.0-rc5", path = "../../primitives/application-crypto" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +sp-application-crypto = { version = "2.0.0-rc6", path = "../../primitives/application-crypto" } hex = "0.4.0" merlin = { version = "2.0", default-features = false } parking_lot = "0.10.0" diff --git a/client/keystore/README.md b/client/keystore/README.md new file mode 100644 index 00000000000..9946a61d6fd --- /dev/null +++ b/client/keystore/README.md @@ -0,0 +1,3 @@ +Keystore (and session key management) for ed25519 based chains like Polkadot. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/light/Cargo.toml b/client/light/Cargo.toml index 53e84ae3186..23b306d178e 100644 --- a/client/light/Cargo.toml +++ b/client/light/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "components for a light client" name = "sc-light" -version = "2.0.0-rc5" +version = "2.0.0-rc6" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" diff --git a/client/light/README.md b/client/light/README.md new file mode 100644 index 00000000000..1ba1f155b16 --- /dev/null +++ b/client/light/README.md @@ -0,0 +1,3 @@ +Light client components. 
+ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index 90d606238b6..7af59a68dfe 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Gossiping for the Substrate network protocol" name = "sc-network-gossip" -version = "0.8.0-rc5" +version = "0.8.0-rc6" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -19,12 +19,12 @@ futures-timer = "3.0.1" libp2p = { version = "0.23.0", default-features = false } log = "0.4.8" lru = "0.4.3" -sc-network = { version = "0.8.0-rc5", path = "../network" } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } +sc-network = { version = "0.8.0-rc6", path = "../network" } +sp-runtime = { version = "2.0.0-rc6", path = "../../primitives/runtime" } wasm-timer = "0.2" [dev-dependencies] async-std = "1.6.2" quickcheck = "0.9.0" rand = "0.7.2" -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../test-utils/runtime/client" } +substrate-test-runtime-client = { version = "2.0.0-rc6", path = "../../test-utils/runtime/client" } diff --git a/client/network-gossip/README.md b/client/network-gossip/README.md new file mode 100644 index 00000000000..9030fac0564 --- /dev/null +++ b/client/network-gossip/README.md @@ -0,0 +1,41 @@ +Polite gossiping. + +This crate provides gossiping capabilities on top of a network. + +Gossip messages are separated into two categories: "topics" and consensus engine ID. +The consensus engine ID is sent over the wire with the message, while the topic is not, +with the expectation that the topic can be derived implicitly from the content of the +message, assuming it is valid. + +A topic is a single 32-byte tag associated with a message, used to group those messages +in an opaque way. Consensus code can invoke `broadcast_topic` to attempt to send all messages +under a single topic to all peers who don't have them yet, and `send_topic` to +send all messages under a single topic to a specific peer. + +# Usage + +- Implement the `Network` trait, representing the low-level networking primitives. It is + already implemented on `sc_network::NetworkService`. +- Implement the `Validator` trait. See the section below. +- Decide on a `ConsensusEngineId`. Each gossiping protocol should have a different one. +- Build a `GossipEngine` using these three elements. +- Use the methods of the `GossipEngine` in order to send out messages and receive incoming + messages. + +# What is a validator? + +The primary role of a `Validator` is to process incoming messages from peers, and decide +whether to discard them or process them. It also decides whether to re-broadcast the message. + +The secondary role of the `Validator` is to check if a message is allowed to be sent to a given +peer. All messages, before being sent, will be checked against this filter. +This enables the validator to use information it's aware of about connected peers to decide +whether to send messages to them at any given moment in time - in particular, to wait until +peers can accept and process the message before sending it. + +Lastly, the fact that gossip validators can decide not to rebroadcast messages +opens the door for neighbor status packets to be baked into the gossip protocol. +These status packets will typically contain light pieces of information +used to inform peers of a current view of protocol state.
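As a rough illustration of the topic/engine-ID split described above, the following self-contained sketch (hypothetical types, not the `sc-network-gossip` API) groups messages by their 32-byte topic and selects, `broadcast_topic`-style, the messages a given peer does not know yet:

```rust
use std::collections::{HashMap, HashSet};

// Hypothetical, simplified types; the real crate derives topics from message content
// and tracks per-peer knowledge inside the `GossipEngine`.
type Topic = [u8; 32];
type ConsensusEngineId = [u8; 4];

#[derive(Default)]
struct KnownMessages {
    // Messages grouped by topic; the engine ID travels with each message on the wire.
    by_topic: HashMap<Topic, Vec<(ConsensusEngineId, Vec<u8>)>>,
}

impl KnownMessages {
    fn insert(&mut self, topic: Topic, engine: ConsensusEngineId, message: Vec<u8>) {
        self.by_topic.entry(topic).or_default().push((engine, message));
    }

    /// Messages under `topic` that `peer_has` does not already know about,
    /// i.e. what a `broadcast_topic`-style call would push to that peer.
    fn to_send(
        &self,
        topic: &Topic,
        peer_has: &HashSet<Vec<u8>>,
    ) -> Vec<&(ConsensusEngineId, Vec<u8>)> {
        self.by_topic
            .get(topic)
            .map(|msgs| msgs.iter().filter(|(_, m)| !peer_has.contains(m)).collect())
            .unwrap_or_default()
    }
}

fn main() {
    let mut known = KnownMessages::default();
    known.insert([0u8; 32], *b"test", b"hello".to_vec());
    let peer_has: HashSet<Vec<u8>> = HashSet::new();
    assert_eq!(known.to_send(&[0u8; 32], &peer_has).len(), 1);
}
```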
+ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index de4f484535e..a5020507b50 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Substrate network protocol" name = "sc-network" -version = "0.8.0-rc5" +version = "0.8.0-rc6" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -25,7 +25,7 @@ derive_more = "0.99.2" either = "1.5.3" erased-serde = "0.3.9" fnv = "1.0.6" -fork-tree = { version = "2.0.0-rc5", path = "../../utils/fork-tree" } +fork-tree = { version = "2.0.0-rc6", path = "../../utils/fork-tree" } futures = "0.3.4" futures-timer = "3.0.2" futures_codec = "0.4.0" @@ -38,23 +38,23 @@ lru = "0.4.0" nohash-hasher = "0.2.0" parking_lot = "0.10.0" pin-project = "0.4.6" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-rc5", path = "../../utils/prometheus" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-rc6", path = "../../utils/prometheus" } prost = "0.6.1" rand = "0.7.2" -sc-block-builder = { version = "0.8.0-rc5", path = "../block-builder" } -sc-client-api = { version = "2.0.0-rc5", path = "../api" } -sc-peerset = { version = "2.0.0-rc5", path = "../peerset" } +sc-block-builder = { version = "0.8.0-rc6", path = "../block-builder" } +sc-client-api = { version = "2.0.0-rc6", path = "../api" } +sc-peerset = { version = "2.0.0-rc6", path = "../peerset" } serde = { version = "1.0.101", features = ["derive"] } serde_json = "1.0.41" slog = { version = "2.5.2", features = ["nested-values"] } slog_derive = "0.2.0" smallvec = "0.6.10" -sp-arithmetic = { version = "2.0.0-rc5", path = "../../primitives/arithmetic" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } -sp-consensus = { version = "0.8.0-rc5", path = "../../primitives/consensus/common" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0-rc5", path = "../../primitives/utils" } +sp-arithmetic = { version = "2.0.0-rc6", path = "../../primitives/arithmetic" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../primitives/blockchain" } +sp-consensus = { version = "0.8.0-rc6", path = "../../primitives/consensus/common" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-rc6", path = "../../primitives/runtime" } +sp-utils = { version = "2.0.0-rc6", path = "../../primitives/utils" } thiserror = "1" unsigned-varint = { version = "0.4.0", features = ["futures", "futures-codec"] } void = "1.0.2" @@ -72,10 +72,10 @@ env_logger = "0.7.0" libp2p = { version = "0.23.0", default-features = false, features = ["secio"] } quickcheck = "0.9.0" rand = "0.7.2" -sp-keyring = { version = "2.0.0-rc5", path = "../../primitives/keyring" } -sp-test-primitives = { version = "2.0.0-rc5", path = "../../primitives/test-primitives" } -substrate-test-runtime = { version = "2.0.0-rc5", path = "../../test-utils/runtime" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../test-utils/runtime/client" } +sp-keyring = { version = "2.0.0-rc6", path = "../../primitives/keyring" } +sp-test-primitives = { version = "2.0.0-rc6", path = "../../primitives/test-primitives" } +substrate-test-runtime = { version = "2.0.0-rc6", path = "../../test-utils/runtime" } 
+substrate-test-runtime-client = { version = "2.0.0-rc6", path = "../../test-utils/runtime/client" } tempfile = "3.1.0" [features] diff --git a/client/network/README.md b/client/network/README.md new file mode 100644 index 00000000000..e0bd691043b --- /dev/null +++ b/client/network/README.md @@ -0,0 +1,226 @@ +Substrate-specific P2P networking. + +**Important**: This crate is unstable and the API and usage may change. + +# Node identities and addresses + +In a decentralized network, each node possesses a network private key and a network public key. +In Substrate, the keys are based on the ed25519 curve. + +From a node's public key, we can derive its *identity*. In Substrate and libp2p, a node's +identity is represented with the [`PeerId`] struct. All network communications between nodes on +the network use encryption derived from both sides' keys, which means that **identities cannot +be faked**. + +A node's identity uniquely identifies a machine on the network. If you start two or more +clients using the same network key, serious interference will occur. + +# Substrate's network protocol + +Substrate's networking protocol is based upon libp2p. It is at the moment not possible, and not +planned, to permit using anything other than the libp2p network stack and the rust-libp2p +library. However, the libp2p framework is very flexible and the rust-libp2p library could be +extended to support a wider range of protocols than what is offered by libp2p. + +## Discovery mechanisms + +In order for our node to join a peer-to-peer network, it has to know a list of nodes that are +part of said network. This includes the nodes' identities and their addresses (how to reach them). +Building such a list is called the **discovery** mechanism. There are three mechanisms that +Substrate uses: + +- Bootstrap nodes. These are hard-coded node identities and addresses passed alongside +the network configuration. +- mDNS. We perform a UDP broadcast on the local network. Nodes that listen may respond with +their identity. More info [here](https://github.com/libp2p/specs/blob/master/discovery/mdns.md). +mDNS can be disabled in the network configuration. +- Kademlia random walk. Once connected, we perform random Kademlia `FIND_NODE` requests on the +configured Kademlia DHTs (one per configured chain protocol) in order for nodes to propagate to +us their view of the network. More information about Kademlia can be found [on +Wikipedia](https://en.wikipedia.org/wiki/Kademlia). + +## Connection establishment + +When node Alice knows node Bob's identity and address, it can establish a connection with Bob. +All connections must always use encryption and multiplexing. While some node addresses (e.g. +addresses using `/quic`) already imply which encryption and/or multiplexing to use, for others +the **multistream-select** protocol is used in order to negotiate an encryption layer and/or a +multiplexing layer. + +The connection establishment mechanism is called the **transport**. + +As of the writing of this documentation, the following base-layer protocols are supported by +Substrate: + +- TCP/IP for addresses of the form `/ip4/1.2.3.4/tcp/5`. Once the TCP connection is open, an +encryption and a multiplexing layer are negotiated on top. +- WebSockets for addresses of the form `/ip4/1.2.3.4/tcp/5/ws`. A TCP/IP connection is opened and +the WebSockets protocol is negotiated on top. Communications then happen inside WebSockets data +frames. Encryption and multiplexing are additionally negotiated again inside this channel.
+- DNS for addresses of the form `/dns/example.com/tcp/5` or `/dns/example.com/tcp/5/ws`. A +node's address can contain a domain name. +- (All of the above using IPv6 instead of IPv4.) + +On top of the base-layer protocol, the [Noise](https://noiseprotocol.org/) protocol is +negotiated and applied. The exact handshake protocol is experimental and is subject to change. + +The following multiplexing protocols are supported: + +- [Mplex](https://github.com/libp2p/specs/tree/master/mplex). Support for mplex will likely +be deprecated in the future. +- [Yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). + +## Substreams + +Once a connection has been established and uses multiplexing, substreams can be opened. When +a substream is open, the **multistream-select** protocol is used to negotiate which protocol +to use on that given substream. + +Protocols that are specific to a certain chain have a `<protocol-id>` in their name. This +"protocol ID" is defined in the chain specifications. For example, the protocol ID of Polkadot +is "dot". In the protocol names below, `<protocol-id>` must be replaced with the corresponding +protocol ID. + +> **Note**: It is possible for the same connection to be used for multiple chains. For example, +> one can use both the `/dot/sync/2` and `/sub/sync/2` protocols on the same +> connection, provided that the remote supports them. + +Substrate uses the following standard libp2p protocols: + +- **`/ipfs/ping/1.0.0`**. We periodically open an ephemeral substream in order to ping the +remote and check whether the connection is still alive. Failure for the remote to reply leads +to a disconnection. +- **[`/ipfs/id/1.0.0`](https://github.com/libp2p/specs/tree/master/identify)**. We +periodically open an ephemeral substream in order to ask for information from the remote. +- **[`/<protocol-id>/kad`](https://github.com/libp2p/specs/pull/108)**. We periodically open +ephemeral substreams for Kademlia random walk queries. Each Kademlia query is done in a +separate substream. + +Additionally, Substrate uses the following non-libp2p-standard protocols: + +- **`/substrate/<protocol-id>/<version>`** (where `<protocol-id>` must be replaced with the +protocol ID of the targeted chain, and `<version>` is a number between 2 and 6). For each +connection we optionally keep an additional substream for all Substrate-based communications alive. +This protocol is considered legacy, and is progressively being replaced with alternatives. +This is designated as "The legacy Substrate substream" in this documentation. See below for +more details. +- **`/<protocol-id>/sync/2`** is a request-response protocol (see below) that lets one perform +requests for information about blocks. Each request is the encoding of a `BlockRequest` and +each response is the encoding of a `BlockResponse`, as defined in the `api.v1.proto` file in +this source tree. +- **`/<protocol-id>/light/2`** is a request-response protocol (see below) that lets one perform +light-client-related requests for information about the state. Each request is the encoding of +a `light::Request` and each response is the encoding of a `light::Response`, as defined in the +`light.v1.proto` file in this source tree. +- **`/<protocol-id>/transactions/1`** is a notifications protocol (see below) where +transactions are pushed to other nodes. The handshake is empty on both sides. The message +format is a SCALE-encoded list of transactions, where each transaction is an opaque list of +bytes. +- **`/<protocol-id>/block-announces/1`** is a notifications protocol (see below) where +block announces are pushed to other nodes. The handshake is empty on both sides.
The message +format is a SCALE-encoded tuple containing a block header followed by an opaque list of +bytes containing some data associated with this block announcement, e.g. a candidate message. +- Notifications protocols that are registered using the `register_notifications_protocol` +method. For example: `/paritytech/grandpa/1`. See below for more information. + +## The legacy Substrate substream + +Substrate uses a component named the **peerset manager (PSM)**. Through the discovery +mechanism, the PSM is aware of the nodes that are part of the network and decides which nodes +we should perform Substrate-based communications with. For these nodes, we open a connection +if necessary and open a unique substream for Substrate-based communications. If the PSM decides +that we should disconnect a node, then that substream is closed. + +For more information about the PSM, see the *sc-peerset* crate. + +Note that at the moment there is no mechanism in place to solve the issues that arise when the +two sides of a connection open the unique substream simultaneously. In order to not run into +issues, only the dialer of a connection is allowed to open the unique substream. When the +substream is closed, the entire connection is closed as well. This is a bug that will be +resolved by deprecating the protocol entirely. + +Within the unique Substrate substream, messages encoded using +[*parity-scale-codec*](https://github.com/paritytech/parity-scale-codec) are exchanged. +The details of these messages are not fully documented, but they can be found in the +`message.rs` file. + +Once the substream is open, the first step is an exchange of a *status* message from both +sides, containing information such as the chain root hash, head of chain, and so on. + +Communications within this substream include: + +- Syncing. Blocks are announced and requested from other nodes. +- Light-client requests. When a light client requires information, a random node we have a +substream open with is chosen, and the information is requested from it. +- Gossiping. Used for example by GRANDPA. + +## Request-response protocols + +A so-called request-response protocol is defined as follows: + +- When a substream is opened, the opening side sends a message whose content is +protocol-specific. The message must be prefixed with an +[LEB128-encoded number](https://en.wikipedia.org/wiki/LEB128) indicating its length. After the +message has been sent, the writing side is closed. +- The remote sends back the response prefixed with a LEB128-encoded length, and closes its +side as well. + +Each request is performed in a new separate substream. A small sketch of this LEB128 +length-prefix framing is given at the end of this document. + +## Notifications protocols + +A so-called notifications protocol is defined as follows: + +- When a substream is opened, the opening side sends a handshake message whose content is +protocol-specific. The handshake message must be prefixed with an +[LEB128-encoded number](https://en.wikipedia.org/wiki/LEB128) indicating its length. The +handshake message can be of length 0, in which case the sender has to send a single `0`. +- The receiver then either immediately closes the substream, or answers with its own +LEB128-prefixed protocol-specific handshake response. The message can be of length 0, in which +case a single `0` has to be sent back. +- Once the handshake has completed, the notifications protocol is unidirectional. Only the +node which initiated the substream can push notifications. If the remote wants to send +notifications as well, it has to open its own unidirectional substream.
+- Each notification must be prefixed with an LEB128-encoded length. The encoding of the +messages is specific to each protocol. +- Either party can signal that it doesn't want a notifications substream anymore by closing +its writing side. The other party should respond by closing its own writing side soon after. + +The API of `sc-network` allows one to register user-defined notification protocols. +`sc-network` automatically tries to open a substream towards each node for which the legacy +Substrate substream is open. The handshake is then performed automatically. + +For example, the `sc-finality-grandpa` crate registers the `/paritytech/grandpa/1` +notifications protocol. + +At the moment, for backwards-compatibility, notification protocols are tied to the legacy +Substrate substream. Additionally, the handshake message is hardcoded to be a single 8-bit +integer representing the role of the node: + +- 1 for a full node. +- 2 for a light node. +- 4 for an authority. + +In the future, though, these restrictions will be removed. + +# Usage + +Using the `sc-network` crate is done through the [`NetworkWorker`] struct. Create this +struct by passing a [`config::Params`], then poll it as if it were a `Future`. You can extract an +`Arc<NetworkService>` from the `NetworkWorker`, which can be shared amongst multiple places +in order to give orders to the networking. + +See the [`config`] module for more information about how to configure the networking. + +After the `NetworkWorker` has been created, the important things to do are: + +- Calling `NetworkWorker::poll` in order to advance the network. This can be done by +dispatching a background task with the [`NetworkWorker`]. +- Calling `on_block_import` whenever a block is added to the client. +- Calling `on_block_finalized` whenever a block is finalized. +- Calling `trigger_repropagate` when a transaction is added to the pool. + +More precise usage details are still being worked on and will likely change in the future.
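To make the framing used by the request-response and notifications protocols concrete, here is a minimal, self-contained sketch of the unsigned-LEB128 length prefix described above. It is an illustration only; the actual framing in `sc-network` relies on the `unsigned-varint` crate listed in its dependencies.

```rust
// Sketch of unsigned-LEB128 length-prefix framing; not the actual sc-network codec.
fn encode_leb128_len(mut len: u64, out: &mut Vec<u8>) {
    loop {
        let mut byte = (len & 0x7f) as u8;
        len >>= 7;
        if len != 0 {
            byte |= 0x80; // more bytes follow
        }
        out.push(byte);
        if len == 0 {
            break;
        }
    }
}

fn decode_leb128_len(input: &[u8]) -> Option<(u64, usize)> {
    let mut value = 0u64;
    let mut shift = 0u32;
    for (i, byte) in input.iter().enumerate() {
        value |= u64::from(byte & 0x7f) << shift;
        if byte & 0x80 == 0 {
            return Some((value, i + 1)); // (decoded length, prefix size in bytes)
        }
        shift += 7;
        if shift >= 64 {
            return None; // prefix too long
        }
    }
    None // input ended before the prefix did
}

fn main() {
    // Frame a 300-byte message: LEB128 length prefix followed by the payload.
    let payload = vec![0u8; 300];
    let mut frame = Vec::new();
    encode_leb128_len(payload.len() as u64, &mut frame);
    frame.extend_from_slice(&payload);

    let (len, prefix) = decode_leb128_len(&frame).unwrap();
    assert_eq!(len, 300);
    assert_eq!(&frame[prefix..], &payload[..]);
}
```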
+ + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index 7c157ce1c60..f9e7ec4c89e 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Integration tests for Substrate network protocol" name = "sc-network-test" -version = "0.8.0-rc5" +version = "0.8.0-rc6" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -13,23 +13,23 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-network = { version = "0.8.0-rc5", path = "../" } +sc-network = { version = "0.8.0-rc6", path = "../" } log = "0.4.8" parking_lot = "0.10.0" futures = "0.3.4" futures-timer = "3.0.1" rand = "0.7.2" libp2p = { version = "0.23.0", default-features = false } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -sc-consensus = { version = "0.8.0-rc5", path = "../../../client/consensus/common" } -sc-client-api = { version = "2.0.0-rc5", path = "../../api" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sc-block-builder = { version = "0.8.0-rc5", path = "../../block-builder" } -sp-consensus-babe = { version = "0.8.0-rc5", path = "../../../primitives/consensus/babe" } +sp-consensus = { version = "0.8.0-rc6", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.8.0-rc6", path = "../../../client/consensus/common" } +sc-client-api = { version = "2.0.0-rc6", path = "../../api" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../primitives/runtime" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } +sc-block-builder = { version = "0.8.0-rc6", path = "../../block-builder" } +sp-consensus-babe = { version = "0.8.0-rc6", path = "../../../primitives/consensus/babe" } env_logger = "0.7.0" -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../../test-utils/runtime/client" } -substrate-test-runtime = { version = "2.0.0-rc5", path = "../../../test-utils/runtime" } +substrate-test-runtime-client = { version = "2.0.0-rc6", path = "../../../test-utils/runtime/client" } +substrate-test-runtime = { version = "2.0.0-rc6", path = "../../../test-utils/runtime" } tempfile = "3.1.0" -sc-service = { version = "0.8.0-rc5", default-features = false, features = ["test-helpers"], path = "../../service" } +sc-service = { version = "0.8.0-rc6", default-features = false, features = ["test-helpers"], path = "../../service" } diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index ef2b00daab4..9f574ff9ebe 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Substrate offchain workers" name = "sc-offchain" -version = "2.0.0-rc5" +version = "2.0.0-rc6" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -13,23 +13,23 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bytes = "0.5" -sc-client-api = { version = "2.0.0-rc5", path = "../api" } -sp-api = { version = "2.0.0-rc5", path = "../../primitives/api" } +sc-client-api = { version = 
"2.0.0-rc6", path = "../api" } +sp-api = { version = "2.0.0-rc6", path = "../../primitives/api" } fnv = "1.0.6" futures = "0.3.4" futures-timer = "3.0.1" log = "0.4.8" threadpool = "1.7" num_cpus = "1.10" -sp-offchain = { version = "2.0.0-rc5", path = "../../primitives/offchain" } +sp-offchain = { version = "2.0.0-rc6", path = "../../primitives/offchain" } codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } parking_lot = "0.10.0" -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } rand = "0.7.2" -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0-rc5", path = "../../primitives/utils" } -sc-network = { version = "0.8.0-rc5", path = "../network" } -sc-keystore = { version = "2.0.0-rc5", path = "../keystore" } +sp-runtime = { version = "2.0.0-rc6", path = "../../primitives/runtime" } +sp-utils = { version = "2.0.0-rc6", path = "../../primitives/utils" } +sc-network = { version = "0.8.0-rc6", path = "../network" } +sc-keystore = { version = "2.0.0-rc6", path = "../keystore" } [target.'cfg(not(target_os = "unknown"))'.dependencies] hyper = "0.13.2" @@ -37,10 +37,10 @@ hyper-rustls = "0.21.0" [dev-dependencies] env_logger = "0.7.0" -sc-client-db = { version = "0.8.0-rc5", default-features = true, path = "../db/" } -sc-transaction-pool = { version = "2.0.0-rc5", path = "../../client/transaction-pool" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../primitives/transaction-pool" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../test-utils/runtime/client" } +sc-client-db = { version = "0.8.0-rc6", default-features = true, path = "../db/" } +sc-transaction-pool = { version = "2.0.0-rc6", path = "../../client/transaction-pool" } +sp-transaction-pool = { version = "2.0.0-rc6", path = "../../primitives/transaction-pool" } +substrate-test-runtime-client = { version = "2.0.0-rc6", path = "../../test-utils/runtime/client" } tokio = "0.2" lazy_static = "1.4.0" diff --git a/client/offchain/README.md b/client/offchain/README.md new file mode 100644 index 00000000000..f7c097e8e0b --- /dev/null +++ b/client/offchain/README.md @@ -0,0 +1,18 @@ +Substrate offchain workers. + +The offchain workers is a special function of the runtime that +gets executed after block is imported. During execution +it's able to asynchronously submit extrinsics that will either +be propagated to other nodes or added to the next block +produced by the node as unsigned transactions. + +Offchain workers can be used for computation-heavy tasks +that are not feasible for execution during regular block processing. +It can either be tasks that no consensus is required for, +or some form of consensus over the data can be built on-chain +for instance via: +1. Challenge period for incorrect computations +2. Majority voting for results +3. 
etc + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index 8a9aa0adb18..550217c2b06 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -3,7 +3,7 @@ description = "Connectivity manager based on reputation" homepage = "http://parity.io" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" name = "sc-peerset" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" repository = "https://github.com/paritytech/substrate/" @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.4" libp2p = { version = "0.23.0", default-features = false } -sp-utils = { version = "2.0.0-rc5", path = "../../primitives/utils"} +sp-utils = { version = "2.0.0-rc6", path = "../../primitives/utils"} log = "0.4.8" serde_json = "1.0.41" wasm-timer = "0.2" diff --git a/client/peerset/README.md b/client/peerset/README.md new file mode 100644 index 00000000000..1b54c52001c --- /dev/null +++ b/client/peerset/README.md @@ -0,0 +1,4 @@ +Peer Set Manager (PSM). Contains the strategy for choosing which nodes the network should be +connected to. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/proposer-metrics/Cargo.toml b/client/proposer-metrics/Cargo.toml index 8427786919f..5708a970a1b 100644 --- a/client/proposer-metrics/Cargo.toml +++ b/client/proposer-metrics/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-proposer-metrics" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -13,4 +13,4 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.8" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc5"} +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc6"} diff --git a/client/proposer-metrics/README.md b/client/proposer-metrics/README.md new file mode 100644 index 00000000000..9669c7d3519 --- /dev/null +++ b/client/proposer-metrics/README.md @@ -0,0 +1,3 @@ +Prometheus basic proposer metrics. 
+ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/rpc-api/Cargo.toml b/client/rpc-api/Cargo.toml index 7701befbf71..95080911320 100644 --- a/client/rpc-api/Cargo.toml +++ b/client/rpc-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-rpc-api" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -21,11 +21,11 @@ jsonrpc-derive = "14.2.1" jsonrpc-pubsub = "14.2.0" log = "0.4.8" parking_lot = "0.10.0" -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-version = { version = "2.0.0-rc5", path = "../../primitives/version" } -sp-runtime = { path = "../../primitives/runtime" , version = "2.0.0-rc5"} -sp-chain-spec = { path = "../../primitives/chain-spec" , version = "2.0.0-rc5"} +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +sp-version = { version = "2.0.0-rc6", path = "../../primitives/version" } +sp-runtime = { path = "../../primitives/runtime" , version = "2.0.0-rc6"} +sp-chain-spec = { path = "../../primitives/chain-spec" , version = "2.0.0-rc6"} serde = { version = "1.0.101", features = ["derive"] } serde_json = "1.0.41" -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../primitives/transaction-pool" } -sp-rpc = { version = "2.0.0-rc5", path = "../../primitives/rpc" } +sp-transaction-pool = { version = "2.0.0-rc6", path = "../../primitives/transaction-pool" } +sp-rpc = { version = "2.0.0-rc6", path = "../../primitives/rpc" } diff --git a/client/rpc-api/README.md b/client/rpc-api/README.md new file mode 100644 index 00000000000..e860e0c2334 --- /dev/null +++ b/client/rpc-api/README.md @@ -0,0 +1,5 @@ +Substrate RPC interfaces. + +A collection of RPC methods and subscriptions supported by all substrate clients. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/rpc-servers/Cargo.toml b/client/rpc-servers/Cargo.toml index f6a1b470249..3af5cdd039d 100644 --- a/client/rpc-servers/Cargo.toml +++ b/client/rpc-servers/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-rpc-server" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -17,7 +17,7 @@ pubsub = { package = "jsonrpc-pubsub", version = "14.2.0" } log = "0.4.8" serde = "1.0.101" serde_json = "1.0.41" -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } +sp-runtime = { version = "2.0.0-rc6", path = "../../primitives/runtime" } [target.'cfg(not(target_os = "unknown"))'.dependencies] http = { package = "jsonrpc-http-server", version = "14.2.0" } diff --git a/client/rpc-servers/README.md b/client/rpc-servers/README.md new file mode 100644 index 00000000000..cf00b3169a6 --- /dev/null +++ b/client/rpc-servers/README.md @@ -0,0 +1,3 @@ +Substrate RPC servers. 
+ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index 5681672cc34..fe4a02aa83f 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-rpc" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -12,29 +12,29 @@ description = "Substrate Client RPC" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-rpc-api = { version = "0.8.0-rc5", path = "../rpc-api" } -sc-client-api = { version = "2.0.0-rc5", path = "../api" } -sp-api = { version = "2.0.0-rc5", path = "../../primitives/api" } +sc-rpc-api = { version = "0.8.0-rc6", path = "../rpc-api" } +sc-client-api = { version = "2.0.0-rc6", path = "../api" } +sp-api = { version = "2.0.0-rc6", path = "../../primitives/api" } codec = { package = "parity-scale-codec", version = "1.3.4" } futures = { version = "0.3.1", features = ["compat"] } jsonrpc-pubsub = "14.2.0" log = "0.4.8" -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } rpc = { package = "jsonrpc-core", version = "14.2.0" } -sp-version = { version = "2.0.0-rc5", path = "../../primitives/version" } +sp-version = { version = "2.0.0-rc6", path = "../../primitives/version" } serde_json = "1.0.41" -sp-session = { version = "2.0.0-rc5", path = "../../primitives/session" } -sp-offchain = { version = "2.0.0-rc5", path = "../../primitives/offchain" } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0-rc5", path = "../../primitives/utils" } -sp-rpc = { version = "2.0.0-rc5", path = "../../primitives/rpc" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } -sp-chain-spec = { version = "2.0.0-rc5", path = "../../primitives/chain-spec" } -sc-executor = { version = "0.8.0-rc5", path = "../executor" } -sc-block-builder = { version = "0.8.0-rc5", path = "../../client/block-builder" } -sc-keystore = { version = "2.0.0-rc5", path = "../keystore" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../primitives/transaction-pool" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } +sp-session = { version = "2.0.0-rc6", path = "../../primitives/session" } +sp-offchain = { version = "2.0.0-rc6", path = "../../primitives/offchain" } +sp-runtime = { version = "2.0.0-rc6", path = "../../primitives/runtime" } +sp-utils = { version = "2.0.0-rc6", path = "../../primitives/utils" } +sp-rpc = { version = "2.0.0-rc6", path = "../../primitives/rpc" } +sp-state-machine = { version = "0.8.0-rc6", path = "../../primitives/state-machine" } +sp-chain-spec = { version = "2.0.0-rc6", path = "../../primitives/chain-spec" } +sc-executor = { version = "0.8.0-rc6", path = "../executor" } +sc-block-builder = { version = "0.8.0-rc6", path = "../../client/block-builder" } +sc-keystore = { version = "2.0.0-rc6", path = "../keystore" } +sp-transaction-pool = { version = "2.0.0-rc6", path = "../../primitives/transaction-pool" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../primitives/blockchain" } hash-db = { version = "0.15.2", default-features = false } parking_lot = "0.10.0" lazy_static = { version = "1.4.0", optional = true } @@ -42,11 +42,11 @@ lazy_static = { version = "1.4.0", optional = true } [dev-dependencies] assert_matches = "1.3.0" futures01 = { package = 
"futures", version = "0.1.29" } -sc-network = { version = "0.8.0-rc5", path = "../network" } -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../test-utils/runtime/client" } +sc-network = { version = "0.8.0-rc6", path = "../network" } +sp-io = { version = "2.0.0-rc6", path = "../../primitives/io" } +substrate-test-runtime-client = { version = "2.0.0-rc6", path = "../../test-utils/runtime/client" } tokio = "0.1.22" -sc-transaction-pool = { version = "2.0.0-rc5", path = "../transaction-pool" } +sc-transaction-pool = { version = "2.0.0-rc6", path = "../transaction-pool" } [features] test-helpers = ["lazy_static"] diff --git a/client/rpc/README.md b/client/rpc/README.md new file mode 100644 index 00000000000..6066af4da71 --- /dev/null +++ b/client/rpc/README.md @@ -0,0 +1,5 @@ +Substrate RPC implementation. + +A core implementation of Substrate RPC interfaces. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 6462549403b..fc4d3298a41 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-service" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -40,39 +40,39 @@ pin-project = "0.4.8" hash-db = "0.15.2" serde = "1.0.101" serde_json = "1.0.41" -sc-keystore = { version = "2.0.0-rc5", path = "../keystore" } -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-trie = { version = "2.0.0-rc5", path = "../../primitives/trie" } -sp-externalities = { version = "0.8.0-rc5", path = "../../primitives/externalities" } -sp-utils = { version = "2.0.0-rc5", path = "../../primitives/utils" } -sp-version = { version = "2.0.0-rc5", path = "../../primitives/version" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-session = { version = "2.0.0-rc5", path = "../../primitives/session" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } -sp-application-crypto = { version = "2.0.0-rc5", path = "../../primitives/application-crypto" } -sp-consensus = { version = "0.8.0-rc5", path = "../../primitives/consensus/common" } -sp-inherents = { version = "2.0.0-rc5", path = "../../primitives/inherents" } -sc-network = { version = "0.8.0-rc5", path = "../network" } -sc-chain-spec = { version = "2.0.0-rc5", path = "../chain-spec" } -sc-light = { version = "2.0.0-rc5", path = "../light" } -sc-client-api = { version = "2.0.0-rc5", path = "../api" } -sp-api = { version = "2.0.0-rc5", path = "../../primitives/api" } -sc-client-db = { version = "0.8.0-rc5", default-features = false, path = "../db" } +sc-keystore = { version = "2.0.0-rc6", path = "../keystore" } +sp-io = { version = "2.0.0-rc6", path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc6", path = "../../primitives/runtime" } +sp-trie = { version = "2.0.0-rc6", path = "../../primitives/trie" } +sp-externalities = { version = "0.8.0-rc6", path = "../../primitives/externalities" } +sp-utils = { version = "2.0.0-rc6", path = "../../primitives/utils" } +sp-version = { version = "2.0.0-rc6", path = "../../primitives/version" } +sp-blockchain = { version = "2.0.0-rc6", path = 
"../../primitives/blockchain" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +sp-session = { version = "2.0.0-rc6", path = "../../primitives/session" } +sp-state-machine = { version = "0.8.0-rc6", path = "../../primitives/state-machine" } +sp-application-crypto = { version = "2.0.0-rc6", path = "../../primitives/application-crypto" } +sp-consensus = { version = "0.8.0-rc6", path = "../../primitives/consensus/common" } +sp-inherents = { version = "2.0.0-rc6", path = "../../primitives/inherents" } +sc-network = { version = "0.8.0-rc6", path = "../network" } +sc-chain-spec = { version = "2.0.0-rc6", path = "../chain-spec" } +sc-light = { version = "2.0.0-rc6", path = "../light" } +sc-client-api = { version = "2.0.0-rc6", path = "../api" } +sp-api = { version = "2.0.0-rc6", path = "../../primitives/api" } +sc-client-db = { version = "0.8.0-rc6", default-features = false, path = "../db" } codec = { package = "parity-scale-codec", version = "1.3.4" } -sc-executor = { version = "0.8.0-rc5", path = "../executor" } -sc-transaction-pool = { version = "2.0.0-rc5", path = "../transaction-pool" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../primitives/transaction-pool" } -sc-rpc-server = { version = "2.0.0-rc5", path = "../rpc-servers" } -sc-rpc = { version = "2.0.0-rc5", path = "../rpc" } -sc-block-builder = { version = "0.8.0-rc5", path = "../block-builder" } -sp-block-builder = { version = "2.0.0-rc5", path = "../../primitives/block-builder" } +sc-executor = { version = "0.8.0-rc6", path = "../executor" } +sc-transaction-pool = { version = "2.0.0-rc6", path = "../transaction-pool" } +sp-transaction-pool = { version = "2.0.0-rc6", path = "../../primitives/transaction-pool" } +sc-rpc-server = { version = "2.0.0-rc6", path = "../rpc-servers" } +sc-rpc = { version = "2.0.0-rc6", path = "../rpc" } +sc-block-builder = { version = "0.8.0-rc6", path = "../block-builder" } +sp-block-builder = { version = "2.0.0-rc6", path = "../../primitives/block-builder" } sc-informant = { version = "0.8.0-rc2", path = "../informant" } -sc-telemetry = { version = "2.0.0-rc5", path = "../telemetry" } -sc-offchain = { version = "2.0.0-rc5", path = "../offchain" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc5"} -sc-tracing = { version = "2.0.0-rc5", path = "../tracing" } +sc-telemetry = { version = "2.0.0-rc6", path = "../telemetry" } +sc-offchain = { version = "2.0.0-rc6", path = "../offchain" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc6"} +sc-tracing = { version = "2.0.0-rc6", path = "../tracing" } tracing = "0.1.18" parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } @@ -81,9 +81,9 @@ tempfile = "3.1.0" directories = "2.0.2" [dev-dependencies] -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../test-utils/runtime/client" } -sp-consensus-babe = { version = "0.8.0-rc5", path = "../../primitives/consensus/babe" } -grandpa = { version = "0.8.0-rc5", package = "sc-finality-grandpa", path = "../finality-grandpa" } -grandpa-primitives = { version = "2.0.0-rc5", package = "sp-finality-grandpa", path = "../../primitives/finality-grandpa" } +substrate-test-runtime-client = { version = "2.0.0-rc6", path = "../../test-utils/runtime/client" } +sp-consensus-babe = { version = "0.8.0-rc6", path = "../../primitives/consensus/babe" } +grandpa = { version = "0.8.0-rc6", package = 
"sc-finality-grandpa", path = "../finality-grandpa" } +grandpa-primitives = { version = "2.0.0-rc6", package = "sp-finality-grandpa", path = "../../primitives/finality-grandpa" } tokio = { version = "0.2", default-features = false } async-std = { version = "1.6", default-features = false } diff --git a/client/service/README.md b/client/service/README.md new file mode 100644 index 00000000000..26f940f16df --- /dev/null +++ b/client/service/README.md @@ -0,0 +1,4 @@ +Substrate service. Starts a thread that spins up the network, client, and extrinsic pool. +Manages communication between them. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index d8f069eadfd..016b6e37d2d 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-service-test" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -20,25 +20,25 @@ log = "0.4.8" env_logger = "0.7.0" fdlimit = "0.1.4" parking_lot = "0.10.0" -sc-light = { version = "2.0.0-rc5", path = "../../light" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-api = { version = "2.0.0-rc5", path = "../../../primitives/api" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../../primitives/state-machine" } -sp-externalities = { version = "0.8.0-rc5", path = "../../../primitives/externalities" } -sp-trie = { version = "2.0.0-rc5", path = "../../../primitives/trie" } -sp-storage = { version = "2.0.0-rc5", path = "../../../primitives/storage" } -sc-client-db = { version = "0.8.0-rc5", default-features = false, path = "../../db" } +sc-light = { version = "2.0.0-rc6", path = "../../light" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../../primitives/blockchain" } +sp-api = { version = "2.0.0-rc6", path = "../../../primitives/api" } +sp-state-machine = { version = "0.8.0-rc6", path = "../../../primitives/state-machine" } +sp-externalities = { version = "0.8.0-rc6", path = "../../../primitives/externalities" } +sp-trie = { version = "2.0.0-rc6", path = "../../../primitives/trie" } +sp-storage = { version = "2.0.0-rc6", path = "../../../primitives/storage" } +sc-client-db = { version = "0.8.0-rc6", default-features = false, path = "../../db" } futures = { version = "0.3.1", features = ["compat"] } -sc-service = { version = "0.8.0-rc5", default-features = false, features = ["test-helpers"], path = "../../service" } -sc-network = { version = "0.8.0-rc5", path = "../../network" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../../primitives/transaction-pool" } -substrate-test-runtime = { version = "2.0.0-rc5", path = "../../../test-utils/runtime" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../../test-utils/runtime/client" } -sc-client-api = { version = "2.0.0-rc5", path = "../../api" } -sc-block-builder = { version = "0.8.0-rc5", path = "../../block-builder" } -sc-executor = { version = "0.8.0-rc5", path = "../../executor" } -sp-panic-handler = { version = "2.0.0-rc5", path = "../../../primitives/panic-handler" } +sc-service = { version = "0.8.0-rc6", default-features 
= false, features = ["test-helpers"], path = "../../service" } +sc-network = { version = "0.8.0-rc6", path = "../../network" } +sp-consensus = { version = "0.8.0-rc6", path = "../../../primitives/consensus/common" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../primitives/runtime" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } +sp-transaction-pool = { version = "2.0.0-rc6", path = "../../../primitives/transaction-pool" } +substrate-test-runtime = { version = "2.0.0-rc6", path = "../../../test-utils/runtime" } +substrate-test-runtime-client = { version = "2.0.0-rc6", path = "../../../test-utils/runtime/client" } +sc-client-api = { version = "2.0.0-rc6", path = "../../api" } +sc-block-builder = { version = "0.8.0-rc6", path = "../../block-builder" } +sc-executor = { version = "0.8.0-rc6", path = "../../executor" } +sp-panic-handler = { version = "2.0.0-rc6", path = "../../../primitives/panic-handler" } parity-scale-codec = "1.3.4" diff --git a/client/state-db/Cargo.toml b/client/state-db/Cargo.toml index 685f68f0835..f78e0ca505a 100644 --- a/client/state-db/Cargo.toml +++ b/client/state-db/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-state-db" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -14,8 +14,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] parking_lot = "0.10.0" log = "0.4.8" -sc-client-api = { version = "2.0.0-rc5", path = "../api" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } +sc-client-api = { version = "2.0.0-rc6", path = "../api" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } parity-util-mem-derive = "0.1.0" diff --git a/client/state-db/README.md b/client/state-db/README.md new file mode 100644 index 00000000000..a02b3929088 --- /dev/null +++ b/client/state-db/README.md @@ -0,0 +1,16 @@ +State database maintenance. Handles canonicalization and pruning in the database. The input to +this module is a `ChangeSet` which is basically a list of key-value pairs (trie nodes) that +were added or deleted during block execution. + +# Canonicalization. +Canonicalization window tracks a tree of blocks identified by header hash. The in-memory +overlay allows to get any node that was inserted in any of the blocks within the window. +The tree is journaled to the backing database and rebuilt on startup. +Canonicalization function selects one root from the top of the tree and discards all other roots and +their subtrees. + +# Pruning. +See `RefWindow` for pruning algorithm details. `StateDb` prunes on each canonicalization until pruning +constraints are satisfied. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index 4c9cc05b07f..2f3601b1729 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-telemetry" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] description = "Telemetry utils" edition = "2018" diff --git a/client/telemetry/README.md b/client/telemetry/README.md new file mode 100644 index 00000000000..8fdf9e50072 --- /dev/null +++ b/client/telemetry/README.md @@ -0,0 +1,45 @@ +Telemetry utilities. 
+ +Calling `init_telemetry` registers a global `slog` logger using `slog_scope::set_global_logger`. +After that, calling `slog_scope::with_logger` will return a logger that sends information to +the telemetry endpoints. The `telemetry!` macro is a short-cut for calling +`slog_scope::with_logger` followed with `slog_log!`. + +Note that you are supposed to only ever use `telemetry!` and not `slog_scope::with_logger` at +the moment. Substrate may eventually be reworked to get proper `slog` support, including sending +information to the telemetry. + +The [`Telemetry`] struct implements `Stream` and must be polled regularly (or sent to a +background thread/task) in order for the telemetry to properly function. Dropping the object +will also deregister the global logger and replace it with a logger that discards messages. +The `Stream` generates [`TelemetryEvent`]s. + +> **Note**: Cloning the [`Telemetry`] and polling from multiple clones has an unspecified behaviour. + +# Example + +```rust +use futures::prelude::*; + +let telemetry = sc_telemetry::init_telemetry(sc_telemetry::TelemetryConfig { + endpoints: sc_telemetry::TelemetryEndpoints::new(vec![ + // The `0` is the maximum verbosity level of messages to send to this endpoint. + ("wss://example.com".into(), 0) + ]).expect("Invalid URL or multiaddr provided"), + // Can be used to pass an external implementation of WebSockets. + wasm_external_transport: None, +}); + +// The `telemetry` object implements `Stream` and must be processed. +std::thread::spawn(move || { + futures::executor::block_on(telemetry.for_each(|_| future::ready(()))); +}); + +// Sends a message on the telemetry. +sc_telemetry::telemetry!(sc_telemetry::SUBSTRATE_INFO; "test"; + "foo" => "bar", +) +``` + + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/tracing/Cargo.toml b/client/tracing/Cargo.toml index 0a692cbe57f..40ab1bd4603 100644 --- a/client/tracing/Cargo.toml +++ b/client/tracing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-tracing" -version = "2.0.0-rc5" +version = "2.0.0-rc6" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -23,4 +23,4 @@ tracing = "0.1.18" tracing-subscriber = "0.2.10" sp-tracing = { version = "2.0.0-rc2", path = "../../primitives/tracing" } -sc-telemetry = { version = "2.0.0-rc5", path = "../telemetry" } +sc-telemetry = { version = "2.0.0-rc6", path = "../telemetry" } diff --git a/client/tracing/README.md b/client/tracing/README.md new file mode 100644 index 00000000000..b008436df9b --- /dev/null +++ b/client/tracing/README.md @@ -0,0 +1,11 @@ +Instrumentation implementation for substrate. + +This crate is unstable and the API and usage may change. + +# Usage + +See `sp-tracing` for examples on how to use tracing. 
+ +Currently we provide `Log` (default), `Telemetry` variants for `Receiver` + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml index 56ea881d7a0..f6ef1b1322f 100644 --- a/client/transaction-pool/Cargo.toml +++ b/client/transaction-pool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-transaction-pool" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -20,23 +20,23 @@ intervalier = "0.4.0" log = "0.4.8" parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } parking_lot = "0.10.0" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc5"} -sc-client-api = { version = "2.0.0-rc5", path = "../api" } -sc-transaction-graph = { version = "2.0.0-rc5", path = "./graph" } -sp-api = { version = "2.0.0-rc5", path = "../../primitives/api" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-tracing = { version = "2.0.0-rc5", path = "../../primitives/tracing" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../primitives/transaction-pool" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } -sp-utils = { version = "2.0.0-rc5", path = "../../primitives/utils" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc6"} +sc-client-api = { version = "2.0.0-rc6", path = "../api" } +sc-transaction-graph = { version = "2.0.0-rc6", path = "./graph" } +sp-api = { version = "2.0.0-rc6", path = "../../primitives/api" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-rc6", path = "../../primitives/runtime" } +sp-tracing = { version = "2.0.0-rc6", path = "../../primitives/tracing" } +sp-transaction-pool = { version = "2.0.0-rc6", path = "../../primitives/transaction-pool" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../primitives/blockchain" } +sp-utils = { version = "2.0.0-rc6", path = "../../primitives/utils" } wasm-timer = "0.2" [dev-dependencies] assert_matches = "1.3.0" hex = "0.4" -sp-keyring = { version = "2.0.0-rc5", path = "../../primitives/keyring" } -sp-consensus = { version = "0.8.0-rc5", path = "../../primitives/consensus/common" } -substrate-test-runtime-transaction-pool = { version = "2.0.0-rc5", path = "../../test-utils/runtime/transaction-pool" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../test-utils/runtime/client" } -sc-block-builder = { version = "0.8.0-rc5", path = "../block-builder" } +sp-keyring = { version = "2.0.0-rc6", path = "../../primitives/keyring" } +sp-consensus = { version = "0.8.0-rc6", path = "../../primitives/consensus/common" } +substrate-test-runtime-transaction-pool = { version = "2.0.0-rc6", path = "../../test-utils/runtime/transaction-pool" } +substrate-test-runtime-client = { version = "2.0.0-rc6", path = "../../test-utils/runtime/client" } +sc-block-builder = { version = "0.8.0-rc6", path = "../block-builder" } diff --git a/client/transaction-pool/README.md b/client/transaction-pool/README.md new file mode 100644 index 00000000000..15e4641c1f4 --- /dev/null +++ b/client/transaction-pool/README.md @@ -0,0 +1,3 @@ +Substrate transaction pool 
implementation. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/transaction-pool/graph/Cargo.toml b/client/transaction-pool/graph/Cargo.toml index 8719a9c8fed..7255cf3df30 100644 --- a/client/transaction-pool/graph/Cargo.toml +++ b/client/transaction-pool/graph/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-transaction-graph" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -18,11 +18,11 @@ log = "0.4.8" parking_lot = "0.10.0" serde = { version = "1.0.101", features = ["derive"] } wasm-timer = "0.2" -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-utils = { version = "2.0.0-rc5", path = "../../../primitives/utils" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../../primitives/transaction-pool" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../../primitives/blockchain" } +sp-utils = { version = "2.0.0-rc6", path = "../../../primitives/utils" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../primitives/runtime" } +sp-transaction-pool = { version = "2.0.0-rc6", path = "../../../primitives/transaction-pool" } parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } linked-hash-map = "0.5.2" retain_mut = "0.1.1" @@ -30,7 +30,7 @@ retain_mut = "0.1.1" [dev-dependencies] assert_matches = "1.3.0" codec = { package = "parity-scale-codec", version = "1.3.4" } -substrate-test-runtime = { version = "2.0.0-rc5", path = "../../../test-utils/runtime" } +substrate-test-runtime = { version = "2.0.0-rc6", path = "../../../test-utils/runtime" } criterion = "0.3" [[bench]] diff --git a/client/transaction-pool/graph/README.md b/client/transaction-pool/graph/README.md new file mode 100644 index 00000000000..bc9cd929122 --- /dev/null +++ b/client/transaction-pool/graph/README.md @@ -0,0 +1,8 @@ +Generic Transaction Pool + +The pool is based on dependency graph between transactions +and their priority. +The pool is able to return an iterator that traverses transaction +graph in the correct order taking into account priorities and dependencies. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 719059477e1..254c64819bd 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -6,11 +6,48 @@ The format is based on [Keep a Changelog]. 
## Unreleased +## 2.0.0-rc5 -> 2.0.0-rc6 – Rock Hyrax + +Runtime +------- + +* Custom Codec Implenetation for NPoS Election (#6720) +* Successful `note_imminent_preimage` is free (#6793) +* pallet-democracy use of weightinfo (#6783) +* Update Balances Pallet to use `WeightInfo` (#6610) +* pallet-evm: add builtin support for the four basic Ethereum precompiles (#6743) +* Allow `PostDispatchInfo` to disable fees (#6749) +* pallet-evm: add support for tuple-based precompile declarations (#6681) +* grandpa: allow noting that the set has stalled (#6725) + Client ------ +* Merge Subkey into sc-cli (#4954) +* RpcHandlers Refactorings (#6846) +* client/authority-discovery: Introduce AuthorityDiscoveryService (#6760) +* Implement tracing::Event handling & parent_id for spans and events (#6672) +* Move to upstream wasmtime, refactor globals snapshot (#6759) +* Revalidate transactions only on latest best block (#6824) +* Allow task manager to have children (#6771) +* client/network: Expose DHT query duration to Prometheus (#6784) +* client/network: Add peers to DHT only if protocols match (#6549) +* Name all the tasks! (#6726) * Child nodes can be handled by adding a child `TaskManager` to the parent's `TaskManager` (#6771) +API +--- + +* pow: add access to pre-digest for algorithm verifiers (#6900) +* babe, aura, pow: only call check_inherents if authoring version is compatible (#6862) +* Implement 'transactional' annotation for runtime functions. (#6763) +* seal: Change prefix and module name from "ext_" to "seal_" for contract callable functions (#6798) +* Add Subscription RPC for Grandpa Finality (#5732) +* seal: Fix and improve error reporting (#6773) +* Allow blacklisting blocks from being finalized again after block revert (#6301) +* BABE slot and epoch event notifications (#6563) +* Add `memory-tracker` feature to `sp-trie` to fix wasm panic (#6745) + ## 2.0.0-rc4 -> 2.0.0-rc5 – River Dolphin Runtime diff --git a/frame/assets/Cargo.toml b/frame/assets/Cargo.toml index 47bf0cecf21..bb7c2828c30 100644 --- a/frame/assets/Cargo.toml +++ b/frame/assets/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-assets" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,16 +15,16 @@ targets = ["x86_64-unknown-linux-gnu"] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } # Needed for various traits. In our case, `OnFinalize`. -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } # Needed for type-safe access to storage DB. -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } # `system` module provides us with all sorts of useful stuff and macros depend on it being around. 
-frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-std = { version = "2.0.0-rc5", path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +sp-std = { version = "2.0.0-rc6", path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc6", path = "../../primitives/io" } [features] default = ["std"] diff --git a/frame/assets/README.md b/frame/assets/README.md new file mode 100644 index 00000000000..dca51b7c296 --- /dev/null +++ b/frame/assets/README.md @@ -0,0 +1,116 @@ +# Assets Module + +A simple, secure module for dealing with fungible assets. + +## Overview + +The Assets module provides functionality for asset management of fungible asset classes +with a fixed supply, including: + +* Asset Issuance +* Asset Transfer +* Asset Destruction + +To use it in your runtime, you need to implement the assets [`Trait`](./trait.Trait.html). + +The supported dispatchable functions are documented in the [`Call`](./enum.Call.html) enum. + +### Terminology + +* **Asset issuance:** The creation of a new asset, whose total supply will belong to the + account that issues the asset. +* **Asset transfer:** The action of transferring assets from one account to another. +* **Asset destruction:** The process of an account removing its entire holding of an asset. +* **Fungible asset:** An asset whose units are interchangeable. +* **Non-fungible asset:** An asset for which each unit has unique characteristics. + +### Goals + +The assets system in Substrate is designed to make the following possible: + +* Issue a unique asset to its creator's account. +* Move assets between accounts. +* Remove an account's balance of an asset when requested by that account's owner and update + the asset's total supply. + +## Interface + +### Dispatchable Functions + +* `issue` - Issues the total supply of a new fungible asset to the account of the caller of the function. +* `transfer` - Transfers an `amount` of units of fungible asset `id` from the balance of +the function caller's account (`origin`) to a `target` account. +* `destroy` - Destroys the entire holding of a fungible asset `id` associated with the account +that called the function. + +Please refer to the [`Call`](./enum.Call.html) enum and its associated variants for documentation on each function. + +### Public Functions + + +* `balance` - Get the asset `id` balance of `who`. +* `total_supply` - Get the total supply of an asset `id`. + +Please refer to the [`Module`](./struct.Module.html) struct for details on publicly available functions. + +## Usage + +The following example shows how to use the Assets module in your runtime by exposing public functions to: + +* Issue a new fungible asset for a token distribution event (airdrop). +* Query the fungible asset holding balance of an account. +* Query the total supply of a fungible asset that has been issued. + +### Prerequisites + +Import the Assets module and types and derive your runtime's configuration traits from the Assets module trait. + +### Simple Code Snippet + +```rust +use pallet_assets as assets; +use frame_support::{decl_module, dispatch, ensure}; +use frame_system::ensure_signed; + +pub trait Trait: assets::Trait { } + +decl_module! 
{
+    pub struct Module<T: Trait> for enum Call where origin: T::Origin {
+        pub fn issue_token_airdrop(origin) -> dispatch::DispatchResult {
+            let sender = ensure_signed(origin).map_err(|e| e.as_str())?;
+
+            const ACCOUNT_ALICE: u64 = 1;
+            const ACCOUNT_BOB: u64 = 2;
+            const COUNT_AIRDROP_RECIPIENTS: u64 = 2;
+            const TOKENS_FIXED_SUPPLY: u64 = 100;
+
+            ensure!(!COUNT_AIRDROP_RECIPIENTS.is_zero(), "Divide by zero error.");
+
+            let asset_id = Self::next_asset_id();
+
+            <NextAssetId<T>>::mutate(|asset_id| *asset_id += 1);
+            <Balances<T>>::insert((asset_id, &ACCOUNT_ALICE), TOKENS_FIXED_SUPPLY / COUNT_AIRDROP_RECIPIENTS);
+            <Balances<T>>::insert((asset_id, &ACCOUNT_BOB), TOKENS_FIXED_SUPPLY / COUNT_AIRDROP_RECIPIENTS);
+            <TotalSupply<T>>::insert(asset_id, TOKENS_FIXED_SUPPLY);
+
+            Self::deposit_event(RawEvent::Issued(asset_id, sender, TOKENS_FIXED_SUPPLY));
+            Ok(())
+        }
+    }
+}
+```
+
+## Assumptions
+
+Below are assumptions that must be held when using this module. If any of
+them are violated, the behavior of this module is undefined.
+
+* The total count of assets should be less than
+  `Trait::AssetId::max_value()`.
+
+## Related Modules
+
+* [`System`](../frame_system/index.html)
+* [`Support`](../frame_support/index.html)
+
+License: Apache-2.0
\ No newline at end of file
diff --git a/frame/atomic-swap/Cargo.toml b/frame/atomic-swap/Cargo.toml
index 704d22ba780..982cd7d6cb8 100644
--- a/frame/atomic-swap/Cargo.toml
+++ b/frame/atomic-swap/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "pallet-atomic-swap"
-version = "2.0.0-rc5"
+version = "2.0.0-rc6"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
 license = "Apache-2.0"
@@ -14,15 +14,15 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 serde = { version = "1.0.101", optional = true }
 codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false }
-frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" }
-frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" }
-sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" }
-sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" }
-sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" }
-sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" }
+frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" }
+frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" }
+sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" }
+sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" }
+sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" }
+sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/core" }
 
 [dev-dependencies]
-pallet-balances = { version = "2.0.0-rc5", path = "../balances" }
+pallet-balances = { version = "2.0.0-rc6", path = "../balances" }
 
 [features]
 default = ["std"]
diff --git a/frame/atomic-swap/README.md b/frame/atomic-swap/README.md
new file mode 100644
index 00000000000..f2be32554cb
--- /dev/null
+++ b/frame/atomic-swap/README.md
@@ -0,0 +1,23 @@
+# Atomic Swap
+
+A module for atomically sending funds.
+
+- [`atomic_swap::Trait`](./trait.Trait.html)
+- [`Call`](./enum.Call.html)
+- [`Module`](./struct.Module.html)
+
+## Overview
+
+A module for atomically sending funds from an origin to a target.
A proof +is used to allow the target to approve (claim) the swap. If the swap is not +claimed within a specified duration of time, the sender may cancel it. + +## Interface + +### Dispatchable Functions + +* `create_swap` - called by a sender to register a new atomic swap +* `claim_swap` - called by the target to approve a swap +* `cancel_swap` - may be called by a sender after a specified duration + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/aura/Cargo.toml b/frame/aura/Cargo.toml index cc9b1bf6f66..283462f5cc6 100644 --- a/frame/aura/Cargo.toml +++ b/frame/aura/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-aura" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,23 +12,23 @@ description = "FRAME AURA consensus pallet" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/application-crypto" } +sp-application-crypto = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/application-crypto" } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/inherents" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } +sp-inherents = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/inherents" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } -pallet-session = { version = "2.0.0-rc5", default-features = false, path = "../session" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -sp-consensus-aura = { version = "0.8.0-rc5", path = "../../primitives/consensus/aura", default-features = false } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -sp-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/timestamp" } -pallet-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../timestamp" } +pallet-session = { version = "2.0.0-rc6", default-features = false, path = "../session" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +sp-consensus-aura = { version = "0.8.0-rc6", path = "../../primitives/consensus/aura", default-features = false } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } +sp-timestamp = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/timestamp" } +pallet-timestamp = { version = "2.0.0-rc6", default-features = false, path = "../timestamp" } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-io ={ version = "2.0.0-rc5", path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/core" } +sp-io ={ version = "2.0.0-rc6", path = "../../primitives/io" } lazy_static = "1.4.0" parking_lot = "0.10.0" diff --git a/frame/aura/README.md b/frame/aura/README.md new file mode 100644 index 00000000000..59747493193 
--- /dev/null +++ b/frame/aura/README.md @@ -0,0 +1,28 @@ +# Aura Module + +- [`aura::Trait`](./trait.Trait.html) +- [`Module`](./struct.Module.html) + +## Overview + +The Aura module extends Aura consensus by managing offline reporting. + +## Interface + +### Public Functions + +- `slot_duration` - Determine the Aura slot-duration based on the Timestamp module configuration. + +## Related Modules + +- [Timestamp](../pallet_timestamp/index.html): The Timestamp module is used in Aura to track +consensus rounds (via `slots`). + +## References + +If you're interested in hacking on this module, it is useful to understand the interaction with +`substrate/primitives/inherents/src/lib.rs` and, specifically, the required implementation of +[`ProvideInherent`](../sp_inherents/trait.ProvideInherent.html) and +[`ProvideInherentData`](../sp_inherents/trait.ProvideInherentData.html) to create and check inherents. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/authority-discovery/Cargo.toml b/frame/authority-discovery/Cargo.toml index bfd7017d06f..26fa250d720 100644 --- a/frame/authority-discovery/Cargo.toml +++ b/frame/authority-discovery/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-authority-discovery" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,20 +12,20 @@ description = "FRAME pallet for authority discovery" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-authority-discovery = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/authority-discovery" } -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/application-crypto" } +sp-authority-discovery = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/authority-discovery" } +sp-application-crypto = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/application-crypto" } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } -pallet-session = { version = "2.0.0-rc5", features = ["historical" ], path = "../session", default-features = false } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +pallet-session = { version = "2.0.0-rc6", features = ["historical" ], path = "../session", default-features = false } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io" } -sp-staking = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/staking" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +sp-io = { version = "2.0.0-rc6", path = "../../primitives/io" } +sp-staking = { version = "2.0.0-rc6", 
default-features = false, path = "../../primitives/staking" } [features] default = ["std"] diff --git a/frame/authority-discovery/README.md b/frame/authority-discovery/README.md new file mode 100644 index 00000000000..9a534dcbeb6 --- /dev/null +++ b/frame/authority-discovery/README.md @@ -0,0 +1,6 @@ +# Authority discovery module. + +This module is used by the `client/authority-discovery` to retrieve the +current set of authorities. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/authorship/Cargo.toml b/frame/authorship/Cargo.toml index 9ddd24888ca..f351b2d6670 100644 --- a/frame/authorship/Cargo.toml +++ b/frame/authorship/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-authorship" -version = "2.0.0-rc5" +version = "2.0.0-rc6" description = "Block and Uncle Author tracking for the FRAME" authors = ["Parity Technologies "] edition = "2018" @@ -13,17 +13,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/inherents" } -sp-authorship = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/authorship" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-inherents = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/inherents" } +sp-authorship = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/authorship" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } impl-trait-for-tuples = "0.1.3" [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-io ={ version = "2.0.0-rc5", path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +sp-io ={ version = "2.0.0-rc6", path = "../../primitives/io" } [features] default = ["std"] diff --git a/frame/authorship/README.md b/frame/authorship/README.md new file mode 100644 index 00000000000..d61747da3e1 --- /dev/null +++ b/frame/authorship/README.md @@ -0,0 +1,5 @@ +Authorship tracking for FRAME runtimes. + +This tracks the current author of the block and recent uncles. 
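
As a purely illustrative sketch (the pallet below and its hook are assumptions, not part of this
pallet's documentation), another pallet whose configuration trait is bound to
`pallet_authorship::Trait` could read the current author like this:

```rust
use frame_support::decl_module;

// Hypothetical pallet that depends on the authorship pallet being in the runtime.
pub trait Trait: pallet_authorship::Trait {}

decl_module! {
    pub struct Module<T: Trait> for enum Call where origin: T::Origin {
        fn on_finalize(_n: T::BlockNumber) {
            // Fetch the account that authored the block being finalized.
            let author: T::AccountId = <pallet_authorship::Module<T>>::author();
            // ... e.g. note `author` somewhere or credit it a reward ...
            let _ = author;
        }
    }
}
```
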
+ +License: Apache-2.0 \ No newline at end of file diff --git a/frame/babe/Cargo.toml b/frame/babe/Cargo.toml index e2577e8daf1..5b59dd6b278 100644 --- a/frame/babe/Cargo.toml +++ b/frame/babe/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-babe" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,31 +13,31 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -pallet-authorship = { version = "2.0.0-rc5", default-features = false, path = "../authorship" } -pallet-session = { version = "2.0.0-rc5", default-features = false, path = "../session" } -pallet-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../timestamp" } +frame-benchmarking = { version = "2.0.0-rc6", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } +pallet-authorship = { version = "2.0.0-rc6", default-features = false, path = "../authorship" } +pallet-session = { version = "2.0.0-rc6", default-features = false, path = "../session" } +pallet-timestamp = { version = "2.0.0-rc6", default-features = false, path = "../timestamp" } serde = { version = "1.0.101", optional = true } -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/application-crypto" } -sp-consensus-babe = { version = "0.8.0-rc5", default-features = false, path = "../../primitives/consensus/babe" } -sp-consensus-vrf = { version = "0.8.0-rc5", default-features = false, path = "../../primitives/consensus/vrf" } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/inherents" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-session = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/session" } -sp-staking = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/staking" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/timestamp" } +sp-application-crypto = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/application-crypto" } +sp-consensus-babe = { version = "0.8.0-rc6", default-features = false, path = "../../primitives/consensus/babe" } +sp-consensus-vrf = { version = "0.8.0-rc6", default-features = false, path = "../../primitives/consensus/vrf" } +sp-inherents = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/inherents" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +sp-session = { version = "2.0.0-rc6", default-features = false, path = 
"../../primitives/session" } +sp-staking = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/staking" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-timestamp = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/timestamp" } [dev-dependencies] -frame-benchmarking = { version = "2.0.0-rc5", path = "../benchmarking" } -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } -pallet-offences = { version = "2.0.0-rc5", path = "../offences" } -pallet-staking = { version = "2.0.0-rc5", path = "../staking" } -pallet-staking-reward-curve = { version = "2.0.0-rc5", path = "../staking/reward-curve" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } +frame-benchmarking = { version = "2.0.0-rc6", path = "../benchmarking" } +pallet-balances = { version = "2.0.0-rc6", path = "../balances" } +pallet-offences = { version = "2.0.0-rc6", path = "../offences" } +pallet-staking = { version = "2.0.0-rc6", path = "../staking" } +pallet-staking-reward-curve = { version = "2.0.0-rc6", path = "../staking/reward-curve" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/babe/README.md b/frame/babe/README.md new file mode 100644 index 00000000000..6f20be89efc --- /dev/null +++ b/frame/babe/README.md @@ -0,0 +1,4 @@ +Consensus extension module for BABE consensus. Collects on-chain randomness +from VRF outputs and manages epoch transitions. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/balances/Cargo.toml b/frame/balances/Cargo.toml index e6c8eec65de..3f1a088f889 100644 --- a/frame/balances/Cargo.toml +++ b/frame/balances/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-balances" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,16 +14,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = "2.0.0-rc6", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } [dev-dependencies] -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -pallet-transaction-payment = { version = "2.0.0-rc5", path = "../transaction-payment" } +sp-io = { version = "2.0.0-rc6", path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +pallet-transaction-payment = { version 
= "2.0.0-rc6", path = "../transaction-payment" } [features] default = ["std"] diff --git a/frame/balances/README.md b/frame/balances/README.md new file mode 100644 index 00000000000..c5c578848fa --- /dev/null +++ b/frame/balances/README.md @@ -0,0 +1,122 @@ +# Balances Module + +The Balances module provides functionality for handling accounts and balances. + +- [`balances::Trait`](./trait.Trait.html) +- [`Call`](./enum.Call.html) +- [`Module`](./struct.Module.html) + +## Overview + +The Balances module provides functions for: + +- Getting and setting free balances. +- Retrieving total, reserved and unreserved balances. +- Repatriating a reserved balance to a beneficiary account that exists. +- Transferring a balance between accounts (when not reserved). +- Slashing an account balance. +- Account creation and removal. +- Managing total issuance. +- Setting and managing locks. + +### Terminology + +- **Existential Deposit:** The minimum balance required to create or keep an account open. This prevents +"dust accounts" from filling storage. When the free plus the reserved balance (i.e. the total balance) + fall below this, then the account is said to be dead; and it loses its functionality as well as any + prior history and all information on it is removed from the chain's state. + No account should ever have a total balance that is strictly between 0 and the existential + deposit (exclusive). If this ever happens, it indicates either a bug in this module or an + erroneous raw mutation of storage. + +- **Total Issuance:** The total number of units in existence in a system. + +- **Reaping an account:** The act of removing an account by resetting its nonce. Happens after its +total balance has become zero (or, strictly speaking, less than the Existential Deposit). + +- **Free Balance:** The portion of a balance that is not reserved. The free balance is the only + balance that matters for most operations. + +- **Reserved Balance:** Reserved balance still belongs to the account holder, but is suspended. + Reserved balance can still be slashed, but only after all the free balance has been slashed. + +- **Imbalance:** A condition when some funds were credited or debited without equal and opposite accounting +(i.e. a difference between total issuance and account balances). Functions that result in an imbalance will +return an object of the `Imbalance` trait that can be managed within your runtime logic. (If an imbalance is +simply dropped, it should automatically maintain any book-keeping such as total issuance.) + +- **Lock:** A freeze on a specified amount of an account's free balance until a specified block number. Multiple +locks always operate over the same funds, so they "overlay" rather than "stack". + +### Implementations + +The Balances module provides implementations for the following traits. If these traits provide the functionality +that you need, then you can avoid coupling with the Balances module. + +- [`Currency`](../frame_support/traits/trait.Currency.html): Functions for dealing with a +fungible assets system. +- [`ReservableCurrency`](../frame_support/traits/trait.ReservableCurrency.html): +Functions for dealing with assets that can be reserved from an account. +- [`LockableCurrency`](../frame_support/traits/trait.LockableCurrency.html): Functions for +dealing with accounts that allow liquidity restrictions. +- [`Imbalance`](../frame_support/traits/trait.Imbalance.html): Functions for handling +imbalances between total issuance in the system and account balances. 
Must be used when a function
+creates new funds (e.g. a reward) or destroys some funds (e.g. a system fee).
+- [`IsDeadAccount`](../frame_system/trait.IsDeadAccount.html): Determiner to say whether a
+given account is unused.
+
+## Interface
+
+### Dispatchable Functions
+
+- `transfer` - Transfer some liquid free balance to another account.
+- `set_balance` - Set the balances of a given account. The origin of this call must be root.
+
+## Usage
+
+The following examples show how to use the Balances module in your custom module.
+
+### Examples from the FRAME
+
+The Contract module uses the `Currency` trait to handle gas payment, and its types inherit from `Currency`:
+
+```rust
+use frame_support::traits::Currency;
+
+pub type BalanceOf<T> = <<T as Trait>::Currency as Currency<<T as frame_system::Trait>::AccountId>>::Balance;
+pub type NegativeImbalanceOf<T> = <<T as Trait>::Currency as Currency<<T as frame_system::Trait>::AccountId>>::NegativeImbalance;
+
+```
+
+The Staking module uses the `LockableCurrency` trait to lock a stash account's funds:
+
+```rust
+use frame_support::traits::{WithdrawReasons, LockableCurrency};
+use sp_runtime::traits::Bounded;
+pub trait Trait: frame_system::Trait {
+    type Currency: LockableCurrency<Self::AccountId, Moment=Self::BlockNumber>;
+}
+
+fn update_ledger<T: Trait>(
+    controller: &T::AccountId,
+    ledger: &StakingLedger<T>
+) {
+    T::Currency::set_lock(
+        STAKING_ID,
+        &ledger.stash,
+        ledger.total,
+        WithdrawReasons::all()
+    );
+    // <Ledger<T>>::insert(controller, ledger); // Commented out as we don't have access to Staking's storage here.
+}
+```
+
+## Genesis config
+
+The Balances module depends on the [`GenesisConfig`](./struct.GenesisConfig.html).
+
+## Assumptions
+
+* Total issued balance of all accounts should be less than `Trait::Balance::max_value()`.
+
+License: Apache-2.0
\ No newline at end of file
diff --git a/frame/benchmark/Cargo.toml b/frame/benchmark/Cargo.toml
index 43e131c2dc7..f731ebcbacf 100644
--- a/frame/benchmark/Cargo.toml
+++ b/frame/benchmark/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "pallet-benchmark"
-version = "2.0.0-rc5"
+version = "2.0.0-rc6"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
 license = "Apache-2.0"
@@ -14,12 +14,12 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 serde = { version = "1.0.101", optional = true }
 codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] }
-sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" }
-sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" }
-sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" }
-frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" }
-frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" }
-frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true }
+sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" }
+sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" }
+sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" }
+frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" }
+frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" }
+frame-benchmarking = { version = "2.0.0-rc6", default-features = false, path = "../benchmarking", optional = true }
 
 [features]
 default = ["std"]
diff --git a/frame/benchmark/README.md
b/frame/benchmark/README.md new file mode 100644 index 00000000000..e00e11292e1 --- /dev/null +++ b/frame/benchmark/README.md @@ -0,0 +1,5 @@ +A pallet that contains common runtime patterns in an isolated manner. +This pallet is **not** meant to be used in a production blockchain, just +for benchmarking and testing purposes. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml index 917988a825f..8011ce779df 100644 --- a/frame/benchmarking/Cargo.toml +++ b/frame/benchmarking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-benchmarking" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,14 +15,14 @@ targets = ["x86_64-unknown-linux-gnu"] linregress = "0.1" paste = "0.1" codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -sp-api = { version = "2.0.0-rc5", path = "../../primitives/api", default-features = false } -sp-runtime-interface = { version = "2.0.0-rc5", path = "../../primitives/runtime-interface", default-features = false } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime", default-features = false } -sp-std = { version = "2.0.0-rc5", path = "../../primitives/std", default-features = false } -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io", default-features = false } -sp-storage = { version = "2.0.0-rc5", path = "../../primitives/storage", default-features = false } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-api = { version = "2.0.0-rc6", path = "../../primitives/api", default-features = false } +sp-runtime-interface = { version = "2.0.0-rc6", path = "../../primitives/runtime-interface", default-features = false } +sp-runtime = { version = "2.0.0-rc6", path = "../../primitives/runtime", default-features = false } +sp-std = { version = "2.0.0-rc6", path = "../../primitives/std", default-features = false } +sp-io = { version = "2.0.0-rc6", path = "../../primitives/io", default-features = false } +sp-storage = { version = "2.0.0-rc6", path = "../../primitives/storage", default-features = false } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } [dev-dependencies] hex-literal = "0.2.1" diff --git a/frame/benchmarking/README.md b/frame/benchmarking/README.md new file mode 100644 index 00000000000..1e06135e345 --- /dev/null +++ b/frame/benchmarking/README.md @@ -0,0 +1,3 @@ +Macro for benchmarking a FRAME runtime. 
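
Purely as an illustration (loosely modelled on the example pallet's benchmarks in this release;
the surrounding pallet, its `Trait`, and the `set_dummy(origin, value)` dispatchable are
assumptions), a benchmark declaration inside a pallet's benchmarking module has roughly this shape:

```rust
use frame_benchmarking::benchmarks;
use frame_system::RawOrigin;

benchmarks! {
    // Common parameter range shared by the benchmarks below.
    _ {
        let b in 1 .. 1000 => ();
    }

    // Measure `set_dummy` while `b` varies over the common range declared above.
    set_dummy {
        let b in ...;
    }: set_dummy(RawOrigin::Root, b.into())
}
```
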
+ +License: Apache-2.0 \ No newline at end of file diff --git a/frame/collective/Cargo.toml b/frame/collective/Cargo.toml index b1db6fe6b97..38fb3d6cd3b 100644 --- a/frame/collective/Cargo.toml +++ b/frame/collective/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-collective" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,17 +14,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = "2.0.0-rc6", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } [dev-dependencies] hex-literal = "0.2.1" -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } +pallet-balances = { version = "2.0.0-rc6", path = "../balances" } [features] default = ["std"] diff --git a/frame/collective/README.md b/frame/collective/README.md new file mode 100644 index 00000000000..e4928dbcf2d --- /dev/null +++ b/frame/collective/README.md @@ -0,0 +1,22 @@ +Collective system: Members of a set of account IDs can make their collective feelings known +through dispatched calls from one of two specialized origins. + +The membership can be provided in one of two ways: either directly, using the Root-dispatchable +function `set_members`, or indirectly, through implementing the `ChangeMembers`. +The pallet assumes that the amount of members stays at or below `MAX_MEMBERS` for its weight +calculations, but enforces this neither in `set_members` nor in `change_members_sorted`. + +A "prime" member may be set allowing their vote to act as the default vote in case of any +abstentions after the voting period. + +Voting happens through motions comprising a proposal (i.e. a curried dispatchable) plus a +number of approvals required for it to pass and be called. Motions are open for members to +vote on for a minimum period given by `MotionDuration`. As soon as the needed number of +approvals is given, the motion is closed and executed. If the number of approvals is not reached +during the voting period, then `close` may be called by any account in order to force the end +the motion explicitly. 
If a prime member is defined then their vote is used in place of any +abstentions and the proposal is executed if there are enough approvals counting the new votes. + +If there are not, or if no prime is set, then the motion is dropped without being executed. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index 1a42af6833b..4d3083720a6 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-contracts" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,27 +14,27 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bitflags = "1.0" codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -pallet-contracts-primitives = { version = "2.0.0-rc5", default-features = false, path = "common" } +frame-benchmarking = { version = "2.0.0-rc6", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } +pallet-contracts-primitives = { version = "2.0.0-rc6", default-features = false, path = "common" } parity-wasm = { version = "0.41.0", default-features = false } pwasm-utils = { version = "0.14.0", default-features = false } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-sandbox = { version = "0.8.0-rc5", default-features = false, path = "../../primitives/sandbox" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-sandbox = { version = "0.8.0-rc6", default-features = false, path = "../../primitives/sandbox" } wasmi-validation = { version = "0.3.0", default-features = false } wat = { version = "1.0", optional = true, default-features = false } [dev-dependencies] assert_matches = "1.3.0" hex-literal = "0.2.1" -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } -pallet-timestamp = { version = "2.0.0-rc5", path = "../timestamp" } -pallet-randomness-collective-flip = { version = "2.0.0-rc5", path = "../randomness-collective-flip" } +pallet-balances = { version = "2.0.0-rc6", path = "../balances" } +pallet-timestamp = { version = "2.0.0-rc6", path = "../timestamp" } +pallet-randomness-collective-flip = { version = "2.0.0-rc6", path = "../randomness-collective-flip" } pretty_assertions = "0.6.1" wat = "1.0" diff --git 
a/frame/contracts/README.md b/frame/contracts/README.md new file mode 100644 index 00000000000..f2d58048c34 --- /dev/null +++ b/frame/contracts/README.md @@ -0,0 +1,64 @@ +# Contract Module + +The Contract module provides functionality for the runtime to deploy and execute WebAssembly smart-contracts. + +- [`contract::Trait`](./trait.Trait.html) +- [`Call`](./enum.Call.html) + +## Overview + +This module extends accounts based on the `Currency` trait to have smart-contract functionality. It can +be used with other modules that implement accounts based on `Currency`. These "smart-contract accounts" +have the ability to instantiate smart-contracts and make calls to other contract and non-contract accounts. + +The smart-contract code is stored once in a `code_cache`, and later retrievable via its `code_hash`. +This means that multiple smart-contracts can be instantiated from the same `code_cache`, without replicating +the code each time. + +When a smart-contract is called, its associated code is retrieved via the code hash and gets executed. +This call can alter the storage entries of the smart-contract account, instantiate new smart-contracts, +or call other smart-contracts. + +Finally, when an account is reaped, its associated code and storage of the smart-contract account +will also be deleted. + +### Gas + +Senders must specify a gas limit with every call, as all instructions invoked by the smart-contract require gas. +Unused gas is refunded after the call, regardless of the execution outcome. + +If the gas limit is reached, then all calls and state changes (including balance transfers) are only +reverted at the current call's contract level. For example, if contract A calls B and B runs out of gas mid-call, +then all of B's calls are reverted. Assuming correct error handling by contract A, A's other calls and state +changes still persist. + +### Notable Scenarios + +Contract call failures are not always cascading. When failures occur in a sub-call, they do not "bubble up", +and the call will only revert at the specific contract level. For example, if contract A calls contract B, and B +fails, A can decide how to handle that failure, either proceeding or reverting A's changes. + +## Interface + +### Dispatchable functions + +* `put_code` - Stores the given binary Wasm code into the chain's storage and returns its `code_hash`. +* `instantiate` - Deploys a new contract from the given `code_hash`, optionally transferring some balance. +This instantiates a new smart contract account and calls its contract deploy handler to +initialize the contract. +* `call` - Makes a call to an account, optionally transferring some balance. + +## Usage + +The Contract module is a work in progress. The following examples show how this Contract module +can be used to instantiate and call contracts. + +* [`ink`](https://github.com/paritytech/ink) is +an [`eDSL`](https://wiki.haskell.org/Embedded_domain_specific_language) that enables writing +WebAssembly based smart contracts in the Rust programming language. This is a work in progress. 
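
As a rough, test-style sketch of the dispatchables listed above (the mock runtime and the
`Test`, `Contracts`, `Origin` and account identifiers are assumptions, not part of this module),
storing and instantiating a contract could look like this:

```rust
use frame_support::assert_ok;

// `Test`, `Contracts` and `Origin` are assumed to come from a mock runtime configured
// for this pallet; `wasm` is a compiled contract blob and `code_hash` its hash.
fn store_and_instantiate(
    wasm: Vec<u8>,
    code_hash: <Test as frame_system::Trait>::Hash,
) {
    const ALICE: u64 = 1;

    // Store the Wasm code on chain; it is addressed by `code_hash` from now on.
    assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm));

    // Instantiate a contract from the stored code with an endowment of 1_000 units,
    // a gas limit of 500_000 and empty constructor input.
    assert_ok!(Contracts::instantiate(
        Origin::signed(ALICE),
        1_000,
        500_000,
        code_hash,
        Vec::new(),
    ));
}
```
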
+ +## Related Modules + +* [Balances](../pallet_balances/index.html) + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/contracts/common/Cargo.toml b/frame/contracts/common/Cargo.toml index a8f5e407600..d397a280591 100644 --- a/frame/contracts/common/Cargo.toml +++ b/frame/contracts/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-contracts-primitives" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,8 +14,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # This crate should not rely on any of the frame primitives. codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/runtime" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/std" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/runtime" } [features] default = ["std"] diff --git a/frame/contracts/common/README.md b/frame/contracts/common/README.md new file mode 100644 index 00000000000..12718cd8642 --- /dev/null +++ b/frame/contracts/common/README.md @@ -0,0 +1,3 @@ +A crate that hosts a common definitions that are relevant for the pallet-contracts. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/contracts/rpc/Cargo.toml b/frame/contracts/rpc/Cargo.toml index c6b8fc8ac10..0de6bc105a9 100644 --- a/frame/contracts/rpc/Cargo.toml +++ b/frame/contracts/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-contracts-rpc" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -16,14 +16,14 @@ codec = { package = "parity-scale-codec", version = "1.3.4" } jsonrpc-core = "14.2.0" jsonrpc-core-client = "14.2.0" jsonrpc-derive = "14.2.1" -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-rpc = { version = "2.0.0-rc5", path = "../../../primitives/rpc" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../../primitives/blockchain" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } +sp-rpc = { version = "2.0.0-rc6", path = "../../../primitives/rpc" } serde = { version = "1.0.101", features = ["derive"] } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0-rc5", path = "../../../primitives/api" } -pallet-contracts-primitives = { version = "2.0.0-rc5", path = "../common" } -pallet-contracts-rpc-runtime-api = { version = "0.8.0-rc5", path = "./runtime-api" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../primitives/runtime" } +sp-api = { version = "2.0.0-rc6", path = "../../../primitives/api" } +pallet-contracts-primitives = { version = "2.0.0-rc6", path = "../common" } +pallet-contracts-rpc-runtime-api = { version = "0.8.0-rc6", path = "./runtime-api" } [dev-dependencies] serde_json = "1.0.41" diff --git a/frame/contracts/rpc/README.md b/frame/contracts/rpc/README.md new file mode 100644 index 00000000000..be6df237bf6 --- /dev/null +++ b/frame/contracts/rpc/README.md @@ -0,0 +1,3 @@ +Node-specific RPC methods for interaction with contracts. 
+
+License: Apache-2.0
\ No newline at end of file
diff --git a/frame/contracts/rpc/runtime-api/Cargo.toml b/frame/contracts/rpc/runtime-api/Cargo.toml
index a9266b986c6..fcb57d0a69f 100644
--- a/frame/contracts/rpc/runtime-api/Cargo.toml
+++ b/frame/contracts/rpc/runtime-api/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "pallet-contracts-rpc-runtime-api"
-version = "0.8.0-rc5"
+version = "0.8.0-rc6"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
 license = "Apache-2.0"
@@ -12,11 +12,11 @@ description = "Runtime API definition required by Contracts RPC extensions."
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-sp-api = { version = "2.0.0-rc5", default-features = false, path = "../../../../primitives/api" }
+sp-api = { version = "2.0.0-rc6", default-features = false, path = "../../../../primitives/api" }
 codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] }
-sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../../../primitives/std" }
-sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../../../primitives/runtime" }
-pallet-contracts-primitives = { version = "2.0.0-rc5", default-features = false, path = "../../common" }
+sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../../../primitives/std" }
+sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../../../primitives/runtime" }
+pallet-contracts-primitives = { version = "2.0.0-rc6", default-features = false, path = "../../common" }
 
 [features]
 default = ["std"]
diff --git a/frame/contracts/rpc/runtime-api/README.md b/frame/contracts/rpc/runtime-api/README.md
new file mode 100644
index 00000000000..d57f29a93bd
--- /dev/null
+++ b/frame/contracts/rpc/runtime-api/README.md
@@ -0,0 +1,7 @@
+Runtime API definition required by Contracts RPC extensions.
+
+This API should be imported and implemented by the runtime
+of a node that wants to use the custom RPC extension
+adding Contracts access methods.
+ +License: Apache-2.0 \ No newline at end of file diff --git a/frame/democracy/Cargo.toml b/frame/democracy/Cargo.toml index 3d67b4d0ec5..ac3d4419502 100644 --- a/frame/democracy/Cargo.toml +++ b/frame/democracy/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-democracy" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,19 +14,19 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = "2.0.0-rc6", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } -pallet-scheduler = { version = "2.0.0-rc5", path = "../scheduler" } -sp-storage = { version = "2.0.0-rc5", path = "../../primitives/storage" } -substrate-test-utils = { version = "2.0.0-rc5", path = "../../test-utils" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-rc6", path = "../balances" } +pallet-scheduler = { version = "2.0.0-rc6", path = "../scheduler" } +sp-storage = { version = "2.0.0-rc6", path = "../../primitives/storage" } +substrate-test-utils = { version = "2.0.0-rc6", path = "../../test-utils" } hex-literal = "0.2.1" [features] diff --git a/frame/democracy/README.md b/frame/democracy/README.md new file mode 100644 index 00000000000..0f836f1158c --- /dev/null +++ b/frame/democracy/README.md @@ -0,0 +1,135 @@ +# Democracy Pallet + +- [`democracy::Trait`](./trait.Trait.html) +- [`Call`](./enum.Call.html) + +## Overview + +The Democracy pallet handles the administration of general stakeholder voting. + +There are two different queues that a proposal can be added to before it +becomes a referendum, 1) the proposal queue consisting of all public proposals +and 2) the external queue consisting of a single proposal that originates +from one of the _external_ origins (such as a collective group). + +Every launch period - a length defined in the runtime - the Democracy pallet +launches a referendum from a proposal that it takes from either the proposal +queue or the external queue in turn. Any token holder in the system can vote +on referenda. 
The voting system +uses time-lock voting by allowing the token holder to set their _conviction_ +behind a vote. The conviction will dictate the length of time the tokens +will be locked, as well as the multiplier that scales the vote power. + +### Terminology + +- **Enactment Period:** The minimum period of locking and the period between a proposal being +approved and enacted. +- **Lock Period:** A period of time after proposal enactment that the tokens of _winning_ voters +will be locked. +- **Conviction:** An indication of a voter's strength of belief in their vote. An increase +of one in conviction indicates that a token holder is willing to lock their tokens for twice +as many lock periods after enactment. +- **Vote:** A value that can either be in approval ("Aye") or rejection ("Nay") + of a particular referendum. +- **Proposal:** A submission to the chain that represents an action that a proposer (either an +account or an external origin) suggests that the system adopt. +- **Referendum:** A proposal that is in the process of being voted on for + either acceptance or rejection as a change to the system. +- **Delegation:** The act of granting your voting power to the decisions of another account for + up to a certain conviction. + +### Adaptive Quorum Biasing + +A _referendum_ can be either simple majority-carries in which 50%+1 of the +votes decide the outcome or _adaptive quorum biased_. Adaptive quorum biasing +makes the threshold for passing or rejecting a referendum higher or lower +depending on how the referendum was originally proposed. There are two types of +adaptive quorum biasing: 1) _positive turnout bias_ makes a referendum +require a super-majority to pass that decreases as turnout increases and +2) _negative turnout bias_ makes a referendum require a super-majority to +reject that decreases as turnout increases. Another way to think about the +quorum biasing is that _positive bias_ referendums will be rejected by +default and _negative bias_ referendums get passed by default. + +## Interface + +### Dispatchable Functions + +#### Public + +These calls can be made from any externally held account capable of creating +a signed extrinsic. + +Basic actions: +- `propose` - Submits a sensitive action, represented as a hash. Requires a deposit. +- `second` - Signals agreement with a proposal, moves it higher on the proposal queue, and + requires a matching deposit to the original. +- `vote` - Votes in a referendum, either the vote is "Aye" to enact the proposal or "Nay" to + keep the status quo. +- `unvote` - Cancel a previous vote, this must be done by the voter before the vote ends. +- `delegate` - Delegates the voting power (tokens * conviction) to another account. +- `undelegate` - Stops the delegation of voting power to another account. + +Administration actions that can be done to any account: +- `reap_vote` - Remove some account's expired votes. +- `unlock` - Redetermine the account's balance lock, potentially making tokens available. + +Preimage actions: +- `note_preimage` - Registers the preimage for an upcoming proposal, requires + a deposit that is returned once the proposal is enacted. +- `note_preimage_operational` - same but provided by `T::OperationalPreimageOrigin`. +- `note_imminent_preimage` - Registers the preimage for an upcoming proposal. + Does not require a deposit, but the proposal must be in the dispatch queue. +- `note_imminent_preimage_operational` - same but provided by `T::OperationalPreimageOrigin`. 
+- `reap_preimage` - Removes the preimage for an expired proposal. Will only + work under the condition that it's the same account that noted it and + after the voting period, OR it's a different account after the enactment period. + +#### Cancellation Origin + +This call can only be made by the `CancellationOrigin`. + +- `emergency_cancel` - Schedules an emergency cancellation of a referendum. + Can only happen once to a specific referendum. + +#### ExternalOrigin + +This call can only be made by the `ExternalOrigin`. + +- `external_propose` - Schedules a proposal to become a referendum once it is is legal + for an externally proposed referendum. + +#### External Majority Origin + +This call can only be made by the `ExternalMajorityOrigin`. + +- `external_propose_majority` - Schedules a proposal to become a majority-carries + referendum once it is legal for an externally proposed referendum. + +#### External Default Origin + +This call can only be made by the `ExternalDefaultOrigin`. + +- `external_propose_default` - Schedules a proposal to become a negative-turnout-bias + referendum once it is legal for an externally proposed referendum. + +#### Fast Track Origin + +This call can only be made by the `FastTrackOrigin`. + +- `fast_track` - Schedules the current externally proposed proposal that + is "majority-carries" to become a referendum immediately. + +#### Veto Origin + +This call can only be made by the `VetoOrigin`. + +- `veto_external` - Vetoes and blacklists the external proposal hash. + +#### Root + +- `cancel_referendum` - Removes a referendum. +- `cancel_queued` - Cancels a proposal that is queued for enactment. +- `clear_public_proposal` - Removes all public proposals. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/elections-phragmen/Cargo.toml b/frame/elections-phragmen/Cargo.toml index 83ac253a8ba..3f27df1c564 100644 --- a/frame/elections-phragmen/Cargo.toml +++ b/frame/elections-phragmen/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-elections-phragmen" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,19 +14,19 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-npos-elections = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/npos-elections" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +sp-npos-elections = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/npos-elections" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +frame-benchmarking = { version = "2.0.0-rc6", default-features = false, 
path = "../benchmarking", optional = true } [dev-dependencies] -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io" } +sp-io = { version = "2.0.0-rc6", path = "../../primitives/io" } hex-literal = "0.2.1" -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -substrate-test-utils = { version = "2.0.0-rc5", path = "../../test-utils" } +pallet-balances = { version = "2.0.0-rc6", path = "../balances" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +substrate-test-utils = { version = "2.0.0-rc6", path = "../../test-utils" } [features] default = ["std"] diff --git a/frame/elections-phragmen/README.md b/frame/elections-phragmen/README.md new file mode 100644 index 00000000000..651b8f6aa69 --- /dev/null +++ b/frame/elections-phragmen/README.md @@ -0,0 +1,67 @@ +# Phragmén Election Module. + +An election module based on sequential phragmen. + +### Term and Round + +The election happens in _rounds_: every `N` blocks, all previous members are retired and a new +set is elected (which may or may not have an intersection with the previous set). Each round +lasts for some number of blocks defined by `TermDuration` storage item. The words _term_ and +_round_ can be used interchangeably in this context. + +`TermDuration` might change during a round. This can shorten or extend the length of the round. +The next election round's block number is never stored but rather always checked on the fly. +Based on the current block number and `TermDuration`, the condition `BlockNumber % TermDuration +== 0` being satisfied will always trigger a new election round. + +### Voting + +Voters can vote for any set of the candidates by providing a list of account ids. Invalid votes +(voting for non-candidates) are ignored during election. Yet, a voter _might_ vote for a future +candidate. Voters reserve a bond as they vote. Each vote defines a `value`. This amount is +locked from the account of the voter and indicates the weight of the vote. Voters can update +their votes at any time by calling `vote()` again. This keeps the bond untouched but can +optionally change the locked `value`. After a round, votes are kept and might still be valid for +further rounds. A voter is responsible for calling `remove_voter` once they are done to have +their bond back and remove the lock. + +Voters also report other voters as being defunct to earn their bond. A voter is defunct once all +of the candidates that they have voted for are neither a valid candidate anymore nor a member. +Upon reporting, if the target voter is actually defunct, the reporter will be rewarded by the +voting bond of the target. The target will lose their bond and get removed. If the target is not +defunct, the reporter is slashed and removed. To prevent being reported, voters should manually +submit a `remove_voter()` as soon as they are in the defunct state. + +### Candidacy and Members + +Candidates also reserve a bond as they submit candidacy. A candidate cannot take their candidacy +back. A candidate can end up in one of the below situations: + - **Winner**: A winner is kept as a _member_. They must still have a bond in reserve and they + are automatically counted as a candidate for the next election. + - **Runner-up**: Runners-up are the best candidates immediately after the winners. The number + of runners_up to keep is configurable. 
Runners-up are used, in order that they are elected, + as replacements when a candidate is kicked by `[remove_member]`, or when an active member + renounces their candidacy. Runners are automatically counted as a candidate for the next + election. + - **Loser**: Any of the candidate who are not a winner are left as losers. A loser might be an + _outgoing member or runner_, meaning that they are an active member who failed to keep their + spot. An outgoing will always lose their bond. + +##### Renouncing candidacy. + +All candidates, elected or not, can renounce their candidacy. A call to [`Module::renounce_candidacy`] +will always cause the candidacy bond to be refunded. + +Note that with the members being the default candidates for the next round and votes persisting +in storage, the election system is entirely stable given no further input. This means that if +the system has a particular set of candidates `C` and voters `V` that lead to a set of members +`M` being elected, as long as `V` and `C` don't remove their candidacy and votes, `M` will keep +being re-elected at the end of each round. + +### Module Information + +- [`election_sp_phragmen::Trait`](./trait.Trait.html) +- [`Call`](./enum.Call.html) +- [`Module`](./struct.Module.html) + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/elections/Cargo.toml b/frame/elections/Cargo.toml index 8226512a626..58dba26af98 100644 --- a/frame/elections/Cargo.toml +++ b/frame/elections/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-elections" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,16 +14,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } [dev-dependencies] hex-literal = "0.2.1" -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } +pallet-balances = { version = "2.0.0-rc6", path = "../balances" } [features] default = ["std"] diff --git a/frame/elections/README.md b/frame/elections/README.md new file mode 100644 index 00000000000..1f6fd42331c --- /dev/null +++ b/frame/elections/README.md @@ -0,0 +1,7 @@ +Election module for stake-weighted membership selection of a collective. 
+ +The composition of a set of account IDs works according to one or more approval votes +weighted by stake. There is a partial carry-over facility to give greater weight to those +whose voting is serially unsuccessful. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/evm/Cargo.toml b/frame/evm/Cargo.toml index 43ecc6f3688..0f14f3afe48 100644 --- a/frame/evm/Cargo.toml +++ b/frame/evm/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-evm" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,14 +14,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -pallet-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../timestamp" } -pallet-balances = { version = "2.0.0-rc5", default-features = false, path = "../balances" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } +pallet-timestamp = { version = "2.0.0-rc6", default-features = false, path = "../timestamp" } +pallet-balances = { version = "2.0.0-rc6", default-features = false, path = "../balances" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" } primitive-types = { version = "0.7.0", default-features = false, features = ["rlp"] } rlp = { version = "0.4", default-features = false } evm = { version = "0.17", default-features = false } diff --git a/frame/evm/README.md b/frame/evm/README.md new file mode 100644 index 00000000000..f8feadbf58e --- /dev/null +++ b/frame/evm/README.md @@ -0,0 +1,3 @@ +EVM execution module for Substrate + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/example-offchain-worker/Cargo.toml b/frame/example-offchain-worker/Cargo.toml index 5f11bd54a45..d8bc2a697db 100644 --- a/frame/example-offchain-worker/Cargo.toml +++ b/frame/example-offchain-worker/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-example-offchain-worker" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Unlicense" @@ -13,13 +13,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +frame-support = { version = "2.0.0-rc6", 
default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } serde = { version = "1.0.101", optional = true } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/core" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } lite-json = { version = "0.1", default-features = false } [features] diff --git a/frame/example-offchain-worker/README.md b/frame/example-offchain-worker/README.md new file mode 100644 index 00000000000..51ddaa3a9ec --- /dev/null +++ b/frame/example-offchain-worker/README.md @@ -0,0 +1,26 @@ +# Offchain Worker Example Module + +The Offchain Worker Example: A simple pallet demonstrating +concepts, APIs and structures common to most offchain workers. + +Run `cargo doc --package pallet-example-offchain-worker --open` to view this module's +documentation. + +- [`pallet_example_offchain_worker::Trait`](./trait.Trait.html) +- [`Call`](./enum.Call.html) +- [`Module`](./struct.Module.html) + + +## Overview + +In this example we are going to build a very simplistic, naive and definitely NOT +production-ready oracle for BTC/USD price. +Offchain Worker (OCW) will be triggered after every block, fetch the current price +and prepare either signed or unsigned transaction to feed the result back on chain. +The on-chain logic will simply aggregate the results and store last `64` values to compute +the average price. +Additional logic in OCW is put in place to prevent spamming the network with both signed +and unsigned transactions, and custom `UnsignedValidator` makes sure that there is only +one unsigned transaction floating in the network. 
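+
+As a minimal sketch of the mechanism only (the pallet's real implementation adds the price
+fetching, signing and validation described above), an off-chain worker is simply a hook
+declared with the FRAME v1 macros and executed by the node after each block import:
+
+```rust
+use frame_support::decl_module;
+
+pub trait Trait: frame_system::Trait {}
+
+decl_module! {
+    pub struct Module<T: Trait> for enum Call where origin: T::Origin {
+        // Runs off-chain after every block; any results must be reported back
+        // on-chain via signed or unsigned transactions.
+        fn offchain_worker(_block_number: T::BlockNumber) {
+            // e.g. fetch an external price and submit a transaction with it
+        }
+    }
+}
+```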
+ +License: Unlicense \ No newline at end of file diff --git a/frame/example/Cargo.toml b/frame/example/Cargo.toml index 2f7af90d76d..29e1208419d 100644 --- a/frame/example/Cargo.toml +++ b/frame/example/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-example" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Unlicense" @@ -14,17 +14,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -pallet-balances = { version = "2.0.0-rc5", default-features = false, path = "../balances" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } +pallet-balances = { version = "2.0.0-rc6", default-features = false, path = "../balances" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0-rc6", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core", default-features = false } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core", default-features = false } [features] default = ["std"] diff --git a/frame/example/README.md b/frame/example/README.md new file mode 100644 index 00000000000..8f4729a4ce4 --- /dev/null +++ b/frame/example/README.md @@ -0,0 +1,237 @@ +# Example Pallet + + +The Example: A simple example of a FRAME pallet demonstrating +concepts, APIs and structures common to most FRAME runtimes. + +Run `cargo doc --package pallet-example --open` to view this pallet's documentation. + +### Documentation Guidelines: + + + +

+ +### Documentation Template:
+ +Copy and paste this template from frame/example/src/lib.rs into file +`frame//src/lib.rs` of your own custom pallet and complete it. +

+// Add heading with custom pallet name
+
+\# <INSERT_CUSTOM_PALLET_NAME> Pallet
+
+// Add simple description
+
+// Include the following links that show what trait needs to be implemented to use the pallet
+// and the supported dispatchables that are documented in the Call enum.
+
+- \[`<INSERT_CUSTOM_PALLET_NAME>::Trait`](./trait.Trait.html)
+- \[`Call`](./enum.Call.html)
+- \[`Module`](./struct.Module.html)
+
+\## Overview
+
+
+// Short description of pallet's purpose.
+// Links to Traits that should be implemented.
+// What this pallet is for.
+// What functionality the pallet provides.
+// When to use the pallet (use case examples).
+// How it is used.
+// Inputs it uses and the source of each input.
+// Outputs it produces.
+
+
+
+
+\## Terminology
+
+// Add terminology used in the custom pallet. Include concepts, storage items, or actions that you think
+// deserve to be noted to give context to the rest of the documentation or pallet usage. The author needs to
+// use some judgment about what is included. We don't want a list of every storage item or type - the user
+// can go to the code for that. For example, "transfer fee" is obvious and should not be included, but
+// "free balance" and "reserved balance" should be noted to give context to the pallet.
+// Please do not link to outside resources. The reference docs should be the ultimate source of truth.
+
+
+
+\## Goals
+
+// Add goals that the custom pallet is designed to achieve.
+
+
+
+\### Scenarios
+
+
+
+\#### <INSERT_SCENARIO_NAME>
+
+// Describe requirements prior to interacting with the custom pallet.
+// Describe the process of interacting with the custom pallet for this scenario and public API functions used.
+
+\## Interface
+
+\### Supported Origins
+
+// What origins are used and supported in this pallet (root, signed, none)
+// i.e. root when \`ensure_root\` used
+// i.e. none when \`ensure_none\` used
+// i.e. signed when \`ensure_signed\` used
+
+\`inherent\` <INSERT_DESCRIPTION>
+
+
+
+
+\### Types
+
+// Type aliases. Include any associated types and where the user would typically define them.
+
+\`ExampleType\` <INSERT_DESCRIPTION>
+
+
+
+// Reference documentation of aspects such as `storageItems` and `dispatchable` functions should only be
+// included in the https://docs.rs Rustdocs for Substrate and not repeated in the README file.
+
+\### Dispatchable Functions
+
+
+
+// A brief description of dispatchable functions and a link to the rustdoc with their actual documentation.
+
+// MUST have link to Call enum
+// MUST have origin information included in function doc
+// CAN have more info up to the user
+
+\### Public Functions
+
+
+
+// A link to the rustdoc and any notes about usage in the pallet, not for specific functions.
+// For example, in the Balances Pallet: "Note that when using the publicly exposed functions,
+// you (the runtime developer) are responsible for implementing any necessary checks
+// (e.g. that the sender is the signer) before calling a function that will affect storage."
+
+
+
+// It is up to the writer of the respective pallet (with respect to how much information to provide).
+
+\#### Public Inspection functions - Immutable (getters)
+
+// Insert a subheading for each getter function signature
+
+\##### \`example_getter_name()\`
+
+// What it returns
+// Why, when, and how often to call it
+// When it could panic or error
+// What safety issues to consider
+
+\#### Public Mutable functions (changing state)
+
+// Insert a subheading for each setter function signature
+
+\##### \`example_setter_name(origin, parameter_name: T::ExampleType)\`
+
+// What state it changes
+// Why, when, and how often to call it
+// When it could panic or error
+// What safety issues to consider
+// What parameter values are valid and why
+
+\### Storage Items
+
+// Explain any storage items included in this pallet
+
+\### Digest Items
+
+// Explain any digest items included in this pallet
+
+\### Inherent Data
+
+// Explain what inherent data (if any) is defined in the pallet and any other related types
+
+\### Events:
+
+// Insert events for this pallet if any
+
+\### Errors:
+
+// Explain what generates errors
+
+\## Usage
+
+// Insert 2-3 examples of usage and code snippets that show how to
+// use the <INSERT_CUSTOM_PALLET_NAME> Pallet in a custom pallet.
+
+\### Prerequisites
+
+// Show how to include necessary imports for <INSERT_CUSTOM_PALLET_NAME> and derive
+// your pallet configuration trait with the `INSERT_CUSTOM_PALLET_NAME` trait.
+
+\```rust
+use <INSERT_CUSTOM_PALLET_NAME>;
+
+pub trait Trait: <INSERT_CUSTOM_PALLET_NAME>::Trait { }
+\```
+
+\### Simple Code Snippet
+
+// Show a simple example (e.g. how to query a public getter function of <INSERT_CUSTOM_PALLET_NAME>)
+
+\### Example from FRAME
+
+// Show a usage example in an actual runtime
+
+// See:
+// - Substrate TCR https://github.com/parity-samples/substrate-tcr
+// - Substrate Kitties https://shawntabrizi.github.io/substrate-collectables-workshop/#/
+
+\## Genesis Config
+
+
+
+\## Dependencies
+
+// Dependencies on other FRAME pallets and the genesis config should be mentioned,
+// but not the Rust Standard Library.
+// Genesis configuration modifications that may be made to incorporate this pallet
+// Interaction with other pallets
+
+
+
+\## Related Pallets
+
+// Interaction with other pallets in the form of a bullet point list
+
+\## References
+
+
+
+// Links to reference material, if applicable. For example, Phragmen, W3F research, etc.
+// that the implementation is based on.
+

+ +License: Unlicense \ No newline at end of file diff --git a/frame/executive/Cargo.toml b/frame/executive/Cargo.toml index ea123bd6e7f..bb2cd578978 100644 --- a/frame/executive/Cargo.toml +++ b/frame/executive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-executive" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,22 +13,22 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } serde = { version = "1.0.101", optional = true } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-tracing = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/tracing" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +sp-tracing = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/tracing" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" } [dev-dependencies] hex-literal = "0.2.1" -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-io ={ version = "2.0.0-rc5", path = "../../primitives/io" } -pallet-indices = { version = "2.0.0-rc5", path = "../indices" } -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } -pallet-transaction-payment = { version = "2.0.0-rc5", path = "../transaction-payment" } -sp-version = { version = "2.0.0-rc5", path = "../../primitives/version" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +sp-io ={ version = "2.0.0-rc6", path = "../../primitives/io" } +pallet-indices = { version = "2.0.0-rc6", path = "../indices" } +pallet-balances = { version = "2.0.0-rc6", path = "../balances" } +pallet-transaction-payment = { version = "2.0.0-rc6", path = "../transaction-payment" } +sp-version = { version = "2.0.0-rc6", path = "../../primitives/version" } [features] default = ["std"] diff --git a/frame/executive/README.md b/frame/executive/README.md new file mode 100644 index 00000000000..017aa5d0444 --- /dev/null +++ b/frame/executive/README.md @@ -0,0 +1,61 @@ +# Executive Module + +The Executive module acts as the orchestration layer for the runtime. It dispatches incoming +extrinsic calls to the respective modules in the runtime. + +## Overview + +The executive module is not a typical pallet providing functionality around a specific feature. +It is a cross-cutting framework component for the FRAME. It works in conjunction with the +[FRAME System module](../frame_system/index.html) to perform these cross-cutting functions. + +The Executive module provides functions to: + +- Check transaction validity. +- Initialize a block. +- Apply extrinsics. +- Execute a block. +- Finalize a block. +- Start an off-chain worker. 
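+
+As a rough sketch (assuming the `Executive`, `Block` and `BlockT` aliases that a FRAME
+runtime normally defines; this is not a complete runtime), these are the calls a runtime
+makes for the different phases of block execution:
+
+```rust
+#
+// Import and check a complete block received from the network.
+fn import_block(block: Block) {
+    Executive::execute_block(block);
+}
+
+// Author a new block: initialize, apply the extrinsics, then finalize.
+fn author_block(header: &<Block as BlockT>::Header, extrinsic: <Block as BlockT>::Extrinsic) {
+    Executive::initialize_block(header);
+    let _result = Executive::apply_extrinsic(extrinsic);
+    let _new_header = Executive::finalize_block();
+}
+```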
+ +### Implementations + +The Executive module provides the following implementations: + +- `ExecuteBlock`: Trait that can be used to execute a block. +- `Executive`: Type that can be used to make the FRAME available from the runtime. + +## Usage + +The default Substrate node template declares the [`Executive`](./struct.Executive.html) type in its library. + +### Example + +`Executive` type declaration from the node template. + +```rust +# +/// Executive: handles dispatch to the various modules. +pub type Executive = executive::Executive; +``` + +### Custom `OnRuntimeUpgrade` logic + +You can add custom logic that should be called in your runtime on a runtime upgrade. This is +done by setting an optional generic parameter. The custom logic will be called before +the on runtime upgrade logic of all modules is called. + +```rust +# +struct CustomOnRuntimeUpgrade; +impl frame_support::traits::OnRuntimeUpgrade for CustomOnRuntimeUpgrade { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + // Do whatever you want. + 0 + } +} + +pub type Executive = executive::Executive; +``` + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/finality-tracker/Cargo.toml b/frame/finality-tracker/Cargo.toml index 9b54717e4db..2f3d504879e 100644 --- a/frame/finality-tracker/Cargo.toml +++ b/frame/finality-tracker/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-finality-tracker" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -16,17 +16,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", default-features = false, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/inherents" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-finality-tracker = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/finality-tracker" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-inherents = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/inherents" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +sp-finality-tracker = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/finality-tracker" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } impl-trait-for-tuples = "0.1.3" [dev-dependencies] -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/core" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" } [features] default = ["std"] diff --git a/frame/finality-tracker/README.md b/frame/finality-tracker/README.md new file mode 100644 index 00000000000..bf42605ffc6 
--- /dev/null +++ b/frame/finality-tracker/README.md @@ -0,0 +1,3 @@ +FRAME Pallet that tracks the last finalized block, as perceived by block authors. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/generic-asset/Cargo.toml b/frame/generic-asset/Cargo.toml index e1e59030627..9dfc7699158 100644 --- a/frame/generic-asset/Cargo.toml +++ b/frame/generic-asset/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-generic-asset" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Centrality Developers "] edition = "2018" license = "Apache-2.0" @@ -14,14 +14,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } [dev-dependencies] -sp-io ={ version = "2.0.0-rc5", path = "../../primitives/io" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } +sp-io ={ version = "2.0.0-rc6", path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/generic-asset/README.md b/frame/generic-asset/README.md new file mode 100644 index 00000000000..ab82be54b20 --- /dev/null +++ b/frame/generic-asset/README.md @@ -0,0 +1,131 @@ +# Generic Asset Module + +The Generic Asset module provides functionality for handling accounts and asset balances. + +## Overview + +The Generic Asset module provides functions for: + +- Creating a new kind of asset. +- Setting permissions of an asset. +- Getting and setting free balances. +- Retrieving total, reserved and unreserved balances. +- Repatriating a reserved balance to a beneficiary account. +- Transferring a balance between accounts (when not reserved). +- Slashing an account balance. +- Managing total issuance. +- Setting and managing locks. + +### Terminology + +- **Staking Asset:** The asset for staking, to participate as Validators in the network. +- **Spending Asset:** The asset for payment, such as paying transfer fees, gas fees, etc. +- **Permissions:** A set of rules for a kind of asset, defining the allowed operations to the asset, and which +accounts are allowed to possess it. +- **Total Issuance:** The total number of units in existence in a system. +- **Free Balance:** The portion of a balance that is not reserved. The free balance is the only balance that matters +for most operations. When this balance falls below the existential deposit, most functionality of the account is +removed. When both it and the reserved balance are deleted, then the account is said to be dead. +- **Reserved Balance:** Reserved balance still belongs to the account holder, but is suspended. 
Reserved balance +can still be slashed, but only after all the free balance has been slashed. If the reserved balance falls below the +existential deposit then it and any related functionality will be deleted. When both it and the free balance are +deleted, then the account is said to be dead. +- **Imbalance:** A condition when some assets were credited or debited without equal and opposite accounting +(i.e. a difference between total issuance and account balances). Functions that result in an imbalance will +return an object of the `Imbalance` trait that can be managed within your runtime logic. (If an imbalance is +simply dropped, it should automatically maintain any book-keeping such as total issuance.) +- **Lock:** A freeze on a specified amount of an account's free balance until a specified block number. Multiple +locks always operate over the same funds, so they "overlay" rather than "stack". + +### Implementations + +The Generic Asset module provides `AssetCurrency`, which implements the following traits. If these traits provide +the functionality that you need, you can avoid coupling with the Generic Asset module. + +- `Currency`: Functions for dealing with a fungible assets system. +- `ReservableCurrency`: Functions for dealing with assets that can be reserved from an account. +- `LockableCurrency`: Functions for dealing with accounts that allow liquidity restrictions. +- `Imbalance`: Functions for handling imbalances between total issuance in the system and account balances. +Must be used when a function creates new assets (e.g. a reward) or destroys some assets (e.g. a system fee). + +The Generic Asset module provides two types of `AssetCurrency` as follows. + +- `StakingAssetCurrency`: Currency for staking. +- `SpendingAssetCurrency`: Currency for payments such as transfer fee, gas fee. + +## Interface + +### Dispatchable Functions + +- `create`: Create a new kind of asset. +- `transfer`: Transfer some liquid free balance to another account. +- `update_permission`: Updates permission for a given `asset_id` and an account. The origin of this call +must have update permissions. +- `mint`: Mint an asset, increases its total issuance. The origin of this call must have mint permissions. +- `burn`: Burn an asset, decreases its total issuance. The origin of this call must have burn permissions. +- `create_reserved`: Create a new kind of reserved asset. The origin of this call must be root. + +### Public Functions + +- `total_balance`: Get an account's total balance of an asset kind. +- `free_balance`: Get an account's free balance of an asset kind. +- `reserved_balance`: Get an account's reserved balance of an asset kind. +- `create_asset`: Creates an asset. +- `make_transfer`: Transfer some liquid free balance from one account to another. +This will not emit the `Transferred` event. +- `make_transfer_with_event`: Transfer some liquid free balance from one account to another. +This will emit the `Transferred` event. +- `reserve`: Moves an amount from free balance to reserved balance. +- `unreserve`: Move up to an amount from reserved balance to free balance. This function cannot fail. +- `mint_free`: Mint to an account's free balance. +- `burn_free`: Burn an account's free balance. +- `slash`: Deduct up to an amount from the combined balance of `who`, preferring to deduct from the + free balance. This function cannot fail. +- `slash_reserved`: Deduct up to an amount from reserved balance of an account. This function cannot fail. 
+- `repatriate_reserved`: Move up to an amount from reserved balance of an account to free balance of another +account. +- `check_permission`: Check permission to perform burn, mint or update. +- `ensure_can_withdraw`: Check if the account is able to make a withdrawal of the given amount + for the given reason. + +### Usage + +The following examples show how to use the Generic Asset Pallet in your custom pallet. + +### Examples from the FRAME pallet + +The Fees Pallet uses the `Currency` trait to handle fee charge/refund, and its types inherit from `Currency`: + +```rust +use frame_support::{ + dispatch, + traits::{Currency, ExistenceRequirement, WithdrawReason}, +}; +type AssetOf = <::Currency as Currency<::AccountId>>::Balance; + +fn charge_fee(transactor: &T::AccountId, amount: AssetOf) -> dispatch::DispatchResult { + // ... + T::Currency::withdraw( + transactor, + amount, + WithdrawReason::TransactionPayment.into(), + ExistenceRequirement::KeepAlive, + )?; + // ... + Ok(()) +} + +fn refund_fee(transactor: &T::AccountId, amount: AssetOf) -> dispatch::DispatchResult { + // ... + T::Currency::deposit_into_existing(transactor, amount)?; + // ... + Ok(()) +} + +``` + +## Genesis config + +The Generic Asset Pallet depends on the [`GenesisConfig`](./struct.GenesisConfig.html). + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/grandpa/Cargo.toml b/frame/grandpa/Cargo.toml index 004255b9e1e..fcfa15813dc 100644 --- a/frame/grandpa/Cargo.toml +++ b/frame/grandpa/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-grandpa" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,30 +14,30 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/application-crypto" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-finality-grandpa = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/finality-grandpa" } -sp-session = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/session" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/staking" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -pallet-authorship = { version = "2.0.0-rc5", default-features = false, path = "../authorship" } -pallet-session = { version = "2.0.0-rc5", default-features = false, path = "../session" } -pallet-finality-tracker = { version = "2.0.0-rc5", default-features = false, path = "../finality-tracker" } +sp-application-crypto = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/application-crypto" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/core" } +sp-finality-grandpa = { version = "2.0.0-rc6", default-features = 
false, path = "../../primitives/finality-grandpa" } +sp-session = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/session" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/staking" } +frame-benchmarking = { version = "2.0.0-rc6", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } +pallet-authorship = { version = "2.0.0-rc6", default-features = false, path = "../authorship" } +pallet-session = { version = "2.0.0-rc6", default-features = false, path = "../session" } +pallet-finality-tracker = { version = "2.0.0-rc6", default-features = false, path = "../finality-tracker" } [dev-dependencies] -frame-benchmarking = { version = "2.0.0-rc5", path = "../benchmarking" } +frame-benchmarking = { version = "2.0.0-rc6", path = "../benchmarking" } grandpa = { package = "finality-grandpa", version = "0.12.3", features = ["derive-codec"] } -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io" } -sp-keyring = { version = "2.0.0-rc5", path = "../../primitives/keyring" } -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } -pallet-offences = { version = "2.0.0-rc5", path = "../offences" } -pallet-staking = { version = "2.0.0-rc5", path = "../staking" } -pallet-staking-reward-curve = { version = "2.0.0-rc5", path = "../staking/reward-curve" } -pallet-timestamp = { version = "2.0.0-rc5", path = "../timestamp" } +sp-io = { version = "2.0.0-rc6", path = "../../primitives/io" } +sp-keyring = { version = "2.0.0-rc6", path = "../../primitives/keyring" } +pallet-balances = { version = "2.0.0-rc6", path = "../balances" } +pallet-offences = { version = "2.0.0-rc6", path = "../offences" } +pallet-staking = { version = "2.0.0-rc6", path = "../staking" } +pallet-staking-reward-curve = { version = "2.0.0-rc6", path = "../staking/reward-curve" } +pallet-timestamp = { version = "2.0.0-rc6", path = "../timestamp" } [features] default = ["std"] diff --git a/frame/grandpa/README.md b/frame/grandpa/README.md new file mode 100644 index 00000000000..84b181a8b31 --- /dev/null +++ b/frame/grandpa/README.md @@ -0,0 +1,12 @@ +GRANDPA Consensus module for runtime. + +This manages the GRANDPA authority set ready for the native code. +These authorities are only for GRANDPA finality, not for consensus overall. + +In the future, it will also handle misbehavior reports, and on-chain +finality notifications. + +For full integration with GRANDPA, the `GrandpaApi` should be implemented. +The necessary items are re-exported via the `fg_primitives` crate. 
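+
+For example (a sketch only, assuming the `Runtime` and `Block` types and the `Grandpa`
+pallet instance that `construct_runtime!` provides, and omitting the API's other methods),
+the current authority set is exposed to the node from the runtime's `impl_runtime_apis!`
+block:
+
+```rust
+impl fg_primitives::GrandpaApi<Block> for Runtime {
+    fn grandpa_authorities() -> fg_primitives::AuthorityList {
+        Grandpa::grandpa_authorities()
+    }
+}
+```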
+ +License: Apache-2.0 \ No newline at end of file diff --git a/frame/identity/Cargo.toml b/frame/identity/Cargo.toml index 99e147b3141..6e6289a9dea 100644 --- a/frame/identity/Cargo.toml +++ b/frame/identity/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-identity" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,16 +15,16 @@ targets = ["x86_64-unknown-linux-gnu"] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = "2.0.0-rc6", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-rc6", path = "../balances" } [features] default = ["std"] diff --git a/frame/identity/README.md b/frame/identity/README.md new file mode 100644 index 00000000000..de2f415cdf7 --- /dev/null +++ b/frame/identity/README.md @@ -0,0 +1,56 @@ +# Identity Module + +- [`identity::Trait`](./trait.Trait.html) +- [`Call`](./enum.Call.html) + +## Overview + +A federated naming system, allowing for multiple registrars to be added from a specified origin. +Registrars can set a fee to provide identity-verification service. Anyone can put forth a +proposed identity for a fixed deposit and ask for review by any number of registrars (paying +each of their fees). Registrar judgements are given as an `enum`, allowing for sophisticated, +multi-tier opinions. + +Some judgements are identified as *sticky*, which means they cannot be removed except by +complete removal of the identity, or by the registrar. Judgements are allowed to represent a +portion of funds that have been reserved for the registrar. + +A super-user can remove accounts and in doing so, slash the deposit. + +All accounts may also have a limited number of sub-accounts which may be specified by the owner; +by definition, these have equivalent ownership and each has an individual name. + +The number of registrars should be limited, and the deposit made sufficiently large, to ensure +no state-bloat attack is viable. 
+ +## Interface + +### Dispatchable Functions + +#### For general users +* `set_identity` - Set the associated identity of an account; a small deposit is reserved if not + already taken. +* `clear_identity` - Remove an account's associated identity; the deposit is returned. +* `request_judgement` - Request a judgement from a registrar, paying a fee. +* `cancel_request` - Cancel the previous request for a judgement. + +#### For general users with sub-identities +* `set_subs` - Set the sub-accounts of an identity. +* `add_sub` - Add a sub-identity to an identity. +* `remove_sub` - Remove a sub-identity of an identity. +* `rename_sub` - Rename a sub-identity of an identity. +* `quit_sub` - Remove a sub-identity of an identity (called by the sub-identity). + +#### For registrars +* `set_fee` - Set the fee required to be paid for a judgement to be given by the registrar. +* `set_fields` - Set the fields that a registrar cares about in their judgements. +* `provide_judgement` - Provide a judgement to an identity. + +#### For super-users +* `add_registrar` - Add a new registrar to the system. +* `kill_identity` - Forcibly remove the associated identity; the deposit is lost. + +[`Call`]: ./enum.Call.html +[`Trait`]: ./trait.Trait.html + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/im-online/Cargo.toml b/frame/im-online/Cargo.toml index 75fc4e2454c..8541b46c9c8 100644 --- a/frame/im-online/Cargo.toml +++ b/frame/im-online/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-im-online" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,20 +12,20 @@ description = "FRAME's I'm online pallet" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/application-crypto" } -pallet-authorship = { version = "2.0.0-rc5", default-features = false, path = "../authorship" } +sp-application-crypto = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/application-crypto" } +pallet-authorship = { version = "2.0.0-rc6", default-features = false, path = "../authorship" } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } -pallet-session = { version = "2.0.0-rc5", default-features = false, path = "../session" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +pallet-session = { version = "2.0.0-rc6", default-features = false, path = "../session" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = 
"../../primitives/runtime" } +sp-staking = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0-rc6", default-features = false, path = "../benchmarking", optional = true } [features] default = ["std", "pallet-session/historical"] diff --git a/frame/im-online/README.md b/frame/im-online/README.md new file mode 100644 index 00000000000..c85705bd0ee --- /dev/null +++ b/frame/im-online/README.md @@ -0,0 +1,51 @@ +# I'm online Module + +If the local node is a validator (i.e. contains an authority key), this module +gossips a heartbeat transaction with each new session. The heartbeat functions +as a simple mechanism to signal that the node is online in the current era. + +Received heartbeats are tracked for one era and reset with each new era. The +module exposes two public functions to query if a heartbeat has been received +in the current era or session. + +The heartbeat is a signed transaction, which was signed using the session key +and includes the recent best block number of the local validators chain as well +as the [NetworkState](../../client/offchain/struct.NetworkState.html). +It is submitted as an Unsigned Transaction via off-chain workers. + +- [`im_online::Trait`](./trait.Trait.html) +- [`Call`](./enum.Call.html) +- [`Module`](./struct.Module.html) + +## Interface + +### Public Functions + +- `is_online` - True if the validator sent a heartbeat in the current session. + +## Usage + +```rust +use frame_support::{decl_module, dispatch}; +use frame_system::ensure_signed; +use pallet_im_online::{self as im_online}; + +pub trait Trait: im_online::Trait {} + +decl_module! { + pub struct Module for enum Call where origin: T::Origin { + #[weight = 0] + pub fn is_online(origin, authority_index: u32) -> dispatch::DispatchResult { + let _sender = ensure_signed(origin)?; + let _is_online = >::is_online(authority_index); + Ok(()) + } + } +} +``` + +## Dependencies + +This module depends on the [Session module](../pallet_session/index.html). 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/frame/indices/Cargo.toml b/frame/indices/Cargo.toml index 85ab36bc039..25d5c2527a9 100644 --- a/frame/indices/Cargo.toml +++ b/frame/indices/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-indices" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,18 +14,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-keyring = { version = "2.0.0-rc5", optional = true, path = "../../primitives/keyring" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-keyring = { version = "2.0.0-rc6", optional = true, path = "../../primitives/keyring" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/core" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0-rc6", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } +pallet-balances = { version = "2.0.0-rc6", path = "../balances" } [features] default = ["std"] diff --git a/frame/indices/README.md b/frame/indices/README.md new file mode 100644 index 00000000000..243392780db --- /dev/null +++ b/frame/indices/README.md @@ -0,0 +1,4 @@ +An index is a short form of an address. This module handles allocation +of indices for a newly created accounts. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/frame/membership/Cargo.toml b/frame/membership/Cargo.toml index fd138a97c7e..8ebcce3de78 100644 --- a/frame/membership/Cargo.toml +++ b/frame/membership/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-membership" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,14 +14,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/membership/README.md b/frame/membership/README.md new file mode 100644 index 00000000000..a769be49705 --- /dev/null +++ b/frame/membership/README.md @@ -0,0 +1,6 @@ +# Membership Module + +Allows control of membership of a set of `AccountId`s, useful for managing membership of of a +collective. A prime member may be set. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/metadata/Cargo.toml b/frame/metadata/Cargo.toml index be508ef2c04..7e2cb28f5e4 100644 --- a/frame/metadata/Cargo.toml +++ b/frame/metadata/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-metadata" -version = "11.0.0-rc5" +version = "11.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,8 +14,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/metadata/README.md b/frame/metadata/README.md new file mode 100644 index 00000000000..423af8602e3 --- /dev/null +++ b/frame/metadata/README.md @@ -0,0 +1,7 @@ +Decodable variant of the RuntimeMetadata. + +This really doesn't belong here, but is necessary for the moment. In the future +it should be removed entirely to an external module for shimming on to the +codec-encoded metadata. 
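+
+As a rough sketch of how a client might consume this type (assuming the metadata bytes were
+obtained elsewhere, e.g. via the `state_getMetadata` RPC, and that the crate is built with the
+`std` feature so that `Decode` is derived):
+
+```rust
+use codec::Decode; // `codec` is the `parity-scale-codec` crate
+use frame_metadata::{RuntimeMetadata, RuntimeMetadataPrefixed};
+
+/// Decode a SCALE-encoded metadata blob, discarding the magic-number prefix.
+fn decode_metadata(bytes: &[u8]) -> Option<RuntimeMetadata> {
+	let RuntimeMetadataPrefixed(_magic, metadata) = RuntimeMetadataPrefixed::decode(&mut &bytes[..]).ok()?;
+	Some(metadata)
+}
+```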
+ +License: Apache-2.0 \ No newline at end of file diff --git a/frame/multisig/Cargo.toml b/frame/multisig/Cargo.toml index aae3646644d..98db6477e3e 100644 --- a/frame/multisig/Cargo.toml +++ b/frame/multisig/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-multisig" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,18 +14,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0-rc6", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-rc6", path = "../balances" } [features] default = ["std"] diff --git a/frame/multisig/README.md b/frame/multisig/README.md new file mode 100644 index 00000000000..c7694d8cb59 --- /dev/null +++ b/frame/multisig/README.md @@ -0,0 +1,29 @@ +# Multisig Module +A module for doing multisig dispatch. + +- [`multisig::Trait`](./trait.Trait.html) +- [`Call`](./enum.Call.html) + +## Overview + +This module contains functionality for multi-signature dispatch, a (potentially) stateful +operation, allowing multiple signed +origins (accounts) to coordinate and dispatch a call from a well-known origin, derivable +deterministically from the set of account IDs and the threshold number of accounts from the +set that must approve it. In the case that the threshold is just one then this is a stateless +operation. This is useful for multisig wallets where cryptographic threshold signatures are +not available or desired. + +## Interface + +### Dispatchable Functions + +* `as_multi` - Approve and if possible dispatch a call from a composite origin formed from a + number of signed origins. +* `approve_as_multi` - Approve a call from a composite origin. +* `cancel_as_multi` - Cancel a call from a composite origin. 
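+
+As a rough sketch of the deterministic derivation described above (this assumes the pallet's
+public `multi_account_id` helper; the dispatchable shown is illustrative only, not a complete
+runtime example):
+
+```rust
+use frame_support::{decl_module, dispatch};
+use frame_system::ensure_signed;
+use pallet_multisig::{self as multisig};
+
+pub trait Trait: multisig::Trait {}
+
+decl_module! {
+	pub struct Module<T: Trait> for enum Call where origin: T::Origin {
+		#[weight = 0]
+		pub fn multi_address_example(origin) -> dispatch::DispatchResult {
+			let who = ensure_signed(origin)?;
+			// The multisig account is derived purely from the signatories and the threshold,
+			// so it can be computed (and funded) before any approval is stored on-chain.
+			let _multi = <multisig::Module<T>>::multi_account_id(&[who][..], 1);
+			Ok(())
+		}
+	}
+}
+```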
+ +[`Call`]: ./enum.Call.html +[`Trait`]: ./trait.Trait.html + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/nicks/Cargo.toml b/frame/nicks/Cargo.toml index e63ed2c600d..08446986499 100644 --- a/frame/nicks/Cargo.toml +++ b/frame/nicks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-nicks" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,15 +14,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-rc6", path = "../balances" } [features] default = ["std"] diff --git a/frame/nicks/README.md b/frame/nicks/README.md new file mode 100644 index 00000000000..b021357bd77 --- /dev/null +++ b/frame/nicks/README.md @@ -0,0 +1,23 @@ +# Nicks Module + +- [`nicks::Trait`](./trait.Trait.html) +- [`Call`](./enum.Call.html) + +## Overview + +Nicks is a trivial module for keeping track of account names on-chain. It makes no effort to +create a name hierarchy, be a DNS replacement or provide reverse lookups. + +## Interface + +### Dispatchable Functions + +* `set_name` - Set the associated name of an account; a small deposit is reserved if not already + taken. +* `clear_name` - Remove an account's associated name; the deposit is returned. +* `kill_name` - Forcibly remove the associated name; the deposit is lost. 
+ +[`Call`]: ./enum.Call.html +[`Trait`]: ./trait.Trait.html + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/offences/Cargo.toml b/frame/offences/Cargo.toml index db0c847e9a1..1585732a9f5 100644 --- a/frame/offences/Cargo.toml +++ b/frame/offences/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-offences" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,18 +12,18 @@ description = "FRAME offences pallet" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -pallet-balances = { version = "2.0.0-rc5", default-features = false, path = "../balances" } +pallet-balances = { version = "2.0.0-rc6", default-features = false, path = "../balances" } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } [dev-dependencies] -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } +sp-io = { version = "2.0.0-rc6", path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/offences/README.md b/frame/offences/README.md new file mode 100644 index 00000000000..454c7effaf3 --- /dev/null +++ b/frame/offences/README.md @@ -0,0 +1,5 @@ +# Offences Module + +Tracks reported offences + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/offences/benchmarking/Cargo.toml b/frame/offences/benchmarking/Cargo.toml index 12d4882e60b..d5bfe302cb5 100644 --- a/frame/offences/benchmarking/Cargo.toml +++ b/frame/offences/benchmarking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-offences-benchmarking" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,26 +13,26 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../../benchmarking" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../../system" } -pallet-babe = { version = "2.0.0-rc5", default-features = false, path = "../../babe" } -pallet-balances = { version = "2.0.0-rc5", default-features = false, path = "../../balances" } -pallet-grandpa = { version = "2.0.0-rc5", default-features = 
false, path = "../../grandpa" } -pallet-im-online = { version = "2.0.0-rc5", default-features = false, path = "../../im-online" } -pallet-offences = { version = "2.0.0-rc5", default-features = false, features = ["runtime-benchmarks"], path = "../../offences" } -pallet-session = { version = "2.0.0-rc5", default-features = false, path = "../../session" } -pallet-staking = { version = "2.0.0-rc5", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/runtime" } -sp-staking = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/staking" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/std" } +frame-benchmarking = { version = "2.0.0-rc6", default-features = false, path = "../../benchmarking" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../../system" } +pallet-babe = { version = "2.0.0-rc6", default-features = false, path = "../../babe" } +pallet-balances = { version = "2.0.0-rc6", default-features = false, path = "../../balances" } +pallet-grandpa = { version = "2.0.0-rc6", default-features = false, path = "../../grandpa" } +pallet-im-online = { version = "2.0.0-rc6", default-features = false, path = "../../im-online" } +pallet-offences = { version = "2.0.0-rc6", default-features = false, features = ["runtime-benchmarks"], path = "../../offences" } +pallet-session = { version = "2.0.0-rc6", default-features = false, path = "../../session" } +pallet-staking = { version = "2.0.0-rc6", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/runtime" } +sp-staking = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/staking" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/std" } [dev-dependencies] -pallet-staking-reward-curve = { version = "2.0.0-rc5", path = "../../staking/reward-curve" } -pallet-timestamp = { version = "2.0.0-rc5", path = "../../timestamp" } +pallet-staking-reward-curve = { version = "2.0.0-rc6", path = "../../staking/reward-curve" } +pallet-timestamp = { version = "2.0.0-rc6", path = "../../timestamp" } serde = { version = "1.0.101" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-io = { version = "2.0.0-rc5", path = "../../../primitives/io" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } +sp-io = { version = "2.0.0-rc6", path = "../../../primitives/io" } [features] default = ["std"] diff --git a/frame/offences/benchmarking/README.md b/frame/offences/benchmarking/README.md new file mode 100644 index 00000000000..cbfe91d73a6 --- /dev/null +++ b/frame/offences/benchmarking/README.md @@ -0,0 +1,3 @@ +Offences pallet benchmarking. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/frame/proxy/Cargo.toml b/frame/proxy/Cargo.toml index 155a1395420..77c9ae8bba6 100644 --- a/frame/proxy/Cargo.toml +++ b/frame/proxy/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-proxy" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,19 +14,19 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/core" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0-rc6", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } -pallet-utility = { version = "2.0.0-rc5", path = "../utility" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-rc6", path = "../balances" } +pallet-utility = { version = "2.0.0-rc6", path = "../utility" } [features] default = ["std"] diff --git a/frame/proxy/README.md b/frame/proxy/README.md new file mode 100644 index 00000000000..105cf5561ae --- /dev/null +++ b/frame/proxy/README.md @@ -0,0 +1,17 @@ +# Proxy Module +A module allowing accounts to give permission to other accounts to dispatch types of calls from +their signed origin. 
+ +- [`proxy::Trait`](./trait.Trait.html) +- [`Call`](./enum.Call.html) + +## Overview + +## Interface + +### Dispatchable Functions + +[`Call`]: ./enum.Call.html +[`Trait`]: ./trait.Trait.html + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/randomness-collective-flip/Cargo.toml b/frame/randomness-collective-flip/Cargo.toml index 28a16dc6411..0d0c5db0f49 100644 --- a/frame/randomness-collective-flip/Cargo.toml +++ b/frame/randomness-collective-flip/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-randomness-collective-flip" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,14 +14,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] safe-mix = { version = "1.0", default-features = false } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +sp-io = { version = "2.0.0-rc6", path = "../../primitives/io" } [features] default = ["std"] diff --git a/frame/randomness-collective-flip/README.md b/frame/randomness-collective-flip/README.md new file mode 100644 index 00000000000..318f9d0f88b --- /dev/null +++ b/frame/randomness-collective-flip/README.md @@ -0,0 +1,38 @@ +# Randomness Module + +The Randomness Collective Flip module provides a [`random`](./struct.Module.html#method.random) +function that generates low-influence random values based on the block hashes from the previous +`81` blocks. Low-influence randomness can be useful when defending against relatively weak +adversaries. Using this pallet as a randomness source is advisable primarily in low-security +situations like testing. + +## Public Functions + +See the [`Module`](./struct.Module.html) struct for details of publicly available functions. + +## Usage + +### Prerequisites + +Import the Randomness Collective Flip module and derive your module's configuration trait from +the system trait. + +### Example - Get random seed for the current block + +```rust +use frame_support::{decl_module, dispatch, traits::Randomness}; + +pub trait Trait: frame_system::Trait {} + +decl_module! 
{
+	pub struct Module<T: Trait> for enum Call where origin: T::Origin {
+		#[weight = 0]
+		pub fn random_module_example(origin) -> dispatch::DispatchResult {
+			let _random_value = <pallet_randomness_collective_flip::Module<T>>::random(&b"my context"[..]);
+			Ok(())
+		}
+	}
+}
+```
+
License: Apache-2.0 \ No newline at end of file diff --git a/frame/recovery/Cargo.toml b/frame/recovery/Cargo.toml index 6302a817171..dfacac42fb4 100644 --- a/frame/recovery/Cargo.toml +++ b/frame/recovery/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-recovery" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies <admin@parity.io>"] edition = "2018" license = "Apache-2.0" @@ -15,15 +15,15 @@ targets = ["x86_64-unknown-linux-gnu"]
serde = { version = "1.0.101", optional = true }
codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] }
enumflags2 = { version = "0.6.2" }
-sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" }
-sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" }
-sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" }
-frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" }
-frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" }
+sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" }
+sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" }
+sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" }
+frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" }
+frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" }

[dev-dependencies]
-sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" }
-pallet-balances = { version = "2.0.0-rc5", path = "../balances" }
+sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" }
+pallet-balances = { version = "2.0.0-rc6", path = "../balances" }

[features]
default = ["std"]
diff --git a/frame/recovery/README.md b/frame/recovery/README.md
new file mode 100644
index 00000000000..30631da1d9a
--- /dev/null
+++ b/frame/recovery/README.md
@@ -0,0 +1,134 @@
+# Recovery Pallet
+
+- [`recovery::Trait`](./trait.Trait.html)
+- [`Call`](./enum.Call.html)
+
+## Overview
+
+The Recovery pallet is an M-of-N social recovery tool for users to gain
+access to their accounts if the private key or other authentication mechanism
+is lost. Through this pallet, a user is able to make calls on behalf of another
+account which they have recovered. The recovery process is protected by trusted
+"friends" whom the original account owner chooses. A threshold (M) out of N
+friends is needed to give another account access to the recoverable account.
+
+### Recovery Configuration
+
+The recovery process for each recoverable account can be configured by the account owner.
+They are able to choose:
+* `friends` - The list of friends that the account owner trusts to protect the
+  recovery process for their account.
+* `threshold` - The number of friends that need to approve a recovery process for
+  the account to be successfully recovered.
+* `delay_period` - The minimum number of blocks after the beginning of the recovery
+  process that need to pass before the account can be successfully recovered.
+
+There is a configurable deposit that all users need to pay to create a recovery
+configuration.
This deposit is composed of a base deposit plus a multiplier for +the number of friends chosen. This deposit is returned in full when the account +owner removes their recovery configuration. + +### Recovery Life Cycle + +The intended life cycle of a successful recovery takes the following steps: +1. The account owner calls `create_recovery` to set up a recovery configuration + for their account. +2. At some later time, the account owner loses access to their account and wants + to recover it. Likely, they will need to create a new account and fund it with + enough balance to support the transaction fees and the deposit for the + recovery process. +3. Using this new account, they call `initiate_recovery`. +4. Then the account owner would contact their configured friends to vouch for + the recovery attempt. The account owner would provide their old account id + and the new account id, and friends would call `vouch_recovery` with those + parameters. +5. Once a threshold number of friends have vouched for the recovery attempt, + the account owner needs to wait until the delay period has passed, starting + when they initiated the recovery process. +6. Now the account owner is able to call `claim_recovery`, which subsequently + allows them to call `as_recovered` and directly make calls on-behalf-of the lost + account. +7. Using the now recovered account, the account owner can call `close_recovery` + on the recovery process they opened, reclaiming the recovery deposit they + placed. +8. Then the account owner should then call `remove_recovery` to remove the recovery + configuration on the recovered account and reclaim the recovery configuration + deposit they placed. +9. Using `as_recovered`, the account owner is able to call any other pallets + to clean up their state and reclaim any reserved or locked funds. They + can then transfer all funds from the recovered account to the new account. +10. When the recovered account becomes reaped (i.e. its free and reserved + balance drops to zero), the final recovery link is removed. + +### Malicious Recovery Attempts + +Initializing a the recovery process for a recoverable account is open and +permissionless. However, the recovery deposit is an economic deterrent that +should disincentivize would-be attackers from trying to maliciously recover +accounts. + +The recovery deposit can always be claimed by the account which is trying to +to be recovered. In the case of a malicious recovery attempt, the account +owner who still has access to their account can claim the deposit and +essentially punish the malicious user. + +Furthermore, the malicious recovery attempt can only be successful if the +attacker is also able to get enough friends to vouch for the recovery attempt. +In the case where the account owner prevents a malicious recovery process, +this pallet makes it near-zero cost to re-configure the recovery settings and +remove/replace friends who are acting inappropriately. + +### Safety Considerations + +It is important to note that this is a powerful pallet that can compromise the +security of an account if used incorrectly. Some recommended practices for users +of this pallet are: + +* Configure a significant `delay_period` for your recovery process: As long as you + have access to your recoverable account, you need only check the blockchain once + every `delay_period` blocks to ensure that no recovery attempt is successful + against your account. 
Using off-chain notification systems can help with this, + but ultimately, setting a large `delay_period` means that even the most skilled + attacker will need to wait this long before they can access your account. +* Use a high threshold of approvals: Setting a value of 1 for the threshold means + that any of your friends would be able to recover your account. They would + simply need to start a recovery process and approve their own process. Similarly, + a threshold of 2 would mean that any 2 friends could work together to gain + access to your account. The only way to prevent against these kinds of attacks + is to choose a high threshold of approvals and select from a diverse friend + group that would not be able to reasonably coordinate with one another. +* Reset your configuration over time: Since the entire deposit of creating a + recovery configuration is returned to the user, the only cost of updating + your recovery configuration is the transaction fees for the calls. Thus, + it is strongly encouraged to regularly update your recovery configuration + as your life changes and your relationship with new and existing friends + change as well. + +## Interface + +### Dispatchable Functions + +#### For General Users + +* `create_recovery` - Create a recovery configuration for your account and make it recoverable. +* `initiate_recovery` - Start the recovery process for a recoverable account. + +#### For Friends of a Recoverable Account +* `vouch_recovery` - As a `friend` of a recoverable account, vouch for a recovery attempt on the account. + +#### For a User Who Successfully Recovered an Account + +* `claim_recovery` - Claim access to the account that you have successfully completed the recovery process for. +* `as_recovered` - Send a transaction as an account that you have recovered. See other functions below. + +#### For the Recoverable Account + +* `close_recovery` - Close an active recovery process for your account and reclaim the recovery deposit. +* `remove_recovery` - Remove the recovery configuration from the account, making it un-recoverable. + +#### For Super Users + +* `set_recovered` - The ROOT origin is able to skip the recovery process and directly allow + one account to access another. 
+
+License: Apache-2.0 \ No newline at end of file diff --git a/frame/scheduler/Cargo.toml b/frame/scheduler/Cargo.toml index 14a310ebe50..ea759b15f9e 100644 --- a/frame/scheduler/Cargo.toml +++ b/frame/scheduler/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-scheduler" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies <admin@parity.io>"] edition = "2018" license = "Unlicense" @@ -11,17 +11,17 @@ description = "FRAME example pallet"
[dependencies]
serde = { version = "1.0.101", optional = true }
codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false }
-frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" }
-frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" }
-sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" }
-sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" }
-sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" }
+frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" }
+frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" }
+sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" }
+sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" }
+sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" }

-frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true }
+frame-benchmarking = { version = "2.0.0-rc6", default-features = false, path = "../benchmarking", optional = true }

[dev-dependencies]
-sp-core = { version = "2.0.0-rc5", path = "../../primitives/core", default-features = false }
-substrate-test-utils = { version = "2.0.0-rc5", path = "../../test-utils" }
+sp-core = { version = "2.0.0-rc6", path = "../../primitives/core", default-features = false }
+substrate-test-utils = { version = "2.0.0-rc6", path = "../../test-utils" }

[features]
default = ["std"]
diff --git a/frame/scheduler/README.md b/frame/scheduler/README.md
new file mode 100644
index 00000000000..f51d02a1d7b
--- /dev/null
+++ b/frame/scheduler/README.md
@@ -0,0 +1,34 @@
+# Scheduler
+A module for scheduling dispatches.
+
+- [`scheduler::Trait`](./trait.Trait.html)
+- [`Call`](./enum.Call.html)
+- [`Module`](./struct.Module.html)
+
+## Overview
+
+This module exposes capabilities for scheduling dispatches to occur at a
+specified block number or at a specified period. These scheduled dispatches
+may be named or anonymous and may be canceled.
+
+**NOTE:** Scheduled calls are dispatched with the default filter for their
+origin: namely `frame_system::Trait::BaseCallFilter` for all origins except
+root, which gets no filter. The filter contained in the origin that was used
+to call `fn schedule` is not applied.
+
+If a call is scheduled using a proxy or any other mechanism that adds a call
+filter, that filter will not be used when dispatching the scheduled call.
+
+## Interface
+
+### Dispatchable Functions
+
+* `schedule` - Schedule a dispatch, which may be periodic, to occur at a
+  specified block and with a specified priority.
+* `cancel` - Cancel a scheduled dispatch, specified by block number and
+  index.
+* `schedule_named` - Augments the `schedule` interface with an additional
+  `Vec<u8>` parameter that can be used for identification.
+* `cancel_named` - the named complement to the cancel function. + +License: Unlicense \ No newline at end of file diff --git a/frame/scored-pool/Cargo.toml b/frame/scored-pool/Cargo.toml index 1c25b8abfdf..cffb408422d 100644 --- a/frame/scored-pool/Cargo.toml +++ b/frame/scored-pool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-scored-pool" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,15 +14,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } [dev-dependencies] -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-rc6", path = "../balances" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/scored-pool/README.md b/frame/scored-pool/README.md new file mode 100644 index 00000000000..1cdbff72ef2 --- /dev/null +++ b/frame/scored-pool/README.md @@ -0,0 +1,66 @@ +# Scored Pool Module + +The module maintains a scored membership pool. Each entity in the +pool can be attributed a `Score`. From this pool a set `Members` +is constructed. This set contains the `MemberCount` highest +scoring entities. Unscored entities are never part of `Members`. + +If an entity wants to be part of the pool a deposit is required. +The deposit is returned when the entity withdraws or when it +is removed by an entity with the appropriate authority. + +Every `Period` blocks the set of `Members` is refreshed from the +highest scoring members in the pool and, no matter if changes +occurred, `T::MembershipChanged::set_members_sorted` is invoked. +On first load `T::MembershipInitialized::initialize_members` is +invoked with the initial `Members` set. + +It is possible to withdraw candidacy/resign your membership at any +time. If an entity is currently a member, this results in removal +from the `Pool` and `Members`; the entity is immediately replaced +by the next highest scoring candidate in the pool, if available. + +- [`scored_pool::Trait`](./trait.Trait.html) +- [`Call`](./enum.Call.html) +- [`Module`](./struct.Module.html) + +## Interface + +### Public Functions + +- `submit_candidacy` - Submit candidacy to become a member. Requires a deposit. +- `withdraw_candidacy` - Withdraw candidacy. Deposit is returned. +- `score` - Attribute a quantitative score to an entity. 
+- `kick` - Remove an entity from the pool and members. Deposit is returned.
+- `change_member_count` - Changes the number of candidates taken into `Members`.
+
+## Usage
+
+```rust
+use frame_support::{decl_module, dispatch};
+use frame_system::ensure_signed;
+use pallet_scored_pool::{self as scored_pool};
+
+pub trait Trait: scored_pool::Trait {}
+
+decl_module! {
+	pub struct Module<T: Trait> for enum Call where origin: T::Origin {
+		#[weight = 0]
+		pub fn candidate(origin) -> dispatch::DispatchResult {
+			let who = ensure_signed(origin)?;
+
+			let _ = <scored_pool::Module<T>>::submit_candidacy(
+				T::Origin::from(Some(who.clone()).into())
+			);
+			Ok(())
+		}
+	}
+}
+
+```
+
+## Dependencies
+
+This module depends on the [System module](../frame_system/index.html).
+
+License: Apache-2.0 \ No newline at end of file diff --git a/frame/session/Cargo.toml b/frame/session/Cargo.toml index b4150fb8e78..81e2fc191f5 100644 --- a/frame/session/Cargo.toml +++ b/frame/session/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-session" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies <admin@parity.io>"] edition = "2018" license = "Apache-2.0" @@ -14,20 +14,20 @@ targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
serde = { version = "1.0.101", optional = true }
codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] }
-sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" }
-sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" }
-sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" }
-sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" }
-sp-session = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/session" }
-sp-staking = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/staking" }
-frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" }
-frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" }
-pallet-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../timestamp" }
-sp-trie = { version = "2.0.0-rc5", optional = true, default-features = false, path = "../../primitives/trie" }
+sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/core" }
+sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" }
+sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" }
+sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" }
+sp-session = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/session" }
+sp-staking = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/staking" }
+frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" }
+frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" }
+pallet-timestamp = { version = "2.0.0-rc6", default-features = false, path = "../timestamp" }
+sp-trie = { version = "2.0.0-rc6", optional = true, default-features = false, path = "../../primitives/trie" }
impl-trait-for-tuples = "0.1.3"

[dev-dependencies]
-sp-application-crypto = { version = "2.0.0-rc5", path = "../../primitives/application-crypto" }
+sp-application-crypto = { version = "2.0.0-rc6", path = "../../primitives/application-crypto" }
lazy_static = 
"1.4.0" [features] diff --git a/frame/session/README.md b/frame/session/README.md new file mode 100644 index 00000000000..387f4479826 --- /dev/null +++ b/frame/session/README.md @@ -0,0 +1,83 @@ +# Session Module + +The Session module allows validators to manage their session keys, provides a function for changing +the session length, and handles session rotation. + +- [`session::Trait`](./trait.Trait.html) +- [`Call`](./enum.Call.html) +- [`Module`](./struct.Module.html) + +## Overview + +### Terminology + + +- **Session:** A session is a period of time that has a constant set of validators. Validators can only join +or exit the validator set at a session change. It is measured in block numbers. The block where a session is +ended is determined by the `ShouldEndSession` trait. When the session is ending, a new validator set +can be chosen by `OnSessionEnding` implementations. +- **Session key:** A session key is actually several keys kept together that provide the various signing +functions required by network authorities/validators in pursuit of their duties. +- **Validator ID:** Every account has an associated validator ID. For some simple staking systems, this +may just be the same as the account ID. For staking systems using a stash/controller model, +the validator ID would be the stash account ID of the controller. +- **Session key configuration process:** Session keys are set using `set_keys` for use not in +the next session, but the session after next. They are stored in `NextKeys`, a mapping between +the caller's `ValidatorId` and the session keys provided. `set_keys` allows users to set their +session key prior to being selected as validator. +It is a public call since it uses `ensure_signed`, which checks that the origin is a signed account. +As such, the account ID of the origin stored in `NextKeys` may not necessarily be associated with +a block author or a validator. The session keys of accounts are removed once their account balance is zero. +- **Session length:** This pallet does not assume anything about the length of each session. +Rather, it relies on an implementation of `ShouldEndSession` to dictate a new session's start. +This pallet provides the `PeriodicSessions` struct for simple periodic sessions. +- **Session rotation configuration:** Configure as either a 'normal' (rewardable session where rewards are +applied) or 'exceptional' (slashable) session rotation. +- **Session rotation process:** At the beginning of each block, the `on_initialize` function +queries the provided implementation of `ShouldEndSession`. If the session is to end the newly +activated validator IDs and session keys are taken from storage and passed to the +`SessionHandler`. The validator set supplied by `SessionManager::new_session` and the corresponding session +keys, which may have been registered via `set_keys` during the previous session, are written +to storage where they will wait one session before being passed to the `SessionHandler` +themselves. + +### Goals + +The Session pallet is designed to make the following possible: + +- Set session keys of the validator set for upcoming sessions. +- Control the length of sessions. +- Configure and switch between either normal or exceptional session rotations. + +## Interface + +### Dispatchable Functions + +- `set_keys` - Set a validator's session keys for upcoming sessions. + +### Public Functions + +- `rotate_session` - Change to the next session. Register the new authority set. Queue changes +for next session rotation. 
+- `disable_index` - Disable a validator by index.
+- `disable` - Disable a validator by Validator ID.
+
+## Usage
+
+### Example from FRAME
+
+The [Staking pallet](../pallet_staking/index.html) uses the Session pallet to get the validator set.
+
+```rust
+use pallet_session as session;
+
+fn validators<T: session::Trait>() -> Vec<<T as session::Trait>::ValidatorId> {
+	<session::Module<T>>::validators()
+}
+```
+
+## Related Modules
+
+- [Staking](../pallet_staking/index.html)
+
+License: Apache-2.0 \ No newline at end of file diff --git a/frame/session/benchmarking/Cargo.toml b/frame/session/benchmarking/Cargo.toml index e784ff16e85..c5e94aa61f0 100644 --- a/frame/session/benchmarking/Cargo.toml +++ b/frame/session/benchmarking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-session-benchmarking" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies <admin@parity.io>"] edition = "2018" license = "Apache-2.0" @@ -12,24 +12,24 @@ description = "FRAME sessions pallet benchmarking" targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
-sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/std" }
-sp-session = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/session" }
-sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/runtime" }
-frame-system = { version = "2.0.0-rc5", default-features = false, path = "../../system" }
-frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../../benchmarking" }
-frame-support = { version = "2.0.0-rc5", default-features = false, path = "../../support" }
-pallet-staking = { version = "2.0.0-rc5", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" }
-pallet-session = { version = "2.0.0-rc5", default-features = false, path = "../../session" }
+sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/std" }
+sp-session = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/session" }
+sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/runtime" }
+frame-system = { version = "2.0.0-rc6", default-features = false, path = "../../system" }
+frame-benchmarking = { version = "2.0.0-rc6", default-features = false, path = "../../benchmarking" }
+frame-support = { version = "2.0.0-rc6", default-features = false, path = "../../support" }
+pallet-staking = { version = "2.0.0-rc6", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" }
+pallet-session = { version = "2.0.0-rc6", default-features = false, path = "../../session" }
rand = { version = "0.7.2", default-features = false }

[dev-dependencies]
serde = { version = "1.0.101" }
codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] }
-sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" }
-pallet-staking-reward-curve = { version = "2.0.0-rc5", path = "../../staking/reward-curve" }
-sp-io ={ version = "2.0.0-rc5", path = "../../../primitives/io" }
-pallet-timestamp = { version = "2.0.0-rc5", path = "../../timestamp" }
-pallet-balances = { version = "2.0.0-rc5", path = "../../balances" }
+sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" }
+pallet-staking-reward-curve = { version = "2.0.0-rc6", path = "../../staking/reward-curve" }
+sp-io ={ version = "2.0.0-rc6", path = "../../../primitives/io" }
+pallet-timestamp = { version = "2.0.0-rc6", path = "../../timestamp" }
+pallet-balances = { version = "2.0.0-rc6", path = 
"../../balances" } [features] default = ["std"] diff --git a/frame/session/benchmarking/README.md b/frame/session/benchmarking/README.md new file mode 100644 index 00000000000..d034a9ec732 --- /dev/null +++ b/frame/session/benchmarking/README.md @@ -0,0 +1,3 @@ +Benchmarks for the Session Pallet. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/society/Cargo.toml b/frame/society/Cargo.toml index 229191c3ccb..2fd44446cc8 100644 --- a/frame/society/Cargo.toml +++ b/frame/society/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-society" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,16 +14,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } rand_chacha = { version = "0.2", default-features = false } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-io ={ version = "2.0.0-rc5", path = "../../primitives/io" } -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +sp-io ={ version = "2.0.0-rc6", path = "../../primitives/io" } +pallet-balances = { version = "2.0.0-rc6", path = "../balances" } [features] default = ["std"] diff --git a/frame/society/README.md b/frame/society/README.md new file mode 100644 index 00000000000..d73397cc99c --- /dev/null +++ b/frame/society/README.md @@ -0,0 +1,228 @@ +# Society Module + +- [`society::Trait`](./trait.Trait.html) +- [`Call`](./enum.Call.html) + +## Overview + +The Society module is an economic game which incentivizes users to participate +and maintain a membership society. + +### User Types + +At any point, a user in the society can be one of a: +* Bidder - A user who has submitted intention of joining the society. +* Candidate - A user who will be voted on to join the society. +* Suspended Candidate - A user who failed to win a vote. +* Member - A user who is a member of the society. +* Suspended Member - A member of the society who has accumulated too many strikes +or failed their membership challenge. + +Of the non-suspended members, there is always a: +* Head - A member who is exempt from suspension. +* Defender - A member whose membership is under question and voted on again. + +Of the non-suspended members of the society, a random set of them are chosen as +"skeptics". The mechanics of skeptics is explained in the +[member phase](#member-phase) below. + +### Mechanics + +#### Rewards + +Members are incentivized to participate in the society through rewards paid +by the Society treasury. 
These payments have a maturity period that the user +must wait before they are able to access the funds. + +#### Punishments + +Members can be punished by slashing the reward payouts that have not been +collected. Additionally, members can accumulate "strikes", and when they +reach a max strike limit, they become suspended. + +#### Skeptics + +During the voting period, a random set of members are selected as "skeptics". +These skeptics are expected to vote on the current candidates. If they do not vote, +their skeptic status is treated as a rejection vote, the member is deemed +"lazy", and are given a strike per missing vote. + +#### Membership Challenges + +Every challenge rotation period, an existing member will be randomly selected +to defend their membership into society. Then, other members can vote whether +this defender should stay in society. A simple majority wins vote will determine +the outcome of the user. Ties are treated as a failure of the challenge, but +assuming no one else votes, the defender always get a free vote on their +own challenge keeping them in the society. The Head member is exempt from the +negative outcome of a membership challenge. + +#### Society Treasury + +The membership society is independently funded by a treasury managed by this +module. Some subset of this treasury is placed in a Society Pot, which is used +to determine the number of accepted bids. + +#### Rate of Growth + +The membership society can grow at a rate of 10 accepted candidates per rotation period up +to the max membership threshold. Once this threshold is met, candidate selections +are stalled until there is space for new members to join. This can be resolved by +voting out existing members through the random challenges or by using governance +to increase the maximum membership count. + +### User Life Cycle + +A user can go through the following phases: + +```rust + +-------> User <----------+ + | + | + | | | ++----------------------------------------------+ +| | | | | +| | v | | +| | Bidder <-----------+ | +| | + | | +| | | + | +| | v Suspended | +| | Candidate +----> Candidate | +| | + + | +| | | | | +| + | | | +| Suspended +------>| | | +| Member | | | +| ^ | | | +| | v | | +| +-------+ Member <----------+ | +| | +| | ++------------------Society---------------------+ +``` + +#### Initialization + +The society is initialized with a single member who is automatically chosen as the Head. + +#### Bid Phase + +New users must have a bid to join the society. + +A user can make a bid by reserving a deposit. Alternatively, an already existing member +can create a bid on a user's behalf by "vouching" for them. + +A bid includes reward information that the user would like to receive for joining +the society. A vouching bid can additionally request some portion of that reward as a tip +to the voucher for vouching for the prospective candidate. + +Every rotation period, Bids are ordered by reward amount, and the module +selects as many bids the Society Pot can support for that period. + +These selected bids become candidates and move on to the Candidate phase. +Bids that were not selected stay in the bidder pool until they are selected or +a user chooses to "unbid". + +#### Candidate Phase + +Once a bidder becomes a candidate, members vote whether to approve or reject +that candidate into society. This voting process also happens during a rotation period. + +The approval and rejection criteria for candidates are not set on chain, +and may change for different societies. 
+ +At the end of the rotation period, we collect the votes for a candidate +and randomly select a vote as the final outcome. + +```rust + [ a-accept, r-reject, s-skeptic ] ++----------------------------------+ +| | +| Member |0|1|2|3|4|5|6|7|8|9| | +| ----------------------------- | +| Vote |a|a|a|r|s|r|a|a|s|a| | +| ----------------------------- | +| Selected | | | |x| | | | | | | | +| | ++----------------------------------+ + +Result: Rejected +``` + +Each member that voted opposite to this randomly selected vote is punished by +slashing their unclaimed payouts and increasing the number of strikes they have. + +These slashed funds are given to a random user who voted the same as the +selected vote as a reward for participating in the vote. + +If the candidate wins the vote, they receive their bid reward as a future payout. +If the bid was placed by a voucher, they will receive their portion of the reward, +before the rest is paid to the winning candidate. + +One winning candidate is selected as the Head of the members. This is randomly +chosen, weighted by the number of approvals the winning candidates accumulated. + +If the candidate loses the vote, they are suspended and it is up to the Suspension +Judgement origin to determine if the candidate should go through the bidding process +again, should be accepted into the membership society, or rejected and their deposit +slashed. + +#### Member Phase + +Once a candidate becomes a member, their role is to participate in society. + +Regular participation involves voting on candidates who want to join the membership +society, and by voting in the right way, a member will accumulate future payouts. +When a payout matures, members are able to claim those payouts. + +Members can also vouch for users to join the society, and request a "tip" from +the fees the new member would collect by joining the society. This vouching +process is useful in situations where a user may not have enough balance to +satisfy the bid deposit. A member can only vouch one user at a time. + +During rotation periods, a random group of members are selected as "skeptics". +These skeptics are expected to vote on the current candidates. If they do not vote, +their skeptic status is treated as a rejection vote, the member is deemed +"lazy", and are given a strike per missing vote. + +There is a challenge period in parallel to the rotation period. During a challenge period, +a random member is selected to defend their membership to the society. Other members +make a traditional majority-wins vote to determine if the member should stay in the society. +Ties are treated as a failure of the challenge. + +If a member accumulates too many strikes or fails their membership challenge, +they will become suspended. While a member is suspended, they are unable to +claim matured payouts. It is up to the Suspension Judgement origin to determine +if the member should re-enter society or be removed from society with all their +future payouts slashed. + +## Interface + +### Dispatchable Functions + +#### For General Users + +* `bid` - A user can make a bid to join the membership society by reserving a deposit. +* `unbid` - A user can withdraw their bid for entry, the deposit is returned. + +#### For Members + +* `vouch` - A member can place a bid on behalf of a user to join the membership society. +* `unvouch` - A member can revoke their vouch for a user. +* `vote` - A member can vote to approve or reject a candidate's request to join the society. 
+* `defender_vote` - A member can vote to approve or reject a defender's continued membership +to the society. +* `payout` - A member can claim their first matured payment. +* `unfound` - Allow the founder to unfound the society when they are the only member. + +#### For Super Users + +* `found` - The founder origin can initiate this society. Useful for bootstrapping the Society +pallet on an already running chain. +* `judge_suspended_member` - The suspension judgement origin is able to make +judgement on a suspended member. +* `judge_suspended_candidate` - The suspension judgement origin is able to +make judgement on a suspended candidate. +* `set_max_membership` - The ROOT origin can update the maximum member count for the society. +The max membership count must be greater than 1. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml index f0bc0c0ac7c..2d1487afb03 100644 --- a/frame/staking/Cargo.toml +++ b/frame/staking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-staking" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,29 +15,29 @@ targets = ["x86_64-unknown-linux-gnu"] static_assertions = "1.1.0" serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-npos-elections = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/npos-elections" } -sp-io ={ version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -pallet-session = { version = "2.0.0-rc5", default-features = false, features = ["historical"], path = "../session" } -pallet-authorship = { version = "2.0.0-rc5", default-features = false, path = "../authorship" } -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/application-crypto" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-npos-elections = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/npos-elections" } +sp-io ={ version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } +pallet-session = { version = "2.0.0-rc6", default-features = false, features = ["historical"], path = "../session" } +pallet-authorship = { version = "2.0.0-rc6", default-features = false, path = "../authorship" } +sp-application-crypto = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/application-crypto" } # Optional imports for benchmarking -frame-benchmarking = { version = "2.0.0-rc5", 
default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0-rc6", default-features = false, path = "../benchmarking", optional = true } rand_chacha = { version = "0.2", default-features = false, optional = true } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-storage = { version = "2.0.0-rc5", path = "../../primitives/storage" } -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } -pallet-timestamp = { version = "2.0.0-rc5", path = "../timestamp" } -pallet-staking-reward-curve = { version = "2.0.0-rc5", path = "../staking/reward-curve" } -substrate-test-utils = { version = "2.0.0-rc5", path = "../../test-utils" } -frame-benchmarking = { version = "2.0.0-rc5", path = "../benchmarking" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +sp-storage = { version = "2.0.0-rc6", path = "../../primitives/storage" } +pallet-balances = { version = "2.0.0-rc6", path = "../balances" } +pallet-timestamp = { version = "2.0.0-rc6", path = "../timestamp" } +pallet-staking-reward-curve = { version = "2.0.0-rc6", path = "../staking/reward-curve" } +substrate-test-utils = { version = "2.0.0-rc6", path = "../../test-utils" } +frame-benchmarking = { version = "2.0.0-rc6", path = "../benchmarking" } rand_chacha = { version = "0.2" } parking_lot = "0.10.2" env_logger = "0.7.1" diff --git a/frame/staking/README.md b/frame/staking/README.md new file mode 100644 index 00000000000..02db98ab7f0 --- /dev/null +++ b/frame/staking/README.md @@ -0,0 +1,249 @@ +# Staking Module + +The Staking module is used to manage funds at stake by network maintainers. + +- [`staking::Trait`](./trait.Trait.html) +- [`Call`](./enum.Call.html) +- [`Module`](./struct.Module.html) + +## Overview + +The Staking module is the means by which a set of network maintainers (known as _authorities_ in +some contexts and _validators_ in others) are chosen based upon those who voluntarily place +funds under deposit. Under deposit, those funds are rewarded under normal operation but are held +at pain of _slash_ (expropriation) should the staked maintainer be found not to be discharging +its duties properly. + +### Terminology + + +- Staking: The process of locking up funds for some time, placing them at risk of slashing + (loss) in order to become a rewarded maintainer of the network. +- Validating: The process of running a node to actively maintain the network, either by + producing blocks or guaranteeing finality of the chain. +- Nominating: The process of placing staked funds behind one or more validators in order to + share in any reward, and punishment, they take. +- Stash account: The account holding an owner's funds used for staking. +- Controller account: The account that controls an owner's funds for staking. +- Era: A (whole) number of sessions, which is the period that the validator set (and each + validator's active nominator set) is recalculated and where rewards are paid out. +- Slash: The punishment of a staker by reducing its funds. + +### Goals + + +The staking system in Substrate NPoS is designed to make the following possible: + +- Stake funds that are controlled by a cold wallet. +- Withdraw some, or deposit more, funds without interrupting the role of an entity. +- Switch between roles (nominator, validator, idle) with minimal overhead. + +### Scenarios + +#### Staking + +Almost any interaction with the Staking module requires a process of _**bonding**_ (also known +as being a _staker_). 
To become *bonded*, a fund-holding account known as the _stash account_, +which holds some or all of the funds that become frozen in place as part of the staking process, +is paired with an active **controller** account, which issues instructions on how they shall be +used. + +An account pair can become bonded using the [`bond`](./enum.Call.html#variant.bond) call. + +Stash accounts can change their associated controller using the +[`set_controller`](./enum.Call.html#variant.set_controller) call. + +There are three possible roles that any staked account pair can be in: `Validator`, `Nominator` +and `Idle` (defined in [`StakerStatus`](./enum.StakerStatus.html)). There are three +corresponding instructions to change between roles, namely: +[`validate`](./enum.Call.html#variant.validate), +[`nominate`](./enum.Call.html#variant.nominate), and [`chill`](./enum.Call.html#variant.chill). + +#### Validating + +A **validator** takes the role of either validating blocks or ensuring their finality, +maintaining the veracity of the network. A validator should avoid both any sort of malicious +misbehavior and going offline. Bonded accounts that state interest in being a validator do NOT +get immediately chosen as a validator. Instead, they are declared as a _candidate_ and they +_might_ get elected at the _next era_ as a validator. The result of the election is determined +by nominators and their votes. + +An account can become a validator candidate via the +[`validate`](./enum.Call.html#variant.validate) call. + +#### Nomination + +A **nominator** does not take any _direct_ role in maintaining the network, instead, it votes on +a set of validators to be elected. Once interest in nomination is stated by an account, it +takes effect at the next election round. The funds in the nominator's stash account indicate the +_weight_ of its vote. Both the rewards and any punishment that a validator earns are shared +between the validator and its nominators. This rule incentivizes the nominators to NOT vote for +the misbehaving/offline validators as much as possible, simply because the nominators will also +lose funds if they vote poorly. + +An account can become a nominator via the [`nominate`](enum.Call.html#variant.nominate) call. + +#### Rewards and Slash + +The **reward and slashing** procedure is the core of the Staking module, attempting to _embrace +valid behavior_ while _punishing any misbehavior or lack of availability_. + +Rewards must be claimed for each era before it gets too old by `$HISTORY_DEPTH` using the +`payout_stakers` call. Any account can call `payout_stakers`, which pays the reward to the +validator as well as its nominators. Only the [`Trait::MaxNominatorRewardedPerValidator`] +biggest stakers can claim their reward. This is to limit the i/o cost to mutate storage for each +nominator's account. + +Slashing can occur at any point in time, once misbehavior is reported. Once slashing is +determined, a value is deducted from the balance of the validator and all the nominators who +voted for this validator (values are deducted from the _stash_ account of the slashed entity). + +Slashing logic is further described in the documentation of the `slashing` module. + +Similar to slashing, rewards are also shared among a validator and its associated nominators. +Yet, the reward funds are not always transferred to the stash account and can be configured. See +[Reward Calculation](#reward-calculation) for more details. 
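As a small illustration of the claiming window described above, the following sketch checks whether an era is still recent enough for `payout_stakers` to work. It assumes the pallet's public `current_era` and `history_depth` storage getters; the wrapping module and function are invented for the example:

```rust
use frame_support::{decl_module, dispatch, ensure};
use frame_system::ensure_signed;
use pallet_staking::{self as staking, EraIndex};

pub trait Trait: staking::Trait {}

decl_module! {
    pub struct Module<T: Trait> for enum Call where origin: T::Origin {
        /// Fail early if `era` is already too old for its rewards to be claimed.
        #[weight = 0]
        pub fn ensure_claimable(origin, era: EraIndex) -> dispatch::DispatchResult {
            let _caller = ensure_signed(origin)?;
            let current = <staking::Module<T>>::current_era().unwrap_or(0);
            let history_depth = <staking::Module<T>>::history_depth();
            // Rewards for eras older than `history_depth` have been pruned and can
            // no longer be paid out via `payout_stakers`.
            ensure!(era.saturating_add(history_depth) >= current, "era is too old to claim");
            Ok(())
        }
    }
}
```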
+ +#### Chilling + +Finally, any of the roles above can choose to step back temporarily and just chill for a while. +This means that if they are a nominator, they will not be considered as voters anymore and if +they are validators, they will no longer be a candidate for the next election. + +An account can step back via the [`chill`](enum.Call.html#variant.chill) call. + +### Session managing + +The module implements the trait `SessionManager`, which is the only API to query the new validator +set and to allow the validator set to be rewarded once their era has ended. + +## Interface + +### Dispatchable Functions + +The dispatchable functions of the Staking module enable the steps needed for entities to accept +and change their role, alongside some helper functions to get/set the metadata of the module. + +### Public Functions + +The Staking module contains many public storage items and (im)mutable functions. + +## Usage + +### Example: Rewarding a validator by id. + +```rust +use frame_support::{decl_module, dispatch}; +use frame_system::ensure_signed; +use pallet_staking::{self as staking}; + +pub trait Trait: staking::Trait {} + +decl_module! { + pub struct Module<T: Trait> for enum Call where origin: T::Origin { + /// Reward a validator. + #[weight = 0] + pub fn reward_myself(origin) -> dispatch::DispatchResult { + let reported = ensure_signed(origin)?; + <staking::Module<T>>::reward_by_ids(vec![(reported, 10)]); + Ok(()) + } + } +} +``` + +## Implementation Details + +### Era payout + +The era payout is computed using the yearly inflation curve defined at +[`T::RewardCurve`](./trait.Trait.html#associatedtype.RewardCurve) as such: + +```nocompile +staker_payout = yearly_inflation(npos_token_staked / total_tokens) * total_tokens / era_per_year +``` +This payout is used to reward stakers as defined in the next section. + +```nocompile +remaining_payout = max_yearly_inflation * total_tokens / era_per_year - staker_payout +``` +The remaining reward is sent to the configurable end-point +[`T::RewardRemainder`](./trait.Trait.html#associatedtype.RewardRemainder). + +### Reward Calculation + +Validators and nominators are rewarded at the end of each era. The total reward of an era is +calculated using the era duration and the staking rate (the total amount of tokens staked by +nominators and validators, divided by the total token supply). It aims to incentivize toward a +defined staking rate. The full specification can be found +[here](https://research.web3.foundation/en/latest/polkadot/Token%20Economics.html#inflation-model). + +Total reward is split among validators and their nominators depending on the number of points +they received during the era. Points are added to a validator using +[`reward_by_ids`](./enum.Call.html#variant.reward_by_ids) or +[`reward_by_indices`](./enum.Call.html#variant.reward_by_indices). + +[`Module`](./struct.Module.html) implements +[`pallet_authorship::EventHandler`](../pallet_authorship/trait.EventHandler.html) to add reward +points to the block producer and the producers of referenced uncles. + +The validator and its nominators split their reward as follows: + +The validator can declare an amount, named +[`commission`](./struct.ValidatorPrefs.html#structfield.commission), that does not get shared +with the nominators at each reward payout through its +[`ValidatorPrefs`](./struct.ValidatorPrefs.html). This value gets deducted from the total reward +that is paid to the validator and its nominators.
The remaining portion is split among the +validator and all of the nominators that nominated the validator, proportional to the value +staked behind this validator (_i.e._ dividing the +[`own`](./struct.Exposure.html#structfield.own) or +[`others`](./struct.Exposure.html#structfield.others) by +[`total`](./struct.Exposure.html#structfield.total) in [`Exposure`](./struct.Exposure.html)). + +All entities who receive a reward have the option to choose their reward destination through the +[`Payee`](./struct.Payee.html) storage item (see +[`set_payee`](enum.Call.html#variant.set_payee)), to be one of the following: + +- Controller account, (obviously) not increasing the staked value. +- Stash account, not increasing the staked value. +- Stash account, also increasing the staked value. + +### Additional Fund Management Operations + +Any funds already placed into stash can be the target of the following operations: + +The controller account can free a portion (or all) of the funds using the +[`unbond`](enum.Call.html#variant.unbond) call. Note that the funds are not immediately +accessible. Instead, a duration denoted by [`BondingDuration`](./struct.BondingDuration.html) +(in number of eras) must pass until the funds can actually be removed. Once the +`BondingDuration` is over, the [`withdraw_unbonded`](./enum.Call.html#variant.withdraw_unbonded) +call can be used to actually withdraw the funds. + +Note that there is a limitation to the number of fund-chunks that can be scheduled to be +unlocked in the future via [`unbond`](enum.Call.html#variant.unbond). In case this maximum +(`MAX_UNLOCKING_CHUNKS`) is reached, the bonded account _must_ first wait until a successful +call to `withdraw_unbonded` to remove some of the chunks. + +### Election Algorithm + +The current election algorithm is implemented based on Phragmén. The reference implementation +can be found [here](https://github.com/w3f/consensus/tree/master/NPoS). + +The election algorithm, aside from electing the validators with the most stake value and votes, +tries to divide the nominator votes among candidates in an equal manner. To further assure this, +an optional post-processing can be applied that iteratively normalizes the nominator staked +values until the total difference among votes of a particular nominator are less than a +threshold. + +## GenesisConfig + +The Staking module depends on the [`GenesisConfig`](./struct.GenesisConfig.html). The +`GenesisConfig` is optional and allow to set some initial stakers. + +## Related Modules + +- [Balances](../pallet_balances/index.html): Used to manage values at stake. +- [Session](../pallet_session/index.html): Used to manage sessions. Also, a list of new + validators is stored in the Session module's `Validators` at the end of each era. 
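To make the commission and pro-rata split described under Reward Calculation concrete, here is a small self-contained numeric sketch. The figures are invented for illustration, and plain integer arithmetic stands in for the runtime's `Perbill`-based math:

```rust
fn main() {
    // Assume this validator earned 1_000 units for the era and set a 10% commission.
    let era_reward: u64 = 1_000;
    let commission_percent: u64 = 10;

    // The commission is taken off the top and goes to the validator alone.
    let commission = era_reward * commission_percent / 100; // 100
    let shared = era_reward - commission; // 900

    // The rest is split pro rata by exposure: own stake plus two nominators.
    let (own, nominator_a, nominator_b) = (200u64, 300u64, 500u64);
    let total = own + nominator_a + nominator_b; // 1_000

    let validator_total = commission + shared * own / total;
    assert_eq!(validator_total, 280); // 100 commission + 180 stake share
    assert_eq!(shared * nominator_a / total, 270);
    assert_eq!(shared * nominator_b / total, 450);
}
```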
+ +License: Apache-2.0 \ No newline at end of file diff --git a/frame/staking/fuzzer/Cargo.toml b/frame/staking/fuzzer/Cargo.toml index 832ac622a97..ee3e8928676 100644 --- a/frame/staking/fuzzer/Cargo.toml +++ b/frame/staking/fuzzer/Cargo.toml @@ -15,19 +15,19 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] honggfuzz = "0.5" codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -pallet-staking = { version = "2.0.0-rc5", path = "..", features = ["runtime-benchmarks"] } -pallet-staking-reward-curve = { version = "2.0.0-rc5", path = "../reward-curve" } -pallet-session = { version = "2.0.0-rc5", path = "../../session" } -pallet-indices = { version = "2.0.0-rc5", path = "../../indices" } -pallet-balances = { version = "2.0.0-rc5", path = "../../balances" } -pallet-timestamp = { version = "2.0.0-rc5", path = "../../timestamp" } -frame-system = { version = "2.0.0-rc5", path = "../../system" } -frame-support = { version = "2.0.0-rc5", path = "../../support" } -sp-std = { version = "2.0.0-rc5", path = "../../../primitives/std" } -sp-io ={ version = "2.0.0-rc5", path = "../../../primitives/io" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-npos-elections = { version = "2.0.0-rc5", path = "../../../primitives/npos-elections" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } +pallet-staking = { version = "2.0.0-rc6", path = "..", features = ["runtime-benchmarks"] } +pallet-staking-reward-curve = { version = "2.0.0-rc6", path = "../reward-curve" } +pallet-session = { version = "2.0.0-rc6", path = "../../session" } +pallet-indices = { version = "2.0.0-rc6", path = "../../indices" } +pallet-balances = { version = "2.0.0-rc6", path = "../../balances" } +pallet-timestamp = { version = "2.0.0-rc6", path = "../../timestamp" } +frame-system = { version = "2.0.0-rc6", path = "../../system" } +frame-support = { version = "2.0.0-rc6", path = "../../support" } +sp-std = { version = "2.0.0-rc6", path = "../../../primitives/std" } +sp-io ={ version = "2.0.0-rc6", path = "../../../primitives/io" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } +sp-npos-elections = { version = "2.0.0-rc6", path = "../../../primitives/npos-elections" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../primitives/runtime" } [[bin]] name = "submit_solution" diff --git a/frame/staking/reward-curve/Cargo.toml b/frame/staking/reward-curve/Cargo.toml index 25cbffda1de..a3ef91d3bc6 100644 --- a/frame/staking/reward-curve/Cargo.toml +++ b/frame/staking/reward-curve/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-staking-reward-curve" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -21,4 +21,4 @@ proc-macro2 = "1.0.6" proc-macro-crate = "0.1.4" [dev-dependencies] -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../primitives/runtime" } diff --git a/frame/sudo/Cargo.toml b/frame/sudo/Cargo.toml index ba68aa49470..eef60150558 100644 --- a/frame/sudo/Cargo.toml +++ b/frame/sudo/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-sudo" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,14 +14,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = 
"1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/sudo/README.md b/frame/sudo/README.md new file mode 100644 index 00000000000..fb8d1974c12 --- /dev/null +++ b/frame/sudo/README.md @@ -0,0 +1,70 @@ +# Sudo Module + +- [`sudo::Trait`](./trait.Trait.html) +- [`Call`](./enum.Call.html) + +## Overview + +The Sudo module allows for a single account (called the "sudo key") +to execute dispatchable functions that require a `Root` call +or designate a new account to replace them as the sudo key. +Only one account can be the sudo key at a time. + +## Interface + +### Dispatchable Functions + +Only the sudo key can call the dispatchable functions from the Sudo module. + +* `sudo` - Make a `Root` call to a dispatchable function. +* `set_key` - Assign a new account to be the sudo key. + +## Usage + +### Executing Privileged Functions + +The Sudo module itself is not intended to be used within other modules. +Instead, you can build "privileged functions" (i.e. functions that require `Root` origin) in other modules. +You can execute these privileged functions by calling `sudo` with the sudo key account. +Privileged functions cannot be directly executed via an extrinsic. + +Learn more about privileged functions and `Root` origin in the [`Origin`] type documentation. + +### Simple Code Snippet + +This is an example of a module that exposes a privileged function: + +```rust +use frame_support::{decl_module, dispatch}; +use frame_system::ensure_root; + +pub trait Trait: frame_system::Trait {} + +decl_module! { + pub struct Module for enum Call where origin: T::Origin { + #[weight = 0] + pub fn privileged_function(origin) -> dispatch::DispatchResult { + ensure_root(origin)?; + + // do something... + + Ok(()) + } + } +} +``` + +## Genesis Config + +The Sudo module depends on the [`GenesisConfig`](./struct.GenesisConfig.html). +You need to set an initial superuser account as the sudo `key`. 
+ +## Related Modules + +* [Democracy](../pallet_democracy/index.html) + +[`Call`]: ./enum.Call.html +[`Trait`]: ./trait.Trait.html +[`Origin`]: https://docs.substrate.dev/docs/substrate-types + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index 24e898e1692..005638824b0 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,25 +15,25 @@ targets = ["x86_64-unknown-linux-gnu"] log = "0.4" serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -frame-metadata = { version = "11.0.0-rc5", default-features = false, path = "../metadata" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-tracing = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/tracing" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-arithmetic = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/arithmetic" } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/inherents" } -frame-support-procedural = { version = "2.0.0-rc5", path = "./procedural" } +frame-metadata = { version = "11.0.0-rc6", default-features = false, path = "../metadata" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +sp-tracing = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/tracing" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/core" } +sp-arithmetic = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/arithmetic" } +sp-inherents = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/inherents" } +frame-support-procedural = { version = "2.0.0-rc6", path = "./procedural" } paste = "0.1.6" once_cell = { version = "1", default-features = false, optional = true } -sp-state-machine = { version = "0.8.0-rc5", optional = true, path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.8.0-rc6", optional = true, path = "../../primitives/state-machine" } bitmask = { version = "0.5.0", default-features = false } impl-trait-for-tuples = "0.1.3" smallvec = "1.4.1" [dev-dependencies] pretty_assertions = "0.6.1" -frame-system = { version = "2.0.0-rc5", path = "../system" } +frame-system = { version = "2.0.0-rc6", path = "../system" } parity-util-mem = { version = "0.7.0", features = ["primitive-types"] } [features] diff --git a/frame/support/README.md b/frame/support/README.md new file mode 100644 index 00000000000..2282870aca0 --- /dev/null +++ b/frame/support/README.md @@ -0,0 +1,3 @@ +Support code for the runtime. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/frame/support/procedural/Cargo.toml b/frame/support/procedural/Cargo.toml index 622b9246654..dc62a837916 100644 --- a/frame/support/procedural/Cargo.toml +++ b/frame/support/procedural/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support-procedural" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -frame-support-procedural-tools = { version = "2.0.0-rc5", path = "./tools" } +frame-support-procedural-tools = { version = "2.0.0-rc6", path = "./tools" } proc-macro2 = "1.0.6" quote = "1.0.3" syn = { version = "1.0.7", features = ["full"] } diff --git a/frame/support/procedural/tools/Cargo.toml b/frame/support/procedural/tools/Cargo.toml index 2c9a66baac5..131d47474e7 100644 --- a/frame/support/procedural/tools/Cargo.toml +++ b/frame/support/procedural/tools/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support-procedural-tools" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,7 +12,7 @@ description = "Proc macro helpers for procedural macros" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -frame-support-procedural-tools-derive = { version = "2.0.0-rc5", path = "./derive" } +frame-support-procedural-tools-derive = { version = "2.0.0-rc6", path = "./derive" } proc-macro2 = "1.0.6" quote = "1.0.3" syn = { version = "1.0.7", features = ["full", "visit"] } diff --git a/frame/support/procedural/tools/derive/Cargo.toml b/frame/support/procedural/tools/derive/Cargo.toml index fceaeaf08d2..327409692f4 100644 --- a/frame/support/procedural/tools/derive/Cargo.toml +++ b/frame/support/procedural/tools/derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support-procedural-tools-derive" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/frame/support/test/Cargo.toml b/frame/support/test/Cargo.toml index 22d420a6175..f2f70fb9527 100644 --- a/frame/support/test/Cargo.toml +++ b/frame/support/test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support-test" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,13 +14,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", default-features = false, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-io = { version = "2.0.0-rc5", path = "../../../primitives/io", default-features = false } -sp-state-machine = { version = "0.8.0-rc5", optional = true, path = "../../../primitives/state-machine" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../" } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/inherents" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/runtime" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/core" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/std" } +sp-io = { version = "2.0.0-rc6", path = "../../../primitives/io", default-features = false } +sp-state-machine = { version = "0.8.0-rc6", optional = true, path = 
"../../../primitives/state-machine" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../" } +sp-inherents = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/inherents" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/runtime" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/core" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/std" } trybuild = "1.0.17" pretty_assertions = "0.6.1" rustversion = "1.0.0" diff --git a/frame/system/Cargo.toml b/frame/system/Cargo.toml index e9a085f85be..5a5643fec9a 100644 --- a/frame/system/Cargo.toml +++ b/frame/system/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-system" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,18 +14,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io", default-features = false } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-version = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/version" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc6", path = "../../primitives/io", default-features = false } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +sp-version = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/version" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } impl-trait-for-tuples = "0.1.3" [dev-dependencies] criterion = "0.2.11" -sp-externalities = { version = "0.8.0-rc5", path = "../../primitives/externalities" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../test-utils/runtime/client" } +sp-externalities = { version = "0.8.0-rc6", path = "../../primitives/externalities" } +substrate-test-runtime-client = { version = "2.0.0-rc6", path = "../../test-utils/runtime/client" } [features] default = ["std"] diff --git a/frame/system/README.md b/frame/system/README.md new file mode 100644 index 00000000000..46e48b6d527 --- /dev/null +++ b/frame/system/README.md @@ -0,0 +1,75 @@ +# System Module + +The System module provides low-level access to core types and cross-cutting utilities. +It acts as the base layer for other pallets to interact with the Substrate framework components. + +- [`system::Trait`](./trait.Trait.html) + +## Overview + +The System module defines the core data types used in a Substrate runtime. +It also provides several utility functions (see [`Module`](./struct.Module.html)) for other FRAME pallets. 
+ +In addition, it manages the storage items for extrinsics data, indexes, event records, and digest items, +among other things that support the execution of the current block. + +It also handles low-level tasks like depositing logs, basic setup and teardown of +temporary storage entries, and access to previous block hashes. + +## Interface + +### Dispatchable Functions + +The System module does not implement any dispatchable functions. + +### Public Functions + +See the [`Module`](./struct.Module.html) struct for details of publicly available functions. + +### Signed Extensions + +The System module defines the following extensions: + + - [`CheckWeight`]: Checks the weight and length of the block and ensures that it does not + exceed the limits. + - [`CheckNonce`]: Checks the nonce of the transaction. Contains a single payload of type + `T::Index`. + - [`CheckEra`]: Checks the era of the transaction. Contains a single payload of type `Era`. + - [`CheckGenesis`]: Checks the provided genesis hash of the transaction. Must be a part of the + signed payload of the transaction. + - [`CheckSpecVersion`]: Checks that the runtime version is the same as the one used to sign the + transaction. + - [`CheckTxVersion`]: Checks that the transaction version is the same as the one used to sign the + transaction. + +Look up the runtime aggregator file (e.g. `node/runtime`) to see the full list of signed +extensions included in a chain. + +## Usage + +### Prerequisites + +Import the System module and derive your module's configuration trait from the system trait. + +### Example - Get extrinsic count and parent hash for the current block + +```rust +use frame_support::{decl_module, dispatch}; +use frame_system::{self as system, ensure_signed}; + +pub trait Trait: system::Trait {} + +decl_module!
{ + pub struct Module<T: Trait> for enum Call where origin: T::Origin { + #[weight = 0] + pub fn system_module_example(origin) -> dispatch::DispatchResult { + let _sender = ensure_signed(origin)?; + let _extrinsic_count = <system::Module<T>>::extrinsic_count(); + let _parent_hash = <system::Module<T>>::parent_hash(); + Ok(()) + } + } +} +``` + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/system/benchmarking/Cargo.toml b/frame/system/benchmarking/Cargo.toml index e8edab81324..c0b5366b7a4 100644 --- a/frame/system/benchmarking/Cargo.toml +++ b/frame/system/benchmarking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-system-benchmarking" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,16 +13,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../../benchmarking" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../../system" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../../support" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/core" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/std" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/runtime" } +frame-benchmarking = { version = "2.0.0-rc6", default-features = false, path = "../../benchmarking" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../../system" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../../support" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/core" } [dev-dependencies] serde = { version = "1.0.101" } -sp-io ={ version = "2.0.0-rc5", path = "../../../primitives/io" } +sp-io ={ version = "2.0.0-rc6", path = "../../../primitives/io" } [features] default = ["std"] diff --git a/frame/system/benchmarking/README.md b/frame/system/benchmarking/README.md new file mode 100644 index 00000000000..9718db58b37 --- /dev/null +++ b/frame/system/benchmarking/README.md @@ -0,0 +1 @@ +License: Apache-2.0 \ No newline at end of file diff --git a/frame/system/rpc/runtime-api/Cargo.toml b/frame/system/rpc/runtime-api/Cargo.toml index fa2bd7dd1b8..3c6028b4f7a 100644 --- a/frame/system/rpc/runtime-api/Cargo.toml +++ b/frame/system/rpc/runtime-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-system-rpc-runtime-api" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,7 +12,7 @@ description = "Runtime API definition required by System RPC extensions."
targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../../../../primitives/api" } +sp-api = { version = "2.0.0-rc6", default-features = false, path = "../../../../primitives/api" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } [features] diff --git a/frame/system/rpc/runtime-api/README.md b/frame/system/rpc/runtime-api/README.md new file mode 100644 index 00000000000..ab46c22a8be --- /dev/null +++ b/frame/system/rpc/runtime-api/README.md @@ -0,0 +1,7 @@ +Runtime API definition required by System RPC extensions. + +This API should be imported and implemented by the runtime, +of a node that wants to use the custom RPC extension +adding System access methods. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/timestamp/Cargo.toml b/frame/timestamp/Cargo.toml index bb9a3266e2d..db8e488dd5d 100644 --- a/frame/timestamp/Cargo.toml +++ b/frame/timestamp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-timestamp" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -16,19 +16,19 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io", optional = true } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/inherents" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -sp-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/timestamp" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io", optional = true } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +sp-inherents = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/inherents" } +frame-benchmarking = { version = "2.0.0-rc6", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } +sp-timestamp = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/timestamp" } impl-trait-for-tuples = "0.1.3" [dev-dependencies] -sp-io ={ version = "2.0.0-rc5", path = "../../primitives/io" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } +sp-io ={ version = "2.0.0-rc6", path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/timestamp/README.md b/frame/timestamp/README.md new file mode 100644 index 00000000000..7cdbdf0e79b --- /dev/null +++ b/frame/timestamp/README.md @@ -0,0 +1,74 @@ +# 
Timestamp Module + +The Timestamp module provides functionality to get and set the on-chain time. + +- [`timestamp::Trait`](./trait.Trait.html) +- [`Call`](./enum.Call.html) +- [`Module`](./struct.Module.html) + +## Overview + +The Timestamp module allows the validators to set and validate a timestamp with each block. + +It uses inherents for timestamp data, which is provided by the block author and validated/verified +by other validators. The timestamp can be set only once per block and must be set each block. +There could be a constraint on how much time must pass before setting the new timestamp. + +**NOTE:** The Timestamp module is the recommended way to query the on-chain time instead of using +an approach based on block numbers. The block number based time measurement can cause issues +because of cumulative calculation errors and hence should be avoided. + +## Interface + +### Dispatchable Functions + +* `set` - Sets the current time. + +### Public functions + +* `get` - Gets the current time for the current block. If this function is called prior to +setting the timestamp, it will return the timestamp of the previous block. + +### Trait Getters + +* `MinimumPeriod` - Gets the minimum (and advised) period between blocks for the chain. + +## Usage + +The following example shows how to use the Timestamp module in your custom module to query the current timestamp. + +### Prerequisites + +Import the Timestamp module into your custom module and derive the module configuration +trait from the timestamp trait. + +### Get current timestamp + +```rust +use frame_support::{decl_module, dispatch}; +use frame_system::ensure_signed; +use pallet_timestamp::{self as timestamp}; + +pub trait Trait: timestamp::Trait {} + +decl_module! { + pub struct Module<T: Trait> for enum Call where origin: T::Origin { + #[weight = 0] + pub fn get_time(origin) -> dispatch::DispatchResult { + let _sender = ensure_signed(origin)?; + let _now = <timestamp::Module<T>>::get(); + Ok(()) + } + } +} +``` + +### Example from FRAME + +The [Session module](https://github.com/paritytech/substrate/blob/master/frame/session/src/lib.rs) uses +the Timestamp module for session management.
+ +## Related Modules + +* [Session](../pallet_session/index.html) + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/transaction-payment/Cargo.toml b/frame/transaction-payment/Cargo.toml index e48488a7bcd..e0381b20aa4 100644 --- a/frame/transaction-payment/Cargo.toml +++ b/frame/transaction-payment/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-transaction-payment" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,18 +14,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-rc5", default-features = false, path = "./rpc/runtime-api" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } +pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-rc6", default-features = false, path = "./rpc/runtime-api" } smallvec = "1.4.1" -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io", default-features = false } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core", default-features = false } +sp-io = { version = "2.0.0-rc6", path = "../../primitives/io", default-features = false } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core", default-features = false } [dev-dependencies] -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } -sp-storage = { version = "2.0.0-rc5", path = "../../primitives/storage" } +pallet-balances = { version = "2.0.0-rc6", path = "../balances" } +sp-storage = { version = "2.0.0-rc6", path = "../../primitives/storage" } [features] default = ["std"] diff --git a/frame/transaction-payment/README.md b/frame/transaction-payment/README.md new file mode 100644 index 00000000000..10ad9579e92 --- /dev/null +++ b/frame/transaction-payment/README.md @@ -0,0 +1,16 @@ +# Transaction Payment Module + +This module provides the basic logic needed to pay the absolute minimum amount needed for a +transaction to be included. This includes: + - _weight fee_: A fee proportional to amount of weight a transaction consumes. + - _length fee_: A fee proportional to the encoded length of the transaction. + - _tip_: An optional tip. Tip increases the priority of the transaction, giving it a higher + chance to be included by the transaction queue. + +Additionally, this module allows one to configure: + - The mapping between one unit of weight to one unit of fee via [`Trait::WeightToFee`]. + - A means of updating the fee for the next block, via defining a multiplier, based on the + final state of the chain at the end of the previous block. 
This can be configured via + [`Trait::FeeMultiplierUpdate`] + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/transaction-payment/rpc/Cargo.toml b/frame/transaction-payment/rpc/Cargo.toml index 280a4dc490e..d3d03dd1a4d 100644 --- a/frame/transaction-payment/rpc/Cargo.toml +++ b/frame/transaction-payment/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-transaction-payment-rpc" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -16,10 +16,10 @@ codec = { package = "parity-scale-codec", version = "1.3.1" } jsonrpc-core = "14.2.0" jsonrpc-core-client = "14.2.0" jsonrpc-derive = "14.2.1" -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-rpc = { version = "2.0.0-rc5", path = "../../../primitives/rpc" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } +sp-rpc = { version = "2.0.0-rc6", path = "../../../primitives/rpc" } serde = { version = "1.0.101", features = ["derive"] } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0-rc5", path = "../../../primitives/api" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-rc5", path = "./runtime-api" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../primitives/runtime" } +sp-api = { version = "2.0.0-rc6", path = "../../../primitives/api" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../../primitives/blockchain" } +pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-rc6", path = "./runtime-api" } diff --git a/frame/transaction-payment/rpc/README.md b/frame/transaction-payment/rpc/README.md new file mode 100644 index 00000000000..21a8a7d37ca --- /dev/null +++ b/frame/transaction-payment/rpc/README.md @@ -0,0 +1,3 @@ +RPC interface for the transaction payment module. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/frame/transaction-payment/rpc/runtime-api/Cargo.toml b/frame/transaction-payment/rpc/runtime-api/Cargo.toml index dacc7bc6423..42b9fb9e64d 100644 --- a/frame/transaction-payment/rpc/runtime-api/Cargo.toml +++ b/frame/transaction-payment/rpc/runtime-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-transaction-payment-rpc-runtime-api" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,11 +13,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../../../../primitives/api" } +sp-api = { version = "2.0.0-rc6", default-features = false, path = "../../../../primitives/api" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../../../primitives/std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../../../support" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../../../primitives/std" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../../../primitives/runtime" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../../../support" } [dev-dependencies] serde_json = "1.0.41" diff --git a/frame/transaction-payment/rpc/runtime-api/README.md b/frame/transaction-payment/rpc/runtime-api/README.md new file mode 100644 index 00000000000..e453d9a3b7c --- /dev/null +++ b/frame/transaction-payment/rpc/runtime-api/README.md @@ -0,0 +1,3 @@ +Runtime API definition for transaction payment module. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/frame/treasury/Cargo.toml b/frame/treasury/Cargo.toml index a2b316a6c6a..b6ef83b32ed 100644 --- a/frame/treasury/Cargo.toml +++ b/frame/treasury/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-treasury" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,18 +14,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -pallet-balances = { version = "2.0.0-rc5", default-features = false, path = "../balances" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } +pallet-balances = { version = "2.0.0-rc6", default-features = false, path = "../balances" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0-rc6", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-io ={ version = "2.0.0-rc5", path = "../../primitives/io" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-storage = { version = "2.0.0-rc5", path = "../../primitives/storage" } +sp-io ={ version = "2.0.0-rc6", path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +sp-storage = { version = "2.0.0-rc6", path = "../../primitives/storage" } [features] default = ["std"] diff --git a/frame/treasury/README.md b/frame/treasury/README.md new file mode 100644 index 00000000000..befb5811802 --- /dev/null +++ b/frame/treasury/README.md @@ -0,0 +1,72 @@ +# Treasury Module + +The Treasury module provides a "pot" of funds that can be managed by stakeholders in the +system and a structure for making spending proposals from this pot. + +- [`treasury::Trait`](./trait.Trait.html) +- [`Call`](./enum.Call.html) + +## Overview + +The Treasury Module itself provides the pot to store funds, and a means for stakeholders to +propose, approve, and deny expenditures. The chain will need to provide a method (e.g. +inflation, fees) for collecting funds. + +By way of example, the Council could vote to fund the Treasury with a portion of the block +reward and use the funds to pay developers. + +### Tipping + +A separate subsystem exists to allow for an agile "tipping" process, whereby a reward may be +given without first having a pre-determined stakeholder group come to consensus on how much +should be paid. + +A group of `Tippers` is determined through the config `Trait`. 
After half of these have declared +some amount that they believe a particular reported reason deserves, then a countdown period is +entered where any remaining members can declare their tip amounts also. After the close of the +countdown period, the median of all declared tips is paid to the reported beneficiary, along +with any finders fee, in case of a public (and bonded) original report. + +### Terminology + +- **Proposal:** A suggestion to allocate funds from the pot to a beneficiary. +- **Beneficiary:** An account who will receive the funds from a proposal iff +the proposal is approved. +- **Deposit:** Funds that a proposer must lock when making a proposal. The +deposit will be returned or slashed if the proposal is approved or rejected +respectively. +- **Pot:** Unspent funds accumulated by the treasury module. + +Tipping protocol: +- **Tipping:** The process of gathering declarations of amounts to tip and taking the median + amount to be transferred from the treasury to a beneficiary account. +- **Tip Reason:** The reason for a tip; generally a URL which embodies or explains why a + particular individual (identified by an account ID) is worthy of a recognition by the + treasury. +- **Finder:** The original public reporter of some reason for tipping. +- **Finders Fee:** Some proportion of the tip amount that is paid to the reporter of the tip, + rather than the main beneficiary. + +## Interface + +### Dispatchable Functions + +General spending/proposal protocol: +- `propose_spend` - Make a spending proposal and stake the required deposit. +- `set_pot` - Set the spendable balance of funds. +- `configure` - Configure the module's proposal requirements. +- `reject_proposal` - Reject a proposal, slashing the deposit. +- `approve_proposal` - Accept the proposal, returning the deposit. + +Tipping protocol: +- `report_awesome` - Report something worthy of a tip and register for a finders fee. +- `retract_tip` - Retract a previous (finders fee registered) report. +- `tip_new` - Report an item worthy of a tip and declare a specific amount to tip. +- `tip` - Declare or redeclare an amount to tip for a particular reason. +- `close_tip` - Close and pay out a tip. + +## GenesisConfig + +The Treasury module depends on the [`GenesisConfig`](./struct.GenesisConfig.html). 
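As a rough illustration of the payout rule described in the Tipping section above, the sketch below computes the median of a set of declared tips in plain Rust. It is only a sketch: the pallet itself works on `BalanceOf<T>` inside the runtime and also handles the finders fee, so the function name and types here are purely illustrative.

```rust
/// Illustrative only: return the median of the declared tip amounts, which is
/// the amount the tipping process described above pays to the beneficiary.
fn median_tip(mut declared: Vec<u128>) -> Option<u128> {
    if declared.is_empty() {
        return None;
    }
    declared.sort_unstable();
    // For an even number of declarations this picks the upper median; the
    // point is "take the median", not the exact tie-breaking rule.
    Some(declared[declared.len() / 2])
}

fn main() {
    assert_eq!(median_tip(vec![30, 10, 20]), Some(20));
    assert_eq!(median_tip(Vec::new()), None);
}
```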
+ +License: Apache-2.0 \ No newline at end of file diff --git a/frame/utility/Cargo.toml b/frame/utility/Cargo.toml index 6a1525dcfb4..5ccc2085d97 100644 --- a/frame/utility/Cargo.toml +++ b/frame/utility/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-utility" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,18 +14,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0-rc6", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-rc6", path = "../balances" } [features] default = ["std"] diff --git a/frame/utility/README.md b/frame/utility/README.md new file mode 100644 index 00000000000..84bb12f15b5 --- /dev/null +++ b/frame/utility/README.md @@ -0,0 +1,38 @@ +# Utility Module +A stateless module with helpers for dispatch management which does no re-authentication. + +- [`utility::Trait`](./trait.Trait.html) +- [`Call`](./enum.Call.html) + +## Overview + +This module contains two basic pieces of functionality: +- Batch dispatch: A stateless operation, allowing any origin to execute multiple calls in a + single dispatch. This can be useful to amalgamate proposals, combining `set_code` with + corresponding `set_storage`s, for efficient multiple payouts with just a single signature + verify, or in combination with one of the other two dispatch functionality. +- Pseudonymal dispatch: A stateless operation, allowing a signed origin to execute a call from + an alternative signed origin. Each account has 2 * 2**16 possible "pseudonyms" (alternative + account IDs) and these can be stacked. This can be useful as a key management tool, where you + need multiple distinct accounts (e.g. as controllers for many staking accounts), but where + it's perfectly fine to have each of them controlled by the same underlying keypair. 
+ Derivative accounts are, for the purposes of proxy filtering, considered exactly the same as + the origin and are thus subject to the origin's filters. + +Since proxy filters are respected in all dispatches of this module, it should never need to be +filtered by any proxy. + +## Interface + +### Dispatchable Functions + +#### For batch dispatch +* `batch` - Dispatch multiple calls from the sender's origin. + +#### For pseudonymal dispatch +* `as_derivative` - Dispatch a call from a derivative signed origin. + +[`Call`]: ./enum.Call.html +[`Trait`]: ./trait.Trait.html + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/vesting/Cargo.toml b/frame/vesting/Cargo.toml index 67d796a6a98..41f144503e4 100644 --- a/frame/vesting/Cargo.toml +++ b/frame/vesting/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-vesting" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,17 +15,17 @@ targets = ["x86_64-unknown-linux-gnu"] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } +frame-benchmarking = { version = "2.0.0-rc6", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } -sp-storage = { version = "2.0.0-rc5", path = "../../primitives/storage" } +sp-io = { version = "2.0.0-rc6", path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-rc6", path = "../balances" } +sp-storage = { version = "2.0.0-rc6", path = "../../primitives/storage" } hex-literal = "0.2.1" [features] diff --git a/frame/vesting/README.md b/frame/vesting/README.md new file mode 100644 index 00000000000..56f49db2647 --- /dev/null +++ b/frame/vesting/README.md @@ -0,0 +1,31 @@ +# Vesting Module + +- [`vesting::Trait`](./trait.Trait.html) +- [`Call`](./enum.Call.html) + +## Overview + +A simple module providing a means of placing a linear curve on an account's locked balance. This +module ensures that there is a lock in place preventing the balance from dropping below the *unvested* +amount for any reason other than transaction fee payment. + +As the amount vested increases over time, the amount unvested reduces.
However, locks remain in +place and explicit action is needed on behalf of the user to ensure that the amount locked is +equivalent to the amount remaining to be vested. This is done through a dispatchable function, +either `vest` (in typical case where the sender is calling on their own behalf) or `vest_other` +in case the sender is calling on another account's behalf. + +## Interface + +This module implements the `VestingSchedule` trait. + +### Dispatchable Functions + +- `vest` - Update the lock, reducing it in line with the amount "vested" so far. +- `vest_other` - Update the lock of another account, reducing it in line with the amount + "vested" so far. + +[`Call`]: ./enum.Call.html +[`Trait`]: ./trait.Trait.html + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/allocator/Cargo.toml b/primitives/allocator/Cargo.toml index d38eb9aa51f..6ee6c333344 100644 --- a/primitives/allocator/Cargo.toml +++ b/primitives/allocator/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-allocator" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,9 +13,9 @@ documentation = "https://docs.rs/sp-allocator" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-std = { version = "2.0.0-rc5", path = "../std", default-features = false } -sp-core = { version = "2.0.0-rc5", path = "../core", default-features = false } -sp-wasm-interface = { version = "2.0.0-rc5", path = "../wasm-interface", default-features = false } +sp-std = { version = "2.0.0-rc6", path = "../std", default-features = false } +sp-core = { version = "2.0.0-rc6", path = "../core", default-features = false } +sp-wasm-interface = { version = "2.0.0-rc6", path = "../wasm-interface", default-features = false } log = { version = "0.4.8", optional = true } derive_more = { version = "0.99.2", optional = true } diff --git a/primitives/allocator/README.md b/primitives/allocator/README.md new file mode 100644 index 00000000000..361feaae591 --- /dev/null +++ b/primitives/allocator/README.md @@ -0,0 +1,6 @@ +Collection of allocator implementations. 
+ +This crate provides the following allocator implementations: +- A freeing-bump allocator: [`FreeingBumpHeapAllocator`](freeing_bump::FreeingBumpHeapAllocator) + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/api/Cargo.toml b/primitives/api/Cargo.toml index 8ad9fdfdbb7..e1e3dd76d47 100644 --- a/primitives/api/Cargo.toml +++ b/primitives/api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-api" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,16 +13,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-api-proc-macro = { version = "2.0.0-rc5", path = "proc-macro" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../core" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../runtime" } -sp-version = { version = "2.0.0-rc5", default-features = false, path = "../version" } -sp-state-machine = { version = "0.8.0-rc5", optional = true, path = "../../primitives/state-machine" } +sp-api-proc-macro = { version = "2.0.0-rc6", path = "proc-macro" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../core" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../std" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../runtime" } +sp-version = { version = "2.0.0-rc6", default-features = false, path = "../version" } +sp-state-machine = { version = "0.8.0-rc6", optional = true, path = "../../primitives/state-machine" } hash-db = { version = "0.15.2", optional = true } [dev-dependencies] -sp-test-primitives = { version = "2.0.0-rc5", path = "../test-primitives" } +sp-test-primitives = { version = "2.0.0-rc6", path = "../test-primitives" } [features] default = [ "std" ] diff --git a/primitives/api/README.md b/primitives/api/README.md new file mode 100644 index 00000000000..551de2f82e3 --- /dev/null +++ b/primitives/api/README.md @@ -0,0 +1,17 @@ +Substrate runtime api + +The Substrate runtime api is the crucial interface between the node and the runtime. +Every call that goes into the runtime is done with a runtime api. The runtime apis are not fixed. +Every Substrate user can define their own apis with +[`decl_runtime_apis`](macro.decl_runtime_apis.html) and implement them in +the runtime with [`impl_runtime_apis`](macro.impl_runtime_apis.html). + +Every Substrate runtime needs to implement the [`Core`] runtime api. This api provides the basic +functionality that every runtime needs to export. + +Besides the macros and the [`Core`] runtime api, this crate provides the [`Metadata`] runtime +api, the [`ApiExt`] trait, the [`CallApiAt`] trait and the [`ConstructRuntimeApi`] trait. + +On a meta level, this means that the client always calls into the runtime through the client-side interface that these macros generate.
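To make the shape of this concrete, here is a minimal, hypothetical sketch. `HelloApi` and `hello` are invented names used purely for illustration; a real runtime must also implement the mandatory [`Core`] api, so the `impl_runtime_apis!` fragment shown in the comment is not a complete runtime on its own.

```rust
// Declared once, typically in a crate shared by the node and the runtime.
// The macro generates both a client-side and a runtime-side representation.
sp_api::decl_runtime_apis! {
    /// A hypothetical runtime api used only for illustration.
    pub trait HelloApi {
        /// Return some value computed by the runtime.
        fn hello() -> u32;
    }
}

// Inside the runtime crate the api is then implemented roughly like this
// (shown as a comment because it needs the runtime's `Runtime` and `Block`
// types, plus the other mandatory apis, in order to compile):
//
// sp_api::impl_runtime_apis! {
//     impl self::HelloApi<Block> for Runtime {
//         fn hello() -> u32 { 42 }
//     }
// }
```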
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/api/proc-macro/Cargo.toml b/primitives/api/proc-macro/Cargo.toml index e267b86c8ed..b7d0bd16050 100644 --- a/primitives/api/proc-macro/Cargo.toml +++ b/primitives/api/proc-macro/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-api-proc-macro" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/api/test/Cargo.toml b/primitives/api/test/Cargo.toml index 6b4b82e9a9e..0c321429e13 100644 --- a/primitives/api/test/Cargo.toml +++ b/primitives/api/test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-api-test" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,22 +12,22 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { version = "2.0.0-rc5", path = "../" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../../test-utils/runtime/client" } -sp-version = { version = "2.0.0-rc5", path = "../../version" } -sp-runtime = { version = "2.0.0-rc5", path = "../../runtime" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../blockchain" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -sc-block-builder = { version = "0.8.0-rc5", path = "../../../client/block-builder" } +sp-api = { version = "2.0.0-rc6", path = "../" } +substrate-test-runtime-client = { version = "2.0.0-rc6", path = "../../../test-utils/runtime/client" } +sp-version = { version = "2.0.0-rc6", path = "../../version" } +sp-runtime = { version = "2.0.0-rc6", path = "../../runtime" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../blockchain" } +sp-consensus = { version = "0.8.0-rc6", path = "../../../primitives/consensus/common" } +sc-block-builder = { version = "0.8.0-rc6", path = "../../../client/block-builder" } codec = { package = "parity-scale-codec", version = "1.3.1" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../../primitives/state-machine" } +sp-state-machine = { version = "0.8.0-rc6", path = "../../../primitives/state-machine" } trybuild = "1.0.17" rustversion = "1.0.0" [dev-dependencies] criterion = "0.3.0" -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../../test-utils/runtime/client" } -sp-core = { version = "2.0.0-rc5", path = "../../core" } +substrate-test-runtime-client = { version = "2.0.0-rc6", path = "../../../test-utils/runtime/client" } +sp-core = { version = "2.0.0-rc6", path = "../../core" } [[bench]] name = "bench" diff --git a/primitives/application-crypto/Cargo.toml b/primitives/application-crypto/Cargo.toml index 4366894dcf3..cbfb5d36234 100644 --- a/primitives/application-crypto/Cargo.toml +++ b/primitives/application-crypto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-application-crypto" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" description = "Provides facilities for generating application specific crypto wrapper types." 
@@ -14,11 +14,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../core" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../core" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../std" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" } [features] default = [ "std" ] diff --git a/primitives/application-crypto/README.md b/primitives/application-crypto/README.md new file mode 100644 index 00000000000..c86e33552f6 --- /dev/null +++ b/primitives/application-crypto/README.md @@ -0,0 +1,3 @@ +Traits and macros for constructing application specific strongly typed crypto wrappers. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/application-crypto/test/Cargo.toml b/primitives/application-crypto/test/Cargo.toml index 554e4d25320..1fb03856dd1 100644 --- a/primitives/application-crypto/test/Cargo.toml +++ b/primitives/application-crypto/test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-application-crypto-test" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" description = "Integration tests for application-crypto" @@ -13,8 +13,8 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../core" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../../test-utils/runtime/client" } -sp-runtime = { version = "2.0.0-rc5", path = "../../runtime" } -sp-api = { version = "2.0.0-rc5", path = "../../api" } -sp-application-crypto = { version = "2.0.0-rc5", path = "../" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../core" } +substrate-test-runtime-client = { version = "2.0.0-rc6", path = "../../../test-utils/runtime/client" } +sp-runtime = { version = "2.0.0-rc6", path = "../../runtime" } +sp-api = { version = "2.0.0-rc6", path = "../../api" } +sp-application-crypto = { version = "2.0.0-rc6", path = "../" } diff --git a/primitives/arithmetic/Cargo.toml b/primitives/arithmetic/Cargo.toml index 83963d0c984..b4dd90736a2 100644 --- a/primitives/arithmetic/Cargo.toml +++ b/primitives/arithmetic/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-arithmetic" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -17,9 +17,9 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } integer-sqrt = "0.1.2" num-traits = { version = "0.2.8", default-features = false } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../std" } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-debug-derive = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/debug-derive" } +sp-debug-derive = { version = "2.0.0-rc6", default-features = false, path = 
"../../primitives/debug-derive" } [dev-dependencies] rand = "0.7.2" diff --git a/primitives/arithmetic/README.md b/primitives/arithmetic/README.md new file mode 100644 index 00000000000..e6e52c2a826 --- /dev/null +++ b/primitives/arithmetic/README.md @@ -0,0 +1,3 @@ +Minimal fixed point arithmetic primitives and types for runtime. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/arithmetic/fuzzer/Cargo.toml b/primitives/arithmetic/fuzzer/Cargo.toml index e82821aebaf..3da97b18433 100644 --- a/primitives/arithmetic/fuzzer/Cargo.toml +++ b/primitives/arithmetic/fuzzer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-arithmetic-fuzzer" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,7 +14,7 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-arithmetic = { version = "2.0.0-rc5", path = ".." } +sp-arithmetic = { version = "2.0.0-rc6", path = ".." } honggfuzz = "0.5.49" primitive-types = "0.7.0" num-bigint = "0.2" diff --git a/primitives/authority-discovery/Cargo.toml b/primitives/authority-discovery/Cargo.toml index 4bce99247fe..d201f6a70ac 100644 --- a/primitives/authority-discovery/Cargo.toml +++ b/primitives/authority-discovery/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-authority-discovery" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] description = "Authority discovery primitives" edition = "2018" @@ -12,11 +12,11 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../application-crypto" } +sp-application-crypto = { version = "2.0.0-rc6", default-features = false, path = "../application-crypto" } codec = { package = "parity-scale-codec", default-features = false, version = "1.3.1" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../api" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../runtime" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../std" } +sp-api = { version = "2.0.0-rc6", default-features = false, path = "../api" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../runtime" } [features] default = ["std"] diff --git a/primitives/authority-discovery/README.md b/primitives/authority-discovery/README.md new file mode 100644 index 00000000000..65c2e22dde0 --- /dev/null +++ b/primitives/authority-discovery/README.md @@ -0,0 +1,3 @@ +Runtime Api to help discover authorities. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/authorship/Cargo.toml b/primitives/authorship/Cargo.toml index 514bde6a553..a5a4977c696 100644 --- a/primitives/authorship/Cargo.toml +++ b/primitives/authorship/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-authorship" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] description = "Authorship primitives" edition = "2018" @@ -12,9 +12,9 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../inherents" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../runtime" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } +sp-inherents = { version = "2.0.0-rc6", default-features = false, path = "../inherents" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../runtime" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../std" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } [features] diff --git a/primitives/authorship/README.md b/primitives/authorship/README.md new file mode 100644 index 00000000000..1aa1805cfc5 --- /dev/null +++ b/primitives/authorship/README.md @@ -0,0 +1,3 @@ +Authorship Primitives + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/block-builder/Cargo.toml b/primitives/block-builder/Cargo.toml index f9a52b7505b..d6ac505c1b7 100644 --- a/primitives/block-builder/Cargo.toml +++ b/primitives/block-builder/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-block-builder" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,11 +12,11 @@ description = "The block builder runtime api." targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../runtime" } -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../api" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../runtime" } +sp-api = { version = "2.0.0-rc6", default-features = false, path = "../api" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../std" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../inherents" } +sp-inherents = { version = "2.0.0-rc6", default-features = false, path = "../inherents" } [features] default = [ "std" ] diff --git a/primitives/block-builder/README.md b/primitives/block-builder/README.md new file mode 100644 index 00000000000..433197d3be9 --- /dev/null +++ b/primitives/block-builder/README.md @@ -0,0 +1,3 @@ +The block builder runtime api. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/blockchain/Cargo.toml b/primitives/blockchain/Cargo.toml index 7d77ae2faa7..044130c08e5 100644 --- a/primitives/blockchain/Cargo.toml +++ b/primitives/blockchain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-blockchain" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -18,8 +18,8 @@ lru = "0.4.0" parking_lot = "0.10.0" derive_more = "0.99.2" codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-consensus = { version = "0.8.0-rc5", path = "../consensus/common" } -sp-runtime = { version = "2.0.0-rc5", path = "../runtime" } -sp-block-builder = { version = "2.0.0-rc5", path = "../block-builder" } -sp-state-machine = { version = "0.8.0-rc5", path = "../state-machine" } -sp-database = { version = "2.0.0-rc5", path = "../database" } +sp-consensus = { version = "0.8.0-rc6", path = "../consensus/common" } +sp-runtime = { version = "2.0.0-rc6", path = "../runtime" } +sp-block-builder = { version = "2.0.0-rc6", path = "../block-builder" } +sp-state-machine = { version = "0.8.0-rc6", path = "../state-machine" } +sp-database = { version = "2.0.0-rc6", path = "../database" } diff --git a/primitives/blockchain/README.md b/primitives/blockchain/README.md new file mode 100644 index 00000000000..8298bfd7ae6 --- /dev/null +++ b/primitives/blockchain/README.md @@ -0,0 +1,3 @@ +Substrate blockchain traits and primitives. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/chain-spec/Cargo.toml b/primitives/chain-spec/Cargo.toml index 20915d2f2fa..6abbf80a6db 100644 --- a/primitives/chain-spec/Cargo.toml +++ b/primitives/chain-spec/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-chain-spec" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/chain-spec/README.md b/primitives/chain-spec/README.md new file mode 100644 index 00000000000..375f14a441a --- /dev/null +++ b/primitives/chain-spec/README.md @@ -0,0 +1,3 @@ +Types and traits related to chain specifications. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/consensus/aura/Cargo.toml b/primitives/consensus/aura/Cargo.toml index 9ed9e840d1c..b708f34efa4 100644 --- a/primitives/consensus/aura/Cargo.toml +++ b/primitives/consensus/aura/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-aura" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] description = "Primitives for Aura consensus" edition = "2018" @@ -12,13 +12,13 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../../application-crypto" } +sp-application-crypto = { version = "2.0.0-rc6", default-features = false, path = "../../application-crypto" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../std" } -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../../api" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../runtime" } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../inherents" } -sp-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../../timestamp" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../std" } +sp-api = { version = "2.0.0-rc6", default-features = false, path = "../../api" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../runtime" } +sp-inherents = { version = "2.0.0-rc6", default-features = false, path = "../../inherents" } +sp-timestamp = { version = "2.0.0-rc6", default-features = false, path = "../../timestamp" } [features] default = ["std"] diff --git a/primitives/consensus/aura/README.md b/primitives/consensus/aura/README.md new file mode 100644 index 00000000000..0f360ae67eb --- /dev/null +++ b/primitives/consensus/aura/README.md @@ -0,0 +1,3 @@ +Primitives for Aura. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/consensus/babe/Cargo.toml b/primitives/consensus/babe/Cargo.toml index de540ebb106..e817a017cbe 100644 --- a/primitives/consensus/babe/Cargo.toml +++ b/primitives/consensus/babe/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-babe" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] description = "Primitives for BABE consensus" edition = "2018" @@ -12,18 +12,18 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../../application-crypto" } +sp-application-crypto = { version = "2.0.0-rc6", default-features = false, path = "../../application-crypto" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } merlin = { version = "2.0", default-features = false } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../std" } -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../../api" } -sp-consensus = { version = "0.8.0-rc5", optional = true, path = "../common" } -sp-consensus-slots = { version = "0.8.0-rc5", default-features = false, path = "../slots" } -sp-consensus-vrf = { version = "0.8.0-rc5", path = "../vrf", default-features = false } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../core" } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../inherents" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../runtime" } -sp-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../../timestamp" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../std" } +sp-api = { version = "2.0.0-rc6", default-features = false, path = "../../api" } +sp-consensus = { version = "0.8.0-rc6", optional = true, path = "../common" } +sp-consensus-slots = { version = "0.8.0-rc6", default-features = false, path = "../slots" } +sp-consensus-vrf = { version = "0.8.0-rc6", path = "../vrf", default-features = false } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../core" } +sp-inherents = { version = "2.0.0-rc6", default-features = false, path = "../../inherents" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../runtime" } +sp-timestamp = { version = "2.0.0-rc6", default-features = false, path = "../../timestamp" } [features] default = ["std"] diff --git a/primitives/consensus/babe/README.md b/primitives/consensus/babe/README.md new file mode 100644 index 00000000000..54bae05fd6d --- /dev/null +++ b/primitives/consensus/babe/README.md @@ -0,0 +1,3 @@ +Primitives for BABE. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index d8c5073274d..4c5a0197de2 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -17,25 +17,25 @@ targets = ["x86_64-unknown-linux-gnu"] derive_more = "0.99.2" libp2p = { version = "0.23.0", default-features = false } log = "0.4.8" -sp-core = { path= "../../core", version = "2.0.0-rc5"} -sp-inherents = { version = "2.0.0-rc5", path = "../../inherents" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../../primitives/state-machine" } +sp-core = { path= "../../core", version = "2.0.0-rc6"} +sp-inherents = { version = "2.0.0-rc6", path = "../../inherents" } +sp-state-machine = { version = "0.8.0-rc6", path = "../../../primitives/state-machine" } futures = { version = "0.3.1", features = ["thread-pool"] } futures-timer = "3.0.1" -sp-std = { version = "2.0.0-rc5", path = "../../std" } -sp-version = { version = "2.0.0-rc5", path = "../../version" } -sp-runtime = { version = "2.0.0-rc5", path = "../../runtime" } -sp-utils = { version = "2.0.0-rc5", path = "../../utils" } -sp-trie = { version = "2.0.0-rc5", path = "../../trie" } -sp-api = { version = "2.0.0-rc5", path = "../../api" } +sp-std = { version = "2.0.0-rc6", path = "../../std" } +sp-version = { version = "2.0.0-rc6", path = "../../version" } +sp-runtime = { version = "2.0.0-rc6", path = "../../runtime" } +sp-utils = { version = "2.0.0-rc6", path = "../../utils" } +sp-trie = { version = "2.0.0-rc6", path = "../../trie" } +sp-api = { version = "2.0.0-rc6", path = "../../api" } codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } parking_lot = "0.10.0" serde = { version = "1.0", features = ["derive"] } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc5"} +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc6"} wasm-timer = "0.2.4" [dev-dependencies] -sp-test-primitives = { version = "2.0.0-rc5", path = "../../test-primitives" } +sp-test-primitives = { version = "2.0.0-rc6", path = "../../test-primitives" } [features] default = [] diff --git a/primitives/consensus/common/README.md b/primitives/consensus/common/README.md new file mode 100644 index 00000000000..963bb0fbdba --- /dev/null +++ b/primitives/consensus/common/README.md @@ -0,0 +1,7 @@ +Common utilities for building and using consensus engines in substrate. + +Much of this crate is _unstable_ and thus the API is likely to undergo +change. Implementors of traits should not rely on the interfaces to remain +the same. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/consensus/pow/Cargo.toml b/primitives/consensus/pow/Cargo.toml index 598ff0ecb29..03376907a93 100644 --- a/primitives/consensus/pow/Cargo.toml +++ b/primitives/consensus/pow/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-pow" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] description = "Primitives for Aura consensus" edition = "2018" @@ -12,10 +12,10 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../../api" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../runtime" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../core" } +sp-api = { version = "2.0.0-rc6", default-features = false, path = "../../api" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../std" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../runtime" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../core" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } [features] diff --git a/primitives/consensus/pow/README.md b/primitives/consensus/pow/README.md new file mode 100644 index 00000000000..88186437764 --- /dev/null +++ b/primitives/consensus/pow/README.md @@ -0,0 +1,3 @@ +Primitives for Substrate Proof-of-Work (PoW) consensus. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/consensus/slots/Cargo.toml b/primitives/consensus/slots/Cargo.toml index ebf2be94a3f..ada913b645c 100644 --- a/primitives/consensus/slots/Cargo.toml +++ b/primitives/consensus/slots/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-slots" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] description = "Primitives for slots-based consensus" edition = "2018" diff --git a/primitives/consensus/slots/README.md b/primitives/consensus/slots/README.md new file mode 100644 index 00000000000..f451c32888a --- /dev/null +++ b/primitives/consensus/slots/README.md @@ -0,0 +1,3 @@ +Primitives for slots-based consensus engines. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/consensus/vrf/Cargo.toml b/primitives/consensus/vrf/Cargo.toml index b5934a8a263..7cf064e9f62 100644 --- a/primitives/consensus/vrf/Cargo.toml +++ b/primitives/consensus/vrf/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-vrf" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] description = "Primitives for VRF based consensus" edition = "2018" @@ -14,9 +14,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { version = "1.0.0", package = "parity-scale-codec", default-features = false } schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated", "u64_backend"], default-features = false } -sp-std = { version = "2.0.0-rc5", path = "../../std", default-features = false } -sp-core = { version = "2.0.0-rc5", path = "../../core", default-features = false } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../runtime" } +sp-std = { version = "2.0.0-rc6", path = "../../std", default-features = false } +sp-core = { version = "2.0.0-rc6", path = "../../core", default-features = false } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../runtime" } [features] default = ["std"] diff --git a/primitives/consensus/vrf/README.md b/primitives/consensus/vrf/README.md new file mode 100644 index 00000000000..d66490e023b --- /dev/null +++ b/primitives/consensus/vrf/README.md @@ -0,0 +1,3 @@ +Primitives for VRF-based consensus engines. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 6787efbd845..f74b0c2738e 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-core" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../std" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } log = { version = "0.4.8", default-features = false } serde = { version = "1.0.101", optional = true, features = ["derive"] } @@ -34,9 +34,9 @@ zeroize = { version = "1.0.0", default-features = false } secrecy = { version = "0.6.0", default-features = false } lazy_static = { version = "1.4.0", default-features = false, optional = true } parking_lot = { version = "0.10.0", optional = true } -sp-debug-derive = { version = "2.0.0-rc5", path = "../debug-derive" } -sp-externalities = { version = "0.8.0-rc5", optional = true, path = "../externalities" } -sp-storage = { version = "2.0.0-rc5", default-features = false, path = "../storage" } +sp-debug-derive = { version = "2.0.0-rc6", path = "../debug-derive" } +sp-externalities = { version = "0.8.0-rc6", optional = true, path = "../externalities" } +sp-storage = { version = "2.0.0-rc6", default-features = false, path = "../storage" } parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } futures = { version = "0.3.1", optional = true } dyn-clonable = { version = "0.9.0", optional = true } @@ -52,10 +52,10 @@ twox-hash = { version = "1.5.0", default-features = false, optional = true } libsecp256k1 = { version = "0.3.2", default-features = false, features = ["hmac"], 
optional = true } merlin = { version = "2.0", default-features = false, optional = true } -sp-runtime-interface = { version = "2.0.0-rc5", default-features = false, path = "../runtime-interface" } +sp-runtime-interface = { version = "2.0.0-rc6", default-features = false, path = "../runtime-interface" } [dev-dependencies] -sp-serializer = { version = "2.0.0-rc5", path = "../serializer" } +sp-serializer = { version = "2.0.0-rc6", path = "../serializer" } pretty_assertions = "0.6.1" hex-literal = "0.2.1" rand = "0.7.2" diff --git a/primitives/database/Cargo.toml b/primitives/database/Cargo.toml index dd9c5807875..da909ddc651 100644 --- a/primitives/database/Cargo.toml +++ b/primitives/database/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-database" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/database/README.md b/primitives/database/README.md new file mode 100644 index 00000000000..cd0677eb9eb --- /dev/null +++ b/primitives/database/README.md @@ -0,0 +1,3 @@ +The main database trait, allowing Substrate to store data persistently. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/debug-derive/Cargo.toml b/primitives/debug-derive/Cargo.toml index 38efaed1614..99481782693 100644 --- a/primitives/debug-derive/Cargo.toml +++ b/primitives/debug-derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-debug-derive" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/externalities/Cargo.toml b/primitives/externalities/Cargo.toml index 62a2413f335..17184ca6940 100644 --- a/primitives/externalities/Cargo.toml +++ b/primitives/externalities/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-externalities" -version = "0.8.0-rc5" +version = "0.8.0-rc6" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -13,7 +13,7 @@ documentation = "https://docs.rs/sp-externalities" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-storage = { version = "2.0.0-rc5", path = "../storage" } -sp-std = { version = "2.0.0-rc5", path = "../std" } +sp-storage = { version = "2.0.0-rc6", path = "../storage" } +sp-std = { version = "2.0.0-rc6", path = "../std" } environmental = { version = "1.1.1" } codec = { package = "parity-scale-codec", version = "1.3.1" } diff --git a/primitives/externalities/README.md b/primitives/externalities/README.md new file mode 100644 index 00000000000..3141b2609e6 --- /dev/null +++ b/primitives/externalities/README.md @@ -0,0 +1,9 @@ +Substrate externalities abstraction + +The externalities mainly provide access to storage and to registered extensions. Extensions +are for example the keystore or the offchain externalities. These externalities are used to +access the node from the runtime via the runtime interfaces. + +This crate exposes the main [`Externalities`] trait. 
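As a small, hedged illustration of that flow, the test externalities shipped with `sp-io` (behind its `std` feature) can stand in for a real node environment; storage access through the `sp_io::storage` runtime interface then resolves against whichever externalities instance is currently active:

```rust
// Minimal sketch, assuming the `sp-io` crate with its `std` feature enabled.
fn main() {
    // An in-memory externalities implementation intended for tests.
    let mut ext = sp_io::TestExternalities::default();

    // `execute_with` makes this instance the active externalities, so the
    // storage runtime-interface calls inside the closure have a backend.
    ext.execute_with(|| {
        sp_io::storage::set(b"key", b"value");
        assert_eq!(sp_io::storage::get(b"key"), Some(b"value".to_vec()));
    });
}
```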
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/finality-grandpa/Cargo.toml b/primitives/finality-grandpa/Cargo.toml index 8fc318df458..8309eccccb0 100644 --- a/primitives/finality-grandpa/Cargo.toml +++ b/primitives/finality-grandpa/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-finality-grandpa" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,15 +14,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../application-crypto" } +sp-application-crypto = { version = "2.0.0-rc6", default-features = false, path = "../application-crypto" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } grandpa = { package = "finality-grandpa", version = "0.12.3", default-features = false, features = ["derive-codec"] } log = { version = "0.4.8", optional = true } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../api" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../core" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../runtime" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } +sp-api = { version = "2.0.0-rc6", default-features = false, path = "../api" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../core" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../runtime" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../std" } [features] default = ["std"] diff --git a/primitives/finality-grandpa/README.md b/primitives/finality-grandpa/README.md new file mode 100644 index 00000000000..77a7abca2ee --- /dev/null +++ b/primitives/finality-grandpa/README.md @@ -0,0 +1,3 @@ +Primitives for GRANDPA integration, suitable for WASM compilation. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/finality-tracker/Cargo.toml b/primitives/finality-tracker/Cargo.toml index 67a3e836567..31db1e683a8 100644 --- a/primitives/finality-tracker/Cargo.toml +++ b/primitives/finality-tracker/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-finality-tracker" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,8 +13,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/inherents" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } +sp-inherents = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/inherents" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } [features] default = ["std"] diff --git a/primitives/finality-tracker/README.md b/primitives/finality-tracker/README.md new file mode 100644 index 00000000000..f9778e38a2b --- /dev/null +++ b/primitives/finality-tracker/README.md @@ -0,0 +1,3 @@ +FRAME module that tracks the last finalized block, as perceived by block authors. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/inherents/Cargo.toml b/primitives/inherents/Cargo.toml index fee8449a3b2..c6744925966 100644 --- a/primitives/inherents/Cargo.toml +++ b/primitives/inherents/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-inherents" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,8 +15,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] parking_lot = { version = "0.10.0", optional = true } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../core" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../std" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../core" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } derive_more = { version = "0.99.2", optional = true } diff --git a/primitives/inherents/README.md b/primitives/inherents/README.md new file mode 100644 index 00000000000..78aa625fe85 --- /dev/null +++ b/primitives/inherents/README.md @@ -0,0 +1,17 @@ +Provides types and traits for creating and checking inherents. + +Each inherent is added to a produced block. Each runtime decides on which inherents it +wants to attach to its blocks. All data that is required for the runtime to create the inherents +is stored in the `InherentData`. This `InherentData` is constructed by the node and given to +the runtime. + +Types that provide data for inherents, should implement `InherentDataProvider` and need to be +registered at `InherentDataProviders`. + +In the runtime, modules need to implement `ProvideInherent` when they can create and/or check +inherents. By implementing `ProvideInherent`, a module is not enforced to create an inherent. +A module can also just check given inherents. For using a module as inherent provider, it needs +to be registered by the `construct_runtime!` macro. The macro documentation gives more +information on how that is done. 
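As a small illustration of that flow, the node-side container can be exercised directly. The sketch below assumes only the `sp-inherents` crate; the identifier and the `u32` payload are made up for illustration, real providers and pallets define their own identifiers and data types.

```rust
use sp_inherents::{InherentData, InherentIdentifier};

// An illustrative 8-byte identifier, not one defined by Substrate.
const EXAMPLE_ID: InherentIdentifier = *b"example0";

fn main() {
    // The node assembles the data that the runtime will need...
    let mut data = InherentData::new();
    // `put_data` only fails if the same identifier is registered twice.
    assert!(data.put_data(EXAMPLE_ID, &42u32).is_ok());

    // ...and the runtime side (e.g. in `ProvideInherent::create_inherent`)
    // reads it back out, decoding it into the expected type.
    let value = data.get_data::<u32>(&EXAMPLE_ID).ok().flatten();
    assert_eq!(value, Some(42));
}
```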
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml index 04ee4efd97a..a08451db243 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-io" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -16,15 +16,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } hash-db = { version = "0.15.2", default-features = false } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../core" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../core" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../std" } libsecp256k1 = { version = "0.3.4", optional = true } -sp-state-machine = { version = "0.8.0-rc5", optional = true, path = "../../primitives/state-machine" } -sp-wasm-interface = { version = "2.0.0-rc5", path = "../../primitives/wasm-interface", default-features = false } -sp-runtime-interface = { version = "2.0.0-rc5", default-features = false, path = "../runtime-interface" } -sp-trie = { version = "2.0.0-rc5", optional = true, path = "../../primitives/trie" } -sp-externalities = { version = "0.8.0-rc5", optional = true, path = "../externalities" } -sp-tracing = { version = "2.0.0-rc5", default-features = false, path = "../tracing" } +sp-state-machine = { version = "0.8.0-rc6", optional = true, path = "../../primitives/state-machine" } +sp-wasm-interface = { version = "2.0.0-rc6", path = "../../primitives/wasm-interface", default-features = false } +sp-runtime-interface = { version = "2.0.0-rc6", default-features = false, path = "../runtime-interface" } +sp-trie = { version = "2.0.0-rc6", optional = true, path = "../../primitives/trie" } +sp-externalities = { version = "0.8.0-rc6", optional = true, path = "../externalities" } +sp-tracing = { version = "2.0.0-rc6", default-features = false, path = "../tracing" } log = { version = "0.4.8", optional = true } futures = { version = "0.3.1", features = ["thread-pool"], optional = true } parking_lot = { version = "0.10.0", optional = true } diff --git a/primitives/io/README.md b/primitives/io/README.md new file mode 100644 index 00000000000..a24370cc566 --- /dev/null +++ b/primitives/io/README.md @@ -0,0 +1,3 @@ +I/O host interface for substrate runtime. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/keyring/Cargo.toml b/primitives/keyring/Cargo.toml index 05ca4681332..e3634d9bb5f 100644 --- a/primitives/keyring/Cargo.toml +++ b/primitives/keyring/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-keyring" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { version = "2.0.0-rc5", path = "../core" } -sp-runtime = { version = "2.0.0-rc5", path = "../runtime" } +sp-core = { version = "2.0.0-rc6", path = "../core" } +sp-runtime = { version = "2.0.0-rc6", path = "../runtime" } lazy_static = "1.4.0" strum = { version = "0.16.0", features = ["derive"] } diff --git a/primitives/keyring/README.md b/primitives/keyring/README.md new file mode 100644 index 00000000000..1610f237df9 --- /dev/null +++ b/primitives/keyring/README.md @@ -0,0 +1,3 @@ +Support code for the runtime. 
A set of test accounts. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/npos-elections/Cargo.toml b/primitives/npos-elections/Cargo.toml index ff843865d67..26043df84f7 100644 --- a/primitives/npos-elections/Cargo.toml +++ b/primitives/npos-elections/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-npos-elections" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,14 +14,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } -sp-npos-elections-compact = { version = "2.0.0-rc5", path = "./compact" } -sp-arithmetic = { version = "2.0.0-rc5", default-features = false, path = "../arithmetic" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../std" } +sp-npos-elections-compact = { version = "2.0.0-rc6", path = "./compact" } +sp-arithmetic = { version = "2.0.0-rc6", default-features = false, path = "../arithmetic" } [dev-dependencies] -substrate-test-utils = { version = "2.0.0-rc5", path = "../../test-utils" } +substrate-test-utils = { version = "2.0.0-rc6", path = "../../test-utils" } rand = "0.7.3" -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } +sp-runtime = { version = "2.0.0-rc6", path = "../../primitives/runtime" } [features] default = ["std"] diff --git a/primitives/npos-elections/README.md b/primitives/npos-elections/README.md new file mode 100644 index 00000000000..a98351a6d89 --- /dev/null +++ b/primitives/npos-elections/README.md @@ -0,0 +1,12 @@ +A set of election algorithms to be used with a substrate runtime, typically within the staking +sub-system. Notable implementation include + +- [`seq_phragmen`]: Implements the Phragmén Sequential Method. An un-ranked, relatively fast + election method that ensures PJR, but does not provide a constant factor approximation of the + maximin problem. +- [`balance_solution`]: Implements the star balancing algorithm. This iterative process can + increase a solutions score, as described in [`evaluate_support`]. + +More information can be found at: https://arxiv.org/abs/2004.12990 + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/npos-elections/compact/Cargo.toml b/primitives/npos-elections/compact/Cargo.toml index 4abe79b77f0..7f55fe6bea1 100644 --- a/primitives/npos-elections/compact/Cargo.toml +++ b/primitives/npos-elections/compact/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-npos-elections-compact" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/npos-elections/fuzzer/Cargo.toml b/primitives/npos-elections/fuzzer/Cargo.toml index 7969a68c6bc..f0c9442aade 100644 --- a/primitives/npos-elections/fuzzer/Cargo.toml +++ b/primitives/npos-elections/fuzzer/Cargo.toml @@ -14,9 +14,9 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-npos-elections = { version = "2.0.0-rc5", path = ".." } -sp-std = { version = "2.0.0-rc5", path = "../../std" } -sp-runtime = { version = "2.0.0-rc5", path = "../../runtime" } +sp-npos-elections = { version = "2.0.0-rc6", path = ".." 
} +sp-std = { version = "2.0.0-rc6", path = "../../std" } +sp-runtime = { version = "2.0.0-rc6", path = "../../runtime" } honggfuzz = "0.5" rand = { version = "0.7.3", features = ["std", "small_rng"] } diff --git a/primitives/offchain/Cargo.toml b/primitives/offchain/Cargo.toml index a2738ab26e1..46c4f2144f9 100644 --- a/primitives/offchain/Cargo.toml +++ b/primitives/offchain/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Substrate offchain workers primitives" name = "sp-offchain" -version = "2.0.0-rc5" +version = "2.0.0-rc6" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -12,12 +12,12 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../core" } -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../api" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../runtime" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../core" } +sp-api = { version = "2.0.0-rc6", default-features = false, path = "../api" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../runtime" } [dev-dependencies] -sp-state-machine = { version = "0.8.0-rc5", default-features = false, path = "../state-machine" } +sp-state-machine = { version = "0.8.0-rc6", default-features = false, path = "../state-machine" } [features] default = ["std"] diff --git a/primitives/offchain/README.md b/primitives/offchain/README.md new file mode 100644 index 00000000000..a8620d3bb9d --- /dev/null +++ b/primitives/offchain/README.md @@ -0,0 +1,3 @@ +The Offchain Worker runtime api primitives. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/panic-handler/Cargo.toml b/primitives/panic-handler/Cargo.toml index b778c1106b8..eb0e3bd9a2a 100644 --- a/primitives/panic-handler/Cargo.toml +++ b/primitives/panic-handler/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-panic-handler" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/panic-handler/README.md b/primitives/panic-handler/README.md new file mode 100644 index 00000000000..c08396960f4 --- /dev/null +++ b/primitives/panic-handler/README.md @@ -0,0 +1,10 @@ +Custom panic hook with bug report link + +This crate provides the [`set`] function, which wraps around [`std::panic::set_hook`] and +sets up a panic hook that prints a backtrace and invites the user to open an issue to the +given URL. + +By default, the panic handler aborts the process by calling [`std::process::exit`]. This can +temporarily be disabled by using an [`AbortGuard`]. 
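As a rough illustration of how the hook described above is wired into a binary (the `set(bug_url, version)` argument order and the `AbortGuard::force_unwind` constructor are assumptions for this sketch, not taken from the patch):

```rust
// Illustrative sketch only; not part of this patch.
fn main() {
	// Install the hook: from now on a panic prints a backtrace and asks the user
	// to open an issue at the given URL.
	sp_panic_handler::set("https://github.com/paritytech/substrate/issues/new", "2.0.0-rc6");

	// While this guard is alive, panics unwind instead of aborting the process,
	// so they can be caught and handled locally.
	let _guard = sp_panic_handler::AbortGuard::force_unwind();
	let recovered = std::panic::catch_unwind(|| {
		// fallible work that is allowed to panic
	});
	assert!(recovered.is_ok());
}
```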
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/rpc/Cargo.toml b/primitives/rpc/Cargo.toml index c2211f6fb91..a524ccfe785 100644 --- a/primitives/rpc/Cargo.toml +++ b/primitives/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-rpc" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", features = ["derive"] } -sp-core = { version = "2.0.0-rc5", path = "../core" } +sp-core = { version = "2.0.0-rc6", path = "../core" } [dev-dependencies] serde_json = "1.0.41" diff --git a/primitives/rpc/README.md b/primitives/rpc/README.md new file mode 100644 index 00000000000..8a9c17edd47 --- /dev/null +++ b/primitives/rpc/README.md @@ -0,0 +1,3 @@ +Substrate RPC primitives and utilities. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/runtime-interface/Cargo.toml b/primitives/runtime-interface/Cargo.toml index f16000bff49..466e5eeccf5 100644 --- a/primitives/runtime-interface/Cargo.toml +++ b/primitives/runtime-interface/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime-interface" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,21 +13,21 @@ documentation = "https://docs.rs/sp-runtime-interface/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-wasm-interface = { version = "2.0.0-rc5", path = "../wasm-interface", default-features = false } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } -sp-tracing = { version = "2.0.0-rc5", default-features = false, path = "../tracing" } -sp-runtime-interface-proc-macro = { version = "2.0.0-rc5", path = "proc-macro" } -sp-externalities = { version = "0.8.0-rc5", optional = true, path = "../externalities" } +sp-wasm-interface = { version = "2.0.0-rc6", path = "../wasm-interface", default-features = false } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../std" } +sp-tracing = { version = "2.0.0-rc6", default-features = false, path = "../tracing" } +sp-runtime-interface-proc-macro = { version = "2.0.0-rc6", path = "proc-macro" } +sp-externalities = { version = "0.8.0-rc6", optional = true, path = "../externalities" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } static_assertions = "1.0.0" primitive-types = { version = "0.7.0", default-features = false } -sp-storage = { version = "2.0.0-rc5", default-features = false, path = "../storage" } +sp-storage = { version = "2.0.0-rc6", default-features = false, path = "../storage" } [dev-dependencies] -sp-runtime-interface-test-wasm = { version = "2.0.0-rc5", path = "test-wasm" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } -sp-core = { version = "2.0.0-rc5", path = "../core" } -sp-io = { version = "2.0.0-rc5", path = "../io" } +sp-runtime-interface-test-wasm = { version = "2.0.0-rc6", path = "test-wasm" } +sp-state-machine = { version = "0.8.0-rc6", path = "../../primitives/state-machine" } +sp-core = { version = "2.0.0-rc6", path = "../core" } +sp-io = { version = "2.0.0-rc6", path = "../io" } rustversion = "1.0.0" trybuild = "1.0.23" diff --git a/primitives/runtime-interface/README.md b/primitives/runtime-interface/README.md new file mode 100644 index 00000000000..666bfe4d5a8 --- /dev/null +++ b/primitives/runtime-interface/README.md @@ -0,0 +1,88 @@ +Substrate runtime interface 
+
+This crate provides types, traits and macros around runtime interfaces. A runtime interface is
+a fixed interface between a Substrate runtime and a Substrate node. For a native runtime the
+interface maps to a direct function call of the implementation. For a wasm runtime the interface
+maps to an external function call. These external functions are exported by the wasm executor
+and they map to the same implementation as the native calls.
+
+# Using a type in a runtime interface
+
+Any type that should be used in a runtime interface as argument or return value needs to
+implement [`RIType`]. The associated type [`FFIType`](RIType::FFIType) is the type that is used
+in the FFI function to represent the actual type. For example `[T]` is represented by a `u64`.
+The slice pointer and the length will be mapped to a `u64` value. For more information see
+this [table](#ffi-type-and-conversion). The FFI function definition is used when calling from
+the wasm runtime into the node.
+
+Traits are used to convert from a type to the corresponding [`RIType::FFIType`].
+Depending on where and how a type should be used in a function signature, a combination of the
+following traits needs to be implemented:
+
+1. Pass as function argument: [`wasm::IntoFFIValue`] and [`host::FromFFIValue`]
+2. As function return value: [`wasm::FromFFIValue`] and [`host::IntoFFIValue`]
+3. Pass as mutable function argument: [`host::IntoPreallocatedFFIValue`]
+
+The traits are implemented for most of the common types like `[T]`, `Vec<T>`, arrays and
+primitive types.
+
+For custom types, we provide the [`PassBy`](pass_by::PassBy) trait and strategies that define
+how a type is passed between the wasm runtime and the node. Each strategy also provides a derive
+macro to simplify the implementation.
+
+# Performance
+
+To avoid wasting performance when calling into the node, not all types are SCALE encoded when
+they are passed as arguments between the wasm runtime and the node. Most types that are raw
+bytes, like `Vec<u8>`, `[u8]` or `[u8; N]`, are passed directly without being SCALE encoded
+first. The implementation of [`RIType`] for each type provides more information on how the data
+is passed.
+
+# Declaring a runtime interface
+
+Declaring a runtime interface is similar to declaring a trait in Rust:
+
+```rust
+#[sp_runtime_interface::runtime_interface]
+trait RuntimeInterface {
+	fn some_function(value: &[u8]) -> bool {
+		value.iter().all(|v| *v > 125)
+	}
+}
+```
+
+For more information on declaring a runtime interface, see
+[`#[runtime_interface]`](attr.runtime_interface.html).
+
+# FFI type and conversion
+
+The following table documents how values of types are passed between the wasm and
+the host side and how they are converted into the corresponding type.
+
+| Type | FFI type | Conversion |
+|----|----|----|
+| `u8` | `u8` | `Identity` |
+| `u16` | `u16` | `Identity` |
+| `u32` | `u32` | `Identity` |
+| `u64` | `u64` | `Identity` |
+| `i128` | `u32` | `v.as_ptr()` (pointer to a 16 byte array) |
+| `i8` | `i8` | `Identity` |
+| `i16` | `i16` | `Identity` |
+| `i32` | `i32` | `Identity` |
+| `i64` | `i64` | `Identity` |
+| `u128` | `u32` | `v.as_ptr()` (pointer to a 16 byte array) |
+| `bool` | `u8` | `if v { 1 } else { 0 }` |
+| `&str` | `u64` | v.len() 32bit << 32 | v.as_ptr() 32bit |
+| `&[u8]` | `u64` | v.len() 32bit << 32 | v.as_ptr() 32bit |
+| `Vec<u8>` | `u64` | v.len() 32bit << 32 | v.as_ptr() 32bit |
+| `Vec<T> where T: Encode` | `u64` | `let e = v.encode();`<br/><br/>e.len() 32bit << 32 | e.as_ptr() 32bit |
+| `&[T] where T: Encode` | `u64` | `let e = v.encode();`<br/><br/>e.len() 32bit << 32 | e.as_ptr() 32bit |
+| `[u8; N]` | `u32` | `v.as_ptr()` |
+| `*const T` | `u32` | `Identity` |
+| `Option<T>` | `u64` | `let e = v.encode();`<br/><br/>e.len() 32bit << 32 | e.as_ptr() 32bit |
+| [`T where T: PassBy`](pass_by::Inner) | Depends on inner | Depends on inner |
+| [`T where T: PassBy`](pass_by::Codec) | `u64` | v.len() 32bit << 32 | v.as_ptr() 32bit |
+
+`Identity` means that the value is converted directly into the corresponding FFI type.
+
+License: Apache-2.0
\ No newline at end of file diff --git a/primitives/runtime-interface/proc-macro/Cargo.toml b/primitives/runtime-interface/proc-macro/Cargo.toml index 96a88247180..006e8ec6c46 100644 --- a/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/primitives/runtime-interface/proc-macro/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime-interface-proc-macro" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0"
diff --git a/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml b/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml index bd4f3f97ec7..ff86713c543 100644 --- a/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml +++ b/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime-interface-test-wasm-deprecated" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -13,10 +13,10 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-runtime-interface = { version = "2.0.0-rc5", default-features = false, path = "../" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../io" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../core" } +sp-runtime-interface = { version = "2.0.0-rc6", default-features = false, path = "../" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../std" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../io" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../core" } [build-dependencies] wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" }
diff --git a/primitives/runtime-interface/test-wasm/Cargo.toml b/primitives/runtime-interface/test-wasm/Cargo.toml index a1756883ded..bfe2016ea51 100644 --- a/primitives/runtime-interface/test-wasm/Cargo.toml +++ b/primitives/runtime-interface/test-wasm/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime-interface-test-wasm" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -13,10 +13,10 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-runtime-interface = { version = "2.0.0-rc5", default-features = false, path = "../" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../io" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../core" } +sp-runtime-interface = { version = "2.0.0-rc6", default-features = false, path = "../" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../std" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../io" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../core" } [build-dependencies] wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = 
"../../../utils/wasm-builder-runner" } diff --git a/primitives/runtime-interface/test/Cargo.toml b/primitives/runtime-interface/test/Cargo.toml index 48dbeedbdad..39a48d10b14 100644 --- a/primitives/runtime-interface/test/Cargo.toml +++ b/primitives/runtime-interface/test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime-interface-test" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,12 +12,12 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-runtime-interface = { version = "2.0.0-rc5", path = "../" } -sc-executor = { version = "0.8.0-rc5", path = "../../../client/executor" } -sp-runtime-interface-test-wasm = { version = "2.0.0-rc5", path = "../test-wasm" } -sp-runtime-interface-test-wasm-deprecated = { version = "2.0.0-rc5", path = "../test-wasm-deprecated" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../../primitives/state-machine" } -sp-runtime = { version = "2.0.0-rc5", path = "../../runtime" } -sp-core = { version = "2.0.0-rc5", path = "../../core" } -sp-io = { version = "2.0.0-rc5", path = "../../io" } +sp-runtime-interface = { version = "2.0.0-rc6", path = "../" } +sc-executor = { version = "0.8.0-rc6", path = "../../../client/executor" } +sp-runtime-interface-test-wasm = { version = "2.0.0-rc6", path = "../test-wasm" } +sp-runtime-interface-test-wasm-deprecated = { version = "2.0.0-rc6", path = "../test-wasm-deprecated" } +sp-state-machine = { version = "0.8.0-rc6", path = "../../../primitives/state-machine" } +sp-runtime = { version = "2.0.0-rc6", path = "../../runtime" } +sp-core = { version = "2.0.0-rc6", path = "../../core" } +sp-io = { version = "2.0.0-rc6", path = "../../io" } tracing = "0.1.18" diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index 78c0bbcea8f..f47b3605205 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -16,16 +16,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../core" } -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../application-crypto" } -sp-arithmetic = { version = "2.0.0-rc5", default-features = false, path = "../arithmetic" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../io" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../core" } +sp-application-crypto = { version = "2.0.0-rc6", default-features = false, path = "../application-crypto" } +sp-arithmetic = { version = "2.0.0-rc6", default-features = false, path = "../arithmetic" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../std" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../io" } log = { version = "0.4.8", optional = true } paste = "0.1.6" rand = { version = "0.7.2", optional = true } impl-trait-for-tuples = "0.1.3" -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../inherents" } +sp-inherents = { version = 
"2.0.0-rc6", default-features = false, path = "../inherents" } parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } hash256-std-hasher = { version = "0.15.2", default-features = false } either = { version = "1.5", default-features = false } @@ -33,7 +33,7 @@ either = { version = "1.5", default-features = false } [dev-dependencies] serde_json = "1.0.41" rand = "0.7.2" -sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.8.0-rc6", path = "../../primitives/state-machine" } [features] bench = [] diff --git a/primitives/runtime/README.md b/primitives/runtime/README.md new file mode 100644 index 00000000000..1515cd8e296 --- /dev/null +++ b/primitives/runtime/README.md @@ -0,0 +1,3 @@ +Runtime Modules shared primitive types. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/sandbox/Cargo.toml b/primitives/sandbox/Cargo.toml index ca5cfa4fdc3..98376c77464 100755 --- a/primitives/sandbox/Cargo.toml +++ b/primitives/sandbox/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-sandbox" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,10 +13,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] wasmi = { version = "0.6.2", optional = true } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../core" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../io" } -sp-wasm-interface = { version = "2.0.0-rc5", default-features = false, path = "../wasm-interface" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../core" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../std" } +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../io" } +sp-wasm-interface = { version = "2.0.0-rc6", default-features = false, path = "../wasm-interface" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } [dev-dependencies] diff --git a/primitives/sandbox/README.md b/primitives/sandbox/README.md new file mode 100644 index 00000000000..9335b53ae1f --- /dev/null +++ b/primitives/sandbox/README.md @@ -0,0 +1,21 @@ +This crate provides means to instantiate and execute wasm modules. + +It works even when the user of this library executes from +inside the wasm VM. In this case the same VM is used for execution +of both the sandbox owner and the sandboxed module, without compromising security +and without the performance penalty of full wasm emulation inside wasm. + +This is achieved by using bindings to the wasm VM, which are published by the host API. +This API is thin and consists of only a handful functions. It contains functions for instantiating +modules and executing them, but doesn't contain functions for inspecting the module +structure. The user of this library is supposed to read the wasm module. + +When this crate is used in the `std` environment all these functions are implemented by directly +calling the wasm VM. 
+ +Examples of possible use-cases for this library are not limited to the following: + +- implementing smart-contract runtimes that use wasm for contract code +- executing a wasm substrate runtime inside of a wasm parachain + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/serializer/Cargo.toml b/primitives/serializer/Cargo.toml index 05b8cc74f2c..5fcaf9fe87f 100644 --- a/primitives/serializer/Cargo.toml +++ b/primitives/serializer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-serializer" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/serializer/README.md b/primitives/serializer/README.md new file mode 100644 index 00000000000..083a0857cc0 --- /dev/null +++ b/primitives/serializer/README.md @@ -0,0 +1,6 @@ +Substrate customizable serde serializer. + +The idea is that we can later change the implementation +to something more compact, but for now we're using JSON. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/session/Cargo.toml b/primitives/session/Cargo.toml index 320e6adc1c7..b8bad3ed8da 100644 --- a/primitives/session/Cargo.toml +++ b/primitives/session/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-session" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,11 +13,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../api" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../core" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } -sp-staking = { version = "2.0.0-rc5", default-features = false, path = "../staking" } -sp-runtime = { version = "2.0.0-rc5", optional = true, path = "../runtime" } +sp-api = { version = "2.0.0-rc6", default-features = false, path = "../api" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../core" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../std" } +sp-staking = { version = "2.0.0-rc6", default-features = false, path = "../staking" } +sp-runtime = { version = "2.0.0-rc6", optional = true, path = "../runtime" } [features] default = [ "std" ] diff --git a/primitives/session/README.md b/primitives/session/README.md new file mode 100644 index 00000000000..2d1f9d9bc1d --- /dev/null +++ b/primitives/session/README.md @@ -0,0 +1,3 @@ +Substrate core types around sessions. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/staking/Cargo.toml b/primitives/staking/Cargo.toml index bb6ef06f34d..8b324ca6bdb 100644 --- a/primitives/staking/Cargo.toml +++ b/primitives/staking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-staking" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,8 +13,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../runtime" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../runtime" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../std" } [features] default = ["std"] diff --git a/primitives/staking/README.md b/primitives/staking/README.md new file mode 100644 index 00000000000..892e1379d9a --- /dev/null +++ b/primitives/staking/README.md @@ -0,0 +1,4 @@ +A crate which contains primitives that are useful for implementation that uses staking +approaches in general. Definitions related to sessions, slashing, etc go here. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index 781d3b068a8..c563fa5384d 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-state-machine" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] description = "Substrate State Machine" edition = "2018" @@ -18,19 +18,19 @@ parking_lot = "0.10.0" hash-db = "0.15.2" trie-db = "0.22.0" trie-root = "0.16.0" -sp-trie = { version = "2.0.0-rc5", path = "../trie" } -sp-core = { version = "2.0.0-rc5", path = "../core" } -sp-panic-handler = { version = "2.0.0-rc5", path = "../panic-handler" } +sp-trie = { version = "2.0.0-rc6", path = "../trie" } +sp-core = { version = "2.0.0-rc6", path = "../core" } +sp-panic-handler = { version = "2.0.0-rc6", path = "../panic-handler" } codec = { package = "parity-scale-codec", version = "1.3.1" } num-traits = "0.2.8" rand = "0.7.2" -sp-externalities = { version = "0.8.0-rc5", path = "../externalities" } +sp-externalities = { version = "0.8.0-rc6", path = "../externalities" } itertools = "0.9" smallvec = "1.4.1" [dev-dependencies] hex-literal = "0.2.1" -sp-runtime = { version = "2.0.0-rc5", path = "../runtime" } +sp-runtime = { version = "2.0.0-rc6", path = "../runtime" } pretty_assertions = "0.6.1" [features] diff --git a/primitives/state-machine/README.md b/primitives/state-machine/README.md new file mode 100644 index 00000000000..aa244da62d5 --- /dev/null +++ b/primitives/state-machine/README.md @@ -0,0 +1,3 @@ +Substrate state machine implementation. 
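For orientation, a minimal sketch of how this state machine is usually exercised from tests, assuming the familiar `sp_io::TestExternalities` alias that is backed by this crate:

```rust
// Illustrative sketch only; assumes sp-io's test helpers built on sp-state-machine.
fn storage_roundtrip() {
	// An in-memory externalities backend provided by sp-state-machine.
	let mut ext = sp_io::TestExternalities::default();

	// Code inside `execute_with` runs with that backend as the ambient state.
	ext.execute_with(|| {
		sp_io::storage::set(b"answer", &42u32.to_le_bytes());
		assert!(sp_io::storage::get(b"answer").is_some());
	});
}
```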
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/std/Cargo.toml b/primitives/std/Cargo.toml index a43d1997f7f..1e788c43d5d 100644 --- a/primitives/std/Cargo.toml +++ b/primitives/std/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-std" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/std/README.md b/primitives/std/README.md new file mode 100644 index 00000000000..6dddd8fbbdd --- /dev/null +++ b/primitives/std/README.md @@ -0,0 +1,4 @@ +Lowest-abstraction level for the Substrate runtime: just exports useful primitives from std +or client/alloc to be used with any code that depends on the runtime. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index 46d76fd7d28..9f86736518f 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-storage" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" description = "Storage related primitives" @@ -13,11 +13,11 @@ documentation = "https://docs.rs/sp-storage/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../std" } serde = { version = "1.0.101", optional = true, features = ["derive"] } impl-serde = { version = "0.2.3", optional = true } ref-cast = "1.0.0" -sp-debug-derive = { version = "2.0.0-rc5", path = "../debug-derive" } +sp-debug-derive = { version = "2.0.0-rc6", path = "../debug-derive" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } [features] diff --git a/primitives/storage/README.md b/primitives/storage/README.md new file mode 100644 index 00000000000..c33144fc4f6 --- /dev/null +++ b/primitives/storage/README.md @@ -0,0 +1,3 @@ +Primitive types for storage related stuff. 
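As a small illustration of these primitives (not part of the patch), a typed key for the well-known `:code` entry can be built like this:

```rust
// Illustrative sketch only.
use sp_storage::{well_known_keys, StorageKey};

/// Storage key under which the runtime wasm blob is kept.
fn runtime_code_key() -> StorageKey {
	StorageKey(well_known_keys::CODE.to_vec())
}
```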
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/test-primitives/Cargo.toml b/primitives/test-primitives/Cargo.toml index 1101dd9ccc6..668a12aeca5 100644 --- a/primitives/test-primitives/Cargo.toml +++ b/primitives/test-primitives/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-test-primitives" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,11 +12,11 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../application-crypto" } +sp-application-crypto = { version = "2.0.0-rc6", default-features = false, path = "../application-crypto" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../core" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../core" } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../runtime" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../runtime" } parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } [features] diff --git a/primitives/timestamp/Cargo.toml b/primitives/timestamp/Cargo.toml index 794729f7754..deaa44ff39e 100644 --- a/primitives/timestamp/Cargo.toml +++ b/primitives/timestamp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-timestamp" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,11 +12,11 @@ description = "Substrate core types and inherents for timestamps." targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../api" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../runtime" } +sp-api = { version = "2.0.0-rc6", default-features = false, path = "../api" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../std" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../runtime" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../inherents" } +sp-inherents = { version = "2.0.0-rc6", default-features = false, path = "../inherents" } impl-trait-for-tuples = "0.1.3" wasm-timer = { version = "0.2", optional = true } diff --git a/primitives/timestamp/README.md b/primitives/timestamp/README.md new file mode 100644 index 00000000000..a61a776912c --- /dev/null +++ b/primitives/timestamp/README.md @@ -0,0 +1,3 @@ +Substrate core types and inherents for timestamps. 
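For example (illustrative only, assuming the constant is exported as shown), the timestamp inherent data produced by this crate is keyed by an 8-byte identifier:

```rust
// Illustrative sketch only.
use sp_timestamp::INHERENT_IDENTIFIER;

fn main() {
	// Timestamp inherent data is stored under this 8-byte key.
	assert_eq!(&INHERENT_IDENTIFIER, b"timstap0");
}
```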
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/tracing/Cargo.toml b/primitives/tracing/Cargo.toml index 03bec79685e..13603947567 100644 --- a/primitives/tracing/Cargo.toml +++ b/primitives/tracing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-tracing" -version = "2.0.0-rc5" +version = "2.0.0-rc6" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2018" diff --git a/primitives/tracing/README.md b/primitives/tracing/README.md new file mode 100644 index 00000000000..d621a23ee3e --- /dev/null +++ b/primitives/tracing/README.md @@ -0,0 +1,15 @@ +Substrate tracing primitives and macros. + +To trace functions or invidual code in Substrate, this crate provides [`tracing_span`] +and [`enter_span`]. See the individual docs for how to use these macros. + +Note that to allow traces from wasm execution environment there are +2 reserved identifiers for tracing `Field` recording, stored in the consts: +`WASM_TARGET_KEY` and `WASM_NAME_KEY` - if you choose to record fields, you +must ensure that your identifiers do not clash with either of these. + +Additionally, we have a const: `WASM_TRACE_IDENTIFIER`, which holds a span name used +to signal that the 'actual' span name and target should be retrieved instead from +the associated Fields mentioned above. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/transaction-pool/Cargo.toml b/primitives/transaction-pool/Cargo.toml index 344f491e55f..9ec79ee66b4 100644 --- a/primitives/transaction-pool/Cargo.toml +++ b/primitives/transaction-pool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-transaction-pool" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -18,9 +18,9 @@ derive_more = { version = "0.99.2", optional = true } futures = { version = "0.3.1", optional = true } log = { version = "0.4.8", optional = true } serde = { version = "1.0.101", features = ["derive"], optional = true} -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../api" } -sp-blockchain = { version = "2.0.0-rc5", optional = true, path = "../blockchain" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../runtime" } +sp-api = { version = "2.0.0-rc6", default-features = false, path = "../api" } +sp-blockchain = { version = "2.0.0-rc6", optional = true, path = "../blockchain" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../runtime" } [features] default = [ "std" ] diff --git a/primitives/transaction-pool/README.md b/primitives/transaction-pool/README.md new file mode 100644 index 00000000000..417565ebfce --- /dev/null +++ b/primitives/transaction-pool/README.md @@ -0,0 +1,3 @@ +Transaction pool primitives types & Runtime API. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 8dd386e0951..3ebe53c680a 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-trie" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] description = "Patricia trie stuff using a parity-scale-codec node format" repository = "https://github.com/paritytech/substrate/" @@ -18,19 +18,19 @@ harness = false [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../std" } hash-db = { version = "0.15.2", default-features = false } trie-db = { version = "0.22.0", default-features = false } trie-root = { version = "0.16.0", default-features = false } memory-db = { version = "0.24.0", default-features = false } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../core" } +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../core" } [dev-dependencies] trie-bench = "0.24.0" trie-standardmap = "0.15.2" criterion = "0.2.11" hex-literal = "0.2.1" -sp-runtime = { version = "2.0.0-rc5", path = "../runtime" } +sp-runtime = { version = "2.0.0-rc6", path = "../runtime" } [features] default = ["std"] diff --git a/primitives/trie/README.md b/primitives/trie/README.md new file mode 100644 index 00000000000..634ba4bdead --- /dev/null +++ b/primitives/trie/README.md @@ -0,0 +1,3 @@ +Utility functions to interact with Substrate's Base-16 Modified Merkle Patricia tree ("trie"). + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/utils/Cargo.toml b/primitives/utils/Cargo.toml index a554a44ce44..b21dba40a9d 100644 --- a/primitives/utils/Cargo.toml +++ b/primitives/utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-utils" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/utils/README.md b/primitives/utils/README.md new file mode 100644 index 00000000000..b0e04a3f4f1 --- /dev/null +++ b/primitives/utils/README.md @@ -0,0 +1,3 @@ +Utilities Primitives for Substrate + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/version/Cargo.toml b/primitives/version/Cargo.toml index fb5bcaed77c..8dbf2000f0c 100644 --- a/primitives/version/Cargo.toml +++ b/primitives/version/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-version" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -17,8 +17,8 @@ targets = ["x86_64-unknown-linux-gnu"] impl-serde = { version = "0.2.3", optional = true } serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../runtime" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../std" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../runtime" } [features] default = ["std"] diff --git a/primitives/version/README.md b/primitives/version/README.md new file mode 100644 index 00000000000..84f0ae57d9d --- /dev/null +++ 
b/primitives/version/README.md @@ -0,0 +1,3 @@ +Version module for the Substrate runtime; Provides a function that returns the runtime version. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/wasm-interface/Cargo.toml b/primitives/wasm-interface/Cargo.toml index 00fbaf5f713..e4ce84eaf0e 100644 --- a/primitives/wasm-interface/Cargo.toml +++ b/primitives/wasm-interface/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-wasm-interface" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] wasmi = { version = "0.6.2", optional = true } impl-trait-for-tuples = "0.1.2" -sp-std = { version = "2.0.0-rc5", path = "../std", default-features = false } +sp-std = { version = "2.0.0-rc6", path = "../std", default-features = false } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } [features] diff --git a/primitives/wasm-interface/README.md b/primitives/wasm-interface/README.md new file mode 100644 index 00000000000..7e6c46581ae --- /dev/null +++ b/primitives/wasm-interface/README.md @@ -0,0 +1,3 @@ +Types and traits for interfacing between the host and the wasm runtime. + +License: Apache-2.0 \ No newline at end of file diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index 3b2a3702430..92bc9c71db5 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -1,20 +1,21 @@ [package] name = "substrate-test-utils" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +description = "Substrate test utilities" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = { version = "0.3.1", features = ["compat"] } -substrate-test-utils-derive = { path = "./derive" } +substrate-test-utils-derive = { version = "0.8.0-rc6", path = "./derive" } tokio = { version = "0.2.13", features = ["macros"] } [dev-dependencies] -sc-service = { path = "../client/service" } +sc-service = { version = "0.8.0-rc6", path = "../client/service" } trybuild = { version = "1.0", features = ["diff"] } diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index 5e97be2e0b3..29f5acd5b38 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-test-client" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -19,15 +19,15 @@ hash-db = "0.15.2" hex = "0.4" serde = "1.0.55" serde_json = "1.0.55" -sc-client-api = { version = "2.0.0-rc5", path = "../../client/api" } -sc-client-db = { version = "0.8.0-rc5", features = ["test-helpers"], path = "../../client/db" } -sc-consensus = { version = "0.8.0-rc5", path = "../../client/consensus/common" } -sc-executor = { version = "0.8.0-rc5", path = "../../client/executor" } -sc-light = { version = "2.0.0-rc5", path = "../../client/light" } -sc-service = { version = "0.8.0-rc5", default-features = false, features = ["test-helpers"], path = "../../client/service" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } -sp-consensus = { version = "0.8.0-rc5", path = "../../primitives/consensus/common" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-keyring = { version = "2.0.0-rc5", 
path = "../../primitives/keyring" } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } +sc-client-api = { version = "2.0.0-rc6", path = "../../client/api" } +sc-client-db = { version = "0.8.0-rc6", features = ["test-helpers"], path = "../../client/db" } +sc-consensus = { version = "0.8.0-rc6", path = "../../client/consensus/common" } +sc-executor = { version = "0.8.0-rc6", path = "../../client/executor" } +sc-light = { version = "2.0.0-rc6", path = "../../client/light" } +sc-service = { version = "0.8.0-rc6", default-features = false, features = ["test-helpers"], path = "../../client/service" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../primitives/blockchain" } +sp-consensus = { version = "0.8.0-rc6", path = "../../primitives/consensus/common" } +sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } +sp-keyring = { version = "2.0.0-rc6", path = "../../primitives/keyring" } +sp-runtime = { version = "2.0.0-rc6", path = "../../primitives/runtime" } +sp-state-machine = { version = "0.8.0-rc6", path = "../../primitives/state-machine" } diff --git a/test-utils/derive/Cargo.toml b/test-utils/derive/Cargo.toml index 5ec3e10108c..e9dcc586c50 100644 --- a/test-utils/derive/Cargo.toml +++ b/test-utils/derive/Cargo.toml @@ -1,11 +1,12 @@ [package] name = "substrate-test-utils-derive" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +description = "Substrate test utilities macros" [dependencies] quote = "1.0.6" diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 08e1b955ab4..6b354f5f6e9 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-test-runtime" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -13,35 +13,35 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/application-crypto" } -sp-consensus-aura = { version = "0.8.0-rc5", default-features = false, path = "../../primitives/consensus/aura" } -sp-consensus-babe = { version = "0.8.0-rc5", default-features = false, path = "../../primitives/consensus/babe" } -sp-block-builder = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/block-builder" } +sp-application-crypto = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/application-crypto" } +sp-consensus-aura = { version = "0.8.0-rc6", default-features = false, path = "../../primitives/consensus/aura" } +sp-consensus-babe = { version = "0.8.0-rc6", default-features = false, path = "../../primitives/consensus/babe" } +sp-block-builder = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/block-builder" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -frame-executive = { version = "2.0.0-rc5", default-features = false, path = "../../frame/executive" } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/inherents" } -sp-keyring = { version = "2.0.0-rc5", optional = true, path = "../../primitives/keyring" } +frame-executive = { version = 
"2.0.0-rc6", default-features = false, path = "../../frame/executive" } +sp-inherents = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/inherents" } +sp-keyring = { version = "2.0.0-rc6", optional = true, path = "../../primitives/keyring" } memory-db = { version = "0.24.0", default-features = false } -sp-offchain = { path = "../../primitives/offchain", default-features = false, version = "2.0.0-rc5"} -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-runtime-interface = { path = "../../primitives/runtime-interface", default-features = false, version = "2.0.0-rc5"} -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../../frame/support" } -sp-version = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/version" } -sp-session = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/session" } -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/api" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -pallet-babe = { version = "2.0.0-rc5", default-features = false, path = "../../frame/babe" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../../frame/system" } -frame-system-rpc-runtime-api = { version = "2.0.0-rc5", default-features = false, path = "../../frame/system/rpc/runtime-api" } -pallet-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../../frame/timestamp" } -sp-finality-grandpa = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/finality-grandpa" } -sp-trie = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/trie" } -sp-transaction-pool = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/transaction-pool" } +sp-offchain = { path = "../../primitives/offchain", default-features = false, version = "2.0.0-rc6"} +sp-core = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/std" } +sp-runtime-interface = { path = "../../primitives/runtime-interface", default-features = false, version = "2.0.0-rc6"} +sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" } +frame-support = { version = "2.0.0-rc6", default-features = false, path = "../../frame/support" } +sp-version = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/version" } +sp-session = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/session" } +sp-api = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/api" } +sp-runtime = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/runtime" } +pallet-babe = { version = "2.0.0-rc6", default-features = false, path = "../../frame/babe" } +frame-system = { version = "2.0.0-rc6", default-features = false, path = "../../frame/system" } +frame-system-rpc-runtime-api = { version = "2.0.0-rc6", default-features = false, path = "../../frame/system/rpc/runtime-api" } +pallet-timestamp = { version = "2.0.0-rc6", default-features = false, path = "../../frame/timestamp" } +sp-finality-grandpa = { version = "2.0.0-rc6", 
default-features = false, path = "../../primitives/finality-grandpa" } +sp-trie = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/trie" } +sp-transaction-pool = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/transaction-pool" } trie-db = { version = "0.22.0", default-features = false } parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } -sc-service = { version = "0.8.0-rc5", default-features = false, optional = true, features = ["test-helpers"], path = "../../client/service" } +sc-service = { version = "0.8.0-rc6", default-features = false, optional = true, features = ["test-helpers"], path = "../../client/service" } # 3rd party cfg-if = "0.1.10" @@ -49,10 +49,10 @@ log = { version = "0.4.8", optional = true } serde = { version = "1.0.101", optional = true, features = ["derive"] } [dev-dependencies] -sc-block-builder = { version = "0.8.0-rc5", path = "../../client/block-builder" } -sc-executor = { version = "0.8.0-rc5", path = "../../client/executor" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "./client" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } +sc-block-builder = { version = "0.8.0-rc6", path = "../../client/block-builder" } +sc-executor = { version = "0.8.0-rc6", path = "../../client/executor" } +substrate-test-runtime-client = { version = "2.0.0-rc6", path = "./client" } +sp-state-machine = { version = "0.8.0-rc6", path = "../../primitives/state-machine" } [build-dependencies] wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../utils/wasm-builder-runner" } diff --git a/test-utils/runtime/client/Cargo.toml b/test-utils/runtime/client/Cargo.toml index c67ceb72010..3406ca6f95c 100644 --- a/test-utils/runtime/client/Cargo.toml +++ b/test-utils/runtime/client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-test-runtime-client" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,17 +12,17 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-light = { version = "2.0.0-rc5", path = "../../../client/light" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -sc-block-builder = { version = "0.8.0-rc5", path = "../../../client/block-builder" } -substrate-test-client = { version = "2.0.0-rc5", path = "../../client" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -substrate-test-runtime = { version = "2.0.0-rc5", path = "../../runtime" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0-rc5", path = "../../../primitives/api" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } +sc-light = { version = "2.0.0-rc6", path = "../../../client/light" } +sp-consensus = { version = "0.8.0-rc6", path = "../../../primitives/consensus/common" } +sc-block-builder = { version = "0.8.0-rc6", path = "../../../client/block-builder" } +substrate-test-client = { version = "2.0.0-rc6", path = "../../client" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } +substrate-test-runtime = { version = "2.0.0-rc6", path = "../../runtime" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../primitives/runtime" } +sp-api = { version = "2.0.0-rc6", path = "../../../primitives/api" } +sp-blockchain = { version = "2.0.0-rc6", 
path = "../../../primitives/blockchain" } codec = { package = "parity-scale-codec", version = "1.3.1" } -sc-client-api = { version = "2.0.0-rc5", path = "../../../client/api" } -sc-consensus = { version = "0.8.0-rc5", path = "../../../client/consensus/common" } -sc-service = { version = "0.8.0-rc5", default-features = false, path = "../../../client/service" } +sc-client-api = { version = "2.0.0-rc6", path = "../../../client/api" } +sc-consensus = { version = "0.8.0-rc6", path = "../../../client/consensus/common" } +sc-service = { version = "0.8.0-rc6", default-features = false, path = "../../../client/service" } futures = "0.3.4" diff --git a/test-utils/runtime/transaction-pool/Cargo.toml b/test-utils/runtime/transaction-pool/Cargo.toml index da4d2d592de..ee0992c44be 100644 --- a/test-utils/runtime/transaction-pool/Cargo.toml +++ b/test-utils/runtime/transaction-pool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-test-runtime-transaction-pool" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,12 +12,12 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../client" } +substrate-test-runtime-client = { version = "2.0.0-rc6", path = "../client" } parking_lot = "0.10.0" codec = { package = "parity-scale-codec", version = "1.3.1" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../../primitives/transaction-pool" } -sc-transaction-graph = { version = "2.0.0-rc5", path = "../../../client/transaction-pool/graph" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../primitives/runtime" } +sp-transaction-pool = { version = "2.0.0-rc6", path = "../../../primitives/transaction-pool" } +sc-transaction-graph = { version = "2.0.0-rc6", path = "../../../client/transaction-pool/graph" } futures = { version = "0.3.1", features = ["compat"] } derive_more = "0.99.2" diff --git a/test-utils/test-crate/Cargo.toml b/test-utils/test-crate/Cargo.toml index 6d16edde12c..cf7f2815187 100644 --- a/test-utils/test-crate/Cargo.toml +++ b/test-utils/test-crate/Cargo.toml @@ -6,11 +6,12 @@ edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] tokio = { version = "0.2.13", features = ["macros"] } -test-utils = { path = "..", package = "substrate-test-utils" } -sc-service = { path = "../../client/service" } +test-utils = { version = "2.0.0-rc6", path = "..", package = "substrate-test-utils" } +sc-service = { version = "0.8.0-rc6", path = "../../client/service" } diff --git a/utils/browser/Cargo.toml b/utils/browser/Cargo.toml index 20e04148faf..c4a6152b924 100644 --- a/utils/browser/Cargo.toml +++ b/utils/browser/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-browser-utils" -version = "0.8.0-rc5" +version = "0.8.0-rc6" authors = ["Parity Technologies "] description = "Utilities for creating a browser light-client." 
edition = "2018" @@ -22,11 +22,11 @@ js-sys = "0.3.34" wasm-bindgen = "0.2.57" wasm-bindgen-futures = "0.4.7" kvdb-web = "0.7" -sp-database = { version = "2.0.0-rc5", path = "../../primitives/database" } -sc-informant = { version = "0.8.0-rc5", path = "../../client/informant" } -sc-service = { version = "0.8.0-rc5", path = "../../client/service", default-features = false } -sc-network = { path = "../../client/network", version = "0.8.0-rc5"} -sc-chain-spec = { path = "../../client/chain-spec", version = "2.0.0-rc5"} +sp-database = { version = "2.0.0-rc6", path = "../../primitives/database" } +sc-informant = { version = "0.8.0-rc6", path = "../../client/informant" } +sc-service = { version = "0.8.0-rc6", path = "../../client/service", default-features = false } +sc-network = { path = "../../client/network", version = "0.8.0-rc6"} +sc-chain-spec = { path = "../../client/chain-spec", version = "2.0.0-rc6"} # Imported just for the `wasm-bindgen` feature rand6 = { package = "rand", version = "0.6", features = ["wasm-bindgen"] } diff --git a/utils/browser/README.md b/utils/browser/README.md new file mode 100644 index 00000000000..9718db58b37 --- /dev/null +++ b/utils/browser/README.md @@ -0,0 +1 @@ +License: Apache-2.0 \ No newline at end of file diff --git a/utils/build-script-utils/Cargo.toml b/utils/build-script-utils/Cargo.toml index ecf15503c54..383f38bcb0b 100644 --- a/utils/build-script-utils/Cargo.toml +++ b/utils/build-script-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-build-script-utils" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/utils/build-script-utils/README.md b/utils/build-script-utils/README.md new file mode 100644 index 00000000000..1c184f67326 --- /dev/null +++ b/utils/build-script-utils/README.md @@ -0,0 +1,3 @@ +Crate with utility functions for `build.rs` scripts. + +License: Apache-2.0 \ No newline at end of file diff --git a/utils/fork-tree/Cargo.toml b/utils/fork-tree/Cargo.toml index 30a38545980..b02fee519df 100644 --- a/utils/fork-tree/Cargo.toml +++ b/utils/fork-tree/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fork-tree" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/utils/fork-tree/README.md b/utils/fork-tree/README.md new file mode 100644 index 00000000000..fef7db57f68 --- /dev/null +++ b/utils/fork-tree/README.md @@ -0,0 +1,4 @@ +Utility library for managing tree-like ordered data with logic for pruning +the tree while finalizing nodes. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index c34404575e5..0ecb3b883e8 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-benchmarking-cli" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,15 +12,15 @@ description = "CLI for benchmarking FRAME" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -frame-benchmarking = { version = "2.0.0-rc5", path = "../../../frame/benchmarking" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sc-service = { version = "0.8.0-rc5", default-features = false, path = "../../../client/service" } -sc-cli = { version = "0.8.0-rc5", path = "../../../client/cli" } -sc-client-db = { version = "0.8.0-rc5", path = "../../../client/db" } -sc-executor = { version = "0.8.0-rc5", path = "../../../client/executor" } -sp-externalities = { version = "0.8.0-rc5", path = "../../../primitives/externalities" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../../primitives/state-machine" } +frame-benchmarking = { version = "2.0.0-rc6", path = "../../../frame/benchmarking" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } +sc-service = { version = "0.8.0-rc6", default-features = false, path = "../../../client/service" } +sc-cli = { version = "0.8.0-rc6", path = "../../../client/cli" } +sc-client-db = { version = "0.8.0-rc6", path = "../../../client/db" } +sc-executor = { version = "0.8.0-rc6", path = "../../../client/executor" } +sp-externalities = { version = "0.8.0-rc6", path = "../../../primitives/externalities" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../primitives/runtime" } +sp-state-machine = { version = "0.8.0-rc6", path = "../../../primitives/state-machine" } structopt = "0.3.8" codec = { version = "1.3.1", package = "parity-scale-codec" } diff --git a/utils/frame/benchmarking-cli/README.md b/utils/frame/benchmarking-cli/README.md new file mode 100644 index 00000000000..9718db58b37 --- /dev/null +++ b/utils/frame/benchmarking-cli/README.md @@ -0,0 +1 @@ +License: Apache-2.0 \ No newline at end of file diff --git a/utils/frame/frame-utilities-cli/Cargo.toml b/utils/frame/frame-utilities-cli/Cargo.toml index 913297819c0..5be62eff0ab 100644 --- a/utils/frame/frame-utilities-cli/Cargo.toml +++ b/utils/frame/frame-utilities-cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-frame-cli" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -10,11 +10,11 @@ description = "cli interface for FRAME" documentation = "https://docs.rs/substrate-frame-cli" [dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sc-cli = { version = "0.8.0-rc5", path = "../../../client/cli" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } +sc-cli = { version = "0.8.0-rc6", path = "../../../client/cli" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../primitives/runtime" } structopt = "0.3.8" -frame-system = { version = "2.0.0-rc5", path = "../../../frame/system" } +frame-system = { version = "2.0.0-rc6", path = "../../../frame/system" } [dev-dependencies] diff --git 
a/utils/frame/frame-utilities-cli/README.md b/utils/frame/frame-utilities-cli/README.md new file mode 100644 index 00000000000..b1e4f869af7 --- /dev/null +++ b/utils/frame/frame-utilities-cli/README.md @@ -0,0 +1,3 @@ +frame-system CLI utilities + +License: Apache-2.0 \ No newline at end of file diff --git a/utils/frame/rpc/support/Cargo.toml b/utils/frame/rpc/support/Cargo.toml index d5f34fe2b2d..784fe90cdf3 100644 --- a/utils/frame/rpc/support/Cargo.toml +++ b/utils/frame/rpc/support/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-frame-rpc-support" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies ", "Andrew Dirksen "] edition = "2018" license = "Apache-2.0" @@ -17,10 +17,10 @@ jsonrpc-client-transports = { version = "14.2.0", default-features = false, feat jsonrpc-core = "14.2.0" codec = { package = "parity-scale-codec", version = "1.3.1" } serde = "1" -frame-support = { version = "2.0.0-rc5", path = "../../../../frame/support" } -sp-storage = { version = "2.0.0-rc5", path = "../../../../primitives/storage" } -sc-rpc-api = { version = "0.8.0-rc5", path = "../../../../client/rpc-api" } +frame-support = { version = "2.0.0-rc6", path = "../../../../frame/support" } +sp-storage = { version = "2.0.0-rc6", path = "../../../../primitives/storage" } +sc-rpc-api = { version = "0.8.0-rc6", path = "../../../../client/rpc-api" } [dev-dependencies] -frame-system = { version = "2.0.0-rc5", path = "../../../../frame/system" } +frame-system = { version = "2.0.0-rc6", path = "../../../../frame/system" } tokio = "0.2" diff --git a/utils/frame/rpc/support/README.md b/utils/frame/rpc/support/README.md new file mode 100644 index 00000000000..ca575061293 --- /dev/null +++ b/utils/frame/rpc/support/README.md @@ -0,0 +1,4 @@ +Combines [sc_rpc_api::state::StateClient] with [frame_support::storage::generator] traits +to provide strongly typed chain state queries over rpc. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/utils/frame/rpc/system/Cargo.toml b/utils/frame/rpc/system/Cargo.toml index 33a949fddd0..0f1e27efc70 100644 --- a/utils/frame/rpc/system/Cargo.toml +++ b/utils/frame/rpc/system/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-frame-rpc-system" -version = "2.0.0-rc5" +version = "2.0.0-rc6" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,7 +12,7 @@ description = "FRAME's system exposed over Substrate RPC" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-client-api = { version = "2.0.0-rc5", path = "../../../../client/api" } +sc-client-api = { version = "2.0.0-rc6", path = "../../../../client/api" } codec = { package = "parity-scale-codec", version = "1.3.1" } futures = { version = "0.3.4", features = ["compat"] } jsonrpc-core = "14.2.0" @@ -20,16 +20,16 @@ jsonrpc-core-client = "14.2.0" jsonrpc-derive = "14.2.1" log = "0.4.8" serde = { version = "1.0.101", features = ["derive"] } -sp-runtime = { version = "2.0.0-rc5", path = "../../../../primitives/runtime" } -sp-api = { version = "2.0.0-rc5", path = "../../../../primitives/api" } -frame-system-rpc-runtime-api = { version = "2.0.0-rc5", path = "../../../../frame/system/rpc/runtime-api" } -sp-core = { version = "2.0.0-rc5", path = "../../../../primitives/core" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../../primitives/blockchain" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../../../primitives/transaction-pool" } -sp-block-builder = { version = "2.0.0-rc5", path = "../../../../primitives/block-builder" } -sc-rpc-api = { version = "0.8.0-rc5", path = "../../../../client/rpc-api" } +sp-runtime = { version = "2.0.0-rc6", path = "../../../../primitives/runtime" } +sp-api = { version = "2.0.0-rc6", path = "../../../../primitives/api" } +frame-system-rpc-runtime-api = { version = "2.0.0-rc6", path = "../../../../frame/system/rpc/runtime-api" } +sp-core = { version = "2.0.0-rc6", path = "../../../../primitives/core" } +sp-blockchain = { version = "2.0.0-rc6", path = "../../../../primitives/blockchain" } +sp-transaction-pool = { version = "2.0.0-rc6", path = "../../../../primitives/transaction-pool" } +sp-block-builder = { version = "2.0.0-rc6", path = "../../../../primitives/block-builder" } +sc-rpc-api = { version = "0.8.0-rc6", path = "../../../../client/rpc-api" } [dev-dependencies] -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../../../test-utils/runtime/client" } +substrate-test-runtime-client = { version = "2.0.0-rc6", path = "../../../../test-utils/runtime/client" } env_logger = "0.7.0" -sc-transaction-pool = { version = "2.0.0-rc5", path = "../../../../client/transaction-pool" } +sc-transaction-pool = { version = "2.0.0-rc6", path = "../../../../client/transaction-pool" } diff --git a/utils/frame/rpc/system/README.md b/utils/frame/rpc/system/README.md new file mode 100644 index 00000000000..38986983d93 --- /dev/null +++ b/utils/frame/rpc/system/README.md @@ -0,0 +1,3 @@ +System FRAME specific RPC methods. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/utils/prometheus/Cargo.toml b/utils/prometheus/Cargo.toml index 805ea19cdc6..4ed4575ccf7 100644 --- a/utils/prometheus/Cargo.toml +++ b/utils/prometheus/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Endpoint to expose Prometheus metrics" name = "substrate-prometheus-endpoint" -version = "0.8.0-rc5" +version = "0.8.0-rc6" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2018" -- GitLab From 83aa4182f2e1484ce2c9293da95c375c9520851d Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 20 Aug 2020 17:10:00 +0200 Subject: [PATCH 114/132] Try un-ignore flaky test (#6923) --- client/network/src/service/tests.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index 031d1641d23..797942e1c24 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -346,9 +346,7 @@ fn lots_of_incoming_peers_works() { }); } -// TODO: this test is at the moment ignored because of https://github.com/paritytech/substrate/issues/6766 #[test] -#[ignore] fn notifications_back_pressure() { // Node 1 floods node 2 with notifications. Random sleeps are done on node 2 to simulate the // node being busy. We make sure that all notifications are received. -- GitLab From 8b0a5ba78d4fa38c0be39345872c43b85a80b4bc Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 20 Aug 2020 17:53:37 +0200 Subject: [PATCH 115/132] Remove the legacy requests-answering protocols (#6709) Co-authored-by: parity-processbot <> --- client/network/src/protocol.rs | 340 ++------------------------------- client/network/src/service.rs | 1 - 2 files changed, 18 insertions(+), 323 deletions(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index c1c9ef02ea6..ff95d8f12fc 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -18,7 +18,7 @@ use crate::{ ExHashT, - chain::{Client, FinalityProofProvider}, + chain::Client, config::{BoxFinalityProofRequestBuilder, ProtocolId, TransactionPool, TransactionImportFuture, TransactionImport}, error, utils::{interval, LruHashSet}, @@ -31,10 +31,6 @@ use libp2p::{Multiaddr, PeerId}; use libp2p::core::{ConnectedPoint, connection::{ConnectionId, ListenerId}}; use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use sp_core::{ - storage::{StorageKey, PrefixedStorageKey, ChildInfo, ChildType}, - hexdisplay::HexDisplay -}; use sp_consensus::{ BlockOrigin, block_validation::BlockAnnounceValidator, @@ -54,12 +50,11 @@ use prometheus_endpoint::{ }; use sync::{ChainSync, SyncState}; use std::borrow::Cow; -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque, hash_map::Entry}; +use std::collections::{HashMap, HashSet, VecDeque, hash_map::Entry}; use std::sync::Arc; use std::fmt::Write; use std::{cmp, io, num::NonZeroUsize, pin::Pin, task::Poll, time}; use log::{log, Level, trace, debug, warn, error}; -use sc_client_api::{ChangesProof, StorageProof}; use wasm_timer::Instant; mod generic_proto; @@ -118,8 +113,6 @@ mod rep { pub const GOOD_TRANSACTION: Rep = Rep::new(1 << 7, "Good transaction"); /// Reputation change when a peer sends us a bad transaction. pub const BAD_TRANSACTION: Rep = Rep::new(-(1 << 12), "Bad transaction"); - /// We sent an RPC query to the given node, but it failed. 
- pub const RPC_FAILED: Rep = Rep::new(-(1 << 12), "Remote call failed"); /// We received a message that failed to decode. pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); /// We received an unexpected response. @@ -249,8 +242,6 @@ pub struct Protocol { /// Used to report reputation changes. peerset_handle: sc_peerset::PeersetHandle, transaction_pool: Arc>, - /// When asked for a proof of finality, we use this struct to build one. - finality_proof_provider: Option>>, /// Handles opening the unique substream and sending and receiving raw messages. behaviour: GenericProto, /// For each legacy gossiping engine ID, the corresponding new protocol name. @@ -388,7 +379,6 @@ impl Protocol { local_peer_id: PeerId, chain: Arc>, transaction_pool: Arc>, - finality_proof_provider: Option>>, finality_proof_request_builder: Option>, protocol_id: ProtocolId, peerset_config: sc_peerset::PeersetConfig, @@ -464,7 +454,6 @@ impl Protocol { sync, important_peers, transaction_pool, - finality_proof_provider, peerset_handle: peerset_handle.clone(), behaviour, protocol_name_by_engine: HashMap::new(), @@ -626,27 +615,30 @@ impl Protocol { }, GenericMessage::Transactions(m) => self.on_transactions(who, m), - GenericMessage::RemoteCallRequest(request) => self.on_remote_call_request(who, request), GenericMessage::RemoteCallResponse(_) => warn!(target: "sub-libp2p", "Received unexpected RemoteCallResponse"), - GenericMessage::RemoteReadRequest(request) => - self.on_remote_read_request(who, request), GenericMessage::RemoteReadResponse(_) => warn!(target: "sub-libp2p", "Received unexpected RemoteReadResponse"), - GenericMessage::RemoteHeaderRequest(request) => - self.on_remote_header_request(who, request), GenericMessage::RemoteHeaderResponse(_) => warn!(target: "sub-libp2p", "Received unexpected RemoteHeaderResponse"), - GenericMessage::RemoteChangesRequest(request) => - self.on_remote_changes_request(who, request), GenericMessage::RemoteChangesResponse(_) => warn!(target: "sub-libp2p", "Received unexpected RemoteChangesResponse"), - GenericMessage::FinalityProofRequest(request) => - self.on_finality_proof_request(who, request), - GenericMessage::FinalityProofResponse(response) => - return self.on_finality_proof_response(who, response), - GenericMessage::RemoteReadChildRequest(request) => - self.on_remote_read_child_request(who, request), + GenericMessage::FinalityProofResponse(_) => + warn!(target: "sub-libp2p", "Received unexpected FinalityProofResponse"), + GenericMessage::FinalityProofRequest(_) | + GenericMessage::RemoteReadChildRequest(_) | + GenericMessage::RemoteCallRequest(_) | + GenericMessage::RemoteReadRequest(_) | + GenericMessage::RemoteHeaderRequest(_) | + GenericMessage::RemoteChangesRequest(_) => { + debug!( + target: "sub-libp2p", + "Received no longer supported legacy request from {:?}", + who + ); + self.disconnect_peer(&who); + self.peerset_handle.report_peer(who, rep::BAD_PROTOCOL); + }, GenericMessage::Consensus(msg) => return if self.protocol_name_by_engine.contains_key(&msg.engine_id) { CustomMessageOutcome::NotificationsReceived { @@ -1391,51 +1383,6 @@ impl Protocol { self.sync.on_block_finalized(&hash, *header.number()) } - fn on_remote_call_request( - &mut self, - who: PeerId, - request: message::RemoteCallRequest, - ) { - trace!(target: "sync", "Remote call request {} from {} ({} at {})", - request.id, - who, - request.method, - request.block - ); - - if let Some(metrics) = &self.metrics { - metrics.legacy_requests_received.with_label_values(&["remote-call"]).inc(); - } - 
- let proof = match self.context_data.chain.execution_proof( - &BlockId::Hash(request.block), - &request.method, - &request.data, - ) { - Ok((_, proof)) => proof, - Err(error) => { - trace!(target: "sync", "Remote call request {} from {} ({} at {}) failed with: {}", - request.id, - who, - request.method, - request.block, - error - ); - self.peerset_handle.report_peer(who.clone(), rep::RPC_FAILED); - StorageProof::empty() - } - }; - - self.send_message( - &who, - None, - GenericMessage::RemoteCallResponse(message::RemoteCallResponse { - id: request.id, - proof, - }), - ); - } - /// Request a justification for the given block. /// /// Uses `protocol` to queue a new justification request and tries to dispatch all pending @@ -1522,257 +1469,6 @@ impl Protocol { self.sync.on_finality_proof_import(request_block, finalization_result) } - fn on_remote_read_request( - &mut self, - who: PeerId, - request: message::RemoteReadRequest, - ) { - if let Some(metrics) = &self.metrics { - metrics.legacy_requests_received.with_label_values(&["remote-read"]).inc(); - } - - if request.keys.is_empty() { - debug!(target: "sync", "Invalid remote read request sent by {}", who); - self.behaviour.disconnect_peer(&who); - self.peerset_handle.report_peer(who, rep::BAD_MESSAGE); - return; - } - - let keys_str = || match request.keys.len() { - 1 => HexDisplay::from(&request.keys[0]).to_string(), - _ => format!( - "{}..{}", - HexDisplay::from(&request.keys[0]), - HexDisplay::from(&request.keys[request.keys.len() - 1]), - ), - }; - - trace!(target: "sync", "Remote read request {} from {} ({} at {})", - request.id, who, keys_str(), request.block); - let proof = match self.context_data.chain.read_proof( - &BlockId::Hash(request.block), - &mut request.keys.iter().map(AsRef::as_ref) - ) { - Ok(proof) => proof, - Err(error) => { - trace!(target: "sync", "Remote read request {} from {} ({} at {}) failed with: {}", - request.id, - who, - keys_str(), - request.block, - error - ); - StorageProof::empty() - } - }; - self.send_message( - &who, - None, - GenericMessage::RemoteReadResponse(message::RemoteReadResponse { - id: request.id, - proof, - }), - ); - } - - fn on_remote_read_child_request( - &mut self, - who: PeerId, - request: message::RemoteReadChildRequest, - ) { - if let Some(metrics) = &self.metrics { - metrics.legacy_requests_received.with_label_values(&["remote-child"]).inc(); - } - - if request.keys.is_empty() { - debug!(target: "sync", "Invalid remote child read request sent by {}", who); - self.behaviour.disconnect_peer(&who); - self.peerset_handle.report_peer(who, rep::BAD_MESSAGE); - return; - } - - let keys_str = || match request.keys.len() { - 1 => HexDisplay::from(&request.keys[0]).to_string(), - _ => format!( - "{}..{}", - HexDisplay::from(&request.keys[0]), - HexDisplay::from(&request.keys[request.keys.len() - 1]), - ), - }; - - trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", - request.id, who, HexDisplay::from(&request.storage_key), keys_str(), request.block); - let prefixed_key = PrefixedStorageKey::new_ref(&request.storage_key); - let child_info = match ChildType::from_prefixed_key(prefixed_key) { - Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), - None => Err("Invalid child storage key".into()), - }; - let proof = match child_info.and_then(|child_info| self.context_data.chain.read_child_proof( - &BlockId::Hash(request.block), - &child_info, - &mut request.keys.iter().map(AsRef::as_ref), - )) { - Ok(proof) => proof, - Err(error) => { - 
trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", - request.id, - who, - HexDisplay::from(&request.storage_key), - keys_str(), - request.block, - error - ); - StorageProof::empty() - } - }; - self.send_message( - &who, - None, - GenericMessage::RemoteReadResponse(message::RemoteReadResponse { - id: request.id, - proof, - }), - ); - } - - fn on_remote_header_request( - &mut self, - who: PeerId, - request: message::RemoteHeaderRequest>, - ) { - if let Some(metrics) = &self.metrics { - metrics.legacy_requests_received.with_label_values(&["remote-header"]).inc(); - } - - trace!(target: "sync", "Remote header proof request {} from {} ({})", - request.id, who, request.block); - let (header, proof) = match self.context_data.chain.header_proof(&BlockId::Number(request.block)) { - Ok((header, proof)) => (Some(header), proof), - Err(error) => { - trace!(target: "sync", "Remote header proof request {} from {} ({}) failed with: {}", - request.id, - who, - request.block, - error - ); - (Default::default(), StorageProof::empty()) - } - }; - self.send_message( - &who, - None, - GenericMessage::RemoteHeaderResponse(message::RemoteHeaderResponse { - id: request.id, - header, - proof, - }), - ); - } - - fn on_remote_changes_request( - &mut self, - who: PeerId, - request: message::RemoteChangesRequest, - ) { - if let Some(metrics) = &self.metrics { - metrics.legacy_requests_received.with_label_values(&["remote-changes"]).inc(); - } - - trace!(target: "sync", "Remote changes proof request {} from {} for key {} ({}..{})", - request.id, - who, - if let Some(sk) = request.storage_key.as_ref() { - format!("{} : {}", HexDisplay::from(sk), HexDisplay::from(&request.key)) - } else { - HexDisplay::from(&request.key).to_string() - }, - request.first, - request.last - ); - let key = StorageKey(request.key); - let prefixed_key = request.storage_key.as_ref() - .map(|storage_key| PrefixedStorageKey::new_ref(storage_key)); - let (first, last, min, max) = (request.first, request.last, request.min, request.max); - let proof = match self.context_data.chain.key_changes_proof( - first, - last, - min, - max, - prefixed_key, - &key, - ) { - Ok(proof) => proof, - Err(error) => { - trace!(target: "sync", "Remote changes proof request {} from {} for key {} ({}..{}) failed with: {}", - request.id, - who, - if let Some(sk) = request.storage_key.as_ref() { - format!("{} : {}", HexDisplay::from(sk), HexDisplay::from(&key.0)) - } else { - HexDisplay::from(&key.0).to_string() - }, - request.first, - request.last, - error - ); - ChangesProof:: { - max_block: Zero::zero(), - proof: vec![], - roots: BTreeMap::new(), - roots_proof: StorageProof::empty(), - } - } - }; - self.send_message( - &who, - None, - GenericMessage::RemoteChangesResponse(message::RemoteChangesResponse { - id: request.id, - max: proof.max_block, - proof: proof.proof, - roots: proof.roots.into_iter().collect(), - roots_proof: proof.roots_proof, - }), - ); - } - - fn on_finality_proof_request( - &mut self, - who: PeerId, - request: message::FinalityProofRequest, - ) { - if let Some(metrics) = &self.metrics { - metrics.legacy_requests_received.with_label_values(&["finality-proof"]).inc(); - } - - trace!(target: "sync", "Finality proof request from {} for {}", who, request.block); - let finality_proof = self.finality_proof_provider.as_ref() - .ok_or_else(|| String::from("Finality provider is not configured")) - .and_then(|provider| - provider.prove_finality(request.block, &request.request).map_err(|e| e.to_string()) - ); - let 
finality_proof = match finality_proof { - Ok(finality_proof) => finality_proof, - Err(error) => { - trace!(target: "sync", "Finality proof request from {} for {} failed with: {}", - who, - request.block, - error - ); - None - }, - }; - self.send_message( - &who, - None, - GenericMessage::FinalityProofResponse(message::FinalityProofResponse { - id: 0, - block: request.block, - proof: finality_proof, - }), - ); - } - /// Must be called after a [`CustomMessageOutcome::FinalityProofRequest`] has been emitted, /// to notify of the response having arrived. pub fn on_finality_proof_response( diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 3ca74525935..1f2e98c281b 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -255,7 +255,6 @@ impl NetworkWorker { local_peer_id.clone(), params.chain.clone(), params.transaction_pool, - params.finality_proof_provider.clone(), params.finality_proof_request_builder, params.protocol_id.clone(), peerset_config, -- GitLab From 3f1341a384559ec046710e2f46eb4a7d9fc52f50 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Fri, 21 Aug 2020 08:25:23 +0200 Subject: [PATCH 116/132] *: Update to next libp2p version 0.24.0 (#6891) * *: Update to next libp2p version (likely v0.24.0) * Revert "*: Update to next libp2p version (likely v0.24.0)" This reverts commit ffe1545aba6c2557a2843579de331f3fc1c60743. * */Cargo.toml: Update to libp2p v0.24.0 * client/network/src/service: Handle ConnectionClosed returning Option * Cargo.*: Test kad usize conversion * Revert "Cargo.*: Test kad usize conversion" This reverts commit ad317879782f982cb4a4c76029a72b5b97e82bec. * Cargo.lock: Update to libp2p-kad v0.22.1 * client/cli/Cargo.toml: Update to libp2p 0.24.0 --- Cargo.lock | 191 +++++++++++-------------- bin/node/browser-testing/Cargo.toml | 2 +- client/authority-discovery/Cargo.toml | 2 +- client/cli/Cargo.toml | 2 +- client/network-gossip/Cargo.toml | 2 +- client/network/Cargo.toml | 6 +- client/network/src/service.rs | 19 +-- client/network/test/Cargo.toml | 2 +- client/peerset/Cargo.toml | 2 +- client/telemetry/Cargo.toml | 2 +- primitives/consensus/common/Cargo.toml | 2 +- utils/browser/Cargo.toml | 2 +- 12 files changed, 103 insertions(+), 131 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 70b3581bb90..45e6ff3eeb0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2285,6 +2285,12 @@ dependencies = [ "proc-macro-hack", ] +[[package]] +name = "hex_fmt" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" + [[package]] name = "hmac" version = "0.7.1" @@ -2885,10 +2891,11 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] name = "libp2p" -version = "0.22.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0306a49ee6a89468f96089906f36b0eef82c988dcfc8acf3e2dcd6ad1c859f85" +checksum = "76c101edbb9c06955fd4085b77d2abc31cf3650134d77068b35c44967756ada8" dependencies = [ + "atomic", "bytes 0.5.6", "futures 0.3.5", "lazy_static", @@ -2902,7 +2909,7 @@ dependencies = [ "libp2p-kad", "libp2p-mdns", "libp2p-mplex", - "libp2p-noise 0.21.0", + "libp2p-noise", "libp2p-ping", "libp2p-plaintext", "libp2p-pnet", @@ -2922,44 +2929,11 @@ dependencies = [ "wasm-timer", ] -[[package]] -name = "libp2p" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b1ebb6c031584a5af181fe3a1e4b074af5d0b1a3b31663200f0251f4bcff6b5c" -dependencies = [ - "atomic", - "bytes 0.5.6", - "futures 0.3.5", - "lazy_static", - "libp2p-core", - "libp2p-core-derive", - "libp2p-dns", - "libp2p-identify", - "libp2p-kad", - "libp2p-mdns", - "libp2p-mplex", - "libp2p-noise 0.22.0", - "libp2p-ping", - "libp2p-secio", - "libp2p-swarm", - "libp2p-tcp", - "libp2p-wasm-ext", - "libp2p-websocket", - "libp2p-yamux", - "multihash", - "parity-multiaddr", - "parking_lot 0.10.2", - "pin-project", - "smallvec 1.4.1", - "wasm-timer", -] - [[package]] name = "libp2p-core" -version = "0.20.1" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a694fd76d7c33a45a0e6e1525e9b9b5d11127c9c94e560ac0f8abba54ed80af" +checksum = "17cea54ea4a846a7c47e4347db0fc7a4129dcb0fb57f07f57e473820edbfcbde" dependencies = [ "asn1_der", "bs58", @@ -2991,9 +2965,9 @@ dependencies = [ [[package]] name = "libp2p-core-derive" -version = "0.20.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "515c4a7cba5d321bb88ed3ed803997bdd5634ce35c9c5e8e9ace9c512e57eceb" +checksum = "f753d9324cd3ec14bf04b8a8cd0d269c87f294153d6bf2a84497a63a5ad22213" dependencies = [ "quote", "syn", @@ -3001,9 +2975,9 @@ dependencies = [ [[package]] name = "libp2p-deflate" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abeff37fa533fead23fc71b14ed0a2aced36c0c65c3d0078aff07821fb71029e" +checksum = "bc6174d6addc9cc5fd84af7099480774035dd1a7cdf48dd31b23dea45cf57638" dependencies = [ "flate2", "futures 0.3.5", @@ -3012,9 +2986,9 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f751924b6b98e350005e0b87a822beb246792a3fb878c684e088f866158120ac" +checksum = "fce8769cfe677a567d2677dc02a9e5be27a24acf1ff78a59cef425caae009a6a" dependencies = [ "futures 0.3.5", "libp2p-core", @@ -3023,9 +2997,9 @@ dependencies = [ [[package]] name = "libp2p-floodsub" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02d4f310a02441b681075037ffb41649ee8836619559311b801ef3d5cdbe14cf" +checksum = "2f2342965ac7ea4b85f4df5288089796421f9297ba4020dc9692f4ef728590dc" dependencies = [ "cuckoofilter", "fnv", @@ -3040,9 +3014,9 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a70f76b6c53ae9c97c234498c799802e43f91766bcf4a2a1f94f9339617d713b" +checksum = "0828b4f0c76c2edc68da574e391ce981bac5316d65785cddfe8c273d4c9bd4bb" dependencies = [ "base64 0.11.0", "byteorder 1.3.4", @@ -3050,10 +3024,11 @@ dependencies = [ "fnv", "futures 0.3.5", "futures_codec", + "hex_fmt", "libp2p-core", "libp2p-swarm", "log", - "lru 0.4.3", + "lru_time_cache", "prost", "prost-build", "rand 0.7.3", @@ -3065,9 +3040,9 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "912c00a7bf67e0e765daf0cc37e08f675ea26aba3d6d1fbfaee81f19a4c23049" +checksum = "41efcb5b521b65d2c45432a244ce6427cdd3649228cd192f397d1fa67682aef2" dependencies = [ "futures 0.3.5", "libp2p-core", @@ -3081,9 +3056,9 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.21.0" +version = "0.22.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "44ed3a4c8111c570ab2bffb30c6353178d7603ce3787e3c5f2493c8d3d16d1f0" +checksum = "ca9b4ccc868863317af3f65eb241811ceadd971d133183040140f5496037e0ae" dependencies = [ "arrayvec 0.5.1", "bytes 0.5.6", @@ -3108,9 +3083,9 @@ dependencies = [ [[package]] name = "libp2p-mdns" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd004c668160fd922f7268b2cd1e4550ff69165d9c744e9eb5770086eb753d02" +checksum = "d4fe5614c2c5af74ef5870aad0fce73c9e4707716c4ee7cdf06cf9a0376d3815" dependencies = [ "async-std", "data-encoding", @@ -3130,9 +3105,9 @@ dependencies = [ [[package]] name = "libp2p-mplex" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14ae0ffacd30f073f96cd518b2c9cd2cb18ac27c3d136a4b23cf1af99f33e541" +checksum = "df9e79541e71590846f773efce1b6d0538804992ee54ff2f407e05d63a9ddc23" dependencies = [ "bytes 0.5.6", "fnv", @@ -3146,31 +3121,9 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f353f8966bbaaf7456535fffd3f366f153148773a0cf04b2ec3860955cb720e" -dependencies = [ - "bytes 0.5.6", - "curve25519-dalek", - "futures 0.3.5", - "lazy_static", - "libp2p-core", - "log", - "prost", - "prost-build", - "rand 0.7.3", - "sha2 0.8.2", - "snow", - "static_assertions", - "x25519-dalek", - "zeroize", -] - -[[package]] -name = "libp2p-noise" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e594f2de0c23c2b7ad14802c991a2e68e95315c6a6c7715e53801506f20135d" +checksum = "0beba6459d06153f5f8e23da3df1d2183798b1f457c7c9468ff99760bcbcc60b" dependencies = [ "bytes 0.5.6", "curve25519-dalek", @@ -3190,9 +3143,9 @@ dependencies = [ [[package]] name = "libp2p-ping" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70130cf130e4ba6dc177366e72dd9f86f9e3588fa1a0c4145247e676f16affad" +checksum = "670261ef938567b614746b078e049b03b55617538a8d415071c518f97532d043" dependencies = [ "futures 0.3.5", "libp2p-core", @@ -3205,9 +3158,9 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53f0308a97f6fdd37a2bc388070e471c3ce9d92aa45c99d75c87c2dc5d5cac96" +checksum = "b3a61dfd53d1264ddff1206e4827193efaa72bab27782dfcd63c0dec120a1875" dependencies = [ "bytes 0.5.6", "futures 0.3.5", @@ -3237,23 +3190,26 @@ dependencies = [ [[package]] name = "libp2p-request-response" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f48682b48a96545a323edd150c1d64fc1e250240bba02866e9f902e3dc032a9" +checksum = "4af0de0e56a11d46c5191a61019733b5618dc955c0a36f82866bb6d5d81a7f8f" dependencies = [ "async-trait", "futures 0.3.5", "libp2p-core", "libp2p-swarm", + "log", + "lru 0.6.0", + "rand 0.7.3", "smallvec 1.4.1", "wasm-timer", ] [[package]] name = "libp2p-secio" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ff43513c383f7cdab2736eb98465fc4c5dd5d1988df89749dc8a68950349d56" +checksum = "a04b320cc0394554e8d0adca21f4efd9f8c2da4930211d92e411a19a4dfd769e" dependencies = [ "aes-ctr", "ctr", @@ -3281,9 +3237,9 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = 
"0.20.1" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f88d5e2a090a2aadf042cd33484e2f015c6dab212567406a59deece5dedbd133" +checksum = "57e4a7e64156e9d1a2daae36b5d791f057b9c53c9364a8e75f7f9848b54f9d68" dependencies = [ "futures 0.3.5", "libp2p-core", @@ -3296,9 +3252,9 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b1fa2bbad054020cb875546a577a66a65a5bf42eff55ed5265f92ffee3cc052" +checksum = "f0f65400ccfbbf9a356733bceca6c519c9db0deb5fbcc0b81f89837c4cd53997" dependencies = [ "async-std", "futures 0.3.5", @@ -3312,9 +3268,9 @@ dependencies = [ [[package]] name = "libp2p-uds" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9db9fce9e3588c3118475d9ca761c5c133b639a624a7341e2a61e4b28c376b8" +checksum = "95bc8b0ca1dda4cccb1bb156d47a32e45cfa447ef18f737209f014a63f94a4a2" dependencies = [ "async-std", "futures 0.3.5", @@ -3324,9 +3280,9 @@ dependencies = [ [[package]] name = "libp2p-wasm-ext" -version = "0.20.1" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0feb99e32fea20ffb1bbf56a6fb2614bff7325ff44a515728385170b3420d2c3" +checksum = "0f2f7b06d80d036ac5763a811185b7fe6951ad71c00544b17cc378a9069bb7c2" dependencies = [ "futures 0.3.5", "js-sys", @@ -3338,9 +3294,9 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.21.1" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046a5201f6e471f22b22b394e4d084269ed1e28cf7300f7b49874385db84c7bd" +checksum = "a5b350db65cf0a7c83a539a596ea261caae1552c0df2245df0f916ed2fd04572" dependencies = [ "async-tls", "either", @@ -3358,9 +3314,9 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46ae9bf2f7d8a4be9c7e9b61df9de9dc1bd66419d669098f22f81f8d9571029a" +checksum = "b3969ead4ce530efb6f304623924245caf410f3b0b0139bd7007f205933788aa" dependencies = [ "futures 0.3.5", "libp2p-core", @@ -3498,6 +3454,21 @@ dependencies = [ "hashbrown 0.6.3", ] +[[package]] +name = "lru" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "111b945ac72ec09eb7bc62a0fbdc3cc6e80555a7245f52a69d3921a75b53b153" +dependencies = [ + "hashbrown 0.8.1", +] + +[[package]] +name = "lru_time_cache" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb241df5c4caeb888755363fc95f8a896618dc0d435e9e775f7930cb099beab" + [[package]] name = "mach" version = "0.3.2" @@ -3812,7 +3783,7 @@ dependencies = [ "futures 0.3.5", "futures-timer 3.0.2", "jsonrpc-core", - "libp2p 0.23.0", + "libp2p", "node-cli", "sc-rpc-api", "serde", @@ -6386,7 +6357,7 @@ dependencies = [ "env_logger", "futures 0.3.5", "futures-timer 3.0.2", - "libp2p 0.23.0", + "libp2p", "log", "parity-scale-codec", "prost", @@ -6490,7 +6461,7 @@ dependencies = [ "futures 0.3.5", "hex", "lazy_static", - "libp2p 0.22.0", + "libp2p", "log", "names", "nix", @@ -7056,7 +7027,7 @@ dependencies = [ "futures_codec", "hex", "ip_network", - "libp2p 0.23.0", + "libp2p", "linked-hash-map", "linked_hash_set", "log", @@ -7103,7 +7074,7 @@ dependencies = [ "async-std", "futures 0.3.5", "futures-timer 3.0.2", - "libp2p 0.23.0", + "libp2p", "log", "lru 0.4.3", "quickcheck", @@ -7121,7 +7092,7 @@ 
dependencies = [ "env_logger", "futures 0.3.5", "futures-timer 3.0.2", - "libp2p 0.23.0", + "libp2p", "log", "parking_lot 0.10.2", "rand 0.7.3", @@ -7178,7 +7149,7 @@ name = "sc-peerset" version = "2.0.0-rc6" dependencies = [ "futures 0.3.5", - "libp2p 0.23.0", + "libp2p", "log", "rand 0.7.3", "serde_json", @@ -7406,7 +7377,7 @@ version = "2.0.0-rc6" dependencies = [ "futures 0.3.5", "futures-timer 3.0.2", - "libp2p 0.23.0", + "libp2p", "log", "parking_lot 0.10.2", "pin-project", @@ -8075,7 +8046,7 @@ dependencies = [ "derive_more", "futures 0.3.5", "futures-timer 3.0.2", - "libp2p 0.23.0", + "libp2p", "log", "parity-scale-codec", "parking_lot 0.10.2", diff --git a/bin/node/browser-testing/Cargo.toml b/bin/node/browser-testing/Cargo.toml index 977a602e1da..1cfc0623dd9 100644 --- a/bin/node/browser-testing/Cargo.toml +++ b/bin/node/browser-testing/Cargo.toml @@ -8,7 +8,7 @@ license = "Apache-2.0" [dependencies] futures-timer = "3.0.2" -libp2p = { version = "0.23.0", default-features = false } +libp2p = { version = "0.24.0", default-features = false } jsonrpc-core = "14.2.0" serde = "1.0.106" serde_json = "1.0.48" diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index b651dbbbc94..d154b356057 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -21,7 +21,7 @@ codec = { package = "parity-scale-codec", default-features = false, version = "1 derive_more = "0.99.2" futures = "0.3.4" futures-timer = "3.0.1" -libp2p = { version = "0.23.0", default-features = false, features = ["kad"] } +libp2p = { version = "0.24.0", default-features = false, features = ["kad"] } log = "0.4.8" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc6"} prost = "0.6.1" diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 57bc622deb4..2643376f841 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -23,7 +23,7 @@ lazy_static = "1.4.0" tokio = { version = "0.2.21", features = [ "signal", "rt-core", "rt-threaded", "blocking" ] } futures = "0.3.4" fdlimit = "0.1.4" -libp2p = "0.22" +libp2p = "0.24.0" parity-scale-codec = "1.3.0" hex = "0.4.2" rand = "0.7.3" diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index 7af59a68dfe..f826bb88bad 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.4" futures-timer = "3.0.1" -libp2p = { version = "0.23.0", default-features = false } +libp2p = { version = "0.24.0", default-features = false } log = "0.4.8" lru = "0.4.3" sc-network = { version = "0.8.0-rc6", path = "../network" } diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index a5020507b50..de885bc65a4 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -62,14 +62,14 @@ wasm-timer = "0.2" zeroize = "1.0.0" [dependencies.libp2p] -version = "0.23.0" +version = "0.24.0" default-features = false -features = ["identify", "kad", "mdns", "mplex", "noise", "ping", "tcp-async-std", "websocket", "yamux"] +features = ["identify", "kad", "mdns-async-std", "mplex", "noise", "ping", "tcp-async-std", "websocket", "yamux"] [dev-dependencies] assert_matches = "1.3" env_logger = "0.7.0" -libp2p = { version = "0.23.0", default-features = false, features = ["secio"] } +libp2p = { version = "0.24.0", default-features = false, features = ["secio"] } quickcheck = "0.9.0" rand = "0.7.2" 
sp-keyring = { version = "2.0.0-rc6", path = "../../primitives/keyring" } diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 1f2e98c281b..c9213d4dde2 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -1633,18 +1633,19 @@ impl Future for NetworkWorker { ConnectedPoint::Listener { .. } => "in", }; let reason = match cause { - ConnectionError::IO(_) => "transport-error", - ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( + Some(ConnectionError::IO(_)) => "transport-error", + Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( EitherError::A(EitherError::A(EitherError::B( - EitherError::A(PingFailure::Timeout)))))))) => "ping-timeout", - ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( + EitherError::A(PingFailure::Timeout))))))))) => "ping-timeout", + Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( EitherError::A(EitherError::A(EitherError::A( - NotifsHandlerError::Legacy(LegacyConnectionKillError)))))))) => "force-closed", - ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( + NotifsHandlerError::Legacy(LegacyConnectionKillError))))))))) => "force-closed", + Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( EitherError::A(EitherError::A(EitherError::A( - NotifsHandlerError::SyncNotificationsClogged))))))) => "sync-notifications-clogged", - ConnectionError::Handler(NodeHandlerWrapperError::Handler(_)) => "protocol-error", - ConnectionError::Handler(NodeHandlerWrapperError::KeepAliveTimeout) => "keep-alive-timeout", + NotifsHandlerError::SyncNotificationsClogged)))))))) => "sync-notifications-clogged", + Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(_))) => "protocol-error", + Some(ConnectionError::Handler(NodeHandlerWrapperError::KeepAliveTimeout)) => "keep-alive-timeout", + None => "actively-closed", }; metrics.connections_closed_total.with_label_values(&[direction, reason]).inc(); diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index f9e7ec4c89e..7f3f535ebbd 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -19,7 +19,7 @@ parking_lot = "0.10.0" futures = "0.3.4" futures-timer = "3.0.1" rand = "0.7.2" -libp2p = { version = "0.23.0", default-features = false } +libp2p = { version = "0.24.0", default-features = false } sp-consensus = { version = "0.8.0-rc6", path = "../../../primitives/consensus/common" } sc-consensus = { version = "0.8.0-rc6", path = "../../../client/consensus/common" } sc-client-api = { version = "2.0.0-rc6", path = "../../api" } diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index 550217c2b06..5856abf4e7e 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.4" -libp2p = { version = "0.23.0", default-features = false } +libp2p = { version = "0.24.0", default-features = false } sp-utils = { version = "2.0.0-rc6", path = "../../primitives/utils"} log = "0.4.8" serde_json = "1.0.41" diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index 2f3601b1729..3ad82f56125 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -18,7 +18,7 @@ parking_lot = "0.10.0" futures = "0.3.4" futures-timer = "3.0.1" wasm-timer = "0.2.0" -libp2p = { 
version = "0.23.0", default-features = false, features = ["dns", "tcp-async-std", "wasm-ext", "websocket"] } +libp2p = { version = "0.24.0", default-features = false, features = ["dns", "tcp-async-std", "wasm-ext", "websocket"] } log = "0.4.8" pin-project = "0.4.6" rand = "0.7.2" diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index 4c5a0197de2..7af0cbd949a 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" -libp2p = { version = "0.23.0", default-features = false } +libp2p = { version = "0.24.0", default-features = false } log = "0.4.8" sp-core = { path= "../../core", version = "2.0.0-rc6"} sp-inherents = { version = "2.0.0-rc6", path = "../../inherents" } diff --git a/utils/browser/Cargo.toml b/utils/browser/Cargo.toml index c4a6152b924..085939ffdcf 100644 --- a/utils/browser/Cargo.toml +++ b/utils/browser/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] futures = { version = "0.3", features = ["compat"] } futures01 = { package = "futures", version = "0.1.29" } log = "0.4.8" -libp2p-wasm-ext = { version = "0.20", features = ["websocket"] } +libp2p-wasm-ext = { version = "0.21", features = ["websocket"] } console_error_panic_hook = "0.1.6" console_log = "0.1.2" js-sys = "0.3.34" -- GitLab From 666555733c05a4c4df0445f567d7c921c10a83f3 Mon Sep 17 00:00:00 2001 From: Qinxuan Chen Date: Fri, 21 Aug 2020 22:25:58 +0800 Subject: [PATCH 117/132] Remove duplicated dependencies (#6930) Signed-off-by: koushiro --- Cargo.lock | 221 +++++++--------------------- bin/node/cli/Cargo.toml | 2 +- bin/node/runtime/Cargo.toml | 2 +- client/api/Cargo.toml | 12 +- client/executor/Cargo.toml | 2 +- client/service/test/Cargo.toml | 2 +- frame/benchmarking/Cargo.toml | 2 +- frame/collective/Cargo.toml | 2 +- frame/contracts/Cargo.toml | 2 +- frame/democracy/Cargo.toml | 2 +- frame/elections-phragmen/Cargo.toml | 2 +- frame/elections/Cargo.toml | 2 +- frame/executive/Cargo.toml | 2 +- frame/system/Cargo.toml | 2 +- frame/vesting/Cargo.toml | 2 +- primitives/core/Cargo.toml | 4 +- primitives/state-machine/Cargo.toml | 2 +- primitives/storage/Cargo.toml | 2 +- primitives/trie/Cargo.toml | 6 +- primitives/version/Cargo.toml | 2 +- 20 files changed, 81 insertions(+), 194 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 45e6ff3eeb0..18a7266d3cc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -963,33 +963,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "criterion" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0363053954f3e679645fc443321ca128b7b950a6fe288cf5f9335cc22ee58394" -dependencies = [ - "atty", - "cast", - "clap", - "criterion-plot 0.3.1", - "csv", - "itertools 0.8.2", - "lazy_static", - "libc", - "num-traits", - "rand_core 0.3.1", - "rand_os", - "rand_xoshiro", - "rayon", - "rayon-core", - "serde", - "serde_derive", - "serde_json", - "tinytemplate", - "walkdir", -] - [[package]] name = "criterion" version = "0.3.3" @@ -999,7 +972,7 @@ dependencies = [ "atty", "cast", "clap", - "criterion-plot 0.4.3", + "criterion-plot", "csv", "itertools 0.9.0", "lazy_static", @@ -1016,17 +989,6 @@ dependencies = [ "walkdir", ] -[[package]] -name = "criterion-plot" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f9212ddf2f4a9eb2d401635190600656a1f88a932ef53d06e7fa4c7e02fb8e" -dependencies = [ - "byteorder 1.3.4", - 
"cast", - "itertools 0.8.2", -] - [[package]] name = "criterion-plot" version = "0.4.3" @@ -1403,8 +1365,8 @@ dependencies = [ "crunchy", "fixed-hash", "impl-rlp", - "impl-serde 0.3.1", - "tiny-keccak 2.0.2", + "impl-serde", + "tiny-keccak", ] [[package]] @@ -1416,7 +1378,7 @@ dependencies = [ "ethbloom", "fixed-hash", "impl-rlp", - "impl-serde 0.3.1", + "impl-serde", "primitive-types", "uint", ] @@ -1678,7 +1640,7 @@ dependencies = [ "log", "once_cell 1.4.0", "parity-scale-codec", - "parity-util-mem 0.7.0", + "parity-util-mem", "paste", "pretty_assertions", "serde", @@ -1745,7 +1707,7 @@ dependencies = [ name = "frame-system" version = "2.0.0-rc6" dependencies = [ - "criterion 0.2.11", + "criterion", "frame-support", "impl-trait-for-tuples", "parity-scale-codec", @@ -2268,22 +2230,9 @@ checksum = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" [[package]] name = "hex-literal" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "961de220ec9a91af2e1e5bd80d02109155695e516771762381ef8581317066e0" -dependencies = [ - "hex-literal-impl", - "proc-macro-hack", -] - -[[package]] -name = "hex-literal-impl" -version = "0.2.2" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "853f769599eb31de176303197b7ba4973299c38c7a7604a6bc88c3eef05b9b46" -dependencies = [ - "proc-macro-hack", -] +checksum = "5af1f635ef1bc545d78392b136bfe1c9809e029023c84a3638a864a10b8819c8" [[package]] name = "hex_fmt" @@ -2494,15 +2443,6 @@ dependencies = [ "rlp", ] -[[package]] -name = "impl-serde" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58e3cae7e99c7ff5a995da2cf78dd0a5383740eda71d98cf7b1910c301ac69b8" -dependencies = [ - "serde", -] - [[package]] name = "impl-serde" version = "0.3.1" @@ -2744,13 +2684,13 @@ checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" [[package]] name = "keccak-hasher" -version = "0.15.2" +version = "0.15.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3468207deea1359a0e921591ae9b4c928733d94eb9d6a2eeda994cfd59f42cf8" +checksum = "711adba9940a039f4374fc5724c0a5eaca84a2d558cce62256bfe26f0dbef05e" dependencies = [ "hash-db", "hash256-std-hasher", - "tiny-keccak 1.5.0", + "tiny-keccak", ] [[package]] @@ -2772,45 +2712,24 @@ dependencies = [ "log", ] -[[package]] -name = "kvdb" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e763b2a9b500ba47948061d1e8bc3b5f03a8a1f067dbcf822a4d2c84d2b54a3a" -dependencies = [ - "parity-util-mem 0.6.0", - "smallvec 1.4.1", -] - [[package]] name = "kvdb" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0315ef2f688e33844400b31f11c263f2b3dc21d8b9355c6891c5f185fae43f9a" dependencies = [ - "parity-util-mem 0.7.0", + "parity-util-mem", "smallvec 1.4.1", ] -[[package]] -name = "kvdb-memorydb" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73027d5e228de6f503b5b7335d530404fc26230a6ae3e09b33ec6e45408509a4" -dependencies = [ - "kvdb 0.6.0", - "parity-util-mem 0.6.0", - "parking_lot 0.10.2", -] - [[package]] name = "kvdb-memorydb" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73de822b260a3bdfb889dbbb65bb2d473eee2253973d6fa4a5d149a2a4a7c66e" dependencies = [ - "kvdb 0.7.0", - "parity-util-mem 0.7.0", + "kvdb", + "parity-util-mem", "parking_lot 0.10.2", ] @@ -2821,11 +2740,11 
@@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c341ef15cfb1f923fa3b5138bfbd2d4813a2c1640b473727a53351c7f0b0fa2" dependencies = [ "fs-swap", - "kvdb 0.7.0", + "kvdb", "log", "num_cpus", "owning_ref", - "parity-util-mem 0.7.0", + "parity-util-mem", "parking_lot 0.10.2", "regex", "rocksdb", @@ -2840,10 +2759,10 @@ checksum = "2701a1369d6ea4f1b9f606db46e5e2a4a8e47f22530a07823d653f85ab1f6c34" dependencies = [ "futures 0.3.5", "js-sys", - "kvdb 0.7.0", - "kvdb-memorydb 0.7.0", + "kvdb", + "kvdb-memorydb", "log", - "parity-util-mem 0.7.0", + "parity-util-mem", "send_wrapper 0.3.0", "wasm-bindgen", "web-sys", @@ -3541,7 +3460,7 @@ checksum = "36f36ddb0b2cdc25d38babba472108798e3477f02be5165f038c5e393e50c57a" dependencies = [ "hash-db", "hashbrown 0.8.1", - "parity-util-mem 0.7.0", + "parity-util-mem", ] [[package]] @@ -3747,7 +3666,7 @@ dependencies = [ "futures 0.3.5", "hash-db", "hex", - "kvdb 0.7.0", + "kvdb", "kvdb-rocksdb", "lazy_static", "log", @@ -3755,7 +3674,7 @@ dependencies = [ "node-runtime", "node-testing", "parity-db", - "parity-util-mem 0.7.0", + "parity-util-mem", "rand 0.7.3", "sc-basic-authorship", "sc-cli", @@ -3874,7 +3793,7 @@ dependencies = [ name = "node-executor" version = "2.0.0-rc6" dependencies = [ - "criterion 0.3.3", + "criterion", "frame-benchmarking", "frame-support", "frame-system", @@ -4113,7 +4032,7 @@ dependencies = [ name = "node-testing" version = "2.0.0-rc6" dependencies = [ - "criterion 0.3.3", + "criterion", "frame-support", "frame-system", "fs_extra", @@ -5277,19 +5196,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "parity-util-mem" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e42755f26e5ea21a6a819d9e63cbd70713e9867a2b767ec2cc65ca7659532c5" -dependencies = [ - "cfg-if", - "impl-trait-for-tuples", - "parity-util-mem-derive", - "parking_lot 0.10.2", - "winapi 0.3.9", -] - [[package]] name = "parity-util-mem" version = "0.7.0" @@ -5605,7 +5511,7 @@ dependencies = [ "fixed-hash", "impl-codec", "impl-rlp", - "impl-serde 0.3.1", + "impl-serde", "uint", ] @@ -5978,16 +5884,6 @@ dependencies = [ "rand_core 0.3.1", ] -[[package]] -name = "rand_xoshiro" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03b418169fb9c46533f326efd6eed2576699c44ca92d3052a066214a8d828929" -dependencies = [ - "byteorder 1.3.4", - "rand_core 0.3.1", -] - [[package]] name = "raw-cpuid" version = "7.0.3" @@ -6466,7 +6362,7 @@ dependencies = [ "names", "nix", "parity-scale-codec", - "parity-util-mem 0.7.0", + "parity-util-mem", "rand 0.7.3", "regex", "rpassword", @@ -6505,8 +6401,8 @@ dependencies = [ "futures 0.3.5", "hash-db", "hex-literal", - "kvdb 0.7.0", - "kvdb-memorydb 0.6.0", + "kvdb", + "kvdb-memorydb", "lazy_static", "log", "parity-scale-codec", @@ -6541,14 +6437,14 @@ dependencies = [ "blake2-rfc", "env_logger", "hash-db", - "kvdb 0.7.0", - "kvdb-memorydb 0.7.0", + "kvdb", + "kvdb-memorydb", "kvdb-rocksdb", "linked-hash-map", "log", "parity-db", "parity-scale-codec", - "parity-util-mem 0.7.0", + "parity-util-mem", "parking_lot 0.10.2", "quickcheck", "sc-client-api", @@ -6963,7 +6859,7 @@ dependencies = [ "ansi_term 0.12.1", "futures 0.3.5", "log", - "parity-util-mem 0.7.0", + "parity-util-mem", "sc-client-api", "sc-network", "sp-blockchain", @@ -7272,7 +7168,7 @@ dependencies = [ "lazy_static", "log", "parity-scale-codec", - "parity-util-mem 0.7.0", + "parity-util-mem", "parking_lot 0.10.2", "pin-project", "rand 0.7.3", @@ -7364,7 
+7260,7 @@ dependencies = [ "env_logger", "log", "parity-scale-codec", - "parity-util-mem 0.7.0", + "parity-util-mem", "parity-util-mem-derive", "parking_lot 0.10.2", "sc-client-api", @@ -7413,13 +7309,13 @@ name = "sc-transaction-graph" version = "2.0.0-rc6" dependencies = [ "assert_matches", - "criterion 0.3.3", + "criterion", "derive_more", "futures 0.3.5", "linked-hash-map", "log", "parity-scale-codec", - "parity-util-mem 0.7.0", + "parity-util-mem", "parking_lot 0.10.2", "retain_mut", "serde", @@ -7444,7 +7340,7 @@ dependencies = [ "intervalier", "log", "parity-scale-codec", - "parity-util-mem 0.7.0", + "parity-util-mem", "parking_lot 0.10.2", "sc-block-builder", "sc-client-api", @@ -7919,7 +7815,7 @@ dependencies = [ name = "sp-api-test" version = "2.0.0-rc6" dependencies = [ - "criterion 0.3.3", + "criterion", "parity-scale-codec", "rustversion", "sc-block-builder", @@ -7960,7 +7856,7 @@ dependencies = [ name = "sp-arithmetic" version = "2.0.0-rc6" dependencies = [ - "criterion 0.3.3", + "criterion", "integer-sqrt", "num-traits", "parity-scale-codec", @@ -8133,7 +8029,7 @@ dependencies = [ "base58", "blake2-rfc", "byteorder 1.3.4", - "criterion 0.2.11", + "criterion", "derive_more", "dyn-clonable", "ed25519-dalek", @@ -8142,14 +8038,14 @@ dependencies = [ "hash256-std-hasher", "hex", "hex-literal", - "impl-serde 0.3.1", + "impl-serde", "lazy_static", "libsecp256k1", "log", "merlin", "num-traits", "parity-scale-codec", - "parity-util-mem 0.7.0", + "parity-util-mem", "parking_lot 0.10.2", "pretty_assertions", "primitive-types", @@ -8169,7 +8065,7 @@ dependencies = [ "sp-storage", "substrate-bip39", "tiny-bip39", - "tiny-keccak 2.0.2", + "tiny-keccak", "twox-hash", "wasmi", "zeroize", @@ -8179,7 +8075,7 @@ dependencies = [ name = "sp-database" version = "2.0.0-rc6" dependencies = [ - "kvdb 0.7.0", + "kvdb", "parking_lot 0.10.2", ] @@ -8338,7 +8234,7 @@ dependencies = [ "impl-trait-for-tuples", "log", "parity-scale-codec", - "parity-util-mem 0.7.0", + "parity-util-mem", "paste", "rand 0.7.3", "serde", @@ -8495,7 +8391,7 @@ version = "2.0.0-rc6" name = "sp-storage" version = "2.0.0-rc6" dependencies = [ - "impl-serde 0.2.3", + "impl-serde", "parity-scale-codec", "ref-cast", "serde", @@ -8508,7 +8404,7 @@ name = "sp-test-primitives" version = "2.0.0-rc6" dependencies = [ "parity-scale-codec", - "parity-util-mem 0.7.0", + "parity-util-mem", "serde", "sp-application-crypto", "sp-core", @@ -8555,7 +8451,7 @@ dependencies = [ name = "sp-trie" version = "2.0.0-rc6" dependencies = [ - "criterion 0.2.11", + "criterion", "hash-db", "hex-literal", "memory-db", @@ -8584,7 +8480,7 @@ dependencies = [ name = "sp-version" version = "2.0.0-rc6" dependencies = [ - "impl-serde 0.2.3", + "impl-serde", "parity-scale-codec", "serde", "sp-runtime", @@ -8867,7 +8763,7 @@ dependencies = [ "pallet-babe", "pallet-timestamp", "parity-scale-codec", - "parity-util-mem 0.7.0", + "parity-util-mem", "sc-block-builder", "sc-executor", "sc-service", @@ -9146,15 +9042,6 @@ dependencies = [ "unicode-normalization", ] -[[package]] -name = "tiny-keccak" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d8a021c69bb74a44ccedb824a046447e2c84a01df9e5c20779750acb38e11b2" -dependencies = [ - "crunchy", -] - [[package]] name = "tiny-keccak" version = "2.0.2" @@ -9573,11 +9460,11 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" -version = "0.24.0" +version = "0.25.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "24987a413863acfa081fb75051d0c2824cd4c450e2f0a7e03dca93ac989775fc" +checksum = "af2cc37cac8cc158119982c920cbb9b8243d8540c1d13b8aca84484bfc83a426" dependencies = [ - "criterion 0.2.11", + "criterion", "hash-db", "keccak-hasher", "memory-db", @@ -9657,7 +9544,7 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3bfd5b7557925ce778ff9b9ef90e3ade34c524b5ff10e239c69a42d546d2af56" dependencies = [ - "rand 0.7.3", + "rand 0.5.6", ] [[package]] diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 09ed51616d5..92f223427a7 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -37,7 +37,7 @@ crate-type = ["cdylib", "rlib"] codec = { package = "parity-scale-codec", version = "1.3.4" } serde = { version = "1.0.102", features = ["derive"] } futures = { version = "0.3.1", features = ["compat"] } -hex-literal = "0.2.1" +hex-literal = "0.3.1" jsonrpc-core = "14.2.0" jsonrpc-pubsub = "14.2.0" log = "0.4.8" diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 303db4c2d2e..1195456b0fa 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -18,7 +18,7 @@ codec = { package = "parity-scale-codec", version = "1.3.4", default-features = integer-sqrt = { version = "0.1.2" } serde = { version = "1.0.102", optional = true } static_assertions = "1.1.0" -hex-literal = { version = "0.2.1", optional = true } +hex-literal = { version = "0.3.1", optional = true } # primitives sp-authority-discovery = { version = "2.0.0-rc6", default-features = false, path = "../../../primitives/authority-discovery" } diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml index e84c3642bcf..8f31e831beb 100644 --- a/client/api/Cargo.toml +++ b/client/api/Cargo.toml @@ -15,18 +15,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } sp-consensus = { version = "0.8.0-rc6", path = "../../primitives/consensus/common" } -derive_more = { version = "0.99.2" } +derive_more = "0.99.2" sc-executor = { version = "0.8.0-rc6", path = "../executor" } sp-externalities = { version = "0.8.0-rc6", path = "../../primitives/externalities" } -fnv = { version = "1.0.6" } -futures = { version = "0.3.1" } +fnv = "1.0.6" +futures = "0.3.1" hash-db = { version = "0.15.2", default-features = false } sp-blockchain = { version = "2.0.0-rc6", path = "../../primitives/blockchain" } -hex-literal = { version = "0.2.1" } +hex-literal = "0.3.1" sp-inherents = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/inherents" } sp-keyring = { version = "2.0.0-rc6", path = "../../primitives/keyring" } kvdb = "0.7.0" -log = { version = "0.4.8" } +log = "0.4.8" parking_lot = "0.10.0" lazy_static = "1.4.0" sp-database = { version = "2.0.0-rc6", path = "../../primitives/database" } @@ -44,6 +44,6 @@ sp-transaction-pool = { version = "2.0.0-rc6", path = "../../primitives/transact prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-rc6", path = "../../utils/prometheus" } [dev-dependencies] -kvdb-memorydb = "0.6.0" +kvdb-memorydb = "0.7.0" sp-test-primitives = { version = "2.0.0-rc6", path = "../../primitives/test-primitives" } substrate-test-runtime = { version = "2.0.0-rc6", path = "../../test-utils/runtime" } diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index c25c9479c82..f963068ea37 100644 --- 
a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -38,7 +38,7 @@ libsecp256k1 = "0.3.4" [dev-dependencies] assert_matches = "1.3.0" wabt = "0.9.2" -hex-literal = "0.2.1" +hex-literal = "0.3.1" sc-runtime-test = { version = "2.0.0-rc6", path = "runtime-test" } substrate-test-runtime = { version = "2.0.0-rc6", path = "../../test-utils/runtime" } sp-state-machine = { version = "0.8.0-rc6", path = "../../primitives/state-machine" } diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index 016b6e37d2d..501843dc5b6 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -12,7 +12,7 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -hex-literal = "0.2.1" +hex-literal = "0.3.1" tempfile = "3.1.0" tokio = "0.1.22" futures01 = { package = "futures", version = "0.1.29" } diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml index 8011ce779df..750123b1461 100644 --- a/frame/benchmarking/Cargo.toml +++ b/frame/benchmarking/Cargo.toml @@ -25,7 +25,7 @@ frame-support = { version = "2.0.0-rc6", default-features = false, path = "../su frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } [dev-dependencies] -hex-literal = "0.2.1" +hex-literal = "0.3.1" [features] default = [ "std" ] diff --git a/frame/collective/Cargo.toml b/frame/collective/Cargo.toml index 38fb3d6cd3b..42dc39b775d 100644 --- a/frame/collective/Cargo.toml +++ b/frame/collective/Cargo.toml @@ -23,7 +23,7 @@ frame-support = { version = "2.0.0-rc6", default-features = false, path = "../su frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } [dev-dependencies] -hex-literal = "0.2.1" +hex-literal = "0.3.1" pallet-balances = { version = "2.0.0-rc6", path = "../balances" } [features] diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index 4d3083720a6..05fbd85bc69 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -31,7 +31,7 @@ wat = { version = "1.0", optional = true, default-features = false } [dev-dependencies] assert_matches = "1.3.0" -hex-literal = "0.2.1" +hex-literal = "0.3.1" pallet-balances = { version = "2.0.0-rc6", path = "../balances" } pallet-timestamp = { version = "2.0.0-rc6", path = "../timestamp" } pallet-randomness-collective-flip = { version = "2.0.0-rc6", path = "../randomness-collective-flip" } diff --git a/frame/democracy/Cargo.toml b/frame/democracy/Cargo.toml index ac3d4419502..8eb406fc525 100644 --- a/frame/democracy/Cargo.toml +++ b/frame/democracy/Cargo.toml @@ -27,7 +27,7 @@ pallet-balances = { version = "2.0.0-rc6", path = "../balances" } pallet-scheduler = { version = "2.0.0-rc6", path = "../scheduler" } sp-storage = { version = "2.0.0-rc6", path = "../../primitives/storage" } substrate-test-utils = { version = "2.0.0-rc6", path = "../../test-utils" } -hex-literal = "0.2.1" +hex-literal = "0.3.1" [features] default = ["std"] diff --git a/frame/elections-phragmen/Cargo.toml b/frame/elections-phragmen/Cargo.toml index 3f27df1c564..cf76f085f01 100644 --- a/frame/elections-phragmen/Cargo.toml +++ b/frame/elections-phragmen/Cargo.toml @@ -23,7 +23,7 @@ frame-benchmarking = { version = "2.0.0-rc6", default-features = false, path = " [dev-dependencies] sp-io = { version = "2.0.0-rc6", path = "../../primitives/io" } -hex-literal = "0.2.1" +hex-literal = "0.3.1" pallet-balances = { version = "2.0.0-rc6", path = "../balances" } sp-core = { version = "2.0.0-rc6", path = 
"../../primitives/core" } substrate-test-utils = { version = "2.0.0-rc6", path = "../../test-utils" } diff --git a/frame/elections/Cargo.toml b/frame/elections/Cargo.toml index 58dba26af98..01619f2b05a 100644 --- a/frame/elections/Cargo.toml +++ b/frame/elections/Cargo.toml @@ -22,7 +22,7 @@ frame-support = { version = "2.0.0-rc6", default-features = false, path = "../su frame-system = { version = "2.0.0-rc6", default-features = false, path = "../system" } [dev-dependencies] -hex-literal = "0.2.1" +hex-literal = "0.3.1" pallet-balances = { version = "2.0.0-rc6", path = "../balances" } [features] diff --git a/frame/executive/Cargo.toml b/frame/executive/Cargo.toml index bb2cd578978..8114f74b8fe 100644 --- a/frame/executive/Cargo.toml +++ b/frame/executive/Cargo.toml @@ -22,7 +22,7 @@ sp-std = { version = "2.0.0-rc6", default-features = false, path = "../../primit sp-io = { version = "2.0.0-rc6", default-features = false, path = "../../primitives/io" } [dev-dependencies] -hex-literal = "0.2.1" +hex-literal = "0.3.1" sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } sp-io ={ version = "2.0.0-rc6", path = "../../primitives/io" } pallet-indices = { version = "2.0.0-rc6", path = "../indices" } diff --git a/frame/system/Cargo.toml b/frame/system/Cargo.toml index 5a5643fec9a..a3dbad0cb84 100644 --- a/frame/system/Cargo.toml +++ b/frame/system/Cargo.toml @@ -23,7 +23,7 @@ frame-support = { version = "2.0.0-rc6", default-features = false, path = "../su impl-trait-for-tuples = "0.1.3" [dev-dependencies] -criterion = "0.2.11" +criterion = "0.3.3" sp-externalities = { version = "0.8.0-rc6", path = "../../primitives/externalities" } substrate-test-runtime-client = { version = "2.0.0-rc6", path = "../../test-utils/runtime/client" } diff --git a/frame/vesting/Cargo.toml b/frame/vesting/Cargo.toml index 41f144503e4..9ef11a2141b 100644 --- a/frame/vesting/Cargo.toml +++ b/frame/vesting/Cargo.toml @@ -26,7 +26,7 @@ sp-io = { version = "2.0.0-rc6", path = "../../primitives/io" } sp-core = { version = "2.0.0-rc6", path = "../../primitives/core" } pallet-balances = { version = "2.0.0-rc6", path = "../balances" } sp-storage = { version = "2.0.0-rc6", path = "../../primitives/storage" } -hex-literal = "0.2.1" +hex-literal = "0.3.1" [features] default = ["std"] diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index f74b0c2738e..1375fa228bf 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -57,9 +57,9 @@ sp-runtime-interface = { version = "2.0.0-rc6", default-features = false, path = [dev-dependencies] sp-serializer = { version = "2.0.0-rc6", path = "../serializer" } pretty_assertions = "0.6.1" -hex-literal = "0.2.1" +hex-literal = "0.3.1" rand = "0.7.2" -criterion = "0.2.11" +criterion = "0.3.3" serde_json = "1.0" rand_chacha = "0.2.2" diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index c563fa5384d..88d3b5a75c1 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -29,7 +29,7 @@ itertools = "0.9" smallvec = "1.4.1" [dev-dependencies] -hex-literal = "0.2.1" +hex-literal = "0.3.1" sp-runtime = { version = "2.0.0-rc6", path = "../runtime" } pretty_assertions = "0.6.1" diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index 9f86736518f..ea13c576b9d 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-std = { version = "2.0.0-rc6", default-features = 
false, path = "../std" } serde = { version = "1.0.101", optional = true, features = ["derive"] } -impl-serde = { version = "0.2.3", optional = true } +impl-serde = { version = "0.3.1", optional = true } ref-cast = "1.0.0" sp-debug-derive = { version = "2.0.0-rc6", path = "../debug-derive" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 3ebe53c680a..7705c80270c 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -26,10 +26,10 @@ memory-db = { version = "0.24.0", default-features = false } sp-core = { version = "2.0.0-rc6", default-features = false, path = "../core" } [dev-dependencies] -trie-bench = "0.24.0" +trie-bench = "0.25.0" trie-standardmap = "0.15.2" -criterion = "0.2.11" -hex-literal = "0.2.1" +criterion = "0.3.3" +hex-literal = "0.3.1" sp-runtime = { version = "2.0.0-rc6", path = "../runtime" } [features] diff --git a/primitives/version/Cargo.toml b/primitives/version/Cargo.toml index 8dbf2000f0c..7db79ba0003 100644 --- a/primitives/version/Cargo.toml +++ b/primitives/version/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -impl-serde = { version = "0.2.3", optional = true } +impl-serde = { version = "0.3.1", optional = true } serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-rc6", default-features = false, path = "../std" } -- GitLab From 638771d776d1c59dd1cc4dcec012c7af06074482 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sat, 22 Aug 2020 14:53:39 +0200 Subject: [PATCH 118/132] Fix printing of subkey when using the `--network` override (#6932) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix printing of subkey when using the `--network` override This fixes a bug where `--network` did not print the account ss58 address for the requested network. Basically we now always print all account ss58 addresses using the requested network.
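To make the new rule concrete, here is a minimal sketch (not part of the patch; the helper name `ss58_for` is invented for illustration) of how the SS58 address is now derived: an explicit `--network` override always wins, and only in its absence does the address fall back to the network parsed from the URI, or to the default format.

```rust
use sp_core::crypto::{Ss58AddressFormat, Ss58Codec};
use sp_core::sr25519;

// Illustrative helper mirroring the fallback rule described above.
fn ss58_for(
    public: &sr25519::Public,
    parsed_network: Ss58AddressFormat,
    cli_override: Option<Ss58AddressFormat>,
) -> String {
    // An explicit `--network` value takes precedence over the network parsed
    // from the public-key URI; otherwise the parsed network is used.
    let network = cli_override.unwrap_or(parsed_network);
    public.to_ss58check_with_version(network)
}
```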
* Review comments * Fixes test * Update client/cli/src/commands/inspect.rs Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * Update client/cli/src/commands/utils.rs Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * Fix more tests Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> --- bin/utils/subkey/src/lib.rs | 4 +- client/cli/src/commands/generate.rs | 3 +- client/cli/src/commands/generate_node_key.rs | 3 +- client/cli/src/commands/inspect.rs | 18 +-- client/cli/src/commands/key.rs | 4 +- client/cli/src/commands/mod.rs | 2 +- client/cli/src/commands/utils.rs | 107 +++++++++--------- client/cli/src/params/mod.rs | 4 +- primitives/core/src/crypto.rs | 5 +- .../frame-utilities-cli/src/module_id.rs | 8 +- 10 files changed, 80 insertions(+), 78 deletions(-) diff --git a/bin/utils/subkey/src/lib.rs b/bin/utils/subkey/src/lib.rs index 2e4c7a350fe..bb89541d5b1 100644 --- a/bin/utils/subkey/src/lib.rs +++ b/bin/utils/subkey/src/lib.rs @@ -19,7 +19,7 @@ use structopt::StructOpt; use sc_cli::{ Error, VanityCmd, SignCmd, VerifyCmd, InsertCmd, - GenerateNodeKeyCmd, GenerateCmd, InspectCmd, InspectNodeKeyCmd + GenerateNodeKeyCmd, GenerateCmd, InspectKeyCmd, InspectNodeKeyCmd }; use substrate_frame_cli::ModuleIdCmd; use sp_core::crypto::Ss58Codec; @@ -38,7 +38,7 @@ pub enum Subkey { Generate(GenerateCmd), /// Gets a public key and a SS58 address from the provided Secret URI - InspectKey(InspectCmd), + InspectKey(InspectKeyCmd), /// Print the peer ID corresponding to the node key in the given file InspectNodeKey(InspectNodeKeyCmd), diff --git a/client/cli/src/commands/generate.rs b/client/cli/src/commands/generate.rs index 9eeca55a2ee..4664e17551c 100644 --- a/client/cli/src/commands/generate.rs +++ b/client/cli/src/commands/generate.rs @@ -62,7 +62,6 @@ impl GenerateCmd { }; let mnemonic = Mnemonic::new(words, Language::English); let password = self.keystore_params.read_password()?; - let maybe_network = self.network_scheme.network.clone(); let output = self.output_scheme.output_type.clone(); with_crypto_scheme!( @@ -70,7 +69,7 @@ impl GenerateCmd { print_from_uri( mnemonic.phrase(), password, - maybe_network, + self.network_scheme.network.clone(), output ) ); diff --git a/client/cli/src/commands/generate_node_key.rs b/client/cli/src/commands/generate_node_key.rs index 9ee04d23e34..197e0eb5d90 100644 --- a/client/cli/src/commands/generate_node_key.rs +++ b/client/cli/src/commands/generate_node_key.rs @@ -60,8 +60,9 @@ mod tests { #[test] fn generate_node_key() { let mut file = Builder::new().prefix("keyfile").tempfile().unwrap(); + let file_path = file.path().display().to_string(); let generate = - GenerateNodeKeyCmd::from_iter(&["generate-node-key", "--file", "/tmp/keyfile"]); + GenerateNodeKeyCmd::from_iter(&["generate-node-key", "--file", &file_path]); assert!(generate.run().is_ok()); let mut buf = String::new(); assert!(file.read_to_string(&mut buf).is_ok()); diff --git a/client/cli/src/commands/inspect.rs b/client/cli/src/commands/inspect.rs index 3356d7ca07a..0c9e54d1185 100644 --- a/client/cli/src/commands/inspect.rs +++ b/client/cli/src/commands/inspect.rs @@ -28,12 +28,14 @@ use structopt::StructOpt; name = "inspect-key", about = "Gets a public key and a SS58 address from the provided Secret URI" )] -pub struct InspectCmd { +pub struct InspectKeyCmd { /// A Key URI to be inspected. May be a secret seed, secret URI /// (with derivation paths and password), SS58 or public URI. 
- /// If the value is a file, the file content is used as URI. - /// If not given, you will be prompted for the URI. - #[structopt(long)] + /// + /// If the given value is a file, the file content will be used + /// as URI. + /// + /// If omitted, you will be prompted for the URI. uri: Option, #[allow(missing_docs)] @@ -53,7 +55,7 @@ pub struct InspectCmd { pub crypto_scheme: CryptoSchemeFlag, } -impl InspectCmd { +impl InspectKeyCmd { /// Run the command pub fn run(&self) -> Result<(), Error> { let uri = utils::read_uri(self.uri.as_ref())?; @@ -76,7 +78,7 @@ impl InspectCmd { #[cfg(test)] mod tests { - use super::InspectCmd; + use super::*; use structopt::StructOpt; #[test] @@ -86,10 +88,10 @@ mod tests { let seed = "0xad1fb77243b536b90cfe5f0d351ab1b1ac40e3890b41dc64f766ee56340cfca5"; let inspect = - InspectCmd::from_iter(&["inspect-key", "--uri", words, "--password", "12345"]); + InspectKeyCmd::from_iter(&["inspect-key", words, "--password", "12345"]); assert!(inspect.run().is_ok()); - let inspect = InspectCmd::from_iter(&["inspect-key", "--uri", seed]); + let inspect = InspectKeyCmd::from_iter(&["inspect-key", seed]); assert!(inspect.run().is_ok()); } } diff --git a/client/cli/src/commands/key.rs b/client/cli/src/commands/key.rs index 61145eace10..50142208b88 100644 --- a/client/cli/src/commands/key.rs +++ b/client/cli/src/commands/key.rs @@ -22,7 +22,7 @@ use structopt::StructOpt; use super::{ insert::InsertCmd, - inspect::InspectCmd, + inspect::InspectKeyCmd, generate::GenerateCmd, inspect_node_key::InspectNodeKeyCmd, generate_node_key::GenerateNodeKeyCmd, @@ -38,7 +38,7 @@ pub enum KeySubcommand { Generate(GenerateCmd), /// Gets a public key and a SS58 address from the provided Secret URI - InspectKey(InspectCmd), + InspectKey(InspectKeyCmd), /// Print the peer ID corresponding to the node key in the given file InspectNodeKey(InspectNodeKeyCmd), diff --git a/client/cli/src/commands/mod.rs b/client/cli/src/commands/mod.rs index 33472b29a5e..108c38b19db 100644 --- a/client/cli/src/commands/mod.rs +++ b/client/cli/src/commands/mod.rs @@ -47,7 +47,7 @@ pub use self::{ sign::SignCmd, generate::GenerateCmd, insert::InsertCmd, - inspect::InspectCmd, + inspect::InspectKeyCmd, generate_node_key::GenerateNodeKeyCmd, inspect_node_key::InspectNodeKeyCmd, key::KeySubcommand, diff --git a/client/cli/src/commands/utils.rs b/client/cli/src/commands/utils.rs index 96b6128057a..a3298b222ad 100644 --- a/client/cli/src/commands/utils.rs +++ b/client/cli/src/commands/utils.rs @@ -54,95 +54,96 @@ pub fn read_uri(uri: Option<&String>) -> error::Result { pub fn print_from_uri( uri: &str, password: Option, - network_override: Ss58AddressFormat, + network_override: Option, output: OutputType, -) - where - Pair: sp_core::Pair, - Pair::Public: Into, -{ +) where Pair: sp_core::Pair, Pair::Public: Into { let password = password.as_ref().map(|s| s.expose_secret().as_str()); if let Ok((pair, seed)) = Pair::from_phrase(uri, password.clone()) { let public_key = pair.public(); + let network_override = network_override.unwrap_or_default(); match output { OutputType::Json => { let json = json!({ - "secretPhrase": uri, - "secretSeed": format_seed::(seed), - "publicKey": format_public_key::(public_key.clone()), - "accountId": format_account_id::(public_key), - "ss58Address": pair.public().into().into_account().to_ss58check(), - }); + "secretPhrase": uri, + "secretSeed": format_seed::(seed), + "publicKey": format_public_key::(public_key.clone()), + "accountId": format_account_id::(public_key), + "ss58Address": 
pair.public().into().into_account().to_ss58check_with_version(network_override), + }); println!("{}", serde_json::to_string_pretty(&json).expect("Json pretty print failed")); }, OutputType::Text => { - println!("Secret phrase `{}` is account:\n \ - Secret seed: {}\n \ - Public key (hex): {}\n \ - Account ID: {}\n \ - SS58 Address: {}", - uri, - format_seed::(seed), - format_public_key::(public_key.clone()), - format_account_id::(public_key), - pair.public().into().into_account().to_ss58check(), + println!( + "Secret phrase `{}` is account:\n \ + Secret seed: {}\n \ + Public key (hex): {}\n \ + Account ID: {}\n \ + SS58 Address: {}", + uri, + format_seed::(seed), + format_public_key::(public_key.clone()), + format_account_id::(public_key), + pair.public().into().into_account().to_ss58check_with_version(network_override), ); }, } } else if let Ok((pair, seed)) = Pair::from_string_with_seed(uri, password.clone()) { let public_key = pair.public(); + let network_override = network_override.unwrap_or_default(); match output { OutputType::Json => { let json = json!({ - "secretKeyUri": uri, - "secretSeed": if let Some(seed) = seed { format_seed::(seed) } else { "n/a".into() }, - "publicKey": format_public_key::(public_key.clone()), - "accountId": format_account_id::(public_key), - "ss58Address": pair.public().into().into_account().to_ss58check(), - }); + "secretKeyUri": uri, + "secretSeed": if let Some(seed) = seed { format_seed::(seed) } else { "n/a".into() }, + "publicKey": format_public_key::(public_key.clone()), + "accountId": format_account_id::(public_key), + "ss58Address": pair.public().into().into_account().to_ss58check_with_version(network_override), + }); println!("{}", serde_json::to_string_pretty(&json).expect("Json pretty print failed")); }, OutputType::Text => { - println!("Secret Key URI `{}` is account:\n \ - Secret seed: {}\n \ - Public key (hex): {}\n \ - Account ID: {}\n \ - SS58 Address: {}", - uri, - if let Some(seed) = seed { format_seed::(seed) } else { "n/a".into() }, - format_public_key::(public_key.clone()), - format_account_id::(public_key), - pair.public().into().into_account().to_ss58check(), + println!( + "Secret Key URI `{}` is account:\n \ + Secret seed: {}\n \ + Public key (hex): {}\n \ + Account ID: {}\n \ + SS58 Address: {}", + uri, + if let Some(seed) = seed { format_seed::(seed) } else { "n/a".into() }, + format_public_key::(public_key.clone()), + format_account_id::(public_key), + pair.public().into().into_account().to_ss58check_with_version(network_override), ); }, } - } else if let Ok((public_key, _v)) = Pair::Public::from_string_with_version(uri) { - let v = network_override; + } else if let Ok((public_key, network)) = Pair::Public::from_string_with_version(uri) { + let network_override = network_override.unwrap_or(network); match output { OutputType::Json => { let json = json!({ - "publicKeyUri": uri, - "networkId": String::from(v), - "publicKey": format_public_key::(public_key.clone()), - "accountId": format_account_id::(public_key.clone()), - "ss58Address": public_key.to_ss58check_with_version(v), - }); + "publicKeyUri": uri, + "networkId": String::from(network_override), + "publicKey": format_public_key::(public_key.clone()), + "accountId": format_account_id::(public_key.clone()), + "ss58Address": public_key.to_ss58check_with_version(network_override), + }); println!("{}", serde_json::to_string_pretty(&json).expect("Json pretty print failed")); }, OutputType::Text => { - println!("Public Key URI `{}` is account:\n \ - Network ID/version: {}\n \ - Public 
key (hex): {}\n \ - Account ID: {}\n \ - SS58 Address: {}", + println!( + "Public Key URI `{}` is account:\n \ + Network ID/version: {}\n \ + Public key (hex): {}\n \ + Account ID: {}\n \ + SS58 Address: {}", uri, - String::from(v), + String::from(network_override), format_public_key::(public_key.clone()), format_account_id::(public_key.clone()), - public_key.to_ss58check_with_version(v), + public_key.to_ss58check_with_version(network_override), ); }, } diff --git a/client/cli/src/params/mod.rs b/client/cli/src/params/mod.rs index 5245c1220fb..93467bc8ec6 100644 --- a/client/cli/src/params/mod.rs +++ b/client/cli/src/params/mod.rs @@ -153,12 +153,12 @@ pub struct NetworkSchemeFlag { #[structopt( long, value_name = "NETWORK", + short = "n", possible_values = &Ss58AddressFormat::all_names()[..], parse(try_from_str = Ss58AddressFormat::try_from), case_insensitive = true, - default_value = "polkadot" )] - pub network: Ss58AddressFormat, + pub network: Option, } #[cfg(test)] diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index d6f0850d9ed..77a339ac7c6 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -265,7 +265,6 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + Default { } /// Return the ss58-check string for this key. - #[cfg(feature = "std")] fn to_ss58check_with_version(&self, version: Ss58AddressFormat) -> String { let mut v = vec![version.into()]; @@ -274,9 +273,11 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + Default { v.extend(&r.as_bytes()[0..2]); v.to_base58() } + /// Return the ss58-check string for this key. #[cfg(feature = "std")] fn to_ss58check(&self) -> String { self.to_ss58check_with_version(*DEFAULT_VERSION.lock()) } + /// Some if the string is a properly encoded SS58Check address, optionally with /// a derivation path following. #[cfg(feature = "std")] @@ -377,7 +378,7 @@ macro_rules! ss58_address_format { Ss58AddressFormat::Custom(n) if n == x => Ok(Ss58AddressFormat::Custom(x)), _ => Err(()), } - + #[cfg(not(feature = "std"))] Err(()) }, diff --git a/utils/frame/frame-utilities-cli/src/module_id.rs b/utils/frame/frame-utilities-cli/src/module_id.rs index 3739d668e3d..cc76c70d0fa 100644 --- a/utils/frame/frame-utilities-cli/src/module_id.rs +++ b/utils/frame/frame-utilities-cli/src/module_id.rs @@ -44,9 +44,8 @@ pub struct ModuleIdCmd { possible_values = &Ss58AddressFormat::all_names()[..], parse(try_from_str = Ss58AddressFormat::try_from), case_insensitive = true, - default_value = "substrate" )] - pub network: Ss58AddressFormat, + pub network: Option, #[allow(missing_docs)] #[structopt(flatten)] @@ -78,14 +77,13 @@ impl ModuleIdCmd { .map_err(|_| "Cannot convert argument to moduleid: argument should be 8-character string")?; let account_id: R::AccountId = ModuleId(id_fixed_array).into_account(); - let network = self.network; with_crypto_scheme!( self.crypto_scheme.scheme, print_from_uri( - &account_id.to_ss58check_with_version(network), + &account_id.to_ss58check_with_version(self.network.clone().unwrap_or_default()), password, - network, + self.network, self.output_scheme.output_type.clone() ) ); -- GitLab From dfe2871b272d2bb343c8fb2b1f0bb671324e52e9 Mon Sep 17 00:00:00 2001 From: Gavin Wood Date: Sun, 23 Aug 2020 15:52:40 +0500 Subject: [PATCH 119/132] Time-delay proxies (#6770) * Time-delay proxies. 
* Tests * Initial couple of benchmarks * Fix up runtime * Last couple of benchmarks * Tests * Docs * Migration * add tests to proxy benchmarks * generated benchmarks, not integrated * Fix weight trait * integrate weightinfo * default weight * Grumble * Deduplication, split proxy from announced_proxy and don't require reauthentication * Fix * Remoe superfluous * Typos * Indent * Fix * Fixes * rename 'proxy_announced' -> 'announced_proxy' * flip rename * comments and spacing * fix proxy_announced * remove unneeded `execute` marker * Avoid unneeded changes to extrinsic indices * Cleanup * Fixes * Update Benchmarks and Weights for Delayed Proxy (#6811) * update bechmarks to parameterize announcements * remove announcement param from proxy * Update pallet_proxy.rs * Update weights * Bump runtime * Fix benchmark Co-authored-by: Shawn Tabrizi --- bin/node/runtime/src/lib.rs | 11 +- bin/node/runtime/src/weights/mod.rs | 1 + .../runtime/src/weights/pallet_balances.rs | 2 +- bin/node/runtime/src/weights/pallet_proxy.rs | 85 ++++ frame/proxy/src/benchmarking.rs | 189 +++++++- frame/proxy/src/default_weight.rs | 84 ++++ frame/proxy/src/lib.rs | 441 ++++++++++++++---- frame/proxy/src/tests.rs | 180 +++++-- 8 files changed, 866 insertions(+), 127 deletions(-) create mode 100644 bin/node/runtime/src/weights/pallet_proxy.rs create mode 100644 frame/proxy/src/default_weight.rs diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 9595ef424d8..e7842e5c4ba 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -109,7 +109,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 257, + spec_version: 258, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -216,6 +216,9 @@ parameter_types! { // Additional storage item size of 33 bytes. pub const ProxyDepositFactor: Balance = deposit(0, 33); pub const MaxProxies: u16 = 32; + pub const AnnouncementDepositBase: Balance = deposit(1, 8); + pub const AnnouncementDepositFactor: Balance = deposit(0, 66); + pub const MaxPending: u16 = 32; } /// The type used to represent the kinds of proxying allowed. @@ -261,7 +264,11 @@ impl pallet_proxy::Trait for Runtime { type ProxyDepositBase = ProxyDepositBase; type ProxyDepositFactor = ProxyDepositFactor; type MaxProxies = MaxProxies; - type WeightInfo = (); + type WeightInfo = weights::pallet_proxy::WeightInfo; + type MaxPending = MaxPending; + type CallHasher = BlakeTwo256; + type AnnouncementDepositBase = AnnouncementDepositBase; + type AnnouncementDepositFactor = AnnouncementDepositFactor; } parameter_types! { diff --git a/bin/node/runtime/src/weights/mod.rs b/bin/node/runtime/src/weights/mod.rs index 0e078e7ac08..322fb8886c0 100644 --- a/bin/node/runtime/src/weights/mod.rs +++ b/bin/node/runtime/src/weights/mod.rs @@ -18,5 +18,6 @@ pub mod frame_system; pub mod pallet_balances; pub mod pallet_democracy; +pub mod pallet_proxy; pub mod pallet_timestamp; pub mod pallet_utility; diff --git a/bin/node/runtime/src/weights/pallet_balances.rs b/bin/node/runtime/src/weights/pallet_balances.rs index 21a90a97e63..bcbc4ced6ef 100644 --- a/bin/node/runtime/src/weights/pallet_balances.rs +++ b/bin/node/runtime/src/weights/pallet_balances.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Weights for the Balances Pallet +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; diff --git a/bin/node/runtime/src/weights/pallet_proxy.rs b/bin/node/runtime/src/weights/pallet_proxy.rs new file mode 100644 index 00000000000..92c43cd4853 --- /dev/null +++ b/bin/node/runtime/src/weights/pallet_proxy.rs @@ -0,0 +1,85 @@ +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl pallet_proxy::WeightInfo for WeightInfo { + fn proxy(p: u32, ) -> Weight { + (26127000 as Weight) + .saturating_add((214000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + } + fn proxy_announced(a: u32, p: u32, ) -> Weight { + (55405000 as Weight) + .saturating_add((774000 as Weight).saturating_mul(a as Weight)) + .saturating_add((209000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn remove_announcement(a: u32, p: u32, ) -> Weight { + (35879000 as Weight) + .saturating_add((783000 as Weight).saturating_mul(a as Weight)) + .saturating_add((20000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn reject_announcement(a: u32, p: u32, ) -> Weight { + (36097000 as Weight) + .saturating_add((780000 as Weight).saturating_mul(a as Weight)) + .saturating_add((12000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn announce(a: u32, p: u32, ) -> Weight { + (53769000 as Weight) + .saturating_add((675000 as Weight).saturating_mul(a as Weight)) + .saturating_add((214000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn add_proxy(p: u32, ) -> Weight { + (36082000 as Weight) + .saturating_add((234000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn remove_proxy(p: u32, ) -> Weight { + (32885000 as Weight) + .saturating_add((267000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn remove_proxies(p: u32, ) -> Weight { + (31735000 as Weight) + .saturating_add((215000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn anonymous(p: u32, ) -> Weight { + (50907000 as Weight) + 
.saturating_add((61000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn kill_anonymous(p: u32, ) -> Weight { + (33926000 as Weight) + .saturating_add((208000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } +} diff --git a/frame/proxy/src/benchmarking.rs b/frame/proxy/src/benchmarking.rs index f68a2c3a4cd..5f1d79741dd 100644 --- a/frame/proxy/src/benchmarking.rs +++ b/frame/proxy/src/benchmarking.rs @@ -20,13 +20,21 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use frame_system::RawOrigin; +use frame_system::{RawOrigin, EventRecord}; use frame_benchmarking::{benchmarks, account, whitelisted_caller}; use sp_runtime::traits::Bounded; use crate::Module as Proxy; const SEED: u32 = 0; +fn assert_last_event(generic_event: ::Event) { + let events = frame_system::Module::::events(); + let system_event: ::Event = generic_event.into(); + // compare to the last event record + let EventRecord { event, .. } = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + fn add_proxies(n: u32, maybe_who: Option) -> Result<(), &'static str> { let caller = maybe_who.unwrap_or_else(|| whitelisted_caller()); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); @@ -34,7 +42,38 @@ fn add_proxies(n: u32, maybe_who: Option) -> Result<(), Proxy::::add_proxy( RawOrigin::Signed(caller.clone()).into(), account("target", i, SEED), - T::ProxyType::default() + T::ProxyType::default(), + T::BlockNumber::zero(), + )?; + } + Ok(()) +} + +fn add_announcements( + n: u32, + maybe_who: Option, + maybe_real: Option +) -> Result<(), &'static str> { + let caller = maybe_who.unwrap_or_else(|| account("caller", 0, SEED)); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let real = if let Some(real) = maybe_real { + real + } else { + let real = account("real", 0, SEED); + T::Currency::make_free_balance_be(&real, BalanceOf::::max_value()); + Proxy::::add_proxy( + RawOrigin::Signed(real.clone()).into(), + caller.clone(), + T::ProxyType::default(), + T::BlockNumber::zero(), + )?; + real + }; + for _ in 0..n { + Proxy::::announce( + RawOrigin::Signed(caller.clone()).into(), + real.clone(), + T::CallHasher::hash_of(&("add_announcement", n)), )?; } Ok(()) @@ -49,43 +88,171 @@ benchmarks! { let p in ...; // In this case the caller is the "target" proxy let caller: T::AccountId = account("target", p - 1, SEED); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); let call: ::Call = frame_system::Call::::remark(vec![]).into(); }: _(RawOrigin::Signed(caller), real, Some(T::ProxyType::default()), Box::new(call)) + verify { + assert_last_event::(RawEvent::ProxyExecuted(Ok(())).into()) + } + + proxy_announced { + let a in 0 .. T::MaxPending::get() - 1; + let p in ...; + // In this case the caller is the "target" proxy + let caller: T::AccountId = account("anonymous", 0, SEED); + let delegate: T::AccountId = account("target", p - 1, SEED); + T::Currency::make_free_balance_be(&delegate, BalanceOf::::max_value()); + // ... and "real" is the traditional caller. This is not a typo. 
+ let real: T::AccountId = whitelisted_caller(); + let call: ::Call = frame_system::Call::::remark(vec![]).into(); + Proxy::::announce( + RawOrigin::Signed(delegate.clone()).into(), + real.clone(), + T::CallHasher::hash_of(&call), + )?; + add_announcements::(a, Some(delegate.clone()), None)?; + }: _(RawOrigin::Signed(caller), delegate, real, Some(T::ProxyType::default()), Box::new(call)) + verify { + assert_last_event::(RawEvent::ProxyExecuted(Ok(())).into()) + } + + remove_announcement { + let a in 0 .. T::MaxPending::get() - 1; + let p in ...; + // In this case the caller is the "target" proxy + let caller: T::AccountId = account("target", p - 1, SEED); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + // ... and "real" is the traditional caller. This is not a typo. + let real: T::AccountId = whitelisted_caller(); + let call: ::Call = frame_system::Call::::remark(vec![]).into(); + Proxy::::announce( + RawOrigin::Signed(caller.clone()).into(), + real.clone(), + T::CallHasher::hash_of(&call), + )?; + add_announcements::(a, Some(caller.clone()), None)?; + }: _(RawOrigin::Signed(caller.clone()), real, T::CallHasher::hash_of(&call)) + verify { + let (announcements, _) = Announcements::::get(&caller); + assert_eq!(announcements.len() as u32, a); + } + + reject_announcement { + let a in 0 .. T::MaxPending::get() - 1; + let p in ...; + // In this case the caller is the "target" proxy + let caller: T::AccountId = account("target", p - 1, SEED); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + // ... and "real" is the traditional caller. This is not a typo. + let real: T::AccountId = whitelisted_caller(); + let call: ::Call = frame_system::Call::::remark(vec![]).into(); + Proxy::::announce( + RawOrigin::Signed(caller.clone()).into(), + real.clone(), + T::CallHasher::hash_of(&call), + )?; + add_announcements::(a, Some(caller.clone()), None)?; + }: _(RawOrigin::Signed(real), caller.clone(), T::CallHasher::hash_of(&call)) + verify { + let (announcements, _) = Announcements::::get(&caller); + assert_eq!(announcements.len() as u32, a); + } + + announce { + let a in 0 .. T::MaxPending::get() - 1; + let p in ...; + // In this case the caller is the "target" proxy + let caller: T::AccountId = account("target", p - 1, SEED); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + // ... and "real" is the traditional caller. This is not a typo. 
+ let real: T::AccountId = whitelisted_caller(); + add_announcements::(a, Some(caller.clone()), None)?; + let call: ::Call = frame_system::Call::::remark(vec![]).into(); + let call_hash = T::CallHasher::hash_of(&call); + }: _(RawOrigin::Signed(caller.clone()), real.clone(), call_hash) + verify { + assert_last_event::(RawEvent::Announced(real, caller, call_hash).into()); + } add_proxy { let p in ...; let caller: T::AccountId = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), account("target", T::MaxProxies::get().into(), SEED), T::ProxyType::default()) + }: _( + RawOrigin::Signed(caller.clone()), + account("target", T::MaxProxies::get().into(), SEED), + T::ProxyType::default(), + T::BlockNumber::zero() + ) + verify { + let (proxies, _) = Proxies::::get(caller); + assert_eq!(proxies.len() as u32, p + 1); + } remove_proxy { let p in ...; let caller: T::AccountId = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), account("target", 0, SEED), T::ProxyType::default()) + }: _( + RawOrigin::Signed(caller.clone()), + account("target", 0, SEED), + T::ProxyType::default(), + T::BlockNumber::zero() + ) + verify { + let (proxies, _) = Proxies::::get(caller); + assert_eq!(proxies.len() as u32, p - 1); + } remove_proxies { let p in ...; let caller: T::AccountId = whitelisted_caller(); - }: _(RawOrigin::Signed(caller)) + }: _(RawOrigin::Signed(caller.clone())) + verify { + let (proxies, _) = Proxies::::get(caller); + assert_eq!(proxies.len() as u32, 0); + } anonymous { let p in ...; - }: _(RawOrigin::Signed(whitelisted_caller()), T::ProxyType::default(), 0) + let caller: T::AccountId = whitelisted_caller(); + }: _( + RawOrigin::Signed(caller.clone()), + T::ProxyType::default(), + T::BlockNumber::zero(), + 0 + ) + verify { + let anon_account = Module::::anonymous_account(&caller, &T::ProxyType::default(), 0, None); + assert_last_event::(RawEvent::AnonymousCreated( + anon_account, + caller, + T::ProxyType::default(), + 0, + ).into()); + } kill_anonymous { let p in 0 .. 
(T::MaxProxies::get() - 2).into(); let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - Module::::anonymous(RawOrigin::Signed(whitelisted_caller()).into(), T::ProxyType::default(), 0)?; + Module::::anonymous( + RawOrigin::Signed(whitelisted_caller()).into(), + T::ProxyType::default(), + T::BlockNumber::zero(), + 0 + )?; let height = system::Module::::block_number(); let ext_index = system::Module::::extrinsic_index().unwrap_or(0); let anon = Module::::anonymous_account(&caller, &T::ProxyType::default(), 0, None); add_proxies::(p, Some(anon.clone()))?; - - }: _(RawOrigin::Signed(anon), caller, T::ProxyType::default(), 0, height, ext_index) + ensure!(Proxies::::contains_key(&anon), "anon proxy not created"); + }: _(RawOrigin::Signed(anon.clone()), caller.clone(), T::ProxyType::default(), 0, height, ext_index) + verify { + assert!(!Proxies::::contains_key(&anon)); + } } #[cfg(test)] @@ -98,6 +265,10 @@ mod tests { fn test_benchmarks() { new_test_ext().execute_with(|| { assert_ok!(test_benchmark_proxy::()); + assert_ok!(test_benchmark_proxy_announced::()); + assert_ok!(test_benchmark_remove_announcement::()); + assert_ok!(test_benchmark_reject_announcement::()); + assert_ok!(test_benchmark_announce::()); assert_ok!(test_benchmark_add_proxy::()); assert_ok!(test_benchmark_remove_proxy::()); assert_ok!(test_benchmark_remove_proxies::()); diff --git a/frame/proxy/src/default_weight.rs b/frame/proxy/src/default_weight.rs new file mode 100644 index 00000000000..183c0b81c8a --- /dev/null +++ b/frame/proxy/src/default_weight.rs @@ -0,0 +1,84 @@ +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +impl crate::WeightInfo for () { + fn proxy(p: u32, ) -> Weight { + (26127000 as Weight) + .saturating_add((214000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + } + fn proxy_announced(a: u32, p: u32, ) -> Weight { + (55405000 as Weight) + .saturating_add((774000 as Weight).saturating_mul(a as Weight)) + .saturating_add((209000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn remove_announcement(a: u32, p: u32, ) -> Weight { + (35879000 as Weight) + .saturating_add((783000 as Weight).saturating_mul(a as Weight)) + .saturating_add((20000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn reject_announcement(a: u32, p: u32, ) -> Weight { + (36097000 as Weight) + .saturating_add((780000 as Weight).saturating_mul(a as Weight)) + .saturating_add((12000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn announce(a: u32, p: u32, ) -> Weight { + (53769000 as Weight) + .saturating_add((675000 as Weight).saturating_mul(a as Weight)) + .saturating_add((214000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn add_proxy(p: u32, ) -> Weight { + (36082000 as Weight) + .saturating_add((234000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn remove_proxy(p: u32, ) -> Weight { + (32885000 as Weight) + .saturating_add((267000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn remove_proxies(p: u32, ) -> Weight { + (31735000 as Weight) + .saturating_add((215000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn anonymous(p: u32, ) -> Weight { + (50907000 as Weight) + .saturating_add((61000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn kill_anonymous(p: u32, ) -> Weight { + (33926000 as Weight) + .saturating_add((208000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } +} diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index ec430078276..5a852ea9f53 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -19,6 +19,10 @@ //! A module allowing accounts to give permission to other accounts to dispatch types of calls from //! their signed origin. //! +//! The accounts to which permission is delegated may be requied to announce the action that they +//! wish to execute some duration prior to execution happens. In this case, the target account may +//! reject the announcement and in doing so, veto the execution. +//! //! - [`proxy::Trait`](./trait.Trait.html) //! - [`Call`](./enum.Call.html) //! 
@@ -37,23 +41,27 @@ use sp_std::prelude::*; use codec::{Encode, Decode}; use sp_io::hashing::blake2_256; -use sp_runtime::{DispatchResult, traits::{Dispatchable, Zero}}; -use sp_runtime::traits::Member; +use sp_runtime::{DispatchResult, traits::{Dispatchable, Zero, Hash, Member, Saturating}}; use frame_support::{ - decl_module, decl_event, decl_error, decl_storage, Parameter, ensure, traits::{ - Get, ReservableCurrency, Currency, InstanceFilter, - OriginTrait, IsType, - }, weights::{Weight, GetDispatchInfo, constants::{WEIGHT_PER_MICROS, WEIGHT_PER_NANOS}}, - dispatch::{PostDispatchInfo, IsSubType}, + decl_module, decl_event, decl_error, decl_storage, Parameter, ensure, RuntimeDebug, traits::{ + Get, ReservableCurrency, Currency, InstanceFilter, OriginTrait, IsType, + }, weights::{Weight, GetDispatchInfo}, + dispatch::{PostDispatchInfo, IsSubType}, storage::IterableStorageMap, }; use frame_system::{self as system, ensure_signed}; +use frame_support::dispatch::DispatchError; mod tests; mod benchmarking; +mod default_weight; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; pub trait WeightInfo { + fn proxy_announced(a: u32, p: u32, ) -> Weight; + fn remove_announcement(a: u32, p: u32, ) -> Weight; + fn reject_announcement(a: u32, p: u32, ) -> Weight; + fn announce(a: u32, p: u32, ) -> Weight; fn proxy(p: u32, ) -> Weight; fn add_proxy(p: u32, ) -> Weight; fn remove_proxy(p: u32, ) -> Weight; @@ -62,15 +70,6 @@ pub trait WeightInfo { fn kill_anonymous(p: u32, ) -> Weight; } -impl WeightInfo for () { - fn proxy(_p: u32, ) -> Weight { 1_000_000_000 } - fn add_proxy(_p: u32, ) -> Weight { 1_000_000_000 } - fn remove_proxy(_p: u32, ) -> Weight { 1_000_000_000 } - fn remove_proxies(_p: u32, ) -> Weight { 1_000_000_000 } - fn anonymous(_p: u32, ) -> Weight { 1_000_000_000 } - fn kill_anonymous(_p: u32, ) -> Weight { 1_000_000_000 } -} - /// Configuration trait. pub trait Trait: frame_system::Trait { /// The overarching event type. @@ -108,19 +107,67 @@ pub trait Trait: frame_system::Trait { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; + + /// The maximum amount of time-delayed announcements that are allowed to be pending. + type MaxPending: Get; + + /// The type of hash used for hashing the call. + type CallHasher: Hash; + + /// The base amount of currency needed to reserve for creating an announcement. + /// + /// This is held when a new storage item holding a `Balance` is created (typically 16 bytes). + type AnnouncementDepositBase: Get>; + + /// The amount of currency needed per announcement made. + /// + /// This is held for adding an `AccountId`, `Hash` and `BlockNumber` (typically 68 bytes) + /// into a pre-existing storage value. + type AnnouncementDepositFactor: Get>; +} + +/// The parameters under which a particular account has a proxy relationship with some other +/// account. +#[derive(Encode, Decode, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, RuntimeDebug)] +pub struct ProxyDefinition { + /// The account which may act on behalf of another. + delegate: AccountId, + /// A value defining the subset of calls that it is allowed to make. + proxy_type: ProxyType, + /// The number of blocks that an announcement must be in place for before the corresponding call + /// may be dispatched. If zero, then no announcement is needed. + delay: BlockNumber, +} + +/// Details surrounding a specific instance of an announcement to make a call. 
+#[derive(Encode, Decode, Clone, Copy, Eq, PartialEq, RuntimeDebug)] +pub struct Announcement { + /// The account which made the announcement. + real: AccountId, + /// The hash of the call to be made. + call_hash: Hash, + /// The height at which the announcement was made. + height: BlockNumber, } +type CallHashOf = <::CallHasher as Hash>::Output; + decl_storage! { trait Store for Module as Proxy { /// The set of account proxies. Maps the account which has delegated to the accounts /// which are being delegated to, together with the amount held on deposit. - pub Proxies: map hasher(twox_64_concat) T::AccountId => (Vec<(T::AccountId, T::ProxyType)>, BalanceOf); + pub Proxies: map hasher(twox_64_concat) T::AccountId + => (Vec>, BalanceOf); + + /// The announcements made by the proxy (key). + pub Announcements: map hasher(twox_64_concat) T::AccountId + => (Vec, T::BlockNumber>>, BalanceOf); } } decl_error! { pub enum Error for Module { - /// There are too many proxies registered. + /// There are too many proxies registered or too many announcements pending. TooMany, /// Proxy registration not found. NotFound, @@ -132,6 +179,8 @@ decl_error! { Duplicate, /// Call may not be made by proxy because it may escalate its privileges. NoPermission, + /// Announcement, if made at all, was made too recently. + Unannounced, } } @@ -139,13 +188,16 @@ decl_event! { /// Events type. pub enum Event where AccountId = ::AccountId, - ProxyType = ::ProxyType + ProxyType = ::ProxyType, + Hash = CallHashOf, { /// A proxy was executed correctly, with the given [result]. ProxyExecuted(DispatchResult), /// Anonymous account has been created by new proxy with given /// disambiguation index and proxy type. [anonymous, who, proxy_type, disambiguation_index] AnonymousCreated(AccountId, AccountId, ProxyType, u16), + /// An announcement was placed to make a call in the future. [real, proxy, call_hash] + Announced(AccountId, AccountId, Hash), } } @@ -165,9 +217,36 @@ decl_module! { /// The maximum amount of proxies allowed for a single account. const MaxProxies: u16 = T::MaxProxies::get(); + /// `MaxPending` metadata shadow. + const MaxPending: u32 = T::MaxPending::get(); + + /// `AnnouncementDepositBase` metadata shadow. + const AnnouncementDepositBase: BalanceOf = T::AnnouncementDepositBase::get(); + + /// `AnnouncementDepositFactor` metadata shadow. + const AnnouncementDepositFactor: BalanceOf = T::AnnouncementDepositFactor::get(); + + fn on_runtime_upgrade() -> Weight { + Proxies::::translate::<(Vec<(T::AccountId, T::ProxyType)>, BalanceOf), _>( + |_, (targets, deposit)| Some(( + targets.into_iter() + .map(|(a, t)| ProxyDefinition { + delegate: a, + proxy_type: t, + delay: Zero::zero(), + }) + .collect::>(), + deposit, + )) + ); + T::MaximumBlockWeight::get() + } + /// Dispatch the given `call` from an account that the sender is authorised for through /// `add_proxy`. /// + /// Removes any corresponding announcement(s). + /// /// The dispatch origin for this call must be _Signed_. /// /// Parameters: @@ -176,43 +255,24 @@ decl_module! { /// - `call`: The call to be made by the `real` account. /// /// # - /// P is the number of proxies the user has - /// - Base weight: 19.87 + .141 * P µs - /// - DB weight: 1 storage read. - /// - Plus the weight of the `call` + /// Weight is a function of the number of proxies the user has (P). 
/// # #[weight = { let di = call.get_dispatch_info(); - (T::DbWeight::get().reads(1) - .saturating_add(di.weight) - .saturating_add(20 * WEIGHT_PER_MICROS) - .saturating_add((140 * WEIGHT_PER_NANOS).saturating_mul(T::MaxProxies::get().into())), + (T::WeightInfo::proxy(T::MaxProxies::get().into()) + .saturating_add(di.weight), di.class) }] fn proxy(origin, real: T::AccountId, force_proxy_type: Option, - call: Box<::Call> + call: Box<::Call>, ) { let who = ensure_signed(origin)?; - let (_, proxy_type) = Proxies::::get(&real).0.into_iter() - .find(|x| &x.0 == &who && force_proxy_type.as_ref().map_or(true, |y| &x.1 == y)) - .ok_or(Error::::NotProxy)?; + let def = Self::find_proxy(&real, &who, force_proxy_type)?; + ensure!(def.delay.is_zero(), Error::::Unannounced); - // This is a freshly authenticated new account, the origin restrictions doesn't apply. - let mut origin: T::Origin = frame_system::RawOrigin::Signed(real).into(); - origin.add_filter(move |c: &::Call| { - let c = ::Call::from_ref(c); - match c.is_sub_type() { - Some(Call::add_proxy(_, ref pt)) | Some(Call::remove_proxy(_, ref pt)) - if !proxy_type.is_superset(&pt) => false, - Some(Call::remove_proxies(..)) | Some(Call::kill_anonymous(..)) - if proxy_type != T::ProxyType::default() => false, - _ => proxy_type.filter(c) - } - }); - let e = call.dispatch(origin); - Self::deposit_event(RawEvent::ProxyExecuted(e.map(|_| ()).map_err(|e| e.error))); + Self::do_proxy(def, real, *call); } /// Register a proxy account for the sender that is able to make calls on its behalf. @@ -224,21 +284,20 @@ decl_module! { /// - `proxy_type`: The permissions allowed for this proxy account. /// /// # - /// P is the number of proxies the user has - /// - Base weight: 17.48 + .176 * P µs - /// - DB weight: 1 storage read and write. + /// Weight is a function of the number of proxies the user has (P). /// # - #[weight = T::DbWeight::get().reads_writes(1, 1) - .saturating_add(18 * WEIGHT_PER_MICROS) - .saturating_add((200 * WEIGHT_PER_NANOS).saturating_mul(T::MaxProxies::get().into())) - ] - fn add_proxy(origin, proxy: T::AccountId, proxy_type: T::ProxyType) -> DispatchResult { + #[weight = T::WeightInfo::add_proxy(T::MaxProxies::get().into())] + fn add_proxy(origin, + delegate: T::AccountId, + proxy_type: T::ProxyType, + delay: T::BlockNumber, + ) -> DispatchResult { let who = ensure_signed(origin)?; Proxies::::try_mutate(&who, |(ref mut proxies, ref mut deposit)| { ensure!(proxies.len() < T::MaxProxies::get() as usize, Error::::TooMany); - let typed_proxy = (proxy, proxy_type); - let i = proxies.binary_search(&typed_proxy).err().ok_or(Error::::Duplicate)?; - proxies.insert(i, typed_proxy); + let proxy_def = ProxyDefinition { delegate, proxy_type, delay }; + let i = proxies.binary_search(&proxy_def).err().ok_or(Error::::Duplicate)?; + proxies.insert(i, proxy_def); let new_deposit = T::ProxyDepositBase::get() + T::ProxyDepositFactor::get() * (proxies.len() as u32).into(); if new_deposit > *deposit { @@ -260,20 +319,19 @@ decl_module! { /// - `proxy_type`: The permissions currently enabled for the removed proxy account. /// /// # - /// P is the number of proxies the user has - /// - Base weight: 14.37 + .164 * P µs - /// - DB weight: 1 storage read and write. + /// Weight is a function of the number of proxies the user has (P). 
/// # - #[weight = T::DbWeight::get().reads_writes(1, 1) - .saturating_add(14 * WEIGHT_PER_MICROS) - .saturating_add((160 * WEIGHT_PER_NANOS).saturating_mul(T::MaxProxies::get().into())) - ] - fn remove_proxy(origin, proxy: T::AccountId, proxy_type: T::ProxyType) -> DispatchResult { + #[weight = T::WeightInfo::remove_proxy(T::MaxProxies::get().into())] + fn remove_proxy(origin, + delegate: T::AccountId, + proxy_type: T::ProxyType, + delay: T::BlockNumber, + ) -> DispatchResult { let who = ensure_signed(origin)?; Proxies::::try_mutate_exists(&who, |x| { let (mut proxies, old_deposit) = x.take().ok_or(Error::::NotFound)?; - let typed_proxy = (proxy, proxy_type); - let i = proxies.binary_search(&typed_proxy).ok().ok_or(Error::::NotFound)?; + let proxy_def = ProxyDefinition { delegate, proxy_type, delay }; + let i = proxies.binary_search(&proxy_def).ok().ok_or(Error::::NotFound)?; proxies.remove(i); let new_deposit = if proxies.is_empty() { BalanceOf::::zero() @@ -300,14 +358,9 @@ decl_module! { /// the unreserved fees will be inaccessible. **All access to this account will be lost.** /// /// # - /// P is the number of proxies the user has - /// - Base weight: 13.73 + .129 * P µs - /// - DB weight: 1 storage read and write. + /// Weight is a function of the number of proxies the user has (P). /// # - #[weight = T::DbWeight::get().reads_writes(1, 1) - .saturating_add(14 * WEIGHT_PER_MICROS) - .saturating_add((130 * WEIGHT_PER_NANOS).saturating_mul(T::MaxProxies::get().into())) - ] + #[weight = T::WeightInfo::remove_proxies(T::MaxProxies::get().into())] fn remove_proxies(origin) { let who = ensure_signed(origin)?; let (_, old_deposit) = Proxies::::take(&who); @@ -325,6 +378,8 @@ decl_module! { /// - `index`: A disambiguation index, in case this is called multiple times in the same /// transaction (e.g. with `utility::batch`). Unless you're using `batch` you probably just /// want to use `0`. + /// - `delay`: The announcement period required of the initial proxy. Will generally be + /// zero. /// /// Fails with `Duplicate` if this has already been called in this transaction, from the /// same sender, with the same parameters. @@ -332,22 +387,23 @@ decl_module! { /// Fails if there are insufficient funds to pay for deposit. /// /// # - /// P is the number of proxies the user has - /// - Base weight: 36.48 + .039 * P µs - /// - DB weight: 1 storage read and write. + /// Weight is a function of the number of proxies the user has (P). 
/// # - #[weight = T::DbWeight::get().reads_writes(1, 1) - .saturating_add(36 * WEIGHT_PER_MICROS) - .saturating_add((40 * WEIGHT_PER_NANOS).saturating_mul(T::MaxProxies::get().into())) - ] - fn anonymous(origin, proxy_type: T::ProxyType, index: u16) { + /// TODO: Might be over counting 1 read + #[weight = T::WeightInfo::anonymous(T::MaxProxies::get().into())] + fn anonymous(origin, proxy_type: T::ProxyType, delay: T::BlockNumber, index: u16) { let who = ensure_signed(origin)?; let anonymous = Self::anonymous_account(&who, &proxy_type, index, None); ensure!(!Proxies::::contains_key(&anonymous), Error::::Duplicate); let deposit = T::ProxyDepositBase::get() + T::ProxyDepositFactor::get(); T::Currency::reserve(&who, deposit)?; - Proxies::::insert(&anonymous, (vec![(who.clone(), proxy_type.clone())], deposit)); + let proxy_def = ProxyDefinition { + delegate: who.clone(), + proxy_type: proxy_type.clone(), + delay, + }; + Proxies::::insert(&anonymous, (vec![proxy_def], deposit)); Self::deposit_event(RawEvent::AnonymousCreated(anonymous, who, proxy_type, index)); } @@ -369,14 +425,9 @@ decl_module! { /// account whose `anonymous` call has corresponding parameters. /// /// # - /// P is the number of proxies the user has - /// - Base weight: 15.65 + .137 * P µs - /// - DB weight: 1 storage read and write. + /// Weight is a function of the number of proxies the user has (P). /// # - #[weight = T::DbWeight::get().reads_writes(1, 1) - .saturating_add(15 * WEIGHT_PER_MICROS) - .saturating_add((140 * WEIGHT_PER_NANOS).saturating_mul(T::MaxProxies::get().into())) - ] + #[weight = T::WeightInfo::kill_anonymous(T::MaxProxies::get().into())] fn kill_anonymous(origin, spawner: T::AccountId, proxy_type: T::ProxyType, @@ -393,6 +444,140 @@ decl_module! { let (_, deposit) = Proxies::::take(&who); T::Currency::unreserve(&spawner, deposit); } + + /// Publish the hash of a proxy-call that will be made in the future. + /// + /// This must be called some number of blocks before the corresponding `proxy` is attempted + /// if the delay associated with the proxy relationship is greater than zero. + /// + /// No more than `MaxPending` announcements may be made at any one time. + /// + /// This will take a deposit of `AnnouncementDepositFactor` as well as + /// `AnnouncementDepositBase` if there are no other pending announcements. + /// + /// The dispatch origin for this call must be _Signed_ and a proxy of `real`. + /// + /// Parameters: + /// - `real`: The account that the proxy will make a call on behalf of. + /// - `call_hash`: The hash of the call to be made by the `real` account. + /// + /// # + /// Weight is a function of: + /// - A: the number of announcements made. + /// - P: the number of proxies the user has. 
+ /// # + #[weight = T::WeightInfo::announce(T::MaxPending::get(), T::MaxProxies::get().into())] + fn announce(origin, real: T::AccountId, call_hash: CallHashOf) { + let who = ensure_signed(origin)?; + Proxies::::get(&real).0.into_iter() + .find(|x| &x.delegate == &who) + .ok_or(Error::::NotProxy)?; + + let announcement = Announcement { + real: real.clone(), + call_hash: call_hash.clone(), + height: system::Module::::block_number(), + }; + + Announcements::::try_mutate(&who, |(ref mut pending, ref mut deposit)| { + ensure!(pending.len() < T::MaxPending::get() as usize, Error::::TooMany); + pending.push(announcement); + Self::rejig_deposit( + &who, + *deposit, + T::AnnouncementDepositBase::get(), + T::AnnouncementDepositFactor::get(), + pending.len(), + ).map(|d| d.expect("Just pushed; pending.len() > 0; rejig_deposit returns Some; qed")) + .map(|d| *deposit = d) + })?; + Self::deposit_event(RawEvent::Announced(real, who, call_hash)); + } + + /// Remove a given announcement. + /// + /// May be called by a proxy account to remove a call they previously announced and return + /// the deposit. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// Parameters: + /// - `real`: The account that the proxy will make a call on behalf of. + /// - `call_hash`: The hash of the call to be made by the `real` account. + /// + /// # + /// Weight is a function of: + /// - A: the number of announcements made. + /// - P: the number of proxies the user has. + /// # + #[weight = T::WeightInfo::remove_announcement(T::MaxPending::get(), T::MaxProxies::get().into())] + fn remove_announcement(origin, real: T::AccountId, call_hash: CallHashOf) { + let who = ensure_signed(origin)?; + Self::edit_announcements(&who, |ann| ann.real != real || ann.call_hash != call_hash)?; + } + + /// Remove the given announcement of a delegate. + /// + /// May be called by a target (proxied) account to remove a call that one of their delegates + /// (`delegate`) has announced they want to execute. The deposit is returned. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// Parameters: + /// - `delegate`: The account that previously announced the call. + /// - `call_hash`: The hash of the call to be made. + /// + /// # + /// Weight is a function of: + /// - A: the number of announcements made. + /// - P: the number of proxies the user has. + /// # + #[weight = T::WeightInfo::reject_announcement(T::MaxPending::get(), T::MaxProxies::get().into())] + fn reject_announcement(origin, delegate: T::AccountId, call_hash: CallHashOf) { + let who = ensure_signed(origin)?; + Self::edit_announcements(&delegate, |ann| ann.real != who || ann.call_hash != call_hash)?; + } + + /// Dispatch the given `call` from an account that the sender is authorised for through + /// `add_proxy`. + /// + /// Removes any corresponding announcement(s). + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// Parameters: + /// - `real`: The account that the proxy will make a call on behalf of. + /// - `force_proxy_type`: Specify the exact proxy type to be used and checked for this call. + /// - `call`: The call to be made by the `real` account. + /// + /// # + /// Weight is a function of: + /// - A: the number of announcements made. + /// - P: the number of proxies the user has. 
+ /// # + #[weight = { + let di = call.get_dispatch_info(); + (T::WeightInfo::proxy_announced(T::MaxPending::get(), T::MaxProxies::get().into()) + .saturating_add(di.weight), + di.class) + }] + fn proxy_announced(origin, + delegate: T::AccountId, + real: T::AccountId, + force_proxy_type: Option, + call: Box<::Call>, + ) { + ensure_signed(origin)?; + let def = Self::find_proxy(&real, &delegate, force_proxy_type)?; + + let call_hash = T::CallHasher::hash_of(&call); + let now = system::Module::::block_number(); + Self::edit_announcements(&delegate, |ann| + ann.real != real || ann.call_hash != call_hash || now.saturating_sub(ann.height) < def.delay + ).map_err(|_| Error::::Unannounced)?; + + Self::do_proxy(def, real, *call); + } } } @@ -411,4 +596,82 @@ impl Module { .using_encoded(blake2_256); T::AccountId::decode(&mut &entropy[..]).unwrap_or_default() } + + fn rejig_deposit( + who: &T::AccountId, + old_deposit: BalanceOf, + base: BalanceOf, + factor: BalanceOf, + len: usize, + ) -> Result>, DispatchError> { + let new_deposit = if len == 0 { + BalanceOf::::zero() + } else { + base + factor * (len as u32).into() + }; + if new_deposit > old_deposit { + T::Currency::reserve(&who, new_deposit - old_deposit)?; + } else if new_deposit < old_deposit { + T::Currency::unreserve(&who, old_deposit - new_deposit); + } + Ok(if len == 0 { + None + } else { + Some(new_deposit) + }) + } + + fn edit_announcements< + F: FnMut(&Announcement, T::BlockNumber>) -> bool + >(delegate: &T::AccountId, f: F) -> DispatchResult { + Announcements::::try_mutate_exists(delegate, |x| { + let (mut pending, old_deposit) = x.take().ok_or(Error::::NotFound)?; + let orig_pending_len = pending.len(); + pending.retain(f); + ensure!(orig_pending_len > pending.len(), Error::::NotFound); + *x = Self::rejig_deposit( + delegate, + old_deposit, + T::AnnouncementDepositBase::get(), + T::AnnouncementDepositFactor::get(), + pending.len(), + )?.map(|deposit| (pending, deposit)); + Ok(()) + }) + } + + fn find_proxy( + real: &T::AccountId, + delegate: &T::AccountId, + force_proxy_type: Option, + ) -> Result, DispatchError> { + let f = |x: &ProxyDefinition| -> bool { + &x.delegate == delegate && force_proxy_type.as_ref().map_or(true, |y| &x.proxy_type == y) + }; + Ok(Proxies::::get(real).0.into_iter().find(f).ok_or(Error::::NotProxy)?) + } + + fn do_proxy( + def: ProxyDefinition, + real: T::AccountId, + call: ::Call, + ) { + // This is a freshly authenticated new account, the origin restrictions doesn't apply. + let mut origin: T::Origin = frame_system::RawOrigin::Signed(real).into(); + origin.add_filter(move |c: &::Call| { + let c = ::Call::from_ref(c); + // We make sure the proxy call does access this pallet to change modify proxies. + match c.is_sub_type() { + // Proxy call cannot add or remove a proxy with more permissions than it already has. + Some(Call::add_proxy(_, ref pt, _)) | Some(Call::remove_proxy(_, ref pt, _)) + if !def.proxy_type.is_superset(&pt) => false, + // Proxy call cannot remove all proxies or kill anonymous proxies unless it has full permissions. 
+ Some(Call::remove_proxies(..)) | Some(Call::kill_anonymous(..)) + if def.proxy_type != T::ProxyType::default() => false, + _ => def.proxy_type.filter(c) + } + }); + let e = call.dispatch(origin); + Self::deposit_event(RawEvent::ProxyExecuted(e.map(|_| ()).map_err(|e| e.error))); + } } diff --git a/frame/proxy/src/tests.rs b/frame/proxy/src/tests.rs index 11f11e24d47..00d84e65ad1 100644 --- a/frame/proxy/src/tests.rs +++ b/frame/proxy/src/tests.rs @@ -108,6 +108,9 @@ parameter_types! { pub const ProxyDepositBase: u64 = 1; pub const ProxyDepositFactor: u64 = 1; pub const MaxProxies: u16 = 4; + pub const MaxPending: u32 = 2; + pub const AnnouncementDepositBase: u64 = 1; + pub const AnnouncementDepositFactor: u64 = 1; } #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug)] pub enum ProxyType { @@ -148,6 +151,10 @@ impl Trait for Test { type ProxyDepositFactor = ProxyDepositFactor; type MaxProxies = MaxProxies; type WeightInfo = (); + type CallHasher = BlakeTwo256; + type MaxPending = MaxPending; + type AnnouncementDepositBase = AnnouncementDepositBase; + type AnnouncementDepositFactor = AnnouncementDepositFactor; } type System = frame_system::Module; @@ -189,13 +196,134 @@ fn expect_events(e: Vec) { assert_eq!(last_events(e.len()), e); } +#[test] +fn announcement_works() { + new_test_ext().execute_with(|| { + assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::add_proxy(Origin::signed(2), 3, ProxyType::Any, 1)); + assert_eq!(Balances::reserved_balance(3), 0); + + assert_ok!(Proxy::announce(Origin::signed(3), 1, [1; 32].into())); + assert_eq!(Announcements::::get(3), (vec![Announcement { + real: 1, + call_hash: [1; 32].into(), + height: 1, + }], 2)); + assert_eq!(Balances::reserved_balance(3), 2); + + assert_ok!(Proxy::announce(Origin::signed(3), 2, [2; 32].into())); + assert_eq!(Announcements::::get(3), (vec![ + Announcement { + real: 1, + call_hash: [1; 32].into(), + height: 1, + }, + Announcement { + real: 2, + call_hash: [2; 32].into(), + height: 1, + }, + ], 3)); + assert_eq!(Balances::reserved_balance(3), 3); + + assert_noop!(Proxy::announce(Origin::signed(3), 2, [3; 32].into()), Error::::TooMany); + }); +} + +#[test] +fn remove_announcement_works() { + new_test_ext().execute_with(|| { + assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::add_proxy(Origin::signed(2), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::announce(Origin::signed(3), 1, [1; 32].into())); + assert_ok!(Proxy::announce(Origin::signed(3), 2, [2; 32].into())); + let e = Error::::NotFound; + assert_noop!(Proxy::remove_announcement(Origin::signed(3), 1, [0; 32].into()), e); + assert_ok!(Proxy::remove_announcement(Origin::signed(3), 1, [1; 32].into())); + assert_eq!(Announcements::::get(3), (vec![Announcement { + real: 2, + call_hash: [2; 32].into(), + height: 1, + }], 2)); + assert_eq!(Balances::reserved_balance(3), 2); + }); +} + +#[test] +fn reject_announcement_works() { + new_test_ext().execute_with(|| { + assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::add_proxy(Origin::signed(2), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::announce(Origin::signed(3), 1, [1; 32].into())); + assert_ok!(Proxy::announce(Origin::signed(3), 2, [2; 32].into())); + let e = Error::::NotFound; + assert_noop!(Proxy::reject_announcement(Origin::signed(1), 3, [0; 32].into()), e); + let e = Error::::NotFound; + assert_noop!(Proxy::reject_announcement(Origin::signed(4), 3, [1; 32].into()), e); + 
assert_ok!(Proxy::reject_announcement(Origin::signed(1), 3, [1; 32].into())); + assert_eq!(Announcements::::get(3), (vec![Announcement { + real: 2, + call_hash: [2; 32].into(), + height: 1, + }], 2)); + assert_eq!(Balances::reserved_balance(3), 2); + }); +} + +#[test] +fn announcer_must_be_proxy() { + new_test_ext().execute_with(|| { + assert_noop!(Proxy::announce(Origin::signed(2), 1, H256::zero()), Error::::NotProxy); + }); +} + +#[test] +fn delayed_requires_pre_announcement() { + new_test_ext().execute_with(|| { + assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any, 1)); + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 1))); + let e = Error::::Unannounced; + assert_noop!(Proxy::proxy(Origin::signed(2), 1, None, call.clone()), e); + let e = Error::::Unannounced; + assert_noop!(Proxy::proxy_announced(Origin::signed(0), 2, 1, None, call.clone()), e); + let call_hash = BlakeTwo256::hash_of(&call); + assert_ok!(Proxy::announce(Origin::signed(2), 1, call_hash)); + system::Module::::set_block_number(2); + assert_ok!(Proxy::proxy_announced(Origin::signed(0), 2, 1, None, call.clone())); + }); +} + +#[test] +fn proxy_announced_removes_announcement_and_returns_deposit() { + new_test_ext().execute_with(|| { + assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::add_proxy(Origin::signed(2), 3, ProxyType::Any, 1)); + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 1))); + let call_hash = BlakeTwo256::hash_of(&call); + assert_ok!(Proxy::announce(Origin::signed(3), 1, call_hash)); + assert_ok!(Proxy::announce(Origin::signed(3), 2, call_hash)); + // Too early to execute announced call + let e = Error::::Unannounced; + assert_noop!(Proxy::proxy_announced(Origin::signed(0), 3, 1, None, call.clone()), e); + + system::Module::::set_block_number(2); + assert_ok!(Proxy::proxy_announced(Origin::signed(0), 3, 1, None, call.clone())); + assert_eq!(Announcements::::get(3), (vec![Announcement { + real: 2, + call_hash, + height: 1, + }], 2)); + assert_eq!(Balances::reserved_balance(3), 2); + }); +} + #[test] fn filtering_works() { new_test_ext().execute_with(|| { Balances::mutate_account(&1, |a| a.free = 1000); - assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any)); - assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::JustTransfer)); - assert_ok!(Proxy::add_proxy(Origin::signed(1), 4, ProxyType::JustUtility)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any, 0)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::JustTransfer, 0)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 4, ProxyType::JustUtility, 0)); let call = Box::new(Call::Balances(BalancesCall::transfer(6, 1))); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); @@ -228,7 +356,7 @@ fn filtering_works() { RawEvent::ProxyExecuted(Ok(())).into(), ]); - let inner = Box::new(Call::Proxy(ProxyCall::add_proxy(5, ProxyType::Any))); + let inner = Box::new(Call::Proxy(ProxyCall::add_proxy(5, ProxyType::Any, 0))); let call = Box::new(Call::Utility(UtilityCall::batch(vec![*inner]))); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); expect_events(vec![UtilityEvent::BatchCompleted.into(), RawEvent::ProxyExecuted(Ok(())).into()]); @@ -253,24 +381,24 @@ fn filtering_works() { #[test] fn add_remove_proxies_works() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any)); - assert_noop!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any), 
Error::::Duplicate); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any, 0)); + assert_noop!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any, 0), Error::::Duplicate); assert_eq!(Balances::reserved_balance(1), 2); - assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::JustTransfer)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::JustTransfer, 0)); assert_eq!(Balances::reserved_balance(1), 3); - assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 0)); assert_eq!(Balances::reserved_balance(1), 4); - assert_ok!(Proxy::add_proxy(Origin::signed(1), 4, ProxyType::JustUtility)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 4, ProxyType::JustUtility, 0)); assert_eq!(Balances::reserved_balance(1), 5); - assert_noop!(Proxy::add_proxy(Origin::signed(1), 4, ProxyType::Any), Error::::TooMany); - assert_noop!(Proxy::remove_proxy(Origin::signed(1), 3, ProxyType::JustTransfer), Error::::NotFound); - assert_ok!(Proxy::remove_proxy(Origin::signed(1), 4, ProxyType::JustUtility)); + assert_noop!(Proxy::add_proxy(Origin::signed(1), 4, ProxyType::Any, 0), Error::::TooMany); + assert_noop!(Proxy::remove_proxy(Origin::signed(1), 3, ProxyType::JustTransfer, 0), Error::::NotFound); + assert_ok!(Proxy::remove_proxy(Origin::signed(1), 4, ProxyType::JustUtility, 0)); assert_eq!(Balances::reserved_balance(1), 4); - assert_ok!(Proxy::remove_proxy(Origin::signed(1), 3, ProxyType::Any)); + assert_ok!(Proxy::remove_proxy(Origin::signed(1), 3, ProxyType::Any, 0)); assert_eq!(Balances::reserved_balance(1), 3); - assert_ok!(Proxy::remove_proxy(Origin::signed(1), 2, ProxyType::Any)); + assert_ok!(Proxy::remove_proxy(Origin::signed(1), 2, ProxyType::Any, 0)); assert_eq!(Balances::reserved_balance(1), 2); - assert_ok!(Proxy::remove_proxy(Origin::signed(1), 2, ProxyType::JustTransfer)); + assert_ok!(Proxy::remove_proxy(Origin::signed(1), 2, ProxyType::JustTransfer, 0)); assert_eq!(Balances::reserved_balance(1), 0); }); } @@ -278,10 +406,10 @@ fn add_remove_proxies_works() { #[test] fn cannot_add_proxy_without_balance() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::add_proxy(Origin::signed(5), 3, ProxyType::Any)); + assert_ok!(Proxy::add_proxy(Origin::signed(5), 3, ProxyType::Any, 0)); assert_eq!(Balances::reserved_balance(5), 2); assert_noop!( - Proxy::add_proxy(Origin::signed(5), 4, ProxyType::Any), + Proxy::add_proxy(Origin::signed(5), 4, ProxyType::Any, 0), BalancesError::::InsufficientBalance ); }); @@ -290,8 +418,8 @@ fn cannot_add_proxy_without_balance() { #[test] fn proxying_works() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::JustTransfer)); - assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::JustTransfer, 0)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 0)); let call = Box::new(Call::Balances(BalancesCall::transfer(6, 1))); assert_noop!(Proxy::proxy(Origin::signed(4), 1, None, call.clone()), Error::::NotProxy); @@ -319,21 +447,21 @@ fn proxying_works() { #[test] fn anonymous_works() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0)); + assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0)); let anon = Proxy::anonymous_account(&1, &ProxyType::Any, 0, None); expect_event(RawEvent::AnonymousCreated(anon.clone(), 1, ProxyType::Any, 0)); // other calls to 
anonymous allowed as long as they're not exactly the same. - assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::JustTransfer, 0)); - assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 1)); + assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::JustTransfer, 0, 0)); + assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 1)); let anon2 = Proxy::anonymous_account(&2, &ProxyType::Any, 0, None); - assert_ok!(Proxy::anonymous(Origin::signed(2), ProxyType::Any, 0)); - assert_noop!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0), Error::::Duplicate); + assert_ok!(Proxy::anonymous(Origin::signed(2), ProxyType::Any, 0, 0)); + assert_noop!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0), Error::::Duplicate); System::set_extrinsic_index(1); - assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0)); + assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0)); System::set_extrinsic_index(0); System::set_block_number(2); - assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0)); + assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0)); let call = Box::new(Call::Balances(BalancesCall::transfer(6, 1))); assert_ok!(Balances::transfer(Origin::signed(3), anon, 5)); -- GitLab From ddf333c4441b02ff67ef0b17b8e4259ea6f34d93 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Sun, 23 Aug 2020 22:26:42 +0200 Subject: [PATCH 120/132] Wait for all notifications protocols to be open before reporting opening (#6821) * Wait for all notifications protocols to be open before reporting opening * Update client/network/src/protocol/generic_proto/handler/notif_out.rs Co-authored-by: Max Inden * Concern * Fix attempt * Another fix attempt * Update client/network/src/protocol/generic_proto/handler/group.rs Co-authored-by: Max Inden Co-authored-by: parity-processbot <> Co-authored-by: Max Inden --- .../protocol/generic_proto/handler/group.rs | 182 ++++++++++-------- .../protocol/generic_proto/handler/legacy.rs | 21 -- .../generic_proto/handler/notif_out.rs | 16 ++ 3 files changed, 122 insertions(+), 97 deletions(-) diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs index dfa06693698..bcdbaf84851 100644 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -107,9 +107,17 @@ pub struct NotifsHandler { /// Handlers for outbound substreams, and the initial handshake message we send. out_handlers: Vec<(NotifsOutHandler, Arc>>)>, + /// Whether we are the connection dialer or listener. + endpoint: ConnectedPoint, + /// Handler for backwards-compatibility. legacy: LegacyProtoHandler, + /// In the situation where `legacy.is_open()` is true, but we haven't sent out any + /// [`NotifsHandlerOut::Open`] event yet, this contains the handshake received on the legacy + /// substream. + pending_legacy_handshake: Option>, + /// State of this handler. enabled: EnabledState, @@ -123,6 +131,9 @@ pub struct NotifsHandler { /// We use two different channels in order to have two different channel sizes, but from the /// receiving point of view, the two channels are the same. /// The receivers are fused in case the user drops the [`NotificationsSink`] entirely. + /// + /// Contains `Some` if and only if it has been reported to the user that the substreams are + /// open. 
notifications_sink_rx: Option< stream::Select< stream::Fuse>, @@ -159,7 +170,9 @@ impl IntoProtocolsHandler for NotifsHandlerProto { .into_iter() .map(|(proto, msg)| (proto.into_handler(remote_peer_id, connected_point), msg)) .collect(), + endpoint: connected_point.clone(), legacy: self.legacy.into_handler(remote_peer_id, connected_point), + pending_legacy_handshake: None, enabled: EnabledState::Initial, pending_in: Vec::new(), notifications_sink_rx: None, @@ -617,87 +630,80 @@ impl ProtocolsHandler for NotifsHandler { } } - if let Poll::Ready(ev) = self.legacy.poll(cx) { - return match ev { - ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: () } => - Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: protocol.map_upgrade(EitherUpgrade::B), - info: None, - }), - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolOpen { - endpoint, - received_handshake, - .. - }) => { - let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE); - let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); - let notifications_sink = NotificationsSink { - inner: Arc::new(NotificationsSinkInner { - async_channel: FuturesMutex::new(async_tx), - sync_channel: Mutex::new(sync_tx), + // If `self.pending_legacy_handshake` is `Some`, we are in a state where the legacy + // substream is open but the user isn't aware yet of the substreams being open. + // When that is the case, neither the legacy substream nor the incoming notifications + // substreams should be polled, otherwise there is a risk of receiving messages from them. + if self.pending_legacy_handshake.is_none() { + while let Poll::Ready(ev) = self.legacy.poll(cx) { + match ev { + ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: () } => + return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: protocol.map_upgrade(EitherUpgrade::B), + info: None, }), - }; - - debug_assert!(self.notifications_sink_rx.is_none()); - self.notifications_sink_rx = Some(stream::select(async_rx.fuse(), sync_rx.fuse())); - - Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::Open { endpoint, received_handshake, notifications_sink } - )) - }, - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolClosed { endpoint, reason }) => { - // We consciously drop the receivers despite notifications being potentially - // still buffered up. - debug_assert!(self.notifications_sink_rx.is_some()); - self.notifications_sink_rx = None; - - Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::Closed { endpoint, reason } - )) - }, - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomMessage { message }) => - Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::CustomMessage { message } - )), - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::ProtocolError { is_severe, error }) => - Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::ProtocolError { is_severe, error } - )), - ProtocolsHandlerEvent::Close(err) => - Poll::Ready(ProtocolsHandlerEvent::Close(NotifsHandlerError::Legacy(err))), + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolOpen { + received_handshake, + .. + }) => { + self.pending_legacy_handshake = Some(received_handshake); + cx.waker().wake_by_ref(); + return Poll::Pending; + }, + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolClosed { reason, .. }) => { + // We consciously drop the receivers despite notifications being potentially + // still buffered up. 
+ debug_assert!(self.notifications_sink_rx.is_some()); + self.notifications_sink_rx = None; + + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::Closed { endpoint: self.endpoint.clone(), reason } + )) + }, + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomMessage { message }) => { + debug_assert!(self.notifications_sink_rx.is_some()); + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CustomMessage { message } + )) + }, + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::ProtocolError { is_severe, error }) => + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::ProtocolError { is_severe, error } + )), + ProtocolsHandlerEvent::Close(err) => + return Poll::Ready(ProtocolsHandlerEvent::Close(NotifsHandlerError::Legacy(err))), + } } - } - for (handler_num, (handler, handshake_message)) in self.in_handlers.iter_mut().enumerate() { - while let Poll::Ready(ev) = handler.poll(cx) { - match ev { - ProtocolsHandlerEvent::OutboundSubstreamRequest { .. } => - error!("Incoming substream handler tried to open a substream"), - ProtocolsHandlerEvent::Close(err) => void::unreachable(err), - ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::OpenRequest(_)) => - match self.enabled { - EnabledState::Initial => self.pending_in.push(handler_num), - EnabledState::Enabled => { - // We create `handshake_message` on a separate line to be sure - // that the lock is released as soon as possible. - let handshake_message = handshake_message.read().clone(); - handler.inject_event(NotifsInHandlerIn::Accept(handshake_message)) + for (handler_num, (handler, handshake_message)) in self.in_handlers.iter_mut().enumerate() { + while let Poll::Ready(ev) = handler.poll(cx) { + match ev { + ProtocolsHandlerEvent::OutboundSubstreamRequest { .. } => + error!("Incoming substream handler tried to open a substream"), + ProtocolsHandlerEvent::Close(err) => void::unreachable(err), + ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::OpenRequest(_)) => + match self.enabled { + EnabledState::Initial => self.pending_in.push(handler_num), + EnabledState::Enabled => { + // We create `handshake_message` on a separate line to be sure + // that the lock is released as soon as possible. + let handshake_message = handshake_message.read().clone(); + handler.inject_event(NotifsInHandlerIn::Accept(handshake_message)) + }, + EnabledState::Disabled => + handler.inject_event(NotifsInHandlerIn::Refuse), }, - EnabledState::Disabled => - handler.inject_event(NotifsInHandlerIn::Refuse), + ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed) => {}, + ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(message)) => { + if self.notifications_sink_rx.is_some() { + let msg = NotifsHandlerOut::Notification { + message, + protocol_name: handler.protocol_name().to_owned().into(), + }; + return Poll::Ready(ProtocolsHandlerEvent::Custom(msg)); + } }, - ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed) => {}, - ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(message)) => { - // Note that right now the legacy substream has precedence over - // everything. If it is not open, then we consider that nothing is open. 
- if self.legacy.is_open() { - let msg = NotifsHandlerOut::Notification { - message, - protocol_name: handler.protocol_name().to_owned().into(), - }; - return Poll::Ready(ProtocolsHandlerEvent::Custom(msg)); - } - }, + } } } } @@ -725,6 +731,30 @@ impl ProtocolsHandler for NotifsHandler { } } + if self.out_handlers.iter().all(|(h, _)| h.is_open() || h.is_refused()) { + if let Some(handshake) = self.pending_legacy_handshake.take() { + let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE); + let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); + let notifications_sink = NotificationsSink { + inner: Arc::new(NotificationsSinkInner { + async_channel: FuturesMutex::new(async_tx), + sync_channel: Mutex::new(sync_tx), + }), + }; + + debug_assert!(self.notifications_sink_rx.is_none()); + self.notifications_sink_rx = Some(stream::select(async_rx.fuse(), sync_rx.fuse())); + + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::Open { + endpoint: self.endpoint.clone(), + received_handshake: handshake, + notifications_sink + } + )) + } + } + Poll::Pending } } diff --git a/client/network/src/protocol/generic_proto/handler/legacy.rs b/client/network/src/protocol/generic_proto/handler/legacy.rs index 71d6175f066..7d31ed323a4 100644 --- a/client/network/src/protocol/generic_proto/handler/legacy.rs +++ b/client/network/src/protocol/generic_proto/handler/legacy.rs @@ -222,16 +222,12 @@ pub enum LegacyProtoHandlerOut { /// Handshake message that has been sent to us. /// This is normally a "Status" message, but this out of the concern of this code. received_handshake: Vec, - /// The connected endpoint. - endpoint: ConnectedPoint, }, /// Closed a custom protocol with the remote. CustomProtocolClosed { /// Reason why the substream closed, for diagnostic purposes. reason: Cow<'static, str>, - /// The connected endpoint. - endpoint: ConnectedPoint, }, /// Receives a message on a custom protocol substream. @@ -250,18 +246,6 @@ pub enum LegacyProtoHandlerOut { } impl LegacyProtoHandler { - /// Returns true if the legacy substream is currently open. - pub fn is_open(&self) -> bool { - match &self.state { - ProtocolState::Init { substreams, .. } => !substreams.is_empty(), - ProtocolState::Opening { .. } => false, - ProtocolState::Normal { substreams, .. } => !substreams.is_empty(), - ProtocolState::Disabled { .. } => false, - ProtocolState::KillAsap => false, - ProtocolState::Poisoned => false, - } - } - /// Enables the handler. 
fn enable(&mut self) { self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { @@ -285,7 +269,6 @@ impl LegacyProtoHandler { } else { let event = LegacyProtoHandlerOut::CustomProtocolOpen { version: incoming[0].0.protocol_version(), - endpoint: self.endpoint.clone(), received_handshake: mem::replace(&mut incoming[0].1, Vec::new()), }; self.events_queue.push_back(ProtocolsHandlerEvent::Custom(event)); @@ -399,7 +382,6 @@ impl LegacyProtoHandler { if substreams.is_empty() { let event = LegacyProtoHandlerOut::CustomProtocolClosed { reason: "Legacy substream clogged".into(), - endpoint: self.endpoint.clone() }; self.state = ProtocolState::Disabled { shutdown: shutdown.into_iter().collect(), @@ -413,7 +395,6 @@ impl LegacyProtoHandler { if substreams.is_empty() { let event = LegacyProtoHandlerOut::CustomProtocolClosed { reason: "All substreams have been closed by the remote".into(), - endpoint: self.endpoint.clone() }; self.state = ProtocolState::Disabled { shutdown: shutdown.into_iter().collect(), @@ -426,7 +407,6 @@ impl LegacyProtoHandler { if substreams.is_empty() { let event = LegacyProtoHandlerOut::CustomProtocolClosed { reason: format!("Error on the last substream: {:?}", err).into(), - endpoint: self.endpoint.clone() }; self.state = ProtocolState::Disabled { shutdown: shutdown.into_iter().collect(), @@ -492,7 +472,6 @@ impl LegacyProtoHandler { ProtocolState::Opening { .. } => { let event = LegacyProtoHandlerOut::CustomProtocolOpen { version: substream.protocol_version(), - endpoint: self.endpoint.clone(), received_handshake, }; self.events_queue.push_back(ProtocolsHandlerEvent::Custom(event)); diff --git a/client/network/src/protocol/generic_proto/handler/notif_out.rs b/client/network/src/protocol/generic_proto/handler/notif_out.rs index 14de382c1bb..4ba9d9a0b74 100644 --- a/client/network/src/protocol/generic_proto/handler/notif_out.rs +++ b/client/network/src/protocol/generic_proto/handler/notif_out.rs @@ -203,6 +203,22 @@ impl NotifsOutHandler { } } + /// Returns `true` if there has been an attempt to open the substream, but the remote refused + /// the substream. + /// + /// Always returns `false` if the handler is in a disabled state. + pub fn is_refused(&self) -> bool { + match &self.state { + State::Disabled => false, + State::DisabledOpening => false, + State::DisabledOpen(_) => false, + State::Opening { .. } => false, + State::Refused => true, + State::Open { .. } => false, + State::Poisoned => false, + } + } + /// Returns the name of the protocol that we negotiate. pub fn protocol_name(&self) -> &[u8] { &self.protocol_name -- GitLab From 72102d25126732f55d8a106487ff071c9e58e8af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 24 Aug 2020 00:52:47 +0200 Subject: [PATCH 121/132] Make Ss58AddressFormat display less expressive (#6941) Instead of using the `Debug` implementation inside the `Display` implementation this pr changes it to display only the name of the format. --- primitives/core/src/crypto.rs | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 77a339ac7c6..ba0ed12568d 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -328,7 +328,13 @@ macro_rules! 
ss58_address_format { #[cfg(feature = "std")] impl std::fmt::Display for Ss58AddressFormat { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{:?}", self) + match self { + $( + Ss58AddressFormat::$identifier => write!(f, "{}", $name), + )* + Ss58AddressFormat::Custom(x) => write!(f, "{}", x), + } + } } @@ -419,10 +425,7 @@ macro_rules! ss58_address_format { #[cfg(feature = "std")] impl From for String { fn from(x: Ss58AddressFormat) -> String { - match x { - $(Ss58AddressFormat::$identifier => $name.into()),*, - Ss58AddressFormat::Custom(x) => x.to_string(), - } + x.to_string() } } ) -- GitLab From 735dc7c52ecd5c29a85fa3eba15a7ef7f0ff3f5f Mon Sep 17 00:00:00 2001 From: Max Inden Date: Mon, 24 Aug 2020 09:37:32 +0200 Subject: [PATCH 122/132] client/authority-discovery: Append PeerId to Multiaddr at most once (#6933) * client/authority-discovery/worker: Extract address getter * client/authority-discovery: Test for no duplicate p2p components * client/authority-discovery: Append PeerId to Multiaddr at most once When collecting the addresses to be published for the local node, `addresses_to_publish` adds the local nodes `PeerId` to each `Multiaddr`. Before doing so, ensure the `Multiaddr` does not already contain one. * client/authority-discovery: Remove explicit return --- Cargo.lock | 1 + client/authority-discovery/Cargo.toml | 1 + client/authority-discovery/src/worker.rs | 43 ++++++----- .../authority-discovery/src/worker/tests.rs | 71 ++++++++++++++++++- 4 files changed, 97 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 18a7266d3cc..03fa142c5b7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6250,6 +6250,7 @@ version = "0.8.0-rc6" dependencies = [ "bytes 0.5.6", "derive_more", + "either", "env_logger", "futures 0.3.5", "futures-timer 3.0.2", diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index d154b356057..8c898ab4964 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -19,6 +19,7 @@ prost-build = "0.6.1" bytes = "0.5.0" codec = { package = "parity-scale-codec", default-features = false, version = "1.3.4" } derive_more = "0.99.2" +either = "1.5.3" futures = "0.3.4" futures-timer = "3.0.1" libp2p = { version = "0.24.0", default-features = false, features = ["kad"] } diff --git a/client/authority-discovery/src/worker.rs b/client/authority-discovery/src/worker.rs index 16f19489f94..09cdedd93a1 100644 --- a/client/authority-discovery/src/worker.rs +++ b/client/authority-discovery/src/worker.rs @@ -30,7 +30,8 @@ use futures_timer::Delay; use addr_cache::AddrCache; use codec::Decode; -use libp2p::core::multiaddr; +use either::Either; +use libp2p::{core::multiaddr, multihash::Multihash}; use log::{debug, error, log_enabled}; use prometheus_endpoint::{Counter, CounterVec, Gauge, Opts, U64, register}; use prost::Message; @@ -232,6 +233,26 @@ where } } + fn addresses_to_publish(&self) -> impl ExactSizeIterator { + match &self.sentry_nodes { + Some(addrs) => Either::Left(addrs.clone().into_iter()), + None => { + let peer_id: Multihash = self.network.local_peer_id().into(); + Either::Right( + self.network.external_addresses() + .into_iter() + .map(move |a| { + if a.iter().any(|p| matches!(p, multiaddr::Protocol::P2p(_))) { + a + } else { + a.with(multiaddr::Protocol::P2p(peer_id.clone())) + } + }), + ) + } + } + } + /// Publish either our own or if specified the public addresses of our sentry nodes. 
fn publish_ext_addresses(&mut self) -> Result<()> { let key_store = match &self.role { @@ -242,29 +263,15 @@ where Role::Sentry => return Ok(()), }; - if let Some(metrics) = &self.metrics { - metrics.publish.inc() - } - - let addresses: Vec<_> = match &self.sentry_nodes { - Some(addrs) => addrs.clone().into_iter() - .map(|a| a.to_vec()) - .collect(), - None => self.network.external_addresses() - .into_iter() - .map(|a| a.with(multiaddr::Protocol::P2p( - self.network.local_peer_id().into(), - ))) - .map(|a| a.to_vec()) - .collect(), - }; + let addresses = self.addresses_to_publish(); if let Some(metrics) = &self.metrics { + metrics.publish.inc(); metrics.amount_last_published.set(addresses.len() as u64); } let mut serialized_addresses = vec![]; - schema::AuthorityAddresses { addresses } + schema::AuthorityAddresses { addresses: addresses.map(|a| a.to_vec()).collect() } .encode(&mut serialized_addresses) .map_err(Error::EncodingProto)?; diff --git a/client/authority-discovery/src/worker/tests.rs b/client/authority-discovery/src/worker/tests.rs index 68aadca7a7f..4b16b9040b8 100644 --- a/client/authority-discovery/src/worker/tests.rs +++ b/client/authority-discovery/src/worker/tests.rs @@ -168,6 +168,7 @@ sp_api::mock_impl_runtime_apis! { pub struct TestNetwork { peer_id: PeerId, + external_addresses: Vec, // Whenever functions on `TestNetwork` are called, the function arguments are added to the // vectors below. pub put_value_call: Arc)>>>, @@ -179,6 +180,10 @@ impl Default for TestNetwork { fn default() -> Self { TestNetwork { peer_id: PeerId::random(), + external_addresses: vec![ + "/ip6/2001:db8::/tcp/30333" + .parse().unwrap(), + ], put_value_call: Default::default(), get_value_call: Default::default(), set_priority_group_call: Default::default(), @@ -212,7 +217,7 @@ impl NetworkStateInfo for TestNetwork { } fn external_addresses(&self) -> Vec { - vec!["/ip6/2001:db8::/tcp/30333".parse().unwrap()] + self.external_addresses.clone() } } @@ -691,3 +696,67 @@ fn do_not_cache_addresses_without_peer_id() { "Expect worker to only cache `Multiaddr`s with `PeerId`s.", ); } + +#[test] +fn addresses_to_publish_adds_p2p() { + let (_dht_event_tx, dht_event_rx) = channel(1000); + let network: Arc = Arc::new(Default::default()); + + assert!(!matches!( + network.external_addresses().pop().unwrap().pop().unwrap(), + multiaddr::Protocol::P2p(_) + )); + + let (_to_worker, from_service) = mpsc::channel(0); + let worker = Worker::new( + from_service, + Arc::new(TestApi { + authorities: vec![], + }), + network.clone(), + vec![], + dht_event_rx.boxed(), + Role::Authority(KeyStore::new()), + Some(prometheus_endpoint::Registry::new()), + ); + + assert!( + matches!( + worker.addresses_to_publish().next().unwrap().pop().unwrap(), + multiaddr::Protocol::P2p(_) + ), + "Expect `addresses_to_publish` to append `p2p` protocol component.", + ); +} + +/// Ensure [`Worker::addresses_to_publish`] does not add an additional `p2p` protocol component in +/// case one already exists. +#[test] +fn addresses_to_publish_respects_existing_p2p_protocol() { + let (_dht_event_tx, dht_event_rx) = channel(1000); + let network: Arc = Arc::new(TestNetwork { + external_addresses: vec![ + "/ip6/2001:db8::/tcp/30333/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC" + .parse().unwrap(), + ], + .. 
Default::default() + }); + + let (_to_worker, from_service) = mpsc::channel(0); + let worker = Worker::new( + from_service, + Arc::new(TestApi { + authorities: vec![], + }), + network.clone(), + vec![], + dht_event_rx.boxed(), + Role::Authority(KeyStore::new()), + Some(prometheus_endpoint::Registry::new()), + ); + + assert_eq!( + network.external_addresses, worker.addresses_to_publish().collect::>(), + "Expected Multiaddr from `TestNetwork` to not be altered.", + ); +} -- GitLab From d3ab36df62f149bb4012c7312a4be517cc4754db Mon Sep 17 00:00:00 2001 From: Xiliang Chen Date: Mon, 24 Aug 2020 19:38:18 +1200 Subject: [PATCH 123/132] expose Deposit (#6943) --- frame/indices/src/lib.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/frame/indices/src/lib.rs b/frame/indices/src/lib.rs index c99beb463bc..e03cf4f1eea 100644 --- a/frame/indices/src/lib.rs +++ b/frame/indices/src/lib.rs @@ -121,6 +121,9 @@ decl_error! { decl_module! { pub struct Module for enum Call where origin: T::Origin, system = frame_system { + /// The deposit needed for reserving an index. + const Deposit: BalanceOf = T::Deposit::get(); + fn deposit_event() = default; /// Assign an previously unassigned index. -- GitLab From c3fcedded444bc0a4cea6f2ad36cd5f8e108cd70 Mon Sep 17 00:00:00 2001 From: Ashley Date: Mon, 24 Aug 2020 10:34:16 +0200 Subject: [PATCH 124/132] Add a `LightSyncState` field to the chain spec (#6894) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Reset code, almost ready for PR * Improved build_hardcoded_spec * Fix line widths * Fix tests * Fix sc-service-test * Suggestions from code review * Rename to LightSyncState * It's not syncing :^( * It syncs! * Remove rpc call * Convert spaces to tabs * Moved sc-service things to export_sync_state.rs * Fix tests * Wait for syncing with network_status_sinks * Remove sc-network from node-template * Apply suggestions from code review Co-authored-by: Bastian Köcher * Various changes, split the flag up into 2 pieces to make testing easier. 
* Update client/cli/src/commands/build_spec_cmd.rs Co-authored-by: Bastian Köcher * Revert a lot of changes Co-authored-by: Bastian Köcher --- Cargo.lock | 1 + client/api/src/in_mem.rs | 18 ++--- client/api/src/light.rs | 19 +++-- client/chain-spec/Cargo.toml | 1 + client/chain-spec/src/chain_spec.rs | 78 +++++++++++++++++--- client/chain-spec/src/lib.rs | 6 +- client/db/src/light.rs | 38 +++++----- client/service/src/chain_ops/build_spec.rs | 82 ++++++++++++++++++++++ client/service/src/chain_ops/mod.rs | 2 + client/service/test/src/client/light.rs | 20 +++--- 10 files changed, 212 insertions(+), 53 deletions(-) create mode 100644 client/service/src/chain_ops/build_spec.rs diff --git a/Cargo.lock b/Cargo.lock index 03fa142c5b7..af32d89981d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6324,6 +6324,7 @@ name = "sc-chain-spec" version = "2.0.0-rc6" dependencies = [ "impl-trait-for-tuples", + "parity-scale-codec", "sc-chain-spec-derive", "sc-network", "sc-telemetry", diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 306c3c2b2f1..13ff7a01f91 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -447,6 +447,16 @@ impl light::Storage for Blockchain Blockchain::finalize_header(self, id, None) } + fn cache(&self) -> Option>> { + None + } + + fn usage_info(&self) -> Option { + None + } +} + +impl light::ChtRootStorage for Blockchain { fn header_cht_root( &self, _cht_size: NumberFor, @@ -466,14 +476,6 @@ impl light::Storage for Blockchain .ok_or_else(|| sp_blockchain::Error::Backend(format!("Changes trie CHT for block {} not exists", block))) .map(Some) } - - fn cache(&self) -> Option>> { - None - } - - fn usage_info(&self) -> Option { - None - } } /// In-memory operation. diff --git a/client/api/src/light.rs b/client/api/src/light.rs index b359c1149ee..f9aa002841c 100644 --- a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -232,7 +232,9 @@ pub trait FetchChecker: Send + Sync { /// Light client blockchain storage. -pub trait Storage: AuxStore + HeaderBackend + HeaderMetadata { +pub trait Storage: AuxStore + HeaderBackend + + HeaderMetadata + ChtRootStorage +{ /// Store new header. Should refuse to revert any finalized blocks. /// /// Takes new authorities, the leaf state of the new block, and @@ -254,6 +256,15 @@ pub trait Storage: AuxStore + HeaderBackend + HeaderMetada /// Get last finalized header. fn last_finalized(&self) -> ClientResult; + /// Get storage cache. + fn cache(&self) -> Option>>; + + /// Get storage usage statistics. + fn usage_info(&self) -> Option; +} + +/// Light client CHT root storage. +pub trait ChtRootStorage { /// Get headers CHT root for given block. Returns None if the block is not pruned (not a part of any CHT). fn header_cht_root( &self, @@ -267,12 +278,6 @@ pub trait Storage: AuxStore + HeaderBackend + HeaderMetada cht_size: NumberFor, block: NumberFor, ) -> ClientResult>; - - /// Get storage cache. - fn cache(&self) -> Option>>; - - /// Get storage usage statistics. - fn usage_info(&self) -> Option; } /// Remote header. 
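Editor's note: the `ChtRootStorage` trait extracted in the hunk above hinges on how Canonical Hash Tries (CHTs) partition the chain: each CHT covers a fixed window of block numbers, and a root can only exist once its whole window lies below the finalized head (the light-client database additionally waits until those headers have been pruned). The standalone sketch below illustrates that arithmetic only; the window size of 2048 and the helper names are illustrative assumptions, not necessarily the values or APIs exposed by `sc_client_api::cht`.

```rust
/// Illustrative CHT window size; the real value comes from `sc_client_api::cht::size()`.
const CHT_SIZE: u64 = 2048;

/// CHT number covering `block`, or `None` for block 0 (genesis is not covered by any CHT).
fn block_to_cht_number(block: u64) -> Option<u64> {
    if block == 0 { None } else { Some((block - 1) / CHT_SIZE) }
}

/// First block covered by CHT `cht_num`.
fn start_number(cht_num: u64) -> u64 {
    cht_num * CHT_SIZE + 1
}

/// Simplified availability rule: a CHT root can only exist once every block in its
/// window is at or below the finalized head, loosely mirroring `header_cht_root`
/// returning `Ok(None)` for blocks that are not yet part of any complete CHT.
fn cht_root_available(block: u64, finalized: u64) -> bool {
    match block_to_cht_number(block) {
        Some(cht_num) => start_number(cht_num) + CHT_SIZE - 1 <= finalized,
        None => false,
    }
}

fn main() {
    assert_eq!(block_to_cht_number(1), Some(0));
    assert_eq!(block_to_cht_number(2048), Some(0));
    assert_eq!(block_to_cht_number(2049), Some(1));
    // With 4096 blocks finalized, the first CHT (blocks 1..=2048) is complete,
    // the second (blocks 2049..=4096) only just, and the third is not.
    assert!(cht_root_available(100, 4096));
    assert!(cht_root_available(2049, 4096));
    assert!(!cht_root_available(4097, 4096));
}
```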
diff --git a/client/chain-spec/Cargo.toml b/client/chain-spec/Cargo.toml index fb0addf461a..fcfb80a720e 100644 --- a/client/chain-spec/Cargo.toml +++ b/client/chain-spec/Cargo.toml @@ -21,3 +21,4 @@ serde_json = "1.0.41" sp-runtime = { version = "2.0.0-rc6", path = "../../primitives/runtime" } sp-chain-spec = { version = "2.0.0-rc6", path = "../../primitives/chain-spec" } sc-telemetry = { version = "2.0.0-rc6", path = "../telemetry" } +codec = { package = "parity-scale-codec", version = "1.3.4" } diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 52414f8687c..20811394c56 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -17,6 +17,7 @@ // along with this program. If not, see . //! Substrate chain configurations. +#![warn(missing_docs)] use std::{borrow::Cow, fs::File, path::PathBuf, sync::Arc, collections::HashMap}; use serde::{Serialize, Deserialize}; @@ -26,6 +27,7 @@ use serde_json as json; use crate::{RuntimeGenesis, ChainType, extension::GetExtension, Properties}; use sc_network::config::MultiaddrWithPeerId; use sc_telemetry::TelemetryEndpoints; +use sp_runtime::traits::Block as BlockT; enum GenesisSource { File(PathBuf), @@ -157,6 +159,7 @@ struct ClientSpec { consensus_engine: (), #[serde(skip_serializing)] genesis: serde::de::IgnoredAny, + light_sync_state: Option, } /// A type denoting empty extensions. @@ -245,6 +248,7 @@ impl ChainSpec { extensions, consensus_engine: (), genesis: Default::default(), + light_sync_state: None, }; ChainSpec { @@ -257,6 +261,11 @@ impl ChainSpec { fn chain_type(&self) -> ChainType { self.client_spec.chain_type.clone() } + + /// Hardcode infomation to allow light clients to sync quickly into the chain spec. + fn set_light_sync_state(&mut self, light_sync_state: SerializableLightSyncState) { + self.client_spec.light_sync_state = Some(light_sync_state); + } } impl ChainSpec { @@ -284,16 +293,15 @@ impl ChainSpec { } } -impl ChainSpec { - /// Dump to json string. - pub fn as_json(&self, raw: bool) -> Result { - #[derive(Serialize, Deserialize)] - struct Container { - #[serde(flatten)] - client_spec: ClientSpec, - genesis: Genesis, +#[derive(Serialize, Deserialize)] +struct JsonContainer { + #[serde(flatten)] + client_spec: ClientSpec, + genesis: Genesis, +} - }; +impl ChainSpec { + fn json_container(&self, raw: bool) -> Result, String> { let genesis = match (raw, self.genesis.resolve()?) { (true, Genesis::Runtime(g)) => { let storage = g.build_storage()?; @@ -313,10 +321,15 @@ impl ChainSpec { }, (_, genesis) => genesis, }; - let container = Container { + Ok(JsonContainer { client_spec: self.client_spec.clone(), genesis, - }; + }) + } + + /// Dump to json string. + pub fn as_json(&self, raw: bool) -> Result { + let container = self.json_container(raw)?; json::to_string_pretty(&container) .map_err(|e| format!("Error generating spec json: {}", e)) } @@ -378,6 +391,49 @@ where fn set_storage(&mut self, storage: Storage) { self.genesis = GenesisSource::Storage(storage); } + + fn set_light_sync_state(&mut self, light_sync_state: SerializableLightSyncState) { + ChainSpec::set_light_sync_state(self, light_sync_state) + } +} + +/// Hardcoded infomation that allows light clients to sync quickly. +pub struct LightSyncState { + /// The header of the best finalized block. + pub header: ::Header, + /// A list of all CHTs in the chain. + pub chts: Vec<::Hash>, +} + +impl LightSyncState { + /// Convert into a `SerializableLightSyncState`. 
+ pub fn to_serializable(&self) -> SerializableLightSyncState { + use codec::Encode; + + SerializableLightSyncState { + header: StorageData(self.header.encode()), + chts: self.chts.iter().map(|hash| StorageData(hash.encode())).collect(), + } + } + + /// Convert from a `SerializableLightSyncState`. + pub fn from_serializable(serialized: &SerializableLightSyncState) -> Result { + Ok(Self { + header: codec::Decode::decode(&mut &serialized.header.0[..])?, + chts: serialized.chts.iter() + .map(|cht| codec::Decode::decode(&mut &cht.0[..])) + .collect::>()?, + }) + } +} + +/// The serializable form of `LightSyncState`. Created using `LightSyncState::serialize`. +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(rename_all = "camelCase")] +#[serde(deny_unknown_fields)] +pub struct SerializableLightSyncState { + header: StorageData, + chts: Vec, } #[cfg(test)] diff --git a/client/chain-spec/src/lib.rs b/client/chain-spec/src/lib.rs index 8901a9a6822..f5afe496f19 100644 --- a/client/chain-spec/src/lib.rs +++ b/client/chain-spec/src/lib.rs @@ -108,7 +108,9 @@ mod chain_spec; mod extension; -pub use chain_spec::{ChainSpec as GenericChainSpec, NoExtension}; +pub use chain_spec::{ + ChainSpec as GenericChainSpec, NoExtension, LightSyncState, SerializableLightSyncState, +}; pub use extension::{Group, Fork, Forks, Extension, GetExtension, get_extension}; pub use sc_chain_spec_derive::{ChainSpecExtension, ChainSpecGroup}; pub use sp_chain_spec::{Properties, ChainType}; @@ -155,6 +157,8 @@ pub trait ChainSpec: BuildStorage + Send { /// /// This will be used as storage at genesis. fn set_storage(&mut self, storage: Storage); + /// Hardcode infomation to allow light clients to sync quickly into the chain spec. + fn set_light_sync_state(&mut self, light_sync_state: SerializableLightSyncState); } impl std::fmt::Debug for dyn ChainSpec { diff --git a/client/db/src/light.rs b/client/db/src/light.rs index 139ecf3b22c..adf9a98d35e 100644 --- a/client/db/src/light.rs +++ b/client/db/src/light.rs @@ -27,7 +27,7 @@ use sc_client_api::{ blockchain::{ BlockStatus, Cache as BlockchainCache, Info as BlockchainInfo, }, - Storage + Storage, ChtRootStorage, }; use sp_blockchain::{ CachedHeaderMetadata, HeaderMetadata, HeaderMetadataCache, @@ -523,22 +523,6 @@ impl Storage for LightStorage } } - fn header_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> ClientResult> { - self.read_cht_root(HEADER_CHT_PREFIX, cht_size, block) - } - - fn changes_trie_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> ClientResult> { - self.read_cht_root(CHANGES_TRIE_CHT_PREFIX, cht_size, block) - } - fn finalize_header(&self, id: BlockId) -> ClientResult<()> { if let Some(header) = self.header(id)? { let mut transaction = Transaction::new(); @@ -612,6 +596,26 @@ impl Storage for LightStorage } } +impl ChtRootStorage for LightStorage + where Block: BlockT, +{ + fn header_cht_root( + &self, + cht_size: NumberFor, + block: NumberFor, + ) -> ClientResult> { + self.read_cht_root(HEADER_CHT_PREFIX, cht_size, block) + } + + fn changes_trie_cht_root( + &self, + cht_size: NumberFor, + block: NumberFor, + ) -> ClientResult> { + self.read_cht_root(CHANGES_TRIE_CHT_PREFIX, cht_size, block) + } +} + /// Build the key for inserting header-CHT at given block. 
fn cht_key>(cht_type: u8, block: N) -> ClientResult<[u8; 5]> { let mut key = [cht_type; 5]; diff --git a/client/service/src/chain_ops/build_spec.rs b/client/service/src/chain_ops/build_spec.rs new file mode 100644 index 00000000000..c84c1c754ad --- /dev/null +++ b/client/service/src/chain_ops/build_spec.rs @@ -0,0 +1,82 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use sp_runtime::traits::{Block as BlockT, NumberFor, Saturating, One}; +use sp_blockchain::HeaderBackend; +use crate::{TFullBackend, TLightBackend}; +use std::sync::Arc; +use sp_runtime::generic::BlockId; + +/// An error for if this function is being called on a full node. +pub const CHT_ROOT_ERROR: &str = + "Backend doesn't store CHT roots. Make sure you're calling this on a light client."; + +/// Something that might allow access to a `ChtRootStorage`. +pub trait MaybeChtRootStorageProvider { + /// Potentially get a reference to a `ChtRootStorage`. + fn cht_root_storage(&self) -> Option<&dyn sc_client_api::light::ChtRootStorage>; +} + +impl MaybeChtRootStorageProvider for TFullBackend { + fn cht_root_storage(&self) -> Option<&dyn sc_client_api::light::ChtRootStorage> { + None + } +} + +impl MaybeChtRootStorageProvider for TLightBackend { + fn cht_root_storage(&self) -> Option<&dyn sc_client_api::light::ChtRootStorage> { + Some(self.blockchain().storage()) + } +} + +/// Build a `LightSyncState` from the CHT roots stored in a backend. +pub fn build_light_sync_state( + client: Arc, + backend: Arc, +) -> Result, sp_blockchain::Error> + where + TBl: BlockT, + TCl: HeaderBackend, + TBackend: MaybeChtRootStorageProvider, +{ + let storage = backend.cht_root_storage().ok_or(CHT_ROOT_ERROR)?; + + let finalized_hash = client.info().finalized_hash; + let finalized_number = client.info().finalized_number; + + use sc_client_api::cht; + + let mut chts = Vec::new(); + + // We can't fetch a CHT root later than `finalized_number - 2 * cht_size`. + let cht_size_x_2 = cht::size::>() * NumberFor::::from(2); + + let mut number = NumberFor::::one(); + + while number <= finalized_number.saturating_sub(cht_size_x_2) { + match storage.header_cht_root(cht::size(), number)? 
{ + Some(cht_root) => chts.push(cht_root), + None => log::error!("No CHT found for block {}", number), + } + + number += cht::size(); + } + + Ok(sc_chain_spec::LightSyncState { + header: client.header(BlockId::Hash(finalized_hash))?.unwrap(), + chts, + }) +} diff --git a/client/service/src/chain_ops/mod.rs b/client/service/src/chain_ops/mod.rs index af6e6f632fc..19f5e346820 100644 --- a/client/service/src/chain_ops/mod.rs +++ b/client/service/src/chain_ops/mod.rs @@ -21,9 +21,11 @@ mod export_blocks; mod export_raw_state; mod import_blocks; mod revert_chain; +mod build_spec; pub use check_block::*; pub use export_blocks::*; pub use export_raw_state::*; pub use import_blocks::*; pub use revert_chain::*; +pub use build_spec::*; diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index ffc84ad47b8..515d7d15326 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -42,7 +42,7 @@ use sc_executor::{NativeExecutor, WasmExecutionMethod, RuntimeVersion, NativeVer use sp_core::{H256, NativeOrEncoded, testing::TaskExecutor}; use sc_client_api::{ blockchain::Info, backend::NewBlockState, Backend as ClientBackend, ProofProvider, - in_mem::{Backend as InMemBackend, Blockchain as InMemoryBlockchain}, + in_mem::{Backend as InMemBackend, Blockchain as InMemoryBlockchain}, ChtRootStorage, AuxStore, Storage, CallExecutor, cht, ExecutionStrategy, StorageProof, BlockImportOperation, RemoteCallRequest, StorageProvider, ChangesProof, RemoteBodyRequest, RemoteReadRequest, RemoteChangesRequest, FetchChecker, RemoteReadChildRequest, RemoteHeaderRequest, BlockBackend, @@ -164,6 +164,16 @@ impl Storage for DummyStorage { Err(ClientError::Backend("Test error".into())) } + fn cache(&self) -> Option>> { + None + } + + fn usage_info(&self) -> Option { + None + } +} + +impl ChtRootStorage for DummyStorage { fn header_cht_root(&self, _cht_size: u64, _block: u64) -> ClientResult> { Err(ClientError::Backend("Test error".into())) } @@ -177,14 +187,6 @@ impl Storage for DummyStorage { ).into()) .map(Some) } - - fn cache(&self) -> Option>> { - None - } - - fn usage_info(&self) -> Option { - None - } } struct DummyCallExecutor; -- GitLab From 9f99d5f0c9e84f9dd554c03073e865ebdb4ae01b Mon Sep 17 00:00:00 2001 From: Ashley Date: Mon, 24 Aug 2020 15:11:21 +0200 Subject: [PATCH 125/132] Dynamically generate CHT roots on a full client (#6944) * Generate CHT roots on a full client * add changes_trie_root function * Add a test * Line widths * Fix sc-service-test * Clarify comments * Revert comments --- client/api/src/backend.rs | 18 +++++ client/api/src/in_mem.rs | 4 +- client/api/src/light.rs | 21 +---- client/db/src/lib.rs | 91 +++++++++++++++++++++- client/db/src/light.rs | 6 +- client/light/src/blockchain.rs | 20 ++++- client/service/src/chain_ops/build_spec.rs | 31 ++------ client/service/test/src/client/light.rs | 4 +- 8 files changed, 139 insertions(+), 56 deletions(-) diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index efc5ca4ee8c..47fec977f5e 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -536,3 +536,21 @@ pub fn changes_tries_state_at_block<'a, Block: BlockT>( None => Ok(None), } } + +/// Provide CHT roots. These are stored on a light client and generated dynamically on a full +/// client. +pub trait ProvideChtRoots { + /// Get headers CHT root for given block. Returns None if the block is not a part of any CHT. 
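
The loop above collects one root per completed CHT, starting at block 1 and stepping by `cht::size()`, but never past `finalized - 2 * cht_size`, since more recent CHT roots are not available yet. A self-contained sketch of the same arithmetic in plain `u64`; the 2048 figure is only the conventional CHT size and is used here as an illustrative assumption:

```rust
/// Block numbers at which the loop above would request a CHT root.
fn cht_sample_points(cht_size: u64, finalized: u64) -> Vec<u64> {
    let mut points = Vec::new();
    let mut number = 1u64;
    // CHT roots newer than `finalized - 2 * cht_size` cannot be fetched yet.
    while number <= finalized.saturating_sub(2 * cht_size) {
        points.push(number);
        number += cht_size;
    }
    points
}

fn main() {
    // With a CHT size of 2048 and 10_000 finalized blocks, only the oldest
    // three CHTs are far enough behind finality to be sampled.
    assert_eq!(cht_sample_points(2048, 10_000), vec![1, 2049, 4097]);
}
```
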
+ fn header_cht_root( + &self, + cht_size: NumberFor, + block: NumberFor, + ) -> sp_blockchain::Result>; + + /// Get changes trie CHT root for given block. Returns None if the block is not a part of any CHT. + fn changes_trie_cht_root( + &self, + cht_size: NumberFor, + block: NumberFor, + ) -> sp_blockchain::Result>; +} diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 13ff7a01f91..ded030fb804 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -35,7 +35,7 @@ use sp_state_machine::{ use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata}; use crate::{ - backend::{self, NewBlockState}, + backend::{self, NewBlockState, ProvideChtRoots}, blockchain::{ self, BlockStatus, HeaderBackend, well_known_cache_keys::Id as CacheKeyId }, @@ -456,7 +456,7 @@ impl light::Storage for Blockchain } } -impl light::ChtRootStorage for Blockchain { +impl ProvideChtRoots for Blockchain { fn header_cht_root( &self, _cht_size: NumberFor, diff --git a/client/api/src/light.rs b/client/api/src/light.rs index f9aa002841c..144851dac00 100644 --- a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -32,7 +32,7 @@ use sp_blockchain::{ HeaderMetadata, well_known_cache_keys, HeaderBackend, Cache as BlockchainCache, Error as ClientError, Result as ClientResult, }; -use crate::{backend::{AuxStore, NewBlockState}, UsageInfo}; +use crate::{backend::{AuxStore, NewBlockState}, UsageInfo, ProvideChtRoots}; /// Remote call request. #[derive(Clone, Debug, PartialEq, Eq, Hash)] @@ -233,7 +233,7 @@ pub trait FetchChecker: Send + Sync { /// Light client blockchain storage. pub trait Storage: AuxStore + HeaderBackend - + HeaderMetadata + ChtRootStorage + + HeaderMetadata + ProvideChtRoots { /// Store new header. Should refuse to revert any finalized blocks. /// @@ -263,23 +263,6 @@ pub trait Storage: AuxStore + HeaderBackend fn usage_info(&self) -> Option; } -/// Light client CHT root storage. -pub trait ChtRootStorage { - /// Get headers CHT root for given block. Returns None if the block is not pruned (not a part of any CHT). - fn header_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> ClientResult>; - - /// Get changes trie CHT root for given block. Returns None if the block is not pruned (not a part of any CHT). - fn changes_trie_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> ClientResult>; -} - /// Remote header. 
#[derive(Debug)] pub enum LocalOrRemote { diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index d854c80bf35..bd438f4dd71 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -54,8 +54,8 @@ use std::collections::{HashMap, HashSet}; use sc_client_api::{ UsageInfo, MemoryInfo, IoInfo, MemorySize, - backend::{NewBlockState, PrunableStateChangesTrieStorage}, - leaves::{LeafSet, FinalizationDisplaced}, + backend::{NewBlockState, PrunableStateChangesTrieStorage, ProvideChtRoots}, + leaves::{LeafSet, FinalizationDisplaced}, cht, }; use sp_blockchain::{ Result as ClientResult, Error as ClientError, @@ -70,7 +70,7 @@ use sp_core::ChangesTrieConfiguration; use sp_core::offchain::storage::{OffchainOverlayedChange, OffchainOverlayedChanges}; use sp_core::storage::{well_known_keys, ChildInfo}; use sp_arithmetic::traits::Saturating; -use sp_runtime::{generic::BlockId, Justification, Storage}; +use sp_runtime::{generic::{DigestItem, BlockId}, Justification, Storage}; use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, NumberFor, Zero, One, SaturatedConversion, HashFor, }; @@ -405,6 +405,14 @@ impl BlockchainDb { meta.finalized_hash = hash; } } + + // Get block changes trie root, if available. + fn changes_trie_root(&self, block: BlockId) -> ClientResult> { + self.header(block) + .map(|header| header.and_then(|header| + header.digest().log(DigestItem::as_changes_trie_root) + .cloned())) + } } impl sc_client_api::blockchain::HeaderBackend for BlockchainDb { @@ -525,6 +533,58 @@ impl HeaderMetadata for BlockchainDb { } } +impl ProvideChtRoots for BlockchainDb { + fn header_cht_root( + &self, + cht_size: NumberFor, + block: NumberFor, + ) -> sp_blockchain::Result> { + let cht_number = match cht::block_to_cht_number(cht_size, block) { + Some(number) => number, + None => return Ok(None), + }; + + let cht_start: NumberFor = cht::start_number(cht::size(), cht_number); + + let mut current_num = cht_start; + let cht_range = ::std::iter::from_fn(|| { + let old_current_num = current_num; + current_num = current_num + One::one(); + Some(old_current_num) + }); + + cht::compute_root::, _>( + cht::size(), cht_number, cht_range.map(|num| self.hash(num)) + ).map(Some) + } + + fn changes_trie_cht_root( + &self, + cht_size: NumberFor, + block: NumberFor, + ) -> sp_blockchain::Result> { + let cht_number = match cht::block_to_cht_number(cht_size, block) { + Some(number) => number, + None => return Ok(None), + }; + + let cht_start: NumberFor = cht::start_number(cht::size(), cht_number); + + let mut current_num = cht_start; + let cht_range = ::std::iter::from_fn(|| { + let old_current_num = current_num; + current_num = current_num + One::one(); + Some(old_current_num) + }); + + cht::compute_root::, _>( + cht::size(), + cht_number, + cht_range.map(|num| self.changes_trie_root(BlockId::Number(num))), + ).map(Some) + } +} + /// Database transaction pub struct BlockImportOperation { old_state: SyncingCachingState, Block>, @@ -2329,4 +2389,29 @@ pub(crate) mod tests { backend.commit_operation(op).unwrap_err(); } } + + #[test] + fn header_cht_root_works() { + use sc_client_api::ProvideChtRoots; + + let backend = Backend::::new_test(10, 10); + + // insert 1 + SIZE + SIZE + 1 blocks so that CHT#0 is created + let mut prev_hash = insert_header(&backend, 0, Default::default(), None, Default::default()); + let cht_size: u64 = cht::size(); + for i in 1..1 + cht_size + cht_size + 1 { + prev_hash = insert_header(&backend, i, prev_hash, None, Default::default()); + } + + let blockchain = backend.blockchain(); + + 
let cht_root_1 = blockchain.header_cht_root(cht_size, cht::start_number(cht_size, 0)) + .unwrap().unwrap(); + let cht_root_2 = blockchain.header_cht_root(cht_size, cht::start_number(cht_size, 0) + cht_size / 2) + .unwrap().unwrap(); + let cht_root_3 = blockchain.header_cht_root(cht_size, cht::end_number(cht_size, 0)) + .unwrap().unwrap(); + assert_eq!(cht_root_1, cht_root_2); + assert_eq!(cht_root_2, cht_root_3); + } } diff --git a/client/db/src/light.rs b/client/db/src/light.rs index adf9a98d35e..acfb6217ce9 100644 --- a/client/db/src/light.rs +++ b/client/db/src/light.rs @@ -23,11 +23,11 @@ use std::convert::TryInto; use parking_lot::RwLock; use sc_client_api::{ - cht, backend::{AuxStore, NewBlockState}, UsageInfo, + cht, backend::{AuxStore, NewBlockState, ProvideChtRoots}, UsageInfo, blockchain::{ BlockStatus, Cache as BlockchainCache, Info as BlockchainInfo, }, - Storage, ChtRootStorage, + Storage, }; use sp_blockchain::{ CachedHeaderMetadata, HeaderMetadata, HeaderMetadataCache, @@ -596,7 +596,7 @@ impl Storage for LightStorage } } -impl ChtRootStorage for LightStorage +impl ProvideChtRoots for LightStorage where Block: BlockT, { fn header_cht_root( diff --git a/client/light/src/blockchain.rs b/client/light/src/blockchain.rs index 9d557db887d..3b5753f2849 100644 --- a/client/light/src/blockchain.rs +++ b/client/light/src/blockchain.rs @@ -29,7 +29,7 @@ use sp_blockchain::{ }; pub use sc_client_api::{ backend::{ - AuxStore, NewBlockState + AuxStore, NewBlockState, ProvideChtRoots, }, blockchain::{ Backend as BlockchainBackend, BlockStatus, Cache as BlockchainCache, @@ -173,3 +173,21 @@ impl RemoteBlockchain for Blockchain })) } } + +impl, Block: BlockT> ProvideChtRoots for Blockchain { + fn header_cht_root( + &self, + cht_size: NumberFor, + block: NumberFor, + ) -> sp_blockchain::Result> { + self.storage().header_cht_root(cht_size, block) + } + + fn changes_trie_cht_root( + &self, + cht_size: NumberFor, + block: NumberFor, + ) -> sp_blockchain::Result> { + self.storage().changes_trie_cht_root(cht_size, block) + } +} diff --git a/client/service/src/chain_ops/build_spec.rs b/client/service/src/chain_ops/build_spec.rs index c84c1c754ad..40d591d81f0 100644 --- a/client/service/src/chain_ops/build_spec.rs +++ b/client/service/src/chain_ops/build_spec.rs @@ -16,31 +16,9 @@ use sp_runtime::traits::{Block as BlockT, NumberFor, Saturating, One}; use sp_blockchain::HeaderBackend; -use crate::{TFullBackend, TLightBackend}; use std::sync::Arc; use sp_runtime::generic::BlockId; - -/// An error for if this function is being called on a full node. -pub const CHT_ROOT_ERROR: &str = - "Backend doesn't store CHT roots. Make sure you're calling this on a light client."; - -/// Something that might allow access to a `ChtRootStorage`. -pub trait MaybeChtRootStorageProvider { - /// Potentially get a reference to a `ChtRootStorage`. - fn cht_root_storage(&self) -> Option<&dyn sc_client_api::light::ChtRootStorage>; -} - -impl MaybeChtRootStorageProvider for TFullBackend { - fn cht_root_storage(&self) -> Option<&dyn sc_client_api::light::ChtRootStorage> { - None - } -} - -impl MaybeChtRootStorageProvider for TLightBackend { - fn cht_root_storage(&self) -> Option<&dyn sc_client_api::light::ChtRootStorage> { - Some(self.blockchain().storage()) - } -} +use sc_client_api::ProvideChtRoots; /// Build a `LightSyncState` from the CHT roots stored in a backend. 
pub fn build_light_sync_state( @@ -50,9 +28,10 @@ pub fn build_light_sync_state( where TBl: BlockT, TCl: HeaderBackend, - TBackend: MaybeChtRootStorageProvider, + TBackend: sc_client_api::Backend, + >::Blockchain: ProvideChtRoots, { - let storage = backend.cht_root_storage().ok_or(CHT_ROOT_ERROR)?; + let cht_root_provider = backend.blockchain(); let finalized_hash = client.info().finalized_hash; let finalized_number = client.info().finalized_number; @@ -67,7 +46,7 @@ pub fn build_light_sync_state( let mut number = NumberFor::::one(); while number <= finalized_number.saturating_sub(cht_size_x_2) { - match storage.header_cht_root(cht::size(), number)? { + match cht_root_provider.header_cht_root(cht::size(), number)? { Some(cht_root) => chts.push(cht_root), None => log::error!("No CHT found for block {}", number), } diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index 515d7d15326..f38aef008e1 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -42,7 +42,7 @@ use sc_executor::{NativeExecutor, WasmExecutionMethod, RuntimeVersion, NativeVer use sp_core::{H256, NativeOrEncoded, testing::TaskExecutor}; use sc_client_api::{ blockchain::Info, backend::NewBlockState, Backend as ClientBackend, ProofProvider, - in_mem::{Backend as InMemBackend, Blockchain as InMemoryBlockchain}, ChtRootStorage, + in_mem::{Backend as InMemBackend, Blockchain as InMemoryBlockchain}, ProvideChtRoots, AuxStore, Storage, CallExecutor, cht, ExecutionStrategy, StorageProof, BlockImportOperation, RemoteCallRequest, StorageProvider, ChangesProof, RemoteBodyRequest, RemoteReadRequest, RemoteChangesRequest, FetchChecker, RemoteReadChildRequest, RemoteHeaderRequest, BlockBackend, @@ -173,7 +173,7 @@ impl Storage for DummyStorage { } } -impl ChtRootStorage for DummyStorage { +impl ProvideChtRoots for DummyStorage { fn header_cht_root(&self, _cht_size: u64, _block: u64) -> ClientResult> { Err(ClientError::Backend("Test error".into())) } -- GitLab From bf843b8fca4818bc314563aa6ece58ca3bb57da8 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Mon, 24 Aug 2020 15:24:00 +0200 Subject: [PATCH 126/132] Enable verification logic when executing benchmarks (#6929) * Add `--verify` flag to benchmark execution * make it so `--verify` can be used for getting the actual benchmarks * undo manual testing * oops * use benchmark config struct * verify is default on, docs update * remove clone * improve formatting * fix test * bump impl for ci --- bin/node/runtime/src/lib.rs | 10 +- frame/benchmarking/src/lib.rs | 179 ++++++++++---------- frame/benchmarking/src/tests.rs | 23 ++- frame/benchmarking/src/utils.rs | 43 +++-- frame/staking/src/benchmarking.rs | 3 +- utils/frame/benchmarking-cli/src/command.rs | 1 + utils/frame/benchmarking-cli/src/lib.rs | 4 + 7 files changed, 150 insertions(+), 113 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index e7842e5c4ba..17c02eca17b 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1134,13 +1134,7 @@ impl_runtime_apis! 
{ #[cfg(feature = "runtime-benchmarks")] impl frame_benchmarking::Benchmark for Runtime { fn dispatch_benchmark( - pallet: Vec, - benchmark: Vec, - lowest_range_values: Vec, - highest_range_values: Vec, - steps: Vec, - repeat: u32, - extra: bool, + config: frame_benchmarking::BenchmarkConfig ) -> Result, sp_runtime::RuntimeString> { use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey}; // Trying to add benchmarks directly to the Session Pallet caused cyclic dependency issues. @@ -1170,7 +1164,7 @@ impl_runtime_apis! { ]; let mut batches = Vec::::new(); - let params = (&pallet, &benchmark, &lowest_range_values, &highest_range_values, &steps, repeat, &whitelist, extra); + let params = (&config, &whitelist); add_benchmark!(params, batches, pallet_babe, Babe); add_benchmark!(params, batches, pallet_balances, Balances); diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index cebdcbcfecd..03d60dbec58 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -648,30 +648,11 @@ macro_rules! benchmark_backend { ] } - fn instance(&self, components: &[($crate::BenchmarkParameter, u32)]) - -> Result Result<(), &'static str>>, &'static str> - { - $( - let $common = $common_from; - )* - $( - // Prepare instance - let $param = components.iter() - .find(|&c| c.0 == $crate::BenchmarkParameter::$param) - .unwrap().1; - )* - $( - let $pre_id : $pre_ty = $pre_ex; - )* - $( $param_instancer ; )* - $( $post )* - - Ok(Box::new(move || -> Result<(), &'static str> { $eval; Ok(()) })) - } - - fn verify(&self, components: &[($crate::BenchmarkParameter, u32)]) - -> Result Result<(), &'static str>>, &'static str> - { + fn instance( + &self, + components: &[($crate::BenchmarkParameter, u32)], + verify: bool + ) -> Result Result<(), &'static str>>, &'static str> { $( let $common = $common_from; )* @@ -687,7 +668,13 @@ macro_rules! benchmark_backend { $( $param_instancer ; )* $( $post )* - Ok(Box::new(move || -> Result<(), &'static str> { $eval; $postcode; Ok(()) })) + Ok(Box::new(move || -> Result<(), &'static str> { + $eval; + if verify { + $postcode; + } + Ok(()) + })) } } }; @@ -736,26 +723,16 @@ macro_rules! selected_benchmark { } } - fn instance(&self, components: &[($crate::BenchmarkParameter, u32)]) - -> Result Result<(), &'static str>>, &'static str> - { - match self { - $( - Self::$bench => < - $bench as $crate::BenchmarkingSetup - >::instance(&$bench, components), - )* - } - } - - fn verify(&self, components: &[($crate::BenchmarkParameter, u32)]) - -> Result Result<(), &'static str>>, &'static str> - { + fn instance( + &self, + components: &[($crate::BenchmarkParameter, u32)], + verify: bool + ) -> Result Result<(), &'static str>>, &'static str> { match self { $( Self::$bench => < $bench as $crate::BenchmarkingSetup - >::verify(&$bench, components), + >::instance(&$bench, components, verify), )* } } @@ -791,7 +768,8 @@ macro_rules! impl_benchmark { highest_range_values: &[u32], steps: &[u32], repeat: u32, - whitelist: &[$crate::TrackedStorageKey] + whitelist: &[$crate::TrackedStorageKey], + verify: bool, ) -> Result, &'static str> { // Map the input to the selected benchmark. let extrinsic = sp_std::str::from_utf8(extrinsic) @@ -826,6 +804,7 @@ macro_rules! impl_benchmark { repeat: u32, c: Vec<($crate::BenchmarkParameter, u32)>, results: &mut Vec<$crate::BenchmarkResults>, + verify: bool, | -> Result<(), &'static str> { // Run the benchmark `repeat` times. for _ in 0..repeat { @@ -833,7 +812,7 @@ macro_rules! 
impl_benchmark { // benchmark. let closure_to_benchmark = < SelectedBenchmark as $crate::BenchmarkingSetup - >::instance(&selected_benchmark, &c)?; + >::instance(&selected_benchmark, &c, verify)?; // Set the block number to at least 1 so events are deposited. if $crate::Zero::is_zero(&frame_system::Module::::block_number()) { @@ -847,43 +826,49 @@ macro_rules! impl_benchmark { // Reset the read/write counter so we don't count operations in the setup process. $crate::benchmarking::reset_read_write_count(); - // Time the extrinsic logic. - frame_support::debug::trace!( - target: "benchmark", - "Start Benchmark: {:?}", c - ); + if verify { + closure_to_benchmark()?; + } else { + // Time the extrinsic logic. + frame_support::debug::trace!( + target: "benchmark", + "Start Benchmark: {:?}", c + ); - let start_extrinsic = $crate::benchmarking::current_time(); - closure_to_benchmark()?; - let finish_extrinsic = $crate::benchmarking::current_time(); - let elapsed_extrinsic = finish_extrinsic - start_extrinsic; - // Commit the changes to get proper write count - $crate::benchmarking::commit_db(); - frame_support::debug::trace!( - target: "benchmark", - "End Benchmark: {} ns", elapsed_extrinsic - ); - let read_write_count = $crate::benchmarking::read_write_count(); - frame_support::debug::trace!( - target: "benchmark", - "Read/Write Count {:?}", read_write_count - ); + let start_extrinsic = $crate::benchmarking::current_time(); - // Time the storage root recalculation. - let start_storage_root = $crate::benchmarking::current_time(); - $crate::storage_root(); - let finish_storage_root = $crate::benchmarking::current_time(); - let elapsed_storage_root = finish_storage_root - start_storage_root; + closure_to_benchmark()?; - results.push($crate::BenchmarkResults { - components: c.clone(), - extrinsic_time: elapsed_extrinsic, - storage_root_time: elapsed_storage_root, - reads: read_write_count.0, - repeat_reads: read_write_count.1, - writes: read_write_count.2, - repeat_writes: read_write_count.3, - }); + let finish_extrinsic = $crate::benchmarking::current_time(); + let elapsed_extrinsic = finish_extrinsic - start_extrinsic; + // Commit the changes to get proper write count + $crate::benchmarking::commit_db(); + frame_support::debug::trace!( + target: "benchmark", + "End Benchmark: {} ns", elapsed_extrinsic + ); + let read_write_count = $crate::benchmarking::read_write_count(); + frame_support::debug::trace!( + target: "benchmark", + "Read/Write Count {:?}", read_write_count + ); + + // Time the storage root recalculation. + let start_storage_root = $crate::benchmarking::current_time(); + $crate::storage_root(); + let finish_storage_root = $crate::benchmarking::current_time(); + let elapsed_storage_root = finish_storage_root - start_storage_root; + + results.push($crate::BenchmarkResults { + components: c.clone(), + extrinsic_time: elapsed_extrinsic, + storage_root_time: elapsed_storage_root, + reads: read_write_count.0, + repeat_reads: read_write_count.1, + writes: read_write_count.2, + repeat_writes: read_write_count.3, + }); + } // Wipe the DB back to the genesis state. $crate::benchmarking::wipe_db(); @@ -893,7 +878,11 @@ macro_rules! impl_benchmark { }; if components.is_empty() { - repeat_benchmark(repeat, Default::default(), &mut results)?; + if verify { + // If `--verify` is used, run the benchmark once to verify it would complete. 
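
The control flow inside the macro is easy to miss, so here is the same idea as a standalone sketch (not the generated code itself): the closure always executes the benchmark body and only runs the post-condition block when `verify` is set, and the surrounding harness performs one un-timed verification pass before the timed repeats.

```rust
/// Sketch of the closure shape produced by the reworked `instance` function.
fn make_benchmark(verify: bool) -> impl FnOnce() -> Result<(), &'static str> {
    move || {
        // ... extrinsic logic under measurement would run here ...
        if verify {
            // ... post-condition checks (the old `verify` body) would run here ...
        }
        Ok(())
    }
}

fn main() {
    // One throw-away run with verification enabled...
    make_benchmark(true)().expect("verification pass");
    // ...then the timed repeats skip it so the checks do not skew measurements.
    for _ in 0..20 {
        make_benchmark(false)().expect("timed pass");
    }
}
```
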
+ repeat_benchmark(1, Default::default(), &mut Vec::new(), true)?; + } + repeat_benchmark(repeat, Default::default(), &mut results, false)?; } else { // Select the component we will be benchmarking. Each component will be benchmarked. for (idx, (name, low, high)) in components.iter().enumerate() { @@ -929,7 +918,11 @@ macro_rules! impl_benchmark { ) .collect(); - repeat_benchmark(repeat, c, &mut results)?; + if verify { + // If `--verify` is used, run the benchmark once to verify it would complete. + repeat_benchmark(1, Default::default(), &mut Vec::new(), true)?; + } + repeat_benchmark(repeat, c, &mut results, false)?; } } } @@ -962,17 +955,17 @@ macro_rules! impl_benchmark_test { let execute_benchmark = | c: Vec<($crate::BenchmarkParameter, u32)> | -> Result<(), &'static str> { - // Set up the verification state + // Set up the benchmark, return execution + verification function. let closure_to_verify = < SelectedBenchmark as $crate::BenchmarkingSetup - >::verify(&selected_benchmark, &c)?; + >::instance(&selected_benchmark, &c, true)?; // Set the block number to at least 1 so events are deposited. if $crate::Zero::is_zero(&frame_system::Module::::block_number()) { frame_system::Module::::set_block_number(1.into()); } - // Run verification + // Run execution + verification closure_to_verify()?; // Reset the state @@ -1015,7 +1008,7 @@ macro_rules! impl_benchmark_test { /// First create an object that holds in the input parameters for the benchmark: /// /// ```ignore -/// let params = (&pallet, &benchmark, &lowest_range_values, &highest_range_values, &steps, repeat, &whitelist); +/// let params = (&config, &whitelist); /// ``` /// /// The `whitelist` is a parameter you pass to control the DB read/write tracking. @@ -1059,18 +1052,29 @@ macro_rules! impl_benchmark_test { macro_rules! add_benchmark { ( $params:ident, $batches:ident, $name:ident, $( $location:tt )* ) => ( let name_string = stringify!($name).as_bytes(); - let (pallet, benchmark, lowest_range_values, highest_range_values, steps, repeat, whitelist, extra) = $params; + let (config, whitelist) = $params; + let $crate::BenchmarkConfig { + pallet, + benchmark, + lowest_range_values, + highest_range_values, + steps, + repeat, + verify, + extra, + } = config; if &pallet[..] == &name_string[..] || &pallet[..] == &b"*"[..] { if &pallet[..] == &b"*"[..] || &benchmark[..] == &b"*"[..] { - for benchmark in $( $location )*::benchmarks(extra).into_iter() { + for benchmark in $( $location )*::benchmarks(*extra).into_iter() { $batches.push($crate::BenchmarkBatch { results: $( $location )*::run_benchmark( benchmark, &lowest_range_values[..], &highest_range_values[..], &steps[..], - repeat, + *repeat, whitelist, + *verify, )?, pallet: name_string.to_vec(), benchmark: benchmark.to_vec(), @@ -1083,8 +1087,9 @@ macro_rules! 
add_benchmark { &lowest_range_values[..], &highest_range_values[..], &steps[..], - repeat, + *repeat, whitelist, + *verify, )?, pallet: name_string.to_vec(), benchmark: benchmark.clone(), diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index 127645d4305..94f35741007 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ -176,10 +176,11 @@ fn benchmarks_macro_works() { let closure = >::instance( &selected, &[(BenchmarkParameter::b, 1)], + true, ).expect("failed to create closure"); new_test_ext().execute_with(|| { - assert_eq!(closure(), Ok(())); + assert_ok!(closure()); }); } @@ -193,6 +194,7 @@ fn benchmarks_macro_rename_works() { let closure = >::instance( &selected, &[(BenchmarkParameter::b, 1)], + true, ).expect("failed to create closure"); new_test_ext().execute_with(|| { @@ -210,9 +212,10 @@ fn benchmarks_macro_works_for_non_dispatchable() { let closure = >::instance( &selected, &[(BenchmarkParameter::x, 1)], + true, ).expect("failed to create closure"); - assert_eq!(closure(), Ok(())); + assert_ok!(closure()); } #[test] @@ -220,14 +223,28 @@ fn benchmarks_macro_verify_works() { // Check postcondition for benchmark `set_value` is valid. let selected = SelectedBenchmark::set_value; - let closure = >::verify( + let closure = >::instance( &selected, &[(BenchmarkParameter::b, 1)], + true, ).expect("failed to create closure"); new_test_ext().execute_with(|| { assert_ok!(closure()); }); + + // Check postcondition for benchmark `bad_verify` is invalid. + let selected = SelectedBenchmark::bad_verify; + + let closure = >::instance( + &selected, + &[(BenchmarkParameter::x, 10000)], + true, + ).expect("failed to create closure"); + + new_test_ext().execute_with(|| { + assert_err!(closure(), "You forgot to sort!"); + }); } #[test] diff --git a/frame/benchmarking/src/utils.rs b/frame/benchmarking/src/utils.rs index 8c25f035802..347334e24d5 100644 --- a/frame/benchmarking/src/utils.rs +++ b/frame/benchmarking/src/utils.rs @@ -63,19 +63,32 @@ pub struct BenchmarkResults { pub repeat_writes: u32, } +/// Configuration used to setup and run runtime benchmarks. +#[derive(Encode, Decode, Default, Clone, PartialEq, Debug)] +pub struct BenchmarkConfig { + /// The encoded name of the pallet to benchmark. + pub pallet: Vec, + /// The encoded name of the benchmark/extrinsic to run. + pub benchmark: Vec, + /// An optional manual override to the lowest values used in the `steps` range. + pub lowest_range_values: Vec, + /// An optional manual override to the highest values used in the `steps` range. + pub highest_range_values: Vec, + /// The number of samples to take across the range of values for components. + pub steps: Vec, + /// The number of times to repeat a benchmark. + pub repeat: u32, + /// Enable an extra benchmark iteration which runs the verification logic for a benchmark. + pub verify: bool, + /// Enable benchmarking of "extra" extrinsics, i.e. those that are not directly used in a pallet. + pub extra: bool, +} + sp_api::decl_runtime_apis! { /// Runtime api for benchmarking a FRAME runtime. pub trait Benchmark { /// Dispatch the given benchmark. 
- fn dispatch_benchmark( - pallet: Vec, - benchmark: Vec, - lowest_range_values: Vec, - highest_range_values: Vec, - steps: Vec, - repeat: u32, - extra: bool, - ) -> Result, RuntimeString>; + fn dispatch_benchmark(config: BenchmarkConfig) -> Result, RuntimeString>; } } @@ -175,7 +188,8 @@ pub trait Benchmarking { highest_range_values: &[u32], steps: &[u32], repeat: u32, - whitelist: &[TrackedStorageKey] + whitelist: &[TrackedStorageKey], + verify: bool, ) -> Result, &'static str>; } @@ -188,10 +202,11 @@ pub trait BenchmarkingSetup { fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)>; /// Set up the storage, and prepare a closure to run the benchmark. - fn instance(&self, components: &[(BenchmarkParameter, u32)]) -> Result Result<(), &'static str>>, &'static str>; - - /// Set up the storage, and prepare a closure to test and verify the benchmark - fn verify(&self, components: &[(BenchmarkParameter, u32)]) -> Result Result<(), &'static str>>, &'static str>; + fn instance( + &self, + components: &[(BenchmarkParameter, u32)], + verify: bool + ) -> Result Result<(), &'static str>>, &'static str>; } /// Grab an account, seeded by a name and index. diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index 77eecb2ef04..156b2f81c84 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -715,7 +715,8 @@ mod tests { let closure_to_benchmark = >::instance( &selected_benchmark, - &c + &c, + true ).unwrap(); assert_ok!(closure_to_benchmark()); diff --git a/utils/frame/benchmarking-cli/src/command.rs b/utils/frame/benchmarking-cli/src/command.rs index 688e393bd60..b0791f88ce5 100644 --- a/utils/frame/benchmarking-cli/src/command.rs +++ b/utils/frame/benchmarking-cli/src/command.rs @@ -75,6 +75,7 @@ impl BenchmarkCmd { self.highest_range_values.clone(), self.steps.clone(), self.repeat, + !self.no_verify, self.extra, ).encode(), extensions, diff --git a/utils/frame/benchmarking-cli/src/lib.rs b/utils/frame/benchmarking-cli/src/lib.rs index c2a228fc86a..8cbb3c78687 100644 --- a/utils/frame/benchmarking-cli/src/lib.rs +++ b/utils/frame/benchmarking-cli/src/lib.rs @@ -72,6 +72,10 @@ pub struct BenchmarkCmd { #[structopt(long)] pub heap_pages: Option, + /// Disable verification logic when running benchmarks. + #[structopt(long)] + pub no_verify: bool, + /// Display and run extra benchmarks that would otherwise not be needed for weight construction. 
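
Taken together, the CLI flags and the new `BenchmarkConfig` struct above replace the long positional parameter list: the CLI gathers the user's choices and the runtime receives them as a single value through the `Benchmark` runtime API. A hypothetical, hand-built config for benchmarking every extrinsic of `pallet_balances` with verification enabled might look like this; the concrete values are illustrative only:

```rust
use frame_benchmarking::BenchmarkConfig;

fn example_config() -> BenchmarkConfig {
    BenchmarkConfig {
        // Names are matched against the encoded pallet/benchmark names;
        // `*` acts as a wildcard inside `add_benchmark!`.
        pallet: b"pallet_balances".to_vec(),
        benchmark: b"*".to_vec(),
        // Empty overrides keep the default component ranges.
        lowest_range_values: vec![],
        highest_range_values: vec![],
        steps: vec![10],
        repeat: 20,
        // `--no-verify` on the CLI flips this to `false`.
        verify: true,
        extra: false,
    }
}
```
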
#[structopt(long)] pub extra: bool, -- GitLab From 243873a76fedc1f63004e8097b38f77470d2eff7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Mon, 24 Aug 2020 14:29:17 +0100 Subject: [PATCH 127/132] grandpa: always create and send justification if there are any subscribers (#6935) * grandpa: use bytes type for justification rpc notification * grandpa: always create justification if there are rpc subscribers * grandpa: wording * grandpa: replace notify_justification macro with function * grandpa: prefer Option<&T> over &Option --- client/finality-grandpa/rpc/Cargo.toml | 3 +- client/finality-grandpa/rpc/src/lib.rs | 4 +- .../finality-grandpa/rpc/src/notification.rs | 4 +- client/finality-grandpa/src/environment.rs | 68 ++++++++++++------- client/finality-grandpa/src/import.rs | 3 +- client/finality-grandpa/src/notification.rs | 23 +++++-- client/finality-grandpa/src/observer.rs | 9 ++- 7 files changed, 72 insertions(+), 42 deletions(-) diff --git a/client/finality-grandpa/rpc/Cargo.toml b/client/finality-grandpa/rpc/Cargo.toml index 28197405c8d..6f3014644ea 100644 --- a/client/finality-grandpa/rpc/Cargo.toml +++ b/client/finality-grandpa/rpc/Cargo.toml @@ -8,9 +8,10 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] +sc-finality-grandpa = { version = "0.8.0-rc6", path = "../" } sc-rpc = { version = "2.0.0-rc6", path = "../../rpc" } +sp-core = { version = "2.0.0-rc6", path = "../../../primitives/core" } sp-runtime = { version = "2.0.0-rc6", path = "../../../primitives/runtime" } -sc-finality-grandpa = { version = "0.8.0-rc6", path = "../" } finality-grandpa = { version = "0.12.3", features = ["derive-codec"] } jsonrpc-core = "14.2.0" jsonrpc-core-client = "14.2.0" diff --git a/client/finality-grandpa/rpc/src/lib.rs b/client/finality-grandpa/rpc/src/lib.rs index c00c95c5f77..5606da42d59 100644 --- a/client/finality-grandpa/rpc/src/lib.rs +++ b/client/finality-grandpa/rpc/src/lib.rs @@ -406,7 +406,7 @@ mod tests { // Notify with a header and justification let justification = create_justification(); - let _ = justification_sender.notify(justification.clone()).unwrap(); + justification_sender.notify(|| Ok(justification.clone())).unwrap(); // Inspect what we received let recv = receiver.take(1).wait().flatten().collect::>(); @@ -418,7 +418,7 @@ mod tests { let recv_sub_id: String = serde_json::from_value(json_map["subscription"].take()).unwrap(); - let recv_justification: Vec = + let recv_justification: sp_core::Bytes = serde_json::from_value(json_map["result"].take()).unwrap(); let recv_justification: GrandpaJustification = Decode::decode(&mut &recv_justification[..]).unwrap(); diff --git a/client/finality-grandpa/rpc/src/notification.rs b/client/finality-grandpa/rpc/src/notification.rs index 831f4681549..fd03a622b21 100644 --- a/client/finality-grandpa/rpc/src/notification.rs +++ b/client/finality-grandpa/rpc/src/notification.rs @@ -23,10 +23,10 @@ use sc_finality_grandpa::GrandpaJustification; /// An encoded justification proving that the given header has been finalized #[derive(Clone, Serialize, Deserialize)] -pub struct JustificationNotification(Vec); +pub struct JustificationNotification(sp_core::Bytes); impl From> for JustificationNotification { fn from(notification: GrandpaJustification) -> Self { - JustificationNotification(notification.encode()) + JustificationNotification(notification.encode().into()) } } diff --git a/client/finality-grandpa/src/environment.rs 
b/client/finality-grandpa/src/environment.rs index a7a29fe0e8a..d8623727705 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -645,7 +645,8 @@ pub(crate) fn ancestry( client: &Arc, base: Block::Hash, block: Block::Hash, -) -> Result, GrandpaError> where +) -> Result, GrandpaError> +where Client: HeaderMetadata, { if base == block { return Err(GrandpaError::NotDescendent) } @@ -671,15 +672,14 @@ pub(crate) fn ancestry( Ok(tree_route.retracted().iter().skip(1).map(|e| e.hash).collect()) } -impl - voter::Environment> -for Environment +impl voter::Environment> + for Environment where Block: 'static, B: Backend, C: crate::ClientForGrandpa + 'static, C::Api: GrandpaApi, - N: NetworkT + 'static + Send + Sync, + N: NetworkT + 'static + Send + Sync, SC: SelectChain + 'static, VR: VotingRule, NumberFor: BlockNumberOps, @@ -1023,7 +1023,7 @@ where number, (round, commit).into(), false, - &self.justification_sender, + self.justification_sender.as_ref(), ) } @@ -1088,9 +1088,10 @@ pub(crate) fn finalize_block( number: NumberFor, justification_or_commit: JustificationOrCommit, initial_sync: bool, - justification_sender: &Option>, -) -> Result<(), CommandOrError>> where - Block: BlockT, + justification_sender: Option<&GrandpaJustificationSender>, +) -> Result<(), CommandOrError>> +where + Block: BlockT, BE: Backend, Client: crate::ClientForGrandpa, { @@ -1154,6 +1155,18 @@ pub(crate) fn finalize_block( } } + // send a justification notification if a sender exists and in case of error log it. + fn notify_justification( + justification_sender: Option<&GrandpaJustificationSender>, + justification: impl FnOnce() -> Result, Error>, + ) { + if let Some(sender) = justification_sender { + if let Err(err) = sender.notify(justification) { + warn!(target: "afg", "Error creating justification for subscriber: {:?}", err); + } + } + } + // NOTE: this code assumes that honest voters will never vote past a // transition block, thus we don't have to worry about the case where // we have a transition with `effective_block = N`, but we finalize @@ -1161,7 +1174,10 @@ pub(crate) fn finalize_block( // justifications for transition blocks which will be requested by // syncing clients. let justification = match justification_or_commit { - JustificationOrCommit::Justification(justification) => Some(justification), + JustificationOrCommit::Justification(justification) => { + notify_justification(justification_sender, || Ok(justification.clone())); + Some(justification.encode()) + }, JustificationOrCommit::Commit((round_number, commit)) => { let mut justification_required = // justification is always required when block that enacts new authorities @@ -1181,29 +1197,31 @@ pub(crate) fn finalize_block( } } + // NOTE: the code below is a bit more verbose because we + // really want to avoid creating a justification if it isn't + // needed (e.g. if there's no subscribers), and also to avoid + // creating it twice. depending on the vote tree for the round, + // creating a justification might require multiple fetches of + // headers from the database. 
+ let justification = || GrandpaJustification::from_commit( + &client, + round_number, + commit, + ); + if justification_required { - let justification = GrandpaJustification::from_commit( - &client, - round_number, - commit, - )?; + let justification = justification()?; + notify_justification(justification_sender, || Ok(justification.clone())); - Some(justification) + Some(justification.encode()) } else { + notify_justification(justification_sender, justification); + None } }, }; - // Notify any registered listeners in case we have a justification - if let Some(sender) = justification_sender { - if let Some(ref justification) = justification { - let _ = sender.notify(justification.clone()); - } - } - - let justification = justification.map(|j| j.encode()); - debug!(target: "afg", "Finalizing blocks up to ({:?}, {})", number, hash); // ideally some handle to a synchronization oracle would be used diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index d5b0a650096..04df95a3187 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -619,7 +619,6 @@ where Client: crate::ClientForGrandpa, NumberFor: finality_grandpa::BlockNumberOps, { - /// Import a block justification and finalize the block. /// /// If `enacts_change` is set to true, then finalizing this block *must* @@ -653,7 +652,7 @@ where number, justification.into(), initial_sync, - &Some(self.justification_sender.clone()), + Some(&self.justification_sender), ); match result { diff --git a/client/finality-grandpa/src/notification.rs b/client/finality-grandpa/src/notification.rs index 16f705f0eeb..84155830519 100644 --- a/client/finality-grandpa/src/notification.rs +++ b/client/finality-grandpa/src/notification.rs @@ -20,9 +20,10 @@ use std::sync::Arc; use parking_lot::Mutex; use sp_runtime::traits::Block as BlockT; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use crate::justification::GrandpaJustification; +use crate::Error; // Stream of justifications returned when subscribing. type JustificationStream = TracingUnboundedReceiver>; @@ -54,10 +55,22 @@ impl GrandpaJustificationSender { /// Send out a notification to all subscribers that a new justification /// is available for a block. 
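
The call site above hands `notify` a closure instead of a ready-made justification, so the comparatively expensive `GrandpaJustification::from_commit` work is skipped entirely when nobody is subscribed. A simplified, std-only model of that pattern follows; it is not the actual `GrandpaJustificationSender` API, just the lazy-construction idea:

```rust
use std::sync::mpsc::Sender;

/// Send `build()` to all live subscribers, invoking `build` only if any remain.
fn notify<T: Clone, E>(
    subscribers: &mut Vec<Sender<T>>,
    build: impl FnOnce() -> Result<T, E>,
) -> Result<(), E> {
    if !subscribers.is_empty() {
        let value = build()?;
        subscribers.retain(|tx| tx.send(value.clone()).is_ok());
    }
    Ok(())
}

fn main() {
    let (tx, rx) = std::sync::mpsc::channel::<u64>();
    let mut subs = vec![tx];
    // The closure only runs because there is a live subscriber.
    notify(&mut subs, || Ok::<_, ()>(7)).unwrap();
    assert_eq!(rx.recv().unwrap(), 7);

    // With no subscribers, the closure is never evaluated.
    notify::<u64, ()>(&mut Vec::new(), || unreachable!()).unwrap();
}
```
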
- pub fn notify(&self, notification: GrandpaJustification) -> Result<(), ()> { - self.subscribers.lock().retain(|n| { - !n.is_closed() && n.unbounded_send(notification.clone()).is_ok() - }); + pub fn notify( + &self, + justification: impl FnOnce() -> Result, Error>, + ) -> Result<(), Error> { + let mut subscribers = self.subscribers.lock(); + + // do an initial prune on closed subscriptions + subscribers.retain(|n| !n.is_closed()); + + // if there's no subscribers we avoid creating + // the justification which is a costly operation + if !subscribers.is_empty() { + let justification = justification()?; + subscribers.retain(|n| n.unbounded_send(justification.clone()).is_ok()); + } + Ok(()) } } diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index 8fb536a3697..6a9955aa86d 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -74,11 +74,10 @@ fn grandpa_observer( last_finalized_number: NumberFor, commits: S, note_round: F, -) -> impl Future>>> where +) -> impl Future>>> +where NumberFor: BlockNumberOps, - S: Stream< - Item = Result, CommandOrError>>, - >, + S: Stream, CommandOrError>>>, F: Fn(u64), BE: Backend, Client: crate::ClientForGrandpa, @@ -130,7 +129,7 @@ fn grandpa_observer( finalized_number, (round, commit).into(), false, - &justification_sender, + justification_sender.as_ref(), ) { Ok(_) => {}, Err(e) => return future::err(e), -- GitLab From 14cfc57d784956e320ee8fa6f81cc9e0e7ff66fb Mon Sep 17 00:00:00 2001 From: Max Inden Date: Mon, 24 Aug 2020 15:37:07 +0200 Subject: [PATCH 128/132] .maintain/monitoring/alerting-rules: Add fd alert (#6946) Alert on high file descriptor allocation. --- .../monitoring/alerting-rules/alerting-rules.yaml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.maintain/monitoring/alerting-rules/alerting-rules.yaml b/.maintain/monitoring/alerting-rules/alerting-rules.yaml index 12f46e17ad8..7f36fedb4ba 100644 --- a/.maintain/monitoring/alerting-rules/alerting-rules.yaml +++ b/.maintain/monitoring/alerting-rules/alerting-rules.yaml @@ -109,6 +109,19 @@ groups: message: 'The node {{ $labels.instance }} has less than 3 peers for more than 15 minutes' + ############################################################################## + # System + ############################################################################## + + - alert: HighNumberOfFileDescriptors + expr: 'node_filefd_allocated{domain=~"kusama|polkadot"} > 10000' + for: 3m + labels: + severity: warning + annotations: + message: 'The node {{ $labels.instance }} has more than 10_000 file + descriptors allocated for more than 3 minutes' + ############################################################################## # Others ############################################################################## -- GitLab From 719dbdf33248b7a0a7c5bdeca6660d266b9ecc4a Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Mon, 24 Aug 2020 15:40:16 +0200 Subject: [PATCH 129/132] Fix benchmark read/write key tracker for keys in child storages. (#6905) * WIP: read child trie and write child trie * add test * refactor a bit + improve log * better naming * trigger CI * Revert "trigger CI" This reverts commit d0aadaeb6a12fc6c39f01b3c1b5725d19f085865. 
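
The fix below threads the child trie's storage key through the key tracker, so identical keys in different tries are counted independently. A simplified, std-only model of that bookkeeping; the real `BenchmarkingState` additionally tracks writes, whitelisted keys and repeat counts:

```rust
use std::collections::{HashMap, HashSet};

/// Reads are de-duplicated per key *per trie*: the same key in the main trie
/// and in a child trie counts as two distinct first reads.
#[derive(Default)]
struct ReadTracker {
    main: HashSet<Vec<u8>>,
    // Child tries are identified by their storage key (`ChildInfo::storage_key()`).
    child: HashMap<Vec<u8>, HashSet<Vec<u8>>>,
    reads: u32,
    repeat_reads: u32,
}

impl ReadTracker {
    fn add_read(&mut self, child_trie: Option<&[u8]>, key: &[u8]) {
        let seen = match child_trie {
            Some(trie) => self.child.entry(trie.to_vec()).or_default(),
            None => &mut self.main,
        };
        if seen.insert(key.to_vec()) {
            self.reads += 1;
        } else {
            self.repeat_reads += 1;
        }
    }
}

fn main() {
    let mut tracker = ReadTracker::default();
    tracker.add_read(None, b"foo");
    tracker.add_read(Some(&b"child1"[..]), b"foo"); // same key, different trie: new read
    tracker.add_read(None, b"foo");                 // repeat read in the main trie
    assert_eq!((tracker.reads, tracker.repeat_reads), (2, 1));
}
```
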
--- client/db/src/bench.rs | 150 +++++++++++++++++++----- primitives/state-machine/src/backend.rs | 10 +- primitives/state-machine/src/ext.rs | 1 + 3 files changed, 128 insertions(+), 33 deletions(-) diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 93b8048529f..1c9be87faa0 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -30,7 +30,9 @@ use sp_core::{ }; use sp_runtime::traits::{Block as BlockT, HashFor}; use sp_runtime::Storage; -use sp_state_machine::{DBValue, backend::Backend as StateBackend, StorageCollection}; +use sp_state_machine::{ + DBValue, backend::Backend as StateBackend, StorageCollection, ChildStorageCollection +}; use kvdb::{KeyValueDB, DBTransaction}; use crate::storage_cache::{CachingState, SharedCache, new_shared_cache}; @@ -96,7 +98,11 @@ pub struct BenchmarkingState { genesis: HashMap, (Vec, i32)>, record: Cell>>, shared_cache: SharedCache, // shared cache is always empty - key_tracker: RefCell, KeyTracker>>, + /// Key tracker for keys in the main trie. + main_key_tracker: RefCell, KeyTracker>>, + /// Key tracker for keys in a child trie. + /// Child trie are identified by their storage key (i.e. `ChildInfo::storage_key()`) + child_key_tracker: RefCell, HashMap, KeyTracker>>>, read_write_tracker: RefCell, whitelist: RefCell>, } @@ -116,7 +122,8 @@ impl BenchmarkingState { genesis_root: Default::default(), record: Default::default(), shared_cache: new_shared_cache(0, (1, 10)), - key_tracker: Default::default(), + main_key_tracker: Default::default(), + child_key_tracker: Default::default(), read_write_tracker: Default::default(), whitelist: Default::default(), }; @@ -134,7 +141,7 @@ impl BenchmarkingState { ); state.genesis = transaction.clone().drain(); state.genesis_root = root.clone(); - state.commit(root, transaction, Vec::new())?; + state.commit(root, transaction, Vec::new(), Vec::new())?; state.record.take(); Ok(state) } @@ -156,7 +163,7 @@ impl BenchmarkingState { } fn add_whitelist_to_tracker(&self) { - let mut key_tracker = self.key_tracker.borrow_mut(); + let mut main_key_tracker = self.main_key_tracker.borrow_mut(); let whitelist = self.whitelist.borrow(); @@ -165,25 +172,29 @@ impl BenchmarkingState { has_been_read: key.has_been_read, has_been_written: key.has_been_written, }; - key_tracker.insert(key.key.clone(), whitelisted); + main_key_tracker.insert(key.key.clone(), whitelisted); }); } fn wipe_tracker(&self) { - *self.key_tracker.borrow_mut() = HashMap::new(); + *self.main_key_tracker.borrow_mut() = HashMap::new(); self.add_whitelist_to_tracker(); *self.read_write_tracker.borrow_mut() = Default::default(); } - fn add_read_key(&self, key: &[u8]) { - log::trace!(target: "benchmark", "Read: {}", HexDisplay::from(&key)); - - let mut key_tracker = self.key_tracker.borrow_mut(); + // Childtrie is identified by its storage key (i.e. 
`ChildInfo::storage_key`) + fn add_read_key(&self, childtrie: Option<&[u8]>, key: &[u8]) { let mut read_write_tracker = self.read_write_tracker.borrow_mut(); + let mut child_key_tracker = self.child_key_tracker.borrow_mut(); + let mut main_key_tracker = self.main_key_tracker.borrow_mut(); - let maybe_tracker = key_tracker.get(key); + let key_tracker = if let Some(childtrie) = childtrie { + child_key_tracker.entry(childtrie.to_vec()).or_insert_with(|| HashMap::new()) + } else { + &mut main_key_tracker + }; - match maybe_tracker { + let read = match key_tracker.get(key) { None => { let has_been_read = KeyTracker { has_been_read: true, @@ -191,6 +202,7 @@ impl BenchmarkingState { }; key_tracker.insert(key.to_vec(), has_been_read); read_write_tracker.add_read(); + true }, Some(tracker) => { if !tracker.has_been_read { @@ -200,20 +212,37 @@ impl BenchmarkingState { }; key_tracker.insert(key.to_vec(), has_been_read); read_write_tracker.add_read(); + true } else { read_write_tracker.add_repeat_read(); + false } } + }; + + if read { + if let Some(childtrie) = childtrie { + log::trace!( + target: "benchmark", + "Childtrie Read: {} {}", HexDisplay::from(&childtrie), HexDisplay::from(&key) + ); + } else { + log::trace!(target: "benchmark", "Read: {}", HexDisplay::from(&key)); + } } } - fn add_write_key(&self, key: &[u8]) { - log::trace!(target: "benchmark", "Write: {}", HexDisplay::from(&key)); - - let mut key_tracker = self.key_tracker.borrow_mut(); + // Childtrie is identified by its storage key (i.e. `ChildInfo::storage_key`) + fn add_write_key(&self, childtrie: Option<&[u8]>, key: &[u8]) { let mut read_write_tracker = self.read_write_tracker.borrow_mut(); + let mut child_key_tracker = self.child_key_tracker.borrow_mut(); + let mut main_key_tracker = self.main_key_tracker.borrow_mut(); - let maybe_tracker = key_tracker.get(key); + let key_tracker = if let Some(childtrie) = childtrie { + child_key_tracker.entry(childtrie.to_vec()).or_insert_with(|| HashMap::new()) + } else { + &mut main_key_tracker + }; // If we have written to the key, we also consider that we have read from it. 
let has_been_written = KeyTracker { @@ -221,19 +250,33 @@ impl BenchmarkingState { has_been_written: true, }; - match maybe_tracker { + let write = match key_tracker.get(key) { None => { key_tracker.insert(key.to_vec(), has_been_written); read_write_tracker.add_write(); + true }, Some(tracker) => { if !tracker.has_been_written { key_tracker.insert(key.to_vec(), has_been_written); read_write_tracker.add_write(); + true } else { read_write_tracker.add_repeat_write(); + false } } + }; + + if write { + if let Some(childtrie) = childtrie { + log::trace!( + target: "benchmark", + "Childtrie Write: {} {}", HexDisplay::from(&childtrie), HexDisplay::from(&key) + ); + } else { + log::trace!(target: "benchmark", "Write: {}", HexDisplay::from(&key)); + } } } } @@ -248,12 +291,12 @@ impl StateBackend> for BenchmarkingState { type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { - self.add_read_key(key); + self.add_read_key(None, key); self.state.borrow().as_ref().ok_or_else(state_err)?.storage(key) } fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { - self.add_read_key(key); + self.add_read_key(None, key); self.state.borrow().as_ref().ok_or_else(state_err)?.storage_hash(key) } @@ -262,12 +305,12 @@ impl StateBackend> for BenchmarkingState { child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.add_read_key(key); + self.add_read_key(Some(child_info.storage_key()), key); self.state.borrow().as_ref().ok_or_else(state_err)?.child_storage(child_info, key) } fn exists_storage(&self, key: &[u8]) -> Result { - self.add_read_key(key); + self.add_read_key(None, key); self.state.borrow().as_ref().ok_or_else(state_err)?.exists_storage(key) } @@ -276,12 +319,12 @@ impl StateBackend> for BenchmarkingState { child_info: &ChildInfo, key: &[u8], ) -> Result { - self.add_read_key(key); + self.add_read_key(Some(child_info.storage_key()), key); self.state.borrow().as_ref().ok_or_else(state_err)?.exists_child_storage(child_info, key) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { - self.add_read_key(key); + self.add_read_key(None, key); self.state.borrow().as_ref().ok_or_else(state_err)?.next_storage_key(key) } @@ -290,7 +333,7 @@ impl StateBackend> for BenchmarkingState { child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.add_read_key(key); + self.add_read_key(Some(child_info.storage_key()), key); self.state.borrow().as_ref().ok_or_else(state_err)?.next_child_storage_key(child_info, key) } @@ -367,9 +410,9 @@ impl StateBackend> for BenchmarkingState { fn commit(&self, storage_root: as Hasher>::Out, mut transaction: Self::Transaction, - storage_changes: StorageCollection, - ) -> Result<(), Self::Error> - { + main_storage_changes: StorageCollection, + child_storage_changes: ChildStorageCollection, + ) -> Result<(), Self::Error> { if let Some(db) = self.db.take() { let mut db_transaction = DBTransaction::new(); let changes = transaction.drain(); @@ -390,8 +433,13 @@ impl StateBackend> for BenchmarkingState { self.db.set(Some(db)); // Track DB Writes - storage_changes.iter().for_each(|(key, _)| { - self.add_write_key(key); + main_storage_changes.iter().for_each(|(key, _)| { + self.add_write_key(None, key); + }); + child_storage_changes.iter().for_each(|(child_storage_key, storage_changes)| { + storage_changes.iter().for_each(|(key, _)| { + self.add_write_key(Some(child_storage_key), key); + }) }); } else { return Err("Trying to commit to a closed db".into()) @@ -453,3 +501,43 @@ impl 
std::fmt::Debug for BenchmarkingState { write!(f, "Bench DB") } } + +#[cfg(test)] +mod test { + use crate::bench::BenchmarkingState; + use sp_state_machine::backend::Backend as _; + + #[test] + fn read_to_main_and_child_tries() { + let bench_state = BenchmarkingState::::new(Default::default(), None) + .unwrap(); + + let child1 = sp_core::storage::ChildInfo::new_default(b"child1"); + let child2 = sp_core::storage::ChildInfo::new_default(b"child2"); + + bench_state.storage(b"foo").unwrap(); + bench_state.child_storage(&child1, b"foo").unwrap(); + bench_state.child_storage(&child2, b"foo").unwrap(); + + bench_state.storage(b"bar").unwrap(); + bench_state.child_storage(&child1, b"bar").unwrap(); + bench_state.child_storage(&child2, b"bar").unwrap(); + + bench_state.commit( + Default::default(), + Default::default(), + vec![ + ("foo".as_bytes().to_vec(), None) + ], + vec![ + ("child1".as_bytes().to_vec(), vec![("foo".as_bytes().to_vec(), None)]) + ] + ).unwrap(); + + let rw_tracker = bench_state.read_write_tracker.borrow(); + assert_eq!(rw_tracker.reads, 6); + assert_eq!(rw_tracker.repeat_reads, 0); + assert_eq!(rw_tracker.writes, 2); + assert_eq!(rw_tracker.repeat_writes, 0); + } +} diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index cfff2c6fc69..6ced5ed0e52 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -26,7 +26,7 @@ use sp_core::{ use crate::{ trie_backend::TrieBackend, trie_backend_essence::TrieBackendStorage, - UsageInfo, StorageKey, StorageValue, StorageCollection, + UsageInfo, StorageKey, StorageValue, StorageCollection, ChildStorageCollection, }; /// A state backend is used to read state data and can have changes committed @@ -215,7 +215,13 @@ pub trait Backend: std::fmt::Debug { } /// Commit given transaction to storage. - fn commit(&self, _: H::Out, _: Self::Transaction, _: StorageCollection) -> Result<(), Self::Error> { + fn commit( + &self, + _: H::Out, + _: Self::Transaction, + _: StorageCollection, + _: ChildStorageCollection, + ) -> Result<(), Self::Error> { unimplemented!() } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index e57636b300a..e36964716f8 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -594,6 +594,7 @@ where changes.transaction_storage_root, changes.transaction, changes.main_storage_changes, + changes.child_storage_changes, ).expect(EXT_NOT_ALLOWED_TO_FAIL); self.mark_dirty(); self.overlay -- GitLab From 295f874f0b8366700b7b19b2f2cafcceae009051 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Mon, 24 Aug 2020 16:17:39 +0200 Subject: [PATCH 130/132] client/authority-discovery: Limit number of addresses per authority (#6947) * client/authority-discovery: Test addresses per authority limit * client/authority-discovery: Limit number of addresses per authority --- client/authority-discovery/src/worker.rs | 4 ++ .../authority-discovery/src/worker/tests.rs | 66 +++++++++++++++++++ 2 files changed, 70 insertions(+) diff --git a/client/authority-discovery/src/worker.rs b/client/authority-discovery/src/worker.rs index 09cdedd93a1..232e59d08dd 100644 --- a/client/authority-discovery/src/worker.rs +++ b/client/authority-discovery/src/worker.rs @@ -67,6 +67,9 @@ const LIBP2P_KADEMLIA_BOOTSTRAP_TIME: Duration = Duration::from_secs(30); /// discovery module. const AUTHORITIES_PRIORITY_GROUP_NAME: &'static str = "authorities"; +/// Maximum number of addresses cached per authority. 
Additional addresses are discarded. +const MAX_ADDRESSES_PER_AUTHORITY: usize = 10; + /// Role an authority discovery module can run as. pub enum Role { /// Actual authority as well as a reference to its key store. @@ -496,6 +499,7 @@ where false // `protocol` is not a [`Protocol::P2p`], let's keep looking. })) + .take(MAX_ADDRESSES_PER_AUTHORITY) .collect(); if !remote_addresses.is_empty() { diff --git a/client/authority-discovery/src/worker/tests.rs b/client/authority-discovery/src/worker/tests.rs index 4b16b9040b8..baa6bd0fc7d 100644 --- a/client/authority-discovery/src/worker/tests.rs +++ b/client/authority-discovery/src/worker/tests.rs @@ -617,6 +617,72 @@ fn never_add_own_address_to_priority_group() { ); } +#[test] +fn limit_number_of_addresses_added_to_cache_per_authority() { + let remote_key_store = KeyStore::new(); + let remote_public = remote_key_store + .write() + .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None) + .unwrap(); + + let dht_event = { + let addresses = (0..100).map(|_| { + let peer_id = PeerId::random(); + let address: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:1/tcp/30333".parse().unwrap(); + address.with(multiaddr::Protocol::P2p( + peer_id.into(), + )).to_vec() + }).collect(); + + let mut serialized_addresses = vec![]; + schema::AuthorityAddresses { addresses } + .encode(&mut serialized_addresses) + .map_err(Error::EncodingProto) + .unwrap(); + + let signature = remote_key_store.read() + .sign_with( + key_types::AUTHORITY_DISCOVERY, + &remote_public.clone().into(), + serialized_addresses.as_slice(), + ) + .map_err(|_| Error::Signing) + .unwrap(); + + let mut signed_addresses = vec![]; + schema::SignedAuthorityAddresses { + addresses: serialized_addresses.clone(), + signature, + } + .encode(&mut signed_addresses) + .map_err(Error::EncodingProto) + .unwrap(); + + let key = hash_authority_id(&remote_public.to_raw_vec()); + let value = signed_addresses; + (key, value) + }; + + let (_dht_event_tx, dht_event_rx) = channel(1); + + let (_to_worker, from_service) = mpsc::channel(0); + let mut worker = Worker::new( + from_service, + Arc::new(TestApi { authorities: vec![remote_public.into()] }), + Arc::new(TestNetwork::default()), + vec![], + dht_event_rx.boxed(), + Role::Sentry, + None, + ); + + worker.handle_dht_value_found_event(vec![dht_event]).unwrap(); + assert_eq!( + MAX_ADDRESSES_PER_AUTHORITY, + worker.addr_cache.get_addresses_by_authority_id(&remote_public.into()).unwrap().len(), + ); +} + #[test] fn do_not_cache_addresses_without_peer_id() { let remote_key_store = KeyStore::new(); -- GitLab From a063383fed22eb08703475317b742c84a8dc4cc3 Mon Sep 17 00:00:00 2001 From: Swezey Date: Mon, 24 Aug 2020 09:22:25 -0500 Subject: [PATCH 131/132] =?UTF-8?q?=E2=9B=93=20=E2=9C=A8Add=20ShiftNrg=20N?= =?UTF-8?q?etwork=20SS58=20address=20type=20(#6942)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- primitives/core/src/crypto.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index ba0ed12568d..a8d84eb57cf 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -467,6 +467,8 @@ ss58_address_format!( (18, "darwinia", "Darwinia Chain mainnet, standard account (*25519).") StafiAccount => (20, "stafi", "Stafi mainnet, standard account (*25519).") + ShiftNrg => + (23, "shift", "ShiftNrg mainnet, standard account (*25519).") SubsocialAccount => (28, "subsocial", "Subsocial network, standard account (*25519).") PhalaAccount => -- GitLab From 
43c0b11696cb7702b26794707c29ea9f498bbfdb Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Mon, 24 Aug 2020 17:33:15 +0200 Subject: [PATCH 132/132] update tracing attribute (#6950) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index af32d89981d..65f5935a1e9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9395,9 +9395,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.9" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0693bf8d6f2bf22c690fc61a9d21ac69efdbb894a17ed596b9af0f01e64b84b" +checksum = "80e0ccfc3378da0cce270c946b676a376943f5cd16aeba64568e7939806f4ada" dependencies = [ "proc-macro2", "quote", -- GitLab
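The authority-discovery change above (PATCH 130/132) bounds how many addresses a single signed DHT record may contribute to the cache by chaining `.take(MAX_ADDRESSES_PER_AUTHORITY)` onto the address iterator. A minimal, standalone sketch of that capping pattern is shown below; it is not the worker code itself, `Multiaddr` is replaced by a plain `String`, and the `filter` merely stands in for the real peer-id validation.

```rust
/// Illustrative cap on an untrusted, remotely supplied address list,
/// mirroring the `.take(MAX_ADDRESSES_PER_AUTHORITY)` added by the patch.
const MAX_ADDRESSES_PER_AUTHORITY: usize = 10;

fn cap_addresses(remote: impl IntoIterator<Item = String>) -> Vec<String> {
    remote
        .into_iter()
        // Stand-in for the worker's real check that each address carries a
        // usable peer-id component.
        .filter(|addr| !addr.is_empty())
        // Silently discard everything beyond the cap instead of rejecting
        // the whole record.
        .take(MAX_ADDRESSES_PER_AUTHORITY)
        .collect()
}

fn main() {
    let supplied: Vec<String> =
        (0..100).map(|i| format!("/ip6/::1/tcp/{}", 30333 + i)).collect();
    assert_eq!(cap_addresses(supplied).len(), MAX_ADDRESSES_PER_AUTHORITY);
}
```

Capping rather than rejecting keeps a record with many addresses usable while still bounding the memory a single authority can consume, which is what the accompanying test (`limit_number_of_addresses_added_to_cache_per_authority`) asserts.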

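The ShiftNrg patch (PATCH 131/132) only registers prefix 23 in the `ss58_address_format!` table, but that registration is what lets tooling render account ids with a network-specific prefix. The sketch below is illustrative rather than part of the patch: it assumes sp_core's `Ss58Codec` trait (with `to_ss58check_with_version`) and the `ShiftNrg` variant that the macro generates for the new entry.

```rust
// Illustrative only: encode an account id with the newly registered
// ShiftNrg SS58 prefix (23) so wallets and explorers can attribute the
// address to that network. Assumes the sp_core `Ss58Codec` API of this era.
use sp_core::crypto::{AccountId32, Ss58AddressFormat, Ss58Codec};

fn render_for_shiftnrg(account: &AccountId32) -> String {
    account.to_ss58check_with_version(Ss58AddressFormat::ShiftNrg)
}
```

Decoding goes the other way with `from_ss58check_with_version`, which also reports which registered prefix the address was encoded with.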
> for Author Client: HeaderBackend + ProvideRuntimeApi + Send + Sync + 'static, Client::Api: SessionKeys, { - type Metadata = crate::metadata::Metadata; + type Metadata = crate::Metadata; fn insert_key( &self, diff --git a/client/rpc/src/chain/mod.rs b/client/rpc/src/chain/mod.rs index 8b6bf19d235..cb67d9ba231 100644 --- a/client/rpc/src/chain/mod.rs +++ b/client/rpc/src/chain/mod.rs @@ -106,7 +106,7 @@ trait ChainBackend: Send + Sync + 'static /// All new head subscription fn subscribe_all_heads( &self, - _metadata: crate::metadata::Metadata, + _metadata: crate::Metadata, subscriber: Subscriber, ) { subscribe_headers( @@ -123,7 +123,7 @@ trait ChainBackend: Send + Sync + 'static /// Unsubscribe from all head subscription. fn unsubscribe_all_heads( &self, - _metadata: Option, + _metadata: Option, id: SubscriptionId, ) -> RpcResult { Ok(self.subscriptions().cancel(id)) @@ -132,7 +132,7 @@ trait ChainBackend: Send + Sync + 'static /// New best head subscription fn subscribe_new_heads( &self, - _metadata: crate::metadata::Metadata, + _metadata: crate::Metadata, subscriber: Subscriber, ) { subscribe_headers( @@ -150,7 +150,7 @@ trait ChainBackend: Send + Sync + 'static /// Unsubscribe from new best head subscription. fn unsubscribe_new_heads( &self, - _metadata: Option, + _metadata: Option, id: SubscriptionId, ) -> RpcResult { Ok(self.subscriptions().cancel(id)) @@ -159,7 +159,7 @@ trait ChainBackend: Send + Sync + 'static /// Finalized head subscription fn subscribe_finalized_heads( &self, - _metadata: crate::metadata::Metadata, + _metadata: crate::Metadata, subscriber: Subscriber, ) { subscribe_headers( @@ -176,7 +176,7 @@ trait ChainBackend: Send + Sync + 'static /// Unsubscribe from finalized head subscription. fn unsubscribe_finalized_heads( &self, - _metadata: Option, + _metadata: Option, id: SubscriptionId, ) -> RpcResult { Ok(self.subscriptions().cancel(id)) @@ -230,7 +230,7 @@ impl ChainApi, Block::Hash, Block::Header, Signe Block: BlockT + 'static, Client: HeaderBackend + BlockchainEvents + 'static, { - type Metadata = crate::metadata::Metadata; + type Metadata = crate::Metadata; fn header(&self, hash: Option) -> FutureResult> { self.backend.header(hash) diff --git a/client/rpc/src/lib.rs b/client/rpc/src/lib.rs index 22dccbaa10a..434859a39c2 100644 --- a/client/rpc/src/lib.rs +++ b/client/rpc/src/lib.rs @@ -27,10 +27,7 @@ use rpc::futures::future::{Executor, ExecuteError, Future}; use sp_core::traits::SpawnNamed; use std::sync::Arc; -mod metadata; - -pub use sc_rpc_api::DenyUnsafe; -pub use self::metadata::Metadata; +pub use sc_rpc_api::{DenyUnsafe, Metadata}; pub use rpc::IoHandlerExtension as RpcExtension; pub mod author; @@ -38,8 +35,9 @@ pub mod chain; pub mod offchain; pub mod state; pub mod system; -#[cfg(test)] -mod testing; + +#[cfg(any(test, feature = "test-helpers"))] +pub mod testing; /// Task executor that is being used by RPC subscriptions. 
#[derive(Clone)] diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 921cc7efc69..ded87c73dc8 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -140,21 +140,21 @@ pub trait StateBackend: Send + Sync + 'static /// New runtime version subscription fn subscribe_runtime_version( &self, - _meta: crate::metadata::Metadata, + _meta: crate::Metadata, subscriber: Subscriber, ); /// Unsubscribe from runtime version subscription fn unsubscribe_runtime_version( &self, - _meta: Option, + _meta: Option, id: SubscriptionId, ) -> RpcResult; /// New storage subscription fn subscribe_storage( &self, - _meta: crate::metadata::Metadata, + _meta: crate::Metadata, subscriber: Subscriber>, keys: Option>, ); @@ -162,7 +162,7 @@ pub trait StateBackend: Send + Sync + 'static /// Unsubscribe from storage subscription fn unsubscribe_storage( &self, - _meta: Option, + _meta: Option, id: SubscriptionId, ) -> RpcResult; } @@ -230,7 +230,7 @@ impl StateApi for State Block: BlockT + 'static, Client: Send + Sync + 'static, { - type Metadata = crate::metadata::Metadata; + type Metadata = crate::Metadata; fn call(&self, method: String, data: Bytes, block: Option) -> FutureResult { self.backend.call(block, method, data) @@ -390,7 +390,7 @@ impl ChildStateApi for ChildState Block: BlockT + 'static, Client: Send + Sync + 'static, { - type Metadata = crate::metadata::Metadata; + type Metadata = crate::Metadata; fn storage( &self, diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index f0ae79a033b..3a5580c539a 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -373,7 +373,7 @@ impl StateBackend for FullState, ) { let stream = match self.client.storage_changes_notification_stream( @@ -424,7 +424,7 @@ impl StateBackend for FullState, + _meta: Option, id: SubscriptionId, ) -> RpcResult { Ok(self.subscriptions.cancel(id)) @@ -432,7 +432,7 @@ impl StateBackend for FullState>, keys: Option>, ) { @@ -484,7 +484,7 @@ impl StateBackend for FullState, + _meta: Option, id: SubscriptionId, ) -> RpcResult { Ok(self.subscriptions.cancel(id)) diff --git a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index c7e218541aa..01fd1c16018 100644 --- a/client/rpc/src/state/state_light.rs +++ b/client/rpc/src/state/state_light.rs @@ -289,7 +289,7 @@ impl StateBackend for LightState>, keys: Option> ) { @@ -384,7 +384,7 @@ impl StateBackend for LightState, + _meta: Option, id: SubscriptionId, ) -> RpcResult { if !self.subscriptions.cancel(id.clone()) { @@ -412,7 +412,7 @@ impl StateBackend for LightState, ) { self.subscriptions.add(subscriber, move |sink| { @@ -459,7 +459,7 @@ impl StateBackend for LightState, + _meta: Option, id: SubscriptionId, ) -> RpcResult { Ok(self.subscriptions.cancel(id)) diff --git a/client/rpc/src/testing.rs b/client/rpc/src/testing.rs index afca07a7fbe..9530ff00206 100644 --- a/client/rpc/src/testing.rs +++ b/client/rpc/src/testing.rs @@ -32,6 +32,7 @@ lazy_static::lazy_static! { type Boxed01Future01 = Box + Send + 'static>; +/// Executor for use in testing pub struct TaskExecutor; impl future01::Executor for TaskExecutor { fn execute( diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index fc9303498d6..eedc4582299 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -73,17 +73,17 @@ pub trait RpcExtensionBuilder { /// Returns an instance of the RPC extension for a particular `DenyUnsafe` /// value, e.g. 
the RPC extension might not expose some unsafe methods. - fn build(&self, deny: sc_rpc::DenyUnsafe) -> Self::Output; + fn build(&self, deny: sc_rpc::DenyUnsafe, subscriptions: SubscriptionManager) -> Self::Output; } impl RpcExtensionBuilder for F where - F: Fn(sc_rpc::DenyUnsafe) -> R, + F: Fn(sc_rpc::DenyUnsafe, SubscriptionManager) -> R, R: sc_rpc::RpcExtension, { type Output = R; - fn build(&self, deny: sc_rpc::DenyUnsafe) -> Self::Output { - (*self)(deny) + fn build(&self, deny: sc_rpc::DenyUnsafe, subscriptions: SubscriptionManager) -> Self::Output { + (*self)(deny, subscriptions) } } @@ -97,7 +97,7 @@ impl RpcExtensionBuilder for NoopRpcExtensionBuilder where { type Output = R; - fn build(&self, _deny: sc_rpc::DenyUnsafe) -> Self::Output { + fn build(&self, _deny: sc_rpc::DenyUnsafe, _subscriptions: SubscriptionManager) -> Self::Output { self.0.clone() } } @@ -764,7 +764,7 @@ fn gen_handler( let author = sc_rpc::author::Author::new( client, transaction_pool, - subscriptions, + subscriptions.clone(), keystore, deny_unsafe, ); @@ -786,7 +786,7 @@ fn gen_handler( maybe_offchain_rpc, author::AuthorApi::to_delegate(author), system::SystemApi::to_delegate(system), - rpc_extensions_builder.build(deny_unsafe), + rpc_extensions_builder.build(deny_unsafe, subscriptions), )) } -- GitLab From 2258733ea36c4da456d911967877baaa663d2461 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Mon, 10 Aug 2020 13:48:43 +0200 Subject: [PATCH 071/132] Remove minimum_validator_count stale const (#6864) --- frame/staking/src/benchmarking.rs | 6 +----- frame/staking/src/lib.rs | 4 +--- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index d92cd871791..aab92ef4ce5 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -55,8 +55,6 @@ pub fn create_validator_with_nominators( let mut points_total = 0; let mut points_individual = Vec::new(); - MinimumValidatorCount::put(0); - let (v_stash, v_controller) = create_stash_controller::(0, 100)?; let validator_prefs = ValidatorPrefs { commission: Perbill::from_percent(50), @@ -356,7 +354,7 @@ benchmarks! { new_era { let v in 1 .. 10; let n in 1 .. 100; - MinimumValidatorCount::put(0); + create_validators_with_nominators_for_era::(v, n, MAX_NOMINATIONS, false, None)?; let session_index = SessionIndex::one(); }: { @@ -393,7 +391,6 @@ benchmarks! { payout_all { let v in 1 .. 10; let n in 1 .. 100; - MinimumValidatorCount::put(0); create_validators_with_nominators_for_era::(v, n, MAX_NOMINATIONS, false, None)?; // Start a new Era let new_validators = Staking::::new_era(SessionIndex::one()).unwrap(); @@ -577,7 +574,6 @@ benchmarks! { // number of nominator intent let n in 1000 .. 2000; - MinimumValidatorCount::put(0); create_validators_with_nominators_for_era::(v, n, MAX_NOMINATIONS, false, None)?; // needed for the solution to be generates. diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 60943be82f1..3e89df78414 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -330,7 +330,6 @@ use sp_npos_elections::{ is_score_better, VotingLimit, SupportMap, VoteWeight, }; -const DEFAULT_MINIMUM_VALIDATOR_COUNT: u32 = 4; const STAKING_ID: LockIdentifier = *b"staking "; pub const MAX_UNLOCKING_CHUNKS: usize = 32; pub const MAX_NOMINATIONS: usize = ::LIMIT; @@ -1032,8 +1031,7 @@ decl_storage! 
{ pub ValidatorCount get(fn validator_count) config(): u32; /// Minimum number of staking participants before emergency conditions are imposed. - pub MinimumValidatorCount get(fn minimum_validator_count) config(): - u32 = DEFAULT_MINIMUM_VALIDATOR_COUNT; + pub MinimumValidatorCount get(fn minimum_validator_count) config(): u32; /// Any validators that may never be slashed or forcibly kicked. It's a Vec since they're /// easy to initialize and the performance hit is minimal (we expect no more than four -- GitLab From 5f9e35531a854effbde948f9cb7df6ccd1ae2ddd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Mon, 10 Aug 2020 15:14:34 +0200 Subject: [PATCH 072/132] seal: Change prefix and module name from "ext_" to "seal_" for contract callable functions (#6798) * seal: Change prefix "ext_" to "seal_" for contract callable functions The word Ext is a overloaded term in the context of substrate. It usually is a trait which abstracts away access to external resources usually in order to mock them away for the purpose of tests. The contract module has its own `Ext` trait in addition the the substrate `Ext` which makes things even more confusing. In order to differentiate the contract callable functions more clearly from this `Ext` concept we rename them to use the "seal_" prefix instead. This should change no behaviour at all. This is a pure renaming commit. * seal: Rename import module from "env" to "seal0" * seal: Fixup integration test * seal: Add more tests for new import module names --- Cargo.lock | 4 +- bin/node/executor/tests/basic.rs | 10 +- frame/contracts/COMPLEXITY.md | 46 +++--- frame/contracts/Cargo.toml | 2 +- frame/contracts/fixtures/call_return_code.wat | 12 +- frame/contracts/fixtures/caller_contract.wat | 26 ++-- .../fixtures/check_default_rent_allowance.wat | 6 +- frame/contracts/fixtures/crypto_hashes.wat | 24 ++-- .../fixtures/destroy_and_transfer.wat | 26 ++-- frame/contracts/fixtures/drain.wat | 10 +- .../fixtures/instantiate_return_code.wat | 12 +- frame/contracts/fixtures/ok_trap_revert.wat | 8 +- frame/contracts/fixtures/restoration.wat | 18 +-- .../fixtures/return_from_start_fn.wat | 8 +- frame/contracts/fixtures/return_with_data.wat | 8 +- frame/contracts/fixtures/self_destruct.wat | 22 +-- .../fixtures/self_destructing_constructor.wat | 4 +- .../contracts/fixtures/set_empty_storage.wat | 4 +- frame/contracts/fixtures/set_rent.wat | 24 ++-- frame/contracts/fixtures/storage_size.wat | 16 +-- .../fixtures/transfer_return_code.wat | 8 +- frame/contracts/src/benchmarking.rs | 4 +- frame/contracts/src/exec.rs | 10 +- frame/contracts/src/lib.rs | 6 +- frame/contracts/src/tests.rs | 4 +- frame/contracts/src/wasm/env_def/macros.rs | 24 ++-- frame/contracts/src/wasm/mod.rs | 136 +++++++++--------- frame/contracts/src/wasm/prepare.rs | 110 ++++++++++---- frame/contracts/src/wasm/runtime.rs | 68 ++++----- 29 files changed, 356 insertions(+), 304 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c82803d7bc9..e080fbcbaa1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5444,9 +5444,9 @@ dependencies = [ [[package]] name = "pwasm-utils" -version = "0.12.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f7a12f176deee919f4ba55326ee17491c8b707d0987aed822682c821b660192" +checksum = "0f53bc2558e8376358ebdc28301546471d67336584f6438ed4b7c7457a055fd7" dependencies = [ "byteorder", "log", diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index 0d69b970016..e7744200bcc 100644 --- 
a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -485,7 +485,7 @@ fn full_wasm_block_import_works() { const CODE_TRANSFER: &str = r#" (module -;; ext_call( +;; seal_call( ;; callee_ptr: u32, ;; callee_len: u32, ;; gas: u64, @@ -496,15 +496,15 @@ const CODE_TRANSFER: &str = r#" ;; output_ptr: u32, ;; output_len_ptr: u32 ;; ) -> u32 -(import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) -(import "env" "ext_input" (func $ext_input (param i32 i32))) +(import "seal0" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) +(import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "env" "memory" (memory 1 1)) (func (export "deploy") ) (func (export "call") (block $fail ;; Load input data to contract memory - (call $ext_input + (call $seal_input (i32.const 0) (i32.const 52) ) @@ -543,7 +543,7 @@ const CODE_TRANSFER: &str = r#" ) (drop - (call $ext_call + (call $seal_call (i32.const 4) ;; Pointer to "callee" address. (i32.const 32) ;; Length of "callee" address. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. diff --git a/frame/contracts/COMPLEXITY.md b/frame/contracts/COMPLEXITY.md index dbb1a5c5cde..32f6f84b89b 100644 --- a/frame/contracts/COMPLEXITY.md +++ b/frame/contracts/COMPLEXITY.md @@ -284,19 +284,19 @@ given runtime. This is the list of getters: -- ext_caller -- ext_address -- ext_weight_to_fee -- ext_gas_left -- ext_balance -- ext_value_transferred -- ext_now -- ext_minimum_balance -- ext_tombstone_deposit -- ext_rent_allowance -- ext_block_number - -### ext_set_storage +- seal_caller +- seal_address +- seal_weight_to_fee +- seal_gas_left +- seal_balance +- seal_value_transferred +- seal_now +- seal_minimum_balance +- seal_tombstone_deposit +- seal_rent_allowance +- seal_block_number + +### seal_set_storage This function receives a `key` and `value` as arguments. It consists of the following steps: @@ -305,7 +305,7 @@ This function receives a `key` and `value` as arguments. It consists of the foll **complexity**: Complexity is proportional to the size of the `value`. This function induces a DB write of size proportional to the `value` size (if flushed to the storage), so should be priced accordingly. -### ext_clear_storage +### seal_clear_storage This function receives a `key` as argument. It consists of the following steps: @@ -315,7 +315,7 @@ This function receives a `key` as argument. It consists of the following steps: **complexity**: Complexity is constant. This function induces a DB write to clear the storage entry (upon being flushed to the storage) and should be priced accordingly. -### ext_get_storage +### seal_get_storage This function receives a `key` as an argument. It consists of the following steps: @@ -330,7 +330,7 @@ performed. Moreover, the DB read has to be synchronous and no progress can be ma **complexity**: The memory and computing complexity is proportional to the size of the fetched value. This function performs a DB read. -### ext_transfer +### seal_transfer This function receives the following arguments: @@ -345,7 +345,7 @@ It consists of the following steps: Loading of `account` and `value` buffers should be charged. This is because the sizes of buffers are specified by the calling code, even though marshaled representations are, essentially, of constant size. This can be fixed by assigning an upper bound for sizes of `AccountId` and `Balance`. 
-### ext_call +### seal_call This function receives the following arguments: @@ -369,7 +369,7 @@ Loading `input_data` should be charged in any case. **complexity**: All complexity comes from loading and writing buffers and executing `call` executive function. The former component is proportional to the sizes of `callee`, `value`, `input_data` and `output_ptr` buffers. The latter component completely depends on the complexity of `call` executive function, and also dominated by it. -### ext_instantiate +### seal_instantiate This function receives the following arguments: @@ -391,7 +391,7 @@ Loading `init_code` and `input_data` should be charged in any case. **complexity**: All complexity comes from loading buffers and executing `instantiate` executive function. The former component is proportional to the sizes of `init_code`, `value` and `input_data` buffers. The latter component completely depends on the complexity of `instantiate` executive function and also dominated by it. -### ext_terminate +### seal_terminate This function receives the following arguments: @@ -405,13 +405,13 @@ Loading of the `beneficiary` buffer should be charged. This is because the sizes **complexity**: All complexity comes from loading buffers and executing `terminate` executive function. The former component is proportional to the size of the `beneficiary` buffer. The latter component completely depends on the complexity of `terminate` executive function and also dominated by it. -### ext_input +### seal_input This function receives a pointer to contract memory. It copies the input to the contract call to this location. **complexity**: The complextity is proportional to the size of the input buffer. -### ext_return +### seal_return This function receives a `data` buffer and `flags` arguments. Execution of the function consists of the following steps: @@ -421,7 +421,7 @@ This function receives a `data` buffer and `flags` arguments. Execution of the f **complexity**: The complexity of this function is proportional to the size of the `data` buffer. -### ext_deposit_event +### seal_deposit_event This function receives a `data` buffer as an argument. Execution of the function consists of the following steps: @@ -432,7 +432,7 @@ This function receives a `data` buffer as an argument. Execution of the function **complexity**: The complexity of this function is proportional to the size of the `data` buffer. 
-### ext_set_rent_allowance +### seal_set_rent_allowance This function receives the following argument: diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index 74655b9528f..1a42af6833b 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -19,7 +19,7 @@ frame-support = { version = "2.0.0-rc5", default-features = false, path = "../su frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } pallet-contracts-primitives = { version = "2.0.0-rc5", default-features = false, path = "common" } parity-wasm = { version = "0.41.0", default-features = false } -pwasm-utils = { version = "0.12.0", default-features = false } +pwasm-utils = { version = "0.14.0", default-features = false } serde = { version = "1.0.101", optional = true, features = ["derive"] } sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } diff --git a/frame/contracts/fixtures/call_return_code.wat b/frame/contracts/fixtures/call_return_code.wat index d724f904462..f7a7ff20a49 100644 --- a/frame/contracts/fixtures/call_return_code.wat +++ b/frame/contracts/fixtures/call_return_code.wat @@ -2,9 +2,9 @@ ;; of this call to the output buffer. ;; It also forwards its input to the callee. (module - (import "env" "ext_input" (func $ext_input (param i32 i32))) - (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) - (import "env" "ext_return" (func $ext_return (param i32 i32 i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) ;; [0, 8) address of django @@ -23,10 +23,10 @@ (func (export "deploy")) (func (export "call") - (call $ext_input (i32.const 20) (i32.const 24)) + (call $seal_input (i32.const 20) (i32.const 24)) (i32.store (i32.const 16) - (call $ext_call + (call $seal_call (i32.const 0) ;; Pointer to "callee" address. (i32.const 8) ;; Length of "callee" address. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. 
@@ -39,6 +39,6 @@ ) ) ;; exit with success and take transfer return code to the output buffer - (call $ext_return (i32.const 0) (i32.const 16) (i32.const 4)) + (call $seal_return (i32.const 0) (i32.const 16) (i32.const 4)) ) ) diff --git a/frame/contracts/fixtures/caller_contract.wat b/frame/contracts/fixtures/caller_contract.wat index ee2e16098d5..e8ff2e37971 100644 --- a/frame/contracts/fixtures/caller_contract.wat +++ b/frame/contracts/fixtures/caller_contract.wat @@ -1,9 +1,9 @@ (module - (import "env" "ext_input" (func $ext_input (param i32 i32))) - (import "env" "ext_balance" (func $ext_balance (param i32 i32))) - (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) - (import "env" "ext_instantiate" (func $ext_instantiate (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32) (result i32))) - (import "env" "ext_println" (func $ext_println (param i32 i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_balance" (func $seal_balance (param i32 i32))) + (import "seal0" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_instantiate" (func $seal_instantiate (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_println" (func $seal_println (param i32 i32))) (import "env" "memory" (memory 1 1)) (func $assert (param i32) @@ -20,7 +20,7 @@ (i32.sub (get_local $sp) (i32.const 16)) (i32.const 8) ) - (call $ext_balance + (call $seal_balance (i32.sub (get_local $sp) (i32.const 8)) (i32.sub (get_local $sp) (i32.const 16)) ) @@ -41,7 +41,7 @@ (i32.store (i32.const 20) (i32.const 32)) ;; Copy input to this contracts memory - (call $ext_input (i32.const 24) (i32.const 20)) + (call $seal_input (i32.const 24) (i32.const 20)) ;; Input data is the code hash of the contract to be deployed. (call $assert @@ -59,7 +59,7 @@ ;; Fail to deploy the contract since it returns a non-zero exit status. (set_local $exit_code - (call $ext_instantiate + (call $seal_instantiate (i32.const 24) ;; Pointer to the code hash. (i32.const 32) ;; Length of the code hash. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. @@ -86,7 +86,7 @@ ;; Fail to deploy the contract due to insufficient gas. (set_local $exit_code - (call $ext_instantiate + (call $seal_instantiate (i32.const 24) ;; Pointer to the code hash. (i32.const 32) ;; Length of the code hash. (i64.const 187500000) ;; Just enough to pay for the instantiate @@ -119,7 +119,7 @@ ;; Deploy the contract successfully. (set_local $exit_code - (call $ext_instantiate + (call $seal_instantiate (i32.const 24) ;; Pointer to the code hash. (i32.const 32) ;; Length of the code hash. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. @@ -167,7 +167,7 @@ ;; Call the new contract and expect it to return failing exit code. (set_local $exit_code - (call $ext_call + (call $seal_call (i32.const 16) ;; Pointer to "callee" address. (i32.const 8) ;; Length of "callee" address. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. @@ -203,7 +203,7 @@ ;; Fail to call the contract due to insufficient gas. (set_local $exit_code - (call $ext_call + (call $seal_call (i32.const 16) ;; Pointer to "callee" address. (i32.const 8) ;; Length of "callee" address. (i64.const 117500000) ;; Just enough to make the call @@ -240,7 +240,7 @@ ;; Call the contract successfully. (set_local $exit_code - (call $ext_call + (call $seal_call (i32.const 16) ;; Pointer to "callee" address. 
(i32.const 8) ;; Length of "callee" address. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. diff --git a/frame/contracts/fixtures/check_default_rent_allowance.wat b/frame/contracts/fixtures/check_default_rent_allowance.wat index b3076a04325..64cd67186bf 100644 --- a/frame/contracts/fixtures/check_default_rent_allowance.wat +++ b/frame/contracts/fixtures/check_default_rent_allowance.wat @@ -1,8 +1,8 @@ (module - (import "env" "ext_rent_allowance" (func $ext_rent_allowance (param i32 i32))) + (import "seal0" "seal_rent_allowance" (func $seal_rent_allowance (param i32 i32))) (import "env" "memory" (memory 1 1)) - ;; [0, 8) reserved for $ext_rent_allowance output + ;; [0, 8) reserved for $seal_rent_allowance output ;; [8, 16) length of the buffer (data (i32.const 8) "\08") @@ -22,7 +22,7 @@ (func (export "deploy") ;; fill the buffer with the rent allowance. - (call $ext_rent_allowance (i32.const 0) (i32.const 8)) + (call $seal_rent_allowance (i32.const 0) (i32.const 8)) ;; assert len == 8 (call $assert diff --git a/frame/contracts/fixtures/crypto_hashes.wat b/frame/contracts/fixtures/crypto_hashes.wat index f7b244b8c1e..c2b4d6b81ed 100644 --- a/frame/contracts/fixtures/crypto_hashes.wat +++ b/frame/contracts/fixtures/crypto_hashes.wat @@ -1,21 +1,21 @@ (module - (import "env" "ext_input" (func $ext_input (param i32 i32))) - (import "env" "ext_return" (func $ext_return (param i32 i32 i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) - (import "env" "ext_hash_sha2_256" (func $ext_hash_sha2_256 (param i32 i32 i32))) - (import "env" "ext_hash_keccak_256" (func $ext_hash_keccak_256 (param i32 i32 i32))) - (import "env" "ext_hash_blake2_256" (func $ext_hash_blake2_256 (param i32 i32 i32))) - (import "env" "ext_hash_blake2_128" (func $ext_hash_blake2_128 (param i32 i32 i32))) + (import "seal0" "seal_hash_sha2_256" (func $seal_hash_sha2_256 (param i32 i32 i32))) + (import "seal0" "seal_hash_keccak_256" (func $seal_hash_keccak_256 (param i32 i32 i32))) + (import "seal0" "seal_hash_blake2_256" (func $seal_hash_blake2_256 (param i32 i32 i32))) + (import "seal0" "seal_hash_blake2_128" (func $seal_hash_blake2_128 (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) (type $hash_fn_sig (func (param i32 i32 i32))) (table 8 funcref) (elem (i32.const 1) - $ext_hash_sha2_256 - $ext_hash_keccak_256 - $ext_hash_blake2_256 - $ext_hash_blake2_128 + $seal_hash_sha2_256 + $seal_hash_keccak_256 + $seal_hash_blake2_256 + $seal_hash_blake2_128 ) (data (i32.const 1) "20202010201008") ;; Output sizes of the hashes in order in hex. @@ -56,7 +56,7 @@ (local.set $input_len_ptr (i32.const 256)) (local.set $input_ptr (i32.const 10)) (i32.store (local.get $input_len_ptr) (i32.const 246)) - (call $ext_input (local.get $input_ptr) (local.get $input_len_ptr)) + (call $seal_input (local.get $input_ptr) (local.get $input_len_ptr)) (local.set $chosen_hash_fn (i32.load8_u (local.get $input_ptr))) (if (i32.gt_u (local.get $chosen_hash_fn) (i32.const 7)) ;; We check that the chosen hash fn identifier is within bounds: [0,7] @@ -71,7 +71,7 @@ (local.get $input_ptr) (local.get $chosen_hash_fn) ;; Which crypto hash function to execute. ) - (call $ext_return + (call $seal_return (i32.const 0) (local.get $input_ptr) ;; Linear memory location of the output buffer. (local.get $output_len) ;; Number of output buffer bytes. 
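The fixture diffs above and below all make the same two mechanical changes: the import module for contract-callable functions becomes "seal0" (memory stays under "env") and every function gains the `seal_` prefix. As a rough mental model, and not the crate's actual sandbox machinery, host-side resolution can be pictured as a lookup keyed on the (module, name) pair; the sketch below uses a plain `HashMap` with stubbed host functions to show why a contract still importing ("env", "ext_input") no longer links.

```rust
// Illustrative resolver keyed on (import module, function name). The real
// code registers host functions through the sandbox environment builder; the
// stubs and the HashMap here are simplifications for the sketch.
use std::collections::HashMap;

type HostFn = fn(&[u8]) -> Result<(), &'static str>;

fn seal_input(_args: &[u8]) -> Result<(), &'static str> { Ok(()) }
fn seal_return(_args: &[u8]) -> Result<(), &'static str> { Ok(()) }

fn resolver() -> HashMap<(&'static str, &'static str), HostFn> {
    let mut map: HashMap<(&'static str, &'static str), HostFn> = HashMap::new();
    // Contract-callable functions now live under "seal0" with a "seal_" prefix.
    map.insert(("seal0", "seal_input"), seal_input as HostFn);
    map.insert(("seal0", "seal_return"), seal_return as HostFn);
    map
}

fn main() {
    let host = resolver();
    // An old-style import fails to resolve, which is how stale code gets rejected.
    assert!(host.get(&("env", "ext_input")).is_none());
    assert!(host.get(&("seal0", "seal_input")).is_some());
}
```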
diff --git a/frame/contracts/fixtures/destroy_and_transfer.wat b/frame/contracts/fixtures/destroy_and_transfer.wat index 3f8a8c89b02..3220f4e612d 100644 --- a/frame/contracts/fixtures/destroy_and_transfer.wat +++ b/frame/contracts/fixtures/destroy_and_transfer.wat @@ -1,10 +1,10 @@ (module - (import "env" "ext_input" (func $ext_input (param i32 i32))) - (import "env" "ext_get_storage" (func $ext_get_storage (param i32 i32 i32) (result i32))) - (import "env" "ext_set_storage" (func $ext_set_storage (param i32 i32 i32))) - (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) - (import "env" "ext_transfer" (func $ext_transfer (param i32 i32 i32 i32) (result i32))) - (import "env" "ext_instantiate" (func $ext_instantiate (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_get_storage" (func $seal_get_storage (param i32 i32 i32) (result i32))) + (import "seal0" "seal_set_storage" (func $seal_set_storage (param i32 i32 i32))) + (import "seal0" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_transfer" (func $seal_transfer (param i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_instantiate" (func $seal_instantiate (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) ;; [0, 8) Endowment to send when creating contract. @@ -35,7 +35,7 @@ (func (export "deploy") ;; Input data is the code hash of the contract to be deployed. - (call $ext_input (i32.const 48) (i32.const 96)) + (call $seal_input (i32.const 48) (i32.const 96)) (call $assert (i32.eq (i32.load (i32.const 96)) @@ -46,7 +46,7 @@ ;; Deploy the contract with the provided code hash. (call $assert (i32.eq - (call $ext_instantiate + (call $seal_instantiate (i32.const 48) ;; Pointer to the code hash. (i32.const 32) ;; Length of the code hash. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. @@ -72,7 +72,7 @@ ) ;; Store the return address. - (call $ext_set_storage + (call $seal_set_storage (i32.const 16) ;; Pointer to the key (i32.const 80) ;; Pointer to the value (i32.const 8) ;; Length of the value @@ -83,7 +83,7 @@ ;; Read address of destination contract from storage. (call $assert (i32.eq - (call $ext_get_storage + (call $seal_get_storage (i32.const 16) ;; Pointer to the key (i32.const 80) ;; Pointer to the value (i32.const 88) ;; Pointer to the len of the value @@ -101,7 +101,7 @@ ;; Calling the destination contract with non-empty input data should fail. (call $assert (i32.eq - (call $ext_call + (call $seal_call (i32.const 80) ;; Pointer to destination address (i32.const 8) ;; Length of destination address (i64.const 0) ;; How much gas to devote for the execution. 0 = all. @@ -120,7 +120,7 @@ ;; Call the destination contract regularly, forcing it to self-destruct. (call $assert (i32.eq - (call $ext_call + (call $seal_call (i32.const 80) ;; Pointer to destination address (i32.const 8) ;; Length of destination address (i64.const 0) ;; How much gas to devote for the execution. 0 = all. @@ -140,7 +140,7 @@ ;; does not keep the contract alive. 
(call $assert (i32.eq - (call $ext_transfer + (call $seal_transfer (i32.const 80) ;; Pointer to destination address (i32.const 8) ;; Length of destination address (i32.const 0) ;; Pointer to the buffer with value to transfer diff --git a/frame/contracts/fixtures/drain.wat b/frame/contracts/fixtures/drain.wat index 22422bb859d..9180047f5d0 100644 --- a/frame/contracts/fixtures/drain.wat +++ b/frame/contracts/fixtures/drain.wat @@ -1,9 +1,9 @@ (module - (import "env" "ext_balance" (func $ext_balance (param i32 i32))) - (import "env" "ext_transfer" (func $ext_transfer (param i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_balance" (func $seal_balance (param i32 i32))) + (import "seal0" "seal_transfer" (func $seal_transfer (param i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) - ;; [0, 8) reserved for $ext_balance output + ;; [0, 8) reserved for $seal_balance output ;; [8, 16) length of the buffer (data (i32.const 8) "\08") @@ -23,7 +23,7 @@ (func (export "call") ;; Send entire remaining balance to the 0 address. - (call $ext_balance (i32.const 0) (i32.const 8)) + (call $seal_balance (i32.const 0) (i32.const 8)) ;; Balance should be encoded as a u64. (call $assert @@ -36,7 +36,7 @@ ;; Self-destruct by sending full balance to the 0 address. (call $assert (i32.eq - (call $ext_transfer + (call $seal_transfer (i32.const 16) ;; Pointer to destination address (i32.const 8) ;; Length of destination address (i32.const 0) ;; Pointer to the buffer with value to transfer diff --git a/frame/contracts/fixtures/instantiate_return_code.wat b/frame/contracts/fixtures/instantiate_return_code.wat index bce80ca01fa..20ab96d88ad 100644 --- a/frame/contracts/fixtures/instantiate_return_code.wat +++ b/frame/contracts/fixtures/instantiate_return_code.wat @@ -3,9 +3,9 @@ ;; The first 32 byte of input is the code hash to instantiate ;; The rest of the input is forwarded to the constructor of the callee (module - (import "env" "ext_input" (func $ext_input (param i32 i32))) - (import "env" "ext_instantiate" (func $ext_instantiate (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32) (result i32))) - (import "env" "ext_return" (func $ext_return (param i32 i32 i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_instantiate" (func $seal_instantiate (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) ;; [0, 8) address of django @@ -24,10 +24,10 @@ (func (export "deploy")) (func (export "call") - (call $ext_input (i32.const 24) (i32.const 20)) + (call $seal_input (i32.const 24) (i32.const 20)) (i32.store (i32.const 16) - (call $ext_instantiate + (call $seal_instantiate (i32.const 24) ;; Pointer to the code hash. (i32.const 32) ;; Length of the code hash. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. 
@@ -42,6 +42,6 @@ ) ) ;; exit with success and take transfer return code to the output buffer - (call $ext_return (i32.const 0) (i32.const 16) (i32.const 4)) + (call $seal_return (i32.const 0) (i32.const 16) (i32.const 4)) ) ) diff --git a/frame/contracts/fixtures/ok_trap_revert.wat b/frame/contracts/fixtures/ok_trap_revert.wat index 5877e55d0e7..b71a6435db9 100644 --- a/frame/contracts/fixtures/ok_trap_revert.wat +++ b/frame/contracts/fixtures/ok_trap_revert.wat @@ -1,6 +1,6 @@ (module - (import "env" "ext_input" (func $ext_input (param i32 i32))) - (import "env" "ext_return" (func $ext_return (param i32 i32 i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) (func (export "deploy") @@ -13,7 +13,7 @@ (func $ok_trap_revert (i32.store (i32.const 4) (i32.const 4)) - (call $ext_input (i32.const 0) (i32.const 4)) + (call $seal_input (i32.const 0) (i32.const 4)) (block $IF_2 (block $IF_1 (block $IF_0 @@ -26,7 +26,7 @@ return ) ;; 1 = revert - (call $ext_return (i32.const 1) (i32.const 0) (i32.const 0)) + (call $seal_return (i32.const 1) (i32.const 0) (i32.const 0)) (unreachable) ) ;; 2 = trap diff --git a/frame/contracts/fixtures/restoration.wat b/frame/contracts/fixtures/restoration.wat index 4107587ada7..3c15f7ae088 100644 --- a/frame/contracts/fixtures/restoration.wat +++ b/frame/contracts/fixtures/restoration.wat @@ -1,8 +1,8 @@ (module - (import "env" "ext_set_storage" (func $ext_set_storage (param i32 i32 i32))) - (import "env" "ext_input" (func $ext_input (param i32 i32))) - (import "env" "ext_restore_to" - (func $ext_restore_to + (import "seal0" "seal_set_storage" (func $seal_set_storage (param i32 i32 i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_restore_to" + (func $seal_restore_to (param i32 i32 i32 i32 i32 i32 i32 i32) ) ) @@ -19,7 +19,7 @@ (func (export "call") ;; copy code hash to contract memory - (call $ext_input (i32.const 264) (i32.const 304)) + (call $seal_input (i32.const 264) (i32.const 304)) (call $assert (i32.eq (i32.load (i32.const 304)) @@ -27,7 +27,7 @@ ) ) - (call $ext_restore_to + (call $seal_restore_to ;; Pointer and length of the encoded dest buffer. 
(i32.const 256) (i32.const 8) @@ -45,14 +45,14 @@ ) (func (export "deploy") ;; Data to restore - (call $ext_set_storage + (call $seal_set_storage (i32.const 0) (i32.const 0) (i32.const 4) ) ;; ACL - (call $ext_set_storage + (call $seal_set_storage (i32.const 100) (i32.const 0) (i32.const 4) @@ -68,7 +68,7 @@ ;; Address of bob (data (i32.const 256) "\02\00\00\00\00\00\00\00") - ;; [264, 296) Code hash of SET_RENT (copied here by ext_input) + ;; [264, 296) Code hash of SET_RENT (copied here by seal_input) ;; [296, 304) Rent allowance (data (i32.const 296) "\32\00\00\00\00\00\00\00") diff --git a/frame/contracts/fixtures/return_from_start_fn.wat b/frame/contracts/fixtures/return_from_start_fn.wat index ba73ef25ed3..854b552a828 100644 --- a/frame/contracts/fixtures/return_from_start_fn.wat +++ b/frame/contracts/fixtures/return_from_start_fn.wat @@ -1,17 +1,17 @@ (module - (import "env" "ext_return" (func $ext_return (param i32 i32 i32))) - (import "env" "ext_deposit_event" (func $ext_deposit_event (param i32 i32 i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "seal0" "seal_deposit_event" (func $seal_deposit_event (param i32 i32 i32 i32))) (import "env" "memory" (memory 1 1)) (start $start) (func $start - (call $ext_deposit_event + (call $seal_deposit_event (i32.const 0) ;; The topics buffer (i32.const 0) ;; The topics buffer's length (i32.const 8) ;; The data buffer (i32.const 4) ;; The data buffer's length ) - (call $ext_return + (call $seal_return (i32.const 0) (i32.const 8) (i32.const 4) diff --git a/frame/contracts/fixtures/return_with_data.wat b/frame/contracts/fixtures/return_with_data.wat index ad42845ae02..93b9daa07a3 100644 --- a/frame/contracts/fixtures/return_with_data.wat +++ b/frame/contracts/fixtures/return_with_data.wat @@ -1,6 +1,6 @@ (module - (import "env" "ext_input" (func $ext_input (param i32 i32))) - (import "env" "ext_return" (func $ext_return (param i32 i32 i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) ;; [0, 128) buffer where input is copied @@ -16,11 +16,11 @@ ;; Call reads the first 4 bytes (LE) as the exit status and returns the rest as output data. (func $call (export "call") ;; Copy input into this contracts memory. - (call $ext_input (i32.const 0) (i32.const 128)) + (call $seal_input (i32.const 0) (i32.const 128)) ;; Copy all but the first 4 bytes of the input data as the output data. ;; Use the first byte as exit status - (call $ext_return + (call $seal_return (i32.load8_u (i32.const 0)) ;; Exit status (i32.const 4) ;; Pointer to the data to return. (i32.sub ;; Count of bytes to copy. 
diff --git a/frame/contracts/fixtures/self_destruct.wat b/frame/contracts/fixtures/self_destruct.wat index baa38e4d47b..6898e746b08 100644 --- a/frame/contracts/fixtures/self_destruct.wat +++ b/frame/contracts/fixtures/self_destruct.wat @@ -1,11 +1,11 @@ (module - (import "env" "ext_input" (func $ext_input (param i32 i32))) - (import "env" "ext_address" (func $ext_address (param i32 i32))) - (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) - (import "env" "ext_terminate" (func $ext_terminate (param i32 i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_address" (func $seal_address (param i32 i32))) + (import "seal0" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_terminate" (func $seal_terminate (param i32 i32))) (import "env" "memory" (memory 1 1)) - ;; [0, 8) reserved for $ext_address output + ;; [0, 8) reserved for $seal_address output ;; [8, 16) length of the buffer (data (i32.const 8) "\08") @@ -13,7 +13,7 @@ ;; [16, 24) Address of django (data (i32.const 16) "\04\00\00\00\00\00\00\00") - ;; [24, 32) reserved for output of $ext_input + ;; [24, 32) reserved for output of $seal_input ;; [32, 36) length of the buffer (data (i32.const 32) "\04") @@ -36,10 +36,10 @@ ;; This should trap instead of self-destructing since a contract cannot be removed live in ;; the execution stack cannot be removed. If the recursive call traps, then trap here as ;; well. - (call $ext_input (i32.const 24) (i32.const 32)) + (call $seal_input (i32.const 24) (i32.const 32)) (if (i32.load (i32.const 32)) (then - (call $ext_address (i32.const 0) (i32.const 8)) + (call $seal_address (i32.const 0) (i32.const 8)) ;; Expect address to be 8 bytes. (call $assert @@ -52,7 +52,7 @@ ;; Recursively call self with empty input data. (call $assert (i32.eq - (call $ext_call + (call $seal_call (i32.const 0) ;; Pointer to own address (i32.const 8) ;; Length of own address (i64.const 0) ;; How much gas to devote for the execution. 0 = all. @@ -69,11 +69,11 @@ ) (else ;; Try to terminate and give balance to django. - (call $ext_terminate + (call $seal_terminate (i32.const 16) ;; Pointer to beneficiary address (i32.const 8) ;; Length of beneficiary address ) - (unreachable) ;; ext_terminate never returns + (unreachable) ;; seal_terminate never returns ) ) ) diff --git a/frame/contracts/fixtures/self_destructing_constructor.wat b/frame/contracts/fixtures/self_destructing_constructor.wat index ece5679f4f6..ab8c289f1b5 100644 --- a/frame/contracts/fixtures/self_destructing_constructor.wat +++ b/frame/contracts/fixtures/self_destructing_constructor.wat @@ -1,5 +1,5 @@ (module - (import "env" "ext_terminate" (func $ext_terminate (param i32 i32))) + (import "seal0" "seal_terminate" (func $seal_terminate (param i32 i32))) (import "env" "memory" (memory 1 1)) (func $assert (param i32) @@ -13,7 +13,7 @@ (func (export "deploy") ;; Self-destruct by sending full balance to the 0 address. 
- (call $ext_terminate + (call $seal_terminate (i32.const 0) ;; Pointer to destination address (i32.const 8) ;; Length of destination address ) diff --git a/frame/contracts/fixtures/set_empty_storage.wat b/frame/contracts/fixtures/set_empty_storage.wat index d550eb2314b..dbcd3a1326a 100644 --- a/frame/contracts/fixtures/set_empty_storage.wat +++ b/frame/contracts/fixtures/set_empty_storage.wat @@ -1,12 +1,12 @@ ;; This module stores a KV pair into the storage (module - (import "env" "ext_set_storage" (func $ext_set_storage (param i32 i32 i32))) + (import "seal0" "seal_set_storage" (func $seal_set_storage (param i32 i32 i32))) (import "env" "memory" (memory 16 16)) (func (export "call") ) (func (export "deploy") - (call $ext_set_storage + (call $seal_set_storage (i32.const 0) ;; Pointer to storage key (i32.const 0) ;; Pointer to value (i32.load (i32.const 0)) ;; Size of value diff --git a/frame/contracts/fixtures/set_rent.wat b/frame/contracts/fixtures/set_rent.wat index ba52e9ed752..a09d3dc4bd4 100644 --- a/frame/contracts/fixtures/set_rent.wat +++ b/frame/contracts/fixtures/set_rent.wat @@ -1,14 +1,14 @@ (module - (import "env" "ext_transfer" (func $ext_transfer (param i32 i32 i32 i32) (result i32))) - (import "env" "ext_set_storage" (func $ext_set_storage (param i32 i32 i32))) - (import "env" "ext_clear_storage" (func $ext_clear_storage (param i32))) - (import "env" "ext_set_rent_allowance" (func $ext_set_rent_allowance (param i32 i32))) - (import "env" "ext_input" (func $ext_input (param i32 i32))) + (import "seal0" "seal_transfer" (func $seal_transfer (param i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_set_storage" (func $seal_set_storage (param i32 i32 i32))) + (import "seal0" "seal_clear_storage" (func $seal_clear_storage (param i32))) + (import "seal0" "seal_set_rent_allowance" (func $seal_set_rent_allowance (param i32 i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "env" "memory" (memory 1 1)) ;; insert a value of 4 bytes into storage (func $call_0 - (call $ext_set_storage + (call $seal_set_storage (i32.const 1) (i32.const 0) (i32.const 4) @@ -17,7 +17,7 @@ ;; remove the value inserted by call_1 (func $call_1 - (call $ext_clear_storage + (call $seal_clear_storage (i32.const 1) ) ) @@ -26,7 +26,7 @@ (func $call_2 (call $assert (i32.eq - (call $ext_transfer (i32.const 68) (i32.const 8) (i32.const 76) (i32.const 8)) + (call $seal_transfer (i32.const 68) (i32.const 8) (i32.const 76) (i32.const 8)) (i32.const 0) ) ) @@ -48,7 +48,7 @@ (func (export "call") (local $input_size i32) (i32.store (i32.const 64) (i32.const 64)) - (call $ext_input (i32.const 1024) (i32.const 64)) + (call $seal_input (i32.const 1024) (i32.const 64)) (set_local $input_size (i32.load (i32.const 64)) ) @@ -76,16 +76,16 @@ ;; Set into storage a 4 bytes value ;; Set call set_rent_allowance with input (func (export "deploy") - (call $ext_set_storage + (call $seal_set_storage (i32.const 0) (i32.const 0) (i32.const 4) ) - (call $ext_input + (call $seal_input (i32.const 0) (i32.const 64) ) - (call $ext_set_rent_allowance + (call $seal_set_rent_allowance (i32.const 0) (i32.load (i32.const 64)) ) diff --git a/frame/contracts/fixtures/storage_size.wat b/frame/contracts/fixtures/storage_size.wat index 579aeda3a06..293a656d4f6 100644 --- a/frame/contracts/fixtures/storage_size.wat +++ b/frame/contracts/fixtures/storage_size.wat @@ -1,7 +1,7 @@ (module - (import "env" "ext_get_storage" (func $ext_get_storage (param i32 i32 i32) (result i32))) - (import "env" "ext_set_storage" (func 
$ext_set_storage (param i32 i32 i32))) - (import "env" "ext_input" (func $ext_input (param i32 i32))) + (import "seal0" "seal_get_storage" (func $seal_get_storage (param i32 i32 i32) (result i32))) + (import "seal0" "seal_set_storage" (func $seal_set_storage (param i32 i32 i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "env" "memory" (memory 16 16)) ;; [0, 32) storage key @@ -12,10 +12,10 @@ ;; [36, 40) size of the input buffer (data (i32.const 36) "\04") - ;; [40, 44) size of buffer for ext_get_storage set to max + ;; [40, 44) size of buffer for seal_get_storage set to max (data (i32.const 40) "\FF\FF\FF\FF") - ;; [44, inf) ext_get_storage buffer + ;; [44, inf) seal_get_storage buffer (func $assert (param i32) (block $ok @@ -27,7 +27,7 @@ ) (func (export "call") - (call $ext_input (i32.const 32) (i32.const 36)) + (call $seal_input (i32.const 32) (i32.const 36)) ;; assert input size == 4 (call $assert @@ -38,7 +38,7 @@ ) ;; place a garbage value in storage, the size of which is specified by the call input. - (call $ext_set_storage + (call $seal_set_storage (i32.const 0) ;; Pointer to storage key (i32.const 0) ;; Pointer to value (i32.load (i32.const 32)) ;; Size of value @@ -46,7 +46,7 @@ (call $assert (i32.eq - (call $ext_get_storage + (call $seal_get_storage (i32.const 0) ;; Pointer to storage key (i32.const 44) ;; buffer where to copy result (i32.const 40) ;; pointer to size of buffer diff --git a/frame/contracts/fixtures/transfer_return_code.wat b/frame/contracts/fixtures/transfer_return_code.wat index 87d186811e7..7a1bec9adf3 100644 --- a/frame/contracts/fixtures/transfer_return_code.wat +++ b/frame/contracts/fixtures/transfer_return_code.wat @@ -1,8 +1,8 @@ ;; This transfers 100 balance to the zero account and copies the return code ;; of this transfer to the output buffer. (module - (import "env" "ext_transfer" (func $ext_transfer (param i32 i32 i32 i32) (result i32))) - (import "env" "ext_return" (func $ext_return (param i32 i32 i32))) + (import "seal0" "seal_transfer" (func $seal_transfer (param i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) ;; [0, 8) zero-adress @@ -18,7 +18,7 @@ (func (export "call") (i32.store (i32.const 16) - (call $ext_transfer + (call $seal_transfer (i32.const 0) ;; ptr to destination address (i32.const 8) ;; length of destination address (i32.const 8) ;; ptr to value to transfer @@ -26,6 +26,6 @@ ) ) ;; exit with success and take transfer return code to the output buffer - (call $ext_return (i32.const 0) (i32.const 16) (i32.const 4)) + (call $seal_return (i32.const 0) (i32.const 16) (i32.const 4)) ) ) diff --git a/frame/contracts/src/benchmarking.rs b/frame/contracts/src/benchmarking.rs index 4bdb14576ee..1a04db4defd 100644 --- a/frame/contracts/src/benchmarking.rs +++ b/frame/contracts/src/benchmarking.rs @@ -130,9 +130,9 @@ benchmarks! { // Instantiate uses a dummy contract constructor to measure the overhead of the instantiate. // The size of the data has no influence on the costs of this extrinsic as long as the contract - // won't call `ext_input` in its constructor to copy the data to contract memory. + // won't call `seal_input` in its constructor to copy the data to contract memory. // The dummy contract used here does not do this. The costs for the data copy is billed as - // part of `ext_input`. + // part of `seal_input`. 
instantiate { let data = vec![0u8; 128]; let endowment = Config::::subsistence_threshold_uncached(); diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index a2fb50dd3f3..ce4e17cd1b9 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -58,9 +58,9 @@ pub enum TransactorKind { /// Output of a contract call or instantiation which ran to completion. #[cfg_attr(test, derive(PartialEq, Eq, Debug))] pub struct ExecReturnValue { - /// Flags passed along by `ext_return`. Empty when `ext_return` was never called. + /// Flags passed along by `seal_return`. Empty when `seal_return` was never called. pub flags: ReturnFlags, - /// Buffer passed along by `ext_return`. Empty when `ext_return` was never called. + /// Buffer passed along by `seal_return`. Empty when `seal_return` was never called. pub data: Vec, } @@ -355,7 +355,7 @@ where // `collect_rent` will be done on first call and destination contract and balance // cannot be changed before the first call // We do not allow 'calling' plain accounts. For transfering value - // `ext_transfer` must be used. + // `seal_transfer` must be used. let contract = if let Some(ContractInfo::Alive(info)) = rent::collect_rent::(&dest) { info } else { @@ -455,7 +455,7 @@ where // We need each contract that exists to be above the subsistence threshold // in order to keep up the guarantuee that we always leave a tombstone behind - // with the exception of a contract that called `ext_terminate`. + // with the exception of a contract that called `seal_terminate`. if T::Currency::total_balance(&dest) < nested.config.subsistence_threshold() { Err(Error::::NewContractNotFunded)? } @@ -599,7 +599,7 @@ fn transfer<'a, T: Trait, V: Vm, L: Loader>( Err(Error::::OutOfGas)? } - // Only ext_terminate is allowed to bring the sender below the subsistence + // Only seal_terminate is allowed to bring the sender below the subsistence // threshold or even existential deposit. let existence_requirement = match (cause, origin) { (Terminate, _) => ExistenceRequirement::AllowDeath, diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 24e5ece5bb8..138c8e995a0 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -419,7 +419,7 @@ decl_error! { OutputBufferTooSmall, /// Performing the requested transfer would have brought the contract below /// the subsistence threshold. No transfer is allowed to do this in order to allow - /// for a tombstone to be created. Use `ext_terminate` to remove a contract without + /// for a tombstone to be created. Use `seal_terminate` to remove a contract without /// leaving a tombstone behind. BelowSubsistenceThreshold, /// The newly created contract is below the subsistence threshold after executing @@ -768,7 +768,7 @@ impl Config { /// Rent or any contract initiated balance transfer mechanism cannot make the balance lower /// than the subsistence threshold in order to guarantee that a tombstone is created. /// - /// The only way to completely kill a contract without a tombstone is calling `ext_terminate`. + /// The only way to completely kill a contract without a tombstone is calling `seal_terminate`. pub fn subsistence_threshold(&self) -> BalanceOf { self.existential_deposit.saturating_add(self.tombstone_deposit) } @@ -846,7 +846,7 @@ pub struct Schedule { /// Maximum allowed size of a declared table. pub max_table_size: u32, - /// Whether the `ext_println` function is allowed to be used contracts. 
+ /// Whether the `seal_println` function is allowed to be used contracts. /// MUST only be enabled for `dev` chains, NOT for production chains pub enable_println: bool, diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 37ded30a693..bd1242ff670 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -358,7 +358,7 @@ fn account_removal_does_not_remove_storage() { // // This does not remove the contract storage as we are not notified about a // account removal. This cannot happen in reality because a contract can only - // remove itself by `ext_terminate`. There is no external event that can remove + // remove itself by `seal_terminate`. There is no external event that can remove // the account appart from that. assert_ok!(Balances::transfer(Origin::signed(ALICE), BOB, 20)); @@ -1546,7 +1546,7 @@ fn cannot_self_destruct_in_constructor() { let _ = Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - // Fail to instantiate the BOB because the contructor calls ext_terminate. + // Fail to instantiate the BOB because the contructor calls seal_terminate. assert_err_ignore_postinfo!( Contracts::instantiate( Origin::signed(ALICE), diff --git a/frame/contracts/src/wasm/env_def/macros.rs b/frame/contracts/src/wasm/env_def/macros.rs index 335d35f1e78..2538f85fb73 100644 --- a/frame/contracts/src/wasm/env_def/macros.rs +++ b/frame/contracts/src/wasm/env_def/macros.rs @@ -123,8 +123,8 @@ macro_rules! unmarshall_then_body_then_marshall { #[macro_export] macro_rules! define_func { - ( < E: $ext_ty:tt > $name:ident ( $ctx: ident $(, $names:ident : $params:ty)*) $(-> $returns:ty)* => $body:tt ) => { - fn $name< E: $ext_ty >( + ( < E: $seal_ty:tt > $name:ident ( $ctx: ident $(, $names:ident : $params:ty)*) $(-> $returns:ty)* => $body:tt ) => { + fn $name< E: $seal_ty >( $ctx: &mut $crate::wasm::Runtime, args: &[sp_sandbox::Value], ) -> Result { @@ -142,9 +142,9 @@ macro_rules! define_func { #[macro_export] macro_rules! register_func { - ( $reg_cb:ident, < E: $ext_ty:tt > ; ) => {}; + ( $reg_cb:ident, < E: $seal_ty:tt > ; ) => {}; - ( $reg_cb:ident, < E: $ext_ty:tt > ; + ( $reg_cb:ident, < E: $seal_ty:tt > ; $name:ident ( $ctx:ident $( , $names:ident : $params:ty )* ) $( -> $returns:ty )* => $body:tt $($rest:tt)* ) => { @@ -152,12 +152,12 @@ macro_rules! register_func { stringify!($name).as_bytes(), { define_func!( - < E: $ext_ty > $name ( $ctx $(, $names : $params )* ) $( -> $returns )* => $body + < E: $seal_ty > $name ( $ctx $(, $names : $params )* ) $( -> $returns )* => $body ); $name:: } ); - register_func!( $reg_cb, < E: $ext_ty > ; $($rest)* ); + register_func!( $reg_cb, < E: $seal_ty > ; $($rest)* ); }; } @@ -169,7 +169,7 @@ macro_rules! register_func { /// It's up to the user of this macro to check signatures of wasm code to be executed /// and reject the code if any imported function has a mismatched signature. macro_rules! define_env { - ( $init_name:ident , < E: $ext_ty:tt > , + ( $init_name:ident , < E: $seal_ty:tt > , $( $name:ident ( $ctx:ident $( , $names:ident : $params:ty )* ) $( -> $returns:ty )* => $body:tt , )* ) => { @@ -185,7 +185,7 @@ macro_rules! 
define_env { impl $crate::wasm::env_def::FunctionImplProvider for $init_name { fn impls)>(f: &mut F) { - register_func!(f, < E: $ext_ty > ; $( $name ( $ctx $( , $names : $params )* ) $( -> $returns)* => $body )* ); + register_func!(f, < E: $seal_ty > ; $( $name ( $ctx $( , $names : $params )* ) $( -> $returns)* => $body )* ); } } }; @@ -255,7 +255,7 @@ mod tests { #[test] fn macro_define_func() { - define_func!( ext_gas (_ctx, amount: u32) => { + define_func!( seal_gas (_ctx, amount: u32) => { let amount = Gas::from(amount); if !amount.is_zero() { Ok(()) @@ -264,7 +264,7 @@ mod tests { } }); let _f: fn(&mut Runtime, &[sp_sandbox::Value]) - -> Result = ext_gas::; + -> Result = seal_gas::; } #[test] @@ -307,7 +307,7 @@ mod tests { use crate::wasm::env_def::ImportSatisfyCheck; define_env!(Env, , - ext_gas( _ctx, amount: u32 ) => { + seal_gas( _ctx, amount: u32 ) => { let amount = Gas::from(amount); if !amount.is_zero() { Ok(()) @@ -317,7 +317,7 @@ mod tests { }, ); - assert!(Env::can_satisfy(b"ext_gas", &FunctionType::new(vec![ValueType::I32], None))); + assert!(Env::can_satisfy(b"seal_gas", &FunctionType::new(vec![ValueType::I32], None))); assert!(!Env::can_satisfy(b"not_exists", &FunctionType::new(vec![], None))); } } diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 7f985e90b66..e74adfcf3ca 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -127,9 +127,9 @@ impl<'a, T: Trait> crate::exec::Vm for WasmVm<'a> { }); let mut imports = sp_sandbox::EnvironmentDefinitionBuilder::new(); - imports.add_memory("env", "memory", memory.clone()); + imports.add_memory(self::prepare::IMPORT_MODULE_MEMORY, "memory", memory.clone()); runtime::Env::impls(&mut |name, func_ptr| { - imports.add_host_func("env", name, func_ptr); + imports.add_host_func(self::prepare::IMPORT_MODULE_FN, name, func_ptr); }); let mut runtime = Runtime::new( @@ -477,17 +477,17 @@ mod tests { const CODE_TRANSFER: &str = r#" (module - ;; ext_transfer( + ;; seal_transfer( ;; account_ptr: u32, ;; account_len: u32, ;; value_ptr: u32, ;; value_len: u32, ;;) -> u32 - (import "env" "ext_transfer" (func $ext_transfer (param i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_transfer" (func $seal_transfer (param i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) (func (export "call") (drop - (call $ext_transfer + (call $seal_transfer (i32.const 4) ;; Pointer to "account" address. (i32.const 8) ;; Length of "account" address. (i32.const 12) ;; Pointer to the buffer with value to transfer @@ -530,7 +530,7 @@ mod tests { const CODE_CALL: &str = r#" (module - ;; ext_call( + ;; seal_call( ;; callee_ptr: u32, ;; callee_len: u32, ;; gas: u64, @@ -541,11 +541,11 @@ mod tests { ;; output_ptr: u32, ;; output_len_ptr: u32 ;;) -> u32 - (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) (func (export "call") (drop - (call $ext_call + (call $seal_call (i32.const 4) ;; Pointer to "callee" address. (i32.const 8) ;; Length of "callee" address. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. 
@@ -594,7 +594,7 @@ mod tests { const CODE_INSTANTIATE: &str = r#" (module - ;; ext_instantiate( + ;; seal_instantiate( ;; code_ptr: u32, ;; code_len: u32, ;; gas: u64, @@ -608,11 +608,11 @@ mod tests { ;; output_ptr: u32, ;; output_len_ptr: u32 ;; ) -> u32 - (import "env" "ext_instantiate" (func $ext_instantiate (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_instantiate" (func $seal_instantiate (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) (func (export "call") (drop - (call $ext_instantiate + (call $seal_instantiate (i32.const 16) ;; Pointer to `code_hash` (i32.const 32) ;; Length of `code_hash` (i64.const 0) ;; How much gas to devote for the execution. 0 = all. @@ -665,14 +665,14 @@ mod tests { const CODE_TERMINATE: &str = r#" (module - ;; ext_terminate( + ;; seal_terminate( ;; beneficiary_ptr: u32, ;; beneficiary_len: u32, ;; ) - (import "env" "ext_terminate" (func $ext_terminate (param i32 i32))) + (import "seal0" "seal_terminate" (func $seal_terminate (param i32 i32))) (import "env" "memory" (memory 1 1)) (func (export "call") - (call $ext_terminate + (call $seal_terminate (i32.const 4) ;; Pointer to "beneficiary" address. (i32.const 8) ;; Length of "beneficiary" address. ) @@ -706,7 +706,7 @@ mod tests { const CODE_TRANSFER_LIMITED_GAS: &str = r#" (module - ;; ext_call( + ;; seal_call( ;; callee_ptr: u32, ;; callee_len: u32, ;; gas: u64, @@ -717,11 +717,11 @@ mod tests { ;; output_ptr: u32, ;; output_len_ptr: u32 ;;) -> u32 - (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) (func (export "call") (drop - (call $ext_call + (call $seal_call (i32.const 4) ;; Pointer to "callee" address. (i32.const 8) ;; Length of "callee" address. (i64.const 228) ;; How much gas to devote for the execution. @@ -770,8 +770,8 @@ mod tests { const CODE_GET_STORAGE: &str = r#" (module - (import "env" "ext_get_storage" (func $ext_get_storage (param i32 i32 i32) (result i32))) - (import "env" "ext_return" (func $ext_return (param i32 i32 i32))) + (import "seal0" "seal_get_storage" (func $seal_get_storage (param i32 i32 i32) (result i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) ;; [0, 32) key for get storage @@ -800,7 +800,7 @@ mod tests { ;; Load a storage value into contract memory. (call $assert (i32.eq - (call $ext_get_storage + (call $seal_get_storage (i32.const 0) ;; The pointer to the storage key to fetch (i32.const 36) ;; Pointer to the output buffer (i32.const 32) ;; Pointer to the size of the buffer @@ -818,13 +818,13 @@ mod tests { ) ;; Return the contents of the buffer - (call $ext_return + (call $seal_return (i32.const 0) (i32.const 36) (get_local $buf_size) ) - ;; env:ext_return doesn't return, so this is effectively unreachable. + ;; env:seal_return doesn't return, so this is effectively unreachable. (unreachable) ) @@ -849,10 +849,10 @@ mod tests { assert_eq!(output, ExecReturnValue { flags: ReturnFlags::empty(), data: [0x22; 32].to_vec() }); } - /// calls `ext_caller` and compares the result with the constant 42. + /// calls `seal_caller` and compares the result with the constant 42. 
const CODE_CALLER: &str = r#" (module - (import "env" "ext_caller" (func $ext_caller (param i32 i32))) + (import "seal0" "seal_caller" (func $seal_caller (param i32 i32))) (import "env" "memory" (memory 1 1)) ;; size of our buffer is 32 bytes @@ -869,7 +869,7 @@ mod tests { (func (export "call") ;; fill the buffer with the caller. - (call $ext_caller (i32.const 0) (i32.const 32)) + (call $seal_caller (i32.const 0) (i32.const 32)) ;; assert len == 8 (call $assert @@ -902,10 +902,10 @@ mod tests { ).unwrap(); } - /// calls `ext_address` and compares the result with the constant 69. + /// calls `seal_address` and compares the result with the constant 69. const CODE_ADDRESS: &str = r#" (module - (import "env" "ext_address" (func $ext_address (param i32 i32))) + (import "seal0" "seal_address" (func $seal_address (param i32 i32))) (import "env" "memory" (memory 1 1)) ;; size of our buffer is 32 bytes @@ -922,7 +922,7 @@ mod tests { (func (export "call") ;; fill the buffer with the self address. - (call $ext_address (i32.const 0) (i32.const 32)) + (call $seal_address (i32.const 0) (i32.const 32)) ;; assert size == 8 (call $assert @@ -957,7 +957,7 @@ mod tests { const CODE_BALANCE: &str = r#" (module - (import "env" "ext_balance" (func $ext_balance (param i32 i32))) + (import "seal0" "seal_balance" (func $seal_balance (param i32 i32))) (import "env" "memory" (memory 1 1)) ;; size of our buffer is 32 bytes @@ -974,7 +974,7 @@ mod tests { (func (export "call") ;; This stores the balance in the buffer - (call $ext_balance (i32.const 0) (i32.const 32)) + (call $seal_balance (i32.const 0) (i32.const 32)) ;; assert len == 8 (call $assert @@ -1009,7 +1009,7 @@ mod tests { const CODE_GAS_PRICE: &str = r#" (module - (import "env" "ext_weight_to_fee" (func $ext_weight_to_fee (param i64 i32 i32))) + (import "seal0" "seal_weight_to_fee" (func $seal_weight_to_fee (param i64 i32 i32))) (import "env" "memory" (memory 1 1)) ;; size of our buffer is 32 bytes @@ -1026,7 +1026,7 @@ mod tests { (func (export "call") ;; This stores the gas price in the buffer - (call $ext_weight_to_fee (i64.const 2) (i32.const 0) (i32.const 32)) + (call $seal_weight_to_fee (i64.const 2) (i32.const 0) (i32.const 32)) ;; assert len == 8 (call $assert @@ -1061,8 +1061,8 @@ mod tests { const CODE_GAS_LEFT: &str = r#" (module - (import "env" "ext_gas_left" (func $ext_gas_left (param i32 i32))) - (import "env" "ext_return" (func $ext_return (param i32 i32 i32))) + (import "seal0" "seal_gas_left" (func $seal_gas_left (param i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) ;; size of our buffer is 32 bytes @@ -1079,7 +1079,7 @@ mod tests { (func (export "call") ;; This stores the gas left in the buffer - (call $ext_gas_left (i32.const 0) (i32.const 32)) + (call $seal_gas_left (i32.const 0) (i32.const 32)) ;; assert len == 8 (call $assert @@ -1090,7 +1090,7 @@ mod tests { ) ;; return gas left - (call $ext_return (i32.const 0) (i32.const 0) (i32.const 8)) + (call $seal_return (i32.const 0) (i32.const 0) (i32.const 8)) (unreachable) ) @@ -1116,7 +1116,7 @@ mod tests { const CODE_VALUE_TRANSFERRED: &str = r#" (module - (import "env" "ext_value_transferred" (func $ext_value_transferred (param i32 i32))) + (import "seal0" "seal_value_transferred" (func $seal_value_transferred (param i32 i32))) (import "env" "memory" (memory 1 1)) ;; size of our buffer is 32 bytes @@ -1133,7 +1133,7 @@ mod tests { (func (export "call") ;; This stores the value transferred in the buffer - (call 
$ext_value_transferred (i32.const 0) (i32.const 32)) + (call $seal_value_transferred (i32.const 0) (i32.const 32)) ;; assert len == 8 (call $assert @@ -1168,12 +1168,12 @@ mod tests { const CODE_RETURN_FROM_START_FN: &str = r#" (module - (import "env" "ext_return" (func $ext_return (param i32 i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) (start $start) (func $start - (call $ext_return + (call $seal_return (i32.const 0) (i32.const 8) (i32.const 4) @@ -1204,7 +1204,7 @@ mod tests { const CODE_TIMESTAMP_NOW: &str = r#" (module - (import "env" "ext_now" (func $ext_now (param i32 i32))) + (import "seal0" "seal_now" (func $seal_now (param i32 i32))) (import "env" "memory" (memory 1 1)) ;; size of our buffer is 32 bytes @@ -1221,7 +1221,7 @@ mod tests { (func (export "call") ;; This stores the block timestamp in the buffer - (call $ext_now (i32.const 0) (i32.const 32)) + (call $seal_now (i32.const 0) (i32.const 32)) ;; assert len == 8 (call $assert @@ -1256,7 +1256,7 @@ mod tests { const CODE_MINIMUM_BALANCE: &str = r#" (module - (import "env" "ext_minimum_balance" (func $ext_minimum_balance (param i32 i32))) + (import "seal0" "seal_minimum_balance" (func $seal_minimum_balance (param i32 i32))) (import "env" "memory" (memory 1 1)) ;; size of our buffer is 32 bytes @@ -1272,7 +1272,7 @@ mod tests { ) (func (export "call") - (call $ext_minimum_balance (i32.const 0) (i32.const 32)) + (call $seal_minimum_balance (i32.const 0) (i32.const 32)) ;; assert len == 8 (call $assert @@ -1307,7 +1307,7 @@ mod tests { const CODE_TOMBSTONE_DEPOSIT: &str = r#" (module - (import "env" "ext_tombstone_deposit" (func $ext_tombstone_deposit (param i32 i32))) + (import "seal0" "seal_tombstone_deposit" (func $seal_tombstone_deposit (param i32 i32))) (import "env" "memory" (memory 1 1)) ;; size of our buffer is 32 bytes @@ -1323,7 +1323,7 @@ mod tests { ) (func (export "call") - (call $ext_tombstone_deposit (i32.const 0) (i32.const 32)) + (call $seal_tombstone_deposit (i32.const 0) (i32.const 32)) ;; assert len == 8 (call $assert @@ -1358,8 +1358,8 @@ mod tests { const CODE_RANDOM: &str = r#" (module - (import "env" "ext_random" (func $ext_random (param i32 i32 i32 i32))) - (import "env" "ext_return" (func $ext_return (param i32 i32 i32))) + (import "seal0" "seal_random" (func $seal_random (param i32 i32 i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) ;; [0,128) is reserved for the result of PRNG. @@ -1384,7 +1384,7 @@ mod tests { (func (export "call") ;; This stores the block random seed in the buffer - (call $ext_random + (call $seal_random (i32.const 128) ;; Pointer in memory to the start of the subject buffer (i32.const 32) ;; The subject buffer's length (i32.const 0) ;; Pointer to the output buffer @@ -1400,7 +1400,7 @@ mod tests { ) ;; return the random data - (call $ext_return + (call $seal_return (i32.const 0) (i32.const 0) (i32.const 32) @@ -1433,11 +1433,11 @@ mod tests { const CODE_DEPOSIT_EVENT: &str = r#" (module - (import "env" "ext_deposit_event" (func $ext_deposit_event (param i32 i32 i32 i32))) + (import "seal0" "seal_deposit_event" (func $seal_deposit_event (param i32 i32 i32 i32))) (import "env" "memory" (memory 1 1)) (func (export "call") - (call $ext_deposit_event + (call $seal_deposit_event (i32.const 32) ;; Pointer to the start of topics buffer (i32.const 33) ;; The length of the topics buffer. 
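				;; (The host decodes this buffer as a SCALE-encoded Vec of topic hashes, so the
				;;  33 bytes here are presumably a 1-byte length prefix followed by one 32-byte topic.)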
(i32.const 8) ;; Pointer to the start of the data buffer @@ -1475,11 +1475,11 @@ mod tests { const CODE_DEPOSIT_EVENT_MAX_TOPICS: &str = r#" (module - (import "env" "ext_deposit_event" (func $ext_deposit_event (param i32 i32 i32 i32))) + (import "seal0" "seal_deposit_event" (func $seal_deposit_event (param i32 i32 i32 i32))) (import "env" "memory" (memory 1 1)) (func (export "call") - (call $ext_deposit_event + (call $seal_deposit_event (i32.const 32) ;; Pointer to the start of topics buffer (i32.const 161) ;; The length of the topics buffer. (i32.const 8) ;; Pointer to the start of the data buffer @@ -1521,11 +1521,11 @@ mod tests { const CODE_DEPOSIT_EVENT_DUPLICATES: &str = r#" (module - (import "env" "ext_deposit_event" (func $ext_deposit_event (param i32 i32 i32 i32))) + (import "seal0" "seal_deposit_event" (func $seal_deposit_event (param i32 i32 i32 i32))) (import "env" "memory" (memory 1 1)) (func (export "call") - (call $ext_deposit_event + (call $seal_deposit_event (i32.const 32) ;; Pointer to the start of topics buffer (i32.const 129) ;; The length of the topics buffer. (i32.const 8) ;; Pointer to the start of the data buffer @@ -1564,10 +1564,10 @@ mod tests { ); } - /// calls `ext_block_number` compares the result with the constant 121. + /// calls `seal_block_number` compares the result with the constant 121. const CODE_BLOCK_NUMBER: &str = r#" (module - (import "env" "ext_block_number" (func $ext_block_number (param i32 i32))) + (import "seal0" "seal_block_number" (func $seal_block_number (param i32 i32))) (import "env" "memory" (memory 1 1)) ;; size of our buffer is 32 bytes @@ -1584,7 +1584,7 @@ mod tests { (func (export "call") ;; This stores the block height in the buffer - (call $ext_block_number (i32.const 0) (i32.const 32)) + (call $seal_block_number (i32.const 0) (i32.const 32)) ;; assert len == 8 (call $assert @@ -1619,8 +1619,8 @@ mod tests { const CODE_RETURN_WITH_DATA: &str = r#" (module - (import "env" "ext_input" (func $ext_input (param i32 i32))) - (import "env" "ext_return" (func $ext_return (param i32 i32 i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) (data (i32.const 32) "\20") @@ -1633,13 +1633,13 @@ mod tests { ;; Call reads the first 4 bytes (LE) as the exit status and returns the rest as output data. (func $call (export "call") ;; Copy input data this contract memory. - (call $ext_input + (call $seal_input (i32.const 0) ;; Pointer where to store input (i32.const 32) ;; Pointer to the length of the buffer ) ;; Copy all but the first 4 bytes of the input data as the output data. - (call $ext_return + (call $seal_return (i32.load (i32.const 0)) (i32.const 4) (i32.sub (i32.load (i32.const 32)) (i32.const 4)) @@ -1650,7 +1650,7 @@ mod tests { "#; #[test] - fn ext_return_with_success_status() { + fn seal_return_with_success_status() { let output = execute( CODE_RETURN_WITH_DATA, hex!("00000000445566778899").to_vec(), @@ -1677,13 +1677,13 @@ mod tests { const CODE_OUT_OF_BOUNDS_ACCESS: &str = r#" (module - (import "env" "ext_terminate" (func $ext_terminate (param i32 i32))) + (import "seal0" "seal_terminate" (func $seal_terminate (param i32 i32))) (import "env" "memory" (memory 1 1)) (func (export "deploy")) (func (export "call") - (call $ext_terminate + (call $seal_terminate (i32.const 65536) ;; Pointer to "account" address (out of bound). (i32.const 8) ;; Length of "account" address. 
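				;; (The imported memory is a single 64 KiB page, i.e. valid offsets are 0..65535,
				;;  so the pointer above starts one byte past the end of linear memory.)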
) @@ -1712,13 +1712,13 @@ mod tests { const CODE_DECODE_FAILURE: &str = r#" (module - (import "env" "ext_terminate" (func $ext_terminate (param i32 i32))) + (import "seal0" "seal_terminate" (func $seal_terminate (param i32 i32))) (import "env" "memory" (memory 1 1)) (func (export "deploy")) (func (export "call") - (call $ext_terminate + (call $seal_terminate (i32.const 0) ;; Pointer to "account" address. (i32.const 4) ;; Length of "account" address (too small -> decode fail). ) diff --git a/frame/contracts/src/wasm/prepare.rs b/frame/contracts/src/wasm/prepare.rs index 2ffbe3bbdf6..97cb06fa260 100644 --- a/frame/contracts/src/wasm/prepare.rs +++ b/frame/contracts/src/wasm/prepare.rs @@ -28,6 +28,14 @@ use pwasm_utils::rules; use sp_std::prelude::*; use sp_runtime::traits::{SaturatedConversion}; +/// Currently, all imported functions must be located inside this module. We might support +/// additional modules for versioning later. +pub const IMPORT_MODULE_FN: &str = "seal0"; + +/// Imported memory must be located inside this module. The reason for that is that current +/// compiler toolchains might not support specifying other modules than "env" for memory imports. +pub const IMPORT_MODULE_MEMORY: &str = "env"; + struct ContractModule<'a> { /// A deserialized module. The module is valid (this is Guaranteed by `new` method). module: elements::Module, @@ -146,8 +154,11 @@ impl<'a> ContractModule<'a> { .with_grow_cost(self.schedule.grow_mem_cost.clone().saturated_into()) .with_forbidden_floats(); - let contract_module = pwasm_utils::inject_gas_counter(self.module, &gas_rules) - .map_err(|_| "gas instrumentation failed")?; + let contract_module = pwasm_utils::inject_gas_counter( + self.module, + &gas_rules, + IMPORT_MODULE_FN + ).map_err(|_| "gas instrumentation failed")?; Ok(ContractModule { module: contract_module, schedule: self.schedule, @@ -270,17 +281,19 @@ impl<'a> ContractModule<'a> { let mut imported_mem_type = None; for import in import_entries { - if import.module() != "env" { - // This import tries to import something from non-"env" module, - // but all imports are located in "env" at the moment. 
- return Err("module has imports from a non-'env' namespace"); - } - let type_idx = match import.external() { &External::Table(_) => return Err("Cannot import tables"), &External::Global(_) => return Err("Cannot import globals"), - &External::Function(ref type_idx) => type_idx, + &External::Function(ref type_idx) => { + if import.module() != IMPORT_MODULE_FN { + return Err("Invalid module for imported function"); + } + type_idx + }, &External::Memory(ref memory_type) => { + if import.module() != IMPORT_MODULE_MEMORY { + return Err("Invalid module for imported memory"); + } if import.field() != "memory" { return Err("Memory import must have the field name 'memory'") } @@ -296,10 +309,10 @@ impl<'a> ContractModule<'a> { .get(*type_idx as usize) .ok_or_else(|| "validation: import entry points to a non-existent type")?; - // We disallow importing `ext_println` unless debug features are enabled, + // We disallow importing `seal_println` unless debug features are enabled, // which should only be allowed on a dev chain - if !self.schedule.enable_println && import.field().as_bytes() == b"ext_println" { - return Err("module imports `ext_println` but debug features disabled"); + if !self.schedule.enable_println && import.field().as_bytes() == b"seal_println" { + return Err("module imports `seal_println` but debug features disabled"); } // We disallow importing `gas` function here since it is treated as implementation detail. @@ -409,7 +422,7 @@ mod tests { nop(_ctx, _unused: u64) => { unreachable!(); }, - ext_println(_ctx, _ptr: u32, _len: u32) => { unreachable!(); }, + seal_println(_ctx, _ptr: u32, _len: u32) => { unreachable!(); }, ); macro_rules! prepare_test { @@ -548,7 +561,7 @@ mod tests { prepare_test!(table_import, r#" (module - (import "env" "table" (table 1 anyfunc)) + (import "seal0" "table" (table 1 anyfunc)) (func (export "call")) (func (export "deploy")) @@ -560,7 +573,7 @@ mod tests { prepare_test!(global_import, r#" (module - (global $g (import "env" "global") i32) + (global $g (import "seal0" "global") i32) (func (export "call")) (func (export "deploy")) ) @@ -618,7 +631,7 @@ mod tests { prepare_test!(can_import_legit_function, r#" (module - (import "env" "nop" (func (param i64))) + (import "seal0" "nop" (func (param i64))) (func (export "call")) (func (export "deploy")) @@ -632,7 +645,7 @@ mod tests { prepare_test!(can_not_import_gas_function, r#" (module - (import "env" "gas" (func (param i32))) + (import "seal0" "gas" (func (param i32))) (func (export "call")) (func (export "deploy")) @@ -641,24 +654,63 @@ mod tests { Err("module imports a non-existent function") ); - // nothing can be imported from non-"env" module for now. 
- prepare_test!(non_env_import, + // memory is in "env" and not in "seal0" + prepare_test!(memory_not_in_seal0, + r#" + (module + (import "seal0" "memory" (memory 1 1)) + + (func (export "call")) + (func (export "deploy")) + ) + "#, + Err("Invalid module for imported memory") + ); + + // memory is in "env" and not in some arbitrary module + prepare_test!(memory_not_in_arbitrary_module, + r#" + (module + (import "any_module" "memory" (memory 1 1)) + + (func (export "call")) + (func (export "deploy")) + ) + "#, + Err("Invalid module for imported memory") + ); + + // functions are in "env" and not in "seal0" + prepare_test!(function_not_in_env, + r#" + (module + (import "env" "nop" (func (param i64))) + + (func (export "call")) + (func (export "deploy")) + ) + "#, + Err("Invalid module for imported function") + ); + + // functions are in "seal0" and not in in some arbitrary module + prepare_test!(function_not_arbitrary_module, r#" (module - (import "another_module" "memory" (memory 1 1)) + (import "any_module" "nop" (func (param i64))) (func (export "call")) (func (export "deploy")) ) "#, - Err("module has imports from a non-'env' namespace") + Err("Invalid module for imported function") ); // wrong signature prepare_test!(wrong_signature, r#" (module - (import "env" "gas" (func (param i64))) + (import "seal0" "gas" (func (param i64))) (func (export "call")) (func (export "deploy")) @@ -670,7 +722,7 @@ mod tests { prepare_test!(unknown_func_name, r#" (module - (import "env" "unknown_func" (func)) + (import "seal0" "unknown_func" (func)) (func (export "call")) (func (export "deploy")) @@ -679,24 +731,24 @@ mod tests { Err("module imports a non-existent function") ); - prepare_test!(ext_println_debug_disabled, + prepare_test!(seal_println_debug_disabled, r#" (module - (import "env" "ext_println" (func $ext_println (param i32 i32))) + (import "seal0" "seal_println" (func $seal_println (param i32 i32))) (func (export "call")) (func (export "deploy")) ) "#, - Err("module imports `ext_println` but debug features disabled") + Err("module imports `seal_println` but debug features disabled") ); #[test] - fn ext_println_debug_enabled() { + fn seal_println_debug_enabled() { let wasm = wat::parse_str( r#" (module - (import "env" "ext_println" (func $ext_println (param i32 i32))) + (import "seal0" "seal_println" (func $seal_println (param i32 i32))) (func (export "call")) (func (export "deploy")) @@ -745,7 +797,7 @@ mod tests { prepare_test!(try_sneak_export_as_entrypoint, r#" (module - (import "env" "panic" (func)) + (import "seal0" "panic" (func)) (func (export "deploy")) diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index ed97a4dae3c..806c956d292 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -87,7 +87,7 @@ impl From for ReturnCode { } } -/// The data passed through when a contract uses `ext_return`. +/// The data passed through when a contract uses `seal_return`. struct ReturnData { /// The flags as passed through by the contract. They are still unchecked and /// will later be parsed into a `ReturnFlags` bitflags struct. @@ -106,10 +106,10 @@ enum TrapReason { /// The supervisor trapped the contract because of an error condition occurred during /// execution in privileged code. SupervisorError(DispatchError), - /// Signals that trap was generated in response to call `ext_return` host function. + /// Signals that trap was generated in response to call `seal_return` host function. 
Return(ReturnData), /// Signals that a trap was generated in response to a successful call to the - /// `ext_terminate` host function. + /// `seal_terminate` host function. Termination, /// Signals that a trap was generated because of a successful restoration. Restoration, @@ -378,7 +378,7 @@ fn write_sandbox_memory( /// /// If `out_ptr` is set to the sentinel value of `u32::max_value()` and `allow_skip` is true the /// operation is skipped and `Ok` is returned. This is supposed to help callers to make copying -/// output optional. For example to skip copying back the output buffer of an `ext_call` +/// output optional. For example to skip copying back the output buffer of an `seal_call` /// when the caller is not interested in the result. /// /// In addition to the error conditions of `write_sandbox_memory` this functions returns @@ -538,7 +538,7 @@ define_env!(Env, , // // - If value length exceeds the configured maximum value length of a storage entry. // - Upon trying to set an empty storage entry (value length is 0). - ext_set_storage(ctx, key_ptr: u32, value_ptr: u32, value_len: u32) => { + seal_set_storage(ctx, key_ptr: u32, value_ptr: u32, value_len: u32) => { if value_len > ctx.ext.max_value_size() { // Bail out if value length exceeds the set maximum value size. return Err(sp_sandbox::HostError); @@ -555,7 +555,7 @@ define_env!(Env, , // # Parameters // // - `key_ptr`: pointer into the linear memory where the location to clear the value is placed. - ext_clear_storage(ctx, key_ptr: u32) => { + seal_clear_storage(ctx, key_ptr: u32) => { let mut key: StorageKey = [0; 32]; read_sandbox_memory_into_buf(ctx, key_ptr, &mut key)?; ctx.ext.set_storage(key, None); @@ -574,7 +574,7 @@ define_env!(Env, , // # Errors // // `ReturnCode::KeyNotFound` - ext_get_storage(ctx, key_ptr: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => { + seal_get_storage(ctx, key_ptr: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => { let mut key: StorageKey = [0; 32]; read_sandbox_memory_into_buf(ctx, key_ptr, &mut key)?; if let Some(value) = ctx.ext.get_storage(&key) { @@ -600,7 +600,7 @@ define_env!(Env, , // // `ReturnCode::BelowSubsistenceThreshold` // `ReturnCode::TransferFailed` - ext_transfer( + seal_transfer( ctx, account_ptr: u32, account_len: u32, @@ -647,7 +647,7 @@ define_env!(Env, , // `ReturnCode::BelowSubsistenceThreshold` // `ReturnCode::TransferFailed` // `ReturnCode::NotCallable` - ext_call( + seal_call( ctx, callee_ptr: u32, callee_len: u32, @@ -734,7 +734,7 @@ define_env!(Env, , // `ReturnCode::TransferFailed` // `ReturnCode::NewContractNotFunded` // `ReturnCode::CodeNotFound` - ext_instantiate( + seal_instantiate( ctx, code_hash_ptr: u32, code_hash_len: u32, @@ -798,7 +798,7 @@ define_env!(Env, , // # Traps // // - The contract is live i.e is already on the call stack. - ext_terminate( + seal_terminate( ctx, beneficiary_ptr: u32, beneficiary_len: u32 @@ -812,7 +812,7 @@ define_env!(Env, , Err(sp_sandbox::HostError) }, - ext_input(ctx, buf_ptr: u32, buf_len_ptr: u32) => { + seal_input(ctx, buf_ptr: u32, buf_len_ptr: u32) => { if let Some(input) = ctx.input_data.take() { write_sandbox_output(ctx, buf_ptr, buf_len_ptr, &input, false) } else { @@ -826,7 +826,7 @@ define_env!(Env, , // This is the only way to return a data buffer to the caller. 
Returning from // execution without calling this function is equivalent to calling: // ``` - // ext_return(0, 0, 0); + // seal_return(0, 0, 0); // ``` // // The flags argument is a bitfield that can be used to signal special return @@ -837,7 +837,7 @@ define_env!(Env, , // --- msb --- // // Using a reserved bit triggers a trap. - ext_return(ctx, flags: u32, data_ptr: u32, data_len: u32) => { + seal_return(ctx, flags: u32, data_ptr: u32, data_len: u32) => { charge_gas( ctx.gas_meter, ctx.schedule, @@ -866,7 +866,7 @@ define_env!(Env, , // If this is a top-level call (i.e. initiated by an extrinsic) the origin address of the // extrinsic will be returned. Otherwise, if this call is initiated by another contract then the // address of the contract will be returned. The value is encoded as T::AccountId. - ext_caller(ctx, out_ptr: u32, out_len_ptr: u32) => { + seal_caller(ctx, out_ptr: u32, out_len_ptr: u32) => { write_sandbox_output(ctx, out_ptr, out_len_ptr, &ctx.ext.caller().encode(), false) }, @@ -876,7 +876,7 @@ define_env!(Env, , // `out_len_ptr` must point to a u32 value that describes the available space at // `out_ptr`. This call overwrites it with the size of the value. If the available // space at `out_ptr` is less than the size of the value a trap is triggered. - ext_address(ctx, out_ptr: u32, out_len_ptr: u32) => { + seal_address(ctx, out_ptr: u32, out_len_ptr: u32) => { write_sandbox_output(ctx, out_ptr, out_len_ptr, &ctx.ext.address().encode(), false) }, @@ -893,7 +893,7 @@ define_env!(Env, , // // It is recommended to avoid specifying very small values for `gas` as the prices for a single // gas can be smaller than one. - ext_weight_to_fee(ctx, gas: u64, out_ptr: u32, out_len_ptr: u32) => { + seal_weight_to_fee(ctx, gas: u64, out_ptr: u32, out_len_ptr: u32) => { write_sandbox_output( ctx, out_ptr, out_len_ptr, &ctx.ext.get_weight_price(gas).encode(), false ) @@ -907,7 +907,7 @@ define_env!(Env, , // space at `out_ptr` is less than the size of the value a trap is triggered. // // The data is encoded as Gas. - ext_gas_left(ctx, out_ptr: u32, out_len_ptr: u32) => { + seal_gas_left(ctx, out_ptr: u32, out_len_ptr: u32) => { write_sandbox_output(ctx, out_ptr, out_len_ptr, &ctx.gas_meter.gas_left().encode(), false) }, @@ -919,7 +919,7 @@ define_env!(Env, , // space at `out_ptr` is less than the size of the value a trap is triggered. // // The data is encoded as T::Balance. - ext_balance(ctx, out_ptr: u32, out_len_ptr: u32) => { + seal_balance(ctx, out_ptr: u32, out_len_ptr: u32) => { write_sandbox_output(ctx, out_ptr, out_len_ptr, &ctx.ext.balance().encode(), false) }, @@ -931,7 +931,7 @@ define_env!(Env, , // space at `out_ptr` is less than the size of the value a trap is triggered. // // The data is encoded as T::Balance. - ext_value_transferred(ctx, out_ptr: u32, out_len_ptr: u32) => { + seal_value_transferred(ctx, out_ptr: u32, out_len_ptr: u32) => { write_sandbox_output( ctx, out_ptr, out_len_ptr, &ctx.ext.value_transferred().encode(), false ) @@ -945,7 +945,7 @@ define_env!(Env, , // space at `out_ptr` is less than the size of the value a trap is triggered. // // The data is encoded as T::Hash. - ext_random(ctx, subject_ptr: u32, subject_len: u32, out_ptr: u32, out_len_ptr: u32) => { + seal_random(ctx, subject_ptr: u32, subject_len: u32, out_ptr: u32, out_len_ptr: u32) => { // The length of a subject can't exceed `max_subject_len`. 
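		// Exceeding it makes the contract trap: the host bails out with `HostError` below
		// instead of truncating the subject.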
if subject_len > ctx.schedule.max_subject_len { return Err(sp_sandbox::HostError); @@ -962,14 +962,14 @@ define_env!(Env, , // `out_len_ptr` must point to a u32 value that describes the available space at // `out_ptr`. This call overwrites it with the size of the value. If the available // space at `out_ptr` is less than the size of the value a trap is triggered. - ext_now(ctx, out_ptr: u32, out_len_ptr: u32) => { + seal_now(ctx, out_ptr: u32, out_len_ptr: u32) => { write_sandbox_output(ctx, out_ptr, out_len_ptr, &ctx.ext.now().encode(), false) }, // Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. // // The data is encoded as T::Balance. - ext_minimum_balance(ctx, out_ptr: u32, out_len_ptr: u32) => { + seal_minimum_balance(ctx, out_ptr: u32, out_len_ptr: u32) => { write_sandbox_output(ctx, out_ptr, out_len_ptr, &ctx.ext.minimum_balance().encode(), false) }, @@ -988,7 +988,7 @@ define_env!(Env, , // a contract to leave a tombstone the balance of the contract must not go // below the sum of existential deposit and the tombstone deposit. The sum // is commonly referred as subsistence threshold in code. - ext_tombstone_deposit(ctx, out_ptr: u32, out_len_ptr: u32) => { + seal_tombstone_deposit(ctx, out_ptr: u32, out_len_ptr: u32) => { write_sandbox_output( ctx, out_ptr, out_len_ptr, &ctx.ext.tombstone_deposit().encode(), false ) @@ -1020,7 +1020,7 @@ define_env!(Env, , // // - Tombstone hashes do not match // - Calling cantract is live i.e is already on the call stack. - ext_restore_to( + seal_restore_to( ctx, dest_ptr: u32, dest_len: u32, @@ -1077,7 +1077,7 @@ define_env!(Env, , // - topics_len - the length of the topics buffer. Pass 0 if you want to pass an empty vector. // - data_ptr - a pointer to a raw data buffer which will saved along the event. // - data_len - the length of the data buffer. - ext_deposit_event(ctx, topics_ptr: u32, topics_len: u32, data_ptr: u32, data_len: u32) => { + seal_deposit_event(ctx, topics_ptr: u32, topics_len: u32, data_ptr: u32, data_len: u32) => { let mut topics: Vec::::T>> = match topics_len { 0 => Vec::new(), _ => read_sandbox_memory_as(ctx, topics_ptr, topics_len)?, @@ -1111,7 +1111,7 @@ define_env!(Env, , // - value_ptr: a pointer to the buffer with value, how much to allow for rent // Should be decodable as a `T::Balance`. Traps otherwise. // - value_len: length of the value buffer. - ext_set_rent_allowance(ctx, value_ptr: u32, value_len: u32) => { + seal_set_rent_allowance(ctx, value_ptr: u32, value_len: u32) => { let value: BalanceOf<::T> = read_sandbox_memory_as(ctx, value_ptr, value_len)?; ctx.ext.set_rent_allowance(value); @@ -1127,14 +1127,14 @@ define_env!(Env, , // space at `out_ptr` is less than the size of the value a trap is triggered. // // The data is encoded as T::Balance. - ext_rent_allowance(ctx, out_ptr: u32, out_len_ptr: u32) => { + seal_rent_allowance(ctx, out_ptr: u32, out_len_ptr: u32) => { write_sandbox_output(ctx, out_ptr, out_len_ptr, &ctx.ext.rent_allowance().encode(), false) }, // Prints utf8 encoded string from the data buffer. // Only available on `--dev` chains. // This function may be removed at any time, superseded by a more general contract debugging feature. 
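	// Importing it is rejected during module preparation unless `Schedule::enable_println`
	// is set (see the `seal_println` check in `prepare.rs` within this patch), so contracts
	// on production chains cannot use it.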
- ext_println(ctx, str_ptr: u32, str_len: u32) => { + seal_println(ctx, str_ptr: u32, str_len: u32) => { let data = read_sandbox_memory(ctx, str_ptr, str_len)?; if let Ok(utf8) = core::str::from_utf8(&data) { sp_runtime::print(utf8); @@ -1148,7 +1148,7 @@ define_env!(Env, , // `out_len_ptr` must point to a u32 value that describes the available space at // `out_ptr`. This call overwrites it with the size of the value. If the available // space at `out_ptr` is less than the size of the value a trap is triggered. - ext_block_number(ctx, out_ptr: u32, out_len_ptr: u32) => { + seal_block_number(ctx, out_ptr: u32, out_len_ptr: u32) => { write_sandbox_output(ctx, out_ptr, out_len_ptr, &ctx.ext.block_number().encode(), false) }, @@ -1172,7 +1172,7 @@ define_env!(Env, , // - `output_ptr`: the pointer into the linear memory where the output // data is placed. The function will write the result // directly into this buffer. - ext_hash_sha2_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { + seal_hash_sha2_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { compute_hash_on_intermediate_buffer(ctx, sha2_256, input_ptr, input_len, output_ptr) }, @@ -1196,7 +1196,7 @@ define_env!(Env, , // - `output_ptr`: the pointer into the linear memory where the output // data is placed. The function will write the result // directly into this buffer. - ext_hash_keccak_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { + seal_hash_keccak_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { compute_hash_on_intermediate_buffer(ctx, keccak_256, input_ptr, input_len, output_ptr) }, @@ -1220,7 +1220,7 @@ define_env!(Env, , // - `output_ptr`: the pointer into the linear memory where the output // data is placed. The function will write the result // directly into this buffer. - ext_hash_blake2_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { + seal_hash_blake2_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { compute_hash_on_intermediate_buffer(ctx, blake2_256, input_ptr, input_len, output_ptr) }, @@ -1244,7 +1244,7 @@ define_env!(Env, , // - `output_ptr`: the pointer into the linear memory where the output // data is placed. The function will write the result // directly into this buffer. 
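	//
	// Unlike the three 256-bit host functions above, which each write a 32 byte digest,
	// this function writes a 16 byte digest into `output_ptr`.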
- ext_hash_blake2_128(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { + seal_hash_blake2_128(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { compute_hash_on_intermediate_buffer(ctx, blake2_128, input_ptr, input_len, output_ptr) }, ); -- GitLab From 38a974ca64dc3dbfde04dba2aa4e44a4333a3261 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Mon, 10 Aug 2020 22:52:39 +0100 Subject: [PATCH 073/132] grandpa: log errors and grandpa unexpected conclusion (#6867) --- client/finality-grandpa/src/lib.rs | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index a586dc8e31f..ab84591f9cd 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -60,7 +60,7 @@ use futures::{ prelude::*, StreamExt, }; -use log::{debug, info}; +use log::{debug, error, info}; use sc_client_api::{ backend::{AuxStore, Backend}, LockImportRun, BlockchainEvents, CallExecutor, @@ -787,8 +787,12 @@ pub fn run_grandpa_voter( justification_sender, ); - let voter_work = voter_work - .map(|_| ()); + let voter_work = voter_work.map(|res| match res { + Ok(()) => error!(target: "afg", + "GRANDPA voter future has concluded naturally, this should be unreachable." + ), + Err(e) => error!(target: "afg", "GRANDPA voter error: {:?}", e), + }); // Make sure that `telemetry_task` doesn't accidentally finish and kill grandpa. let telemetry_task = telemetry_task @@ -1052,7 +1056,9 @@ where Poll::Pending => {} Poll::Ready(Ok(())) => { // voters don't conclude naturally - return Poll::Ready(Err(Error::Safety("GRANDPA voter has concluded.".into()))) + return Poll::Ready( + Err(Error::Safety("finality-grandpa inner voter has concluded.".into())) + ) } Poll::Ready(Err(CommandOrError::Error(e))) => { // return inner observer error @@ -1069,7 +1075,9 @@ where Poll::Pending => {} Poll::Ready(None) => { // the `voter_commands_rx` stream should never conclude since it's never closed. - return Poll::Ready(Ok(())) + return Poll::Ready( + Err(Error::Safety("`voter_commands_rx` was closed.".into())) + ) } Poll::Ready(Some(command)) => { // some command issued externally -- GitLab From 0c8329c71c241cce09feb0372ee5d5d4a161d11a Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Tue, 11 Aug 2020 12:33:30 +0200 Subject: [PATCH 074/132] Custom Codec Implenetation for NPoS Election (#6720) * Fancy compact encode/decode impl for compact solution * Make it optional * Remove extra file * Update primitives/npos-elections/compact/src/lib.rs Co-authored-by: Guillaume Thiolliere * Final fixes. 
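The `#[compact]` mode exists because the plain derived encoding of the solution struct stores lots of small numbers in fixed-width form ("many 0s", as the updated doc comment later in this patch puts it). A minimal sketch of the saving, not taken from this patch, using SCALE's `Compact` wrapper directly (assuming the crate is imported as `codec`, as elsewhere in this tree):

```rust
// Illustration only: fixed-width vs. compact SCALE encoding of one small index.
use codec::{Compact, Encode};

fn main() {
    // A small voter index as a plain u32 costs four bytes...
    assert_eq!(2u32.encode(), vec![2u8, 0, 0, 0]);
    // ...while the compact form fits in a single byte (single-byte mode, value << 2).
    assert_eq!(Compact(2u32).encode(), vec![0x08u8]);
}
```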
Co-authored-by: Guillaume Thiolliere --- frame/staking/src/lib.rs | 35 +- .../fuzzer/src/per_thing_rational.rs | 1 - .../npos-elections/compact/src/assignment.rs | 82 ++-- .../npos-elections/compact/src/codec.rs | 203 ++++++++++ primitives/npos-elections/compact/src/lib.rs | 237 +++++++----- .../npos-elections/compact/src/staked.rs | 212 ----------- primitives/npos-elections/src/lib.rs | 16 +- primitives/npos-elections/src/tests.rs | 359 ++++++------------ 8 files changed, 527 insertions(+), 618 deletions(-) create mode 100644 primitives/npos-elections/compact/src/codec.rs delete mode 100644 primitives/npos-elections/compact/src/staked.rs diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 3e89df78414..33945c8cbdc 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -326,7 +326,7 @@ use frame_system::{ }; use sp_npos_elections::{ ExtendedBalance, Assignment, ElectionScore, ElectionResult as PrimitiveElectionResult, - build_support_map, evaluate_support, seq_phragmen, generate_compact_solution_type, + build_support_map, evaluate_support, seq_phragmen, generate_solution_type, is_score_better, VotingLimit, SupportMap, VoteWeight, }; @@ -368,19 +368,10 @@ pub type EraIndex = u32; pub type RewardPoint = u32; // Note: Maximum nomination limit is set here -- 16. -generate_compact_solution_type!(pub GenericCompactAssignments, 16); - -/// Information regarding the active era (era in used in session). -#[derive(Encode, Decode, RuntimeDebug)] -pub struct ActiveEraInfo { - /// Index of era. - pub index: EraIndex, - /// Moment of start expressed as millisecond from `$UNIX_EPOCH`. - /// - /// Start can be none if start hasn't been set for the era yet, - /// Start is set on the first on_finalize of the era to guarantee usage of `Time`. - start: Option, -} +generate_solution_type!( + #[compact] + pub struct CompactAssignments::(16) +); /// Accuracy used for on-chain election. pub type ChainAccuracy = Perbill; @@ -392,15 +383,23 @@ pub type OffchainAccuracy = PerU16; pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -/// The compact type for election solutions. -pub type CompactAssignments = - GenericCompactAssignments; - type PositiveImbalanceOf = <::Currency as Currency<::AccountId>>::PositiveImbalance; type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +/// Information regarding the active era (era in used in session). +#[derive(Encode, Decode, RuntimeDebug)] +pub struct ActiveEraInfo { + /// Index of era. + pub index: EraIndex, + /// Moment of start expressed as millisecond from `$UNIX_EPOCH`. + /// + /// Start can be none if start hasn't been set for the era yet, + /// Start is set on the first on_finalize of the era to guarantee usage of `Time`. + start: Option, +} + /// Reward points of an era. Used to split era total payout between validators. /// /// This points will be used to reward validators and their respective nominators. 
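In the staking hunk above, note the split between `ChainAccuracy = Perbill` for the on-chain election and `OffchainAccuracy = PerU16` for the off-chain election. A rough sketch of what that buys in solution size, assuming both types are simple newtypes over `u32` and `u16` respectively:

```rust
// Sketch, not part of the patch: every per-edge ratio in an off-chain solution is a
// PerU16 (2 bytes of parts) rather than a Perbill (4 bytes of parts).
use sp_arithmetic::{PerU16, Perbill};

fn main() {
    assert_eq!(core::mem::size_of::<Perbill>(), 4);
    assert_eq!(core::mem::size_of::<PerU16>(), 2);
}
```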
diff --git a/primitives/arithmetic/fuzzer/src/per_thing_rational.rs b/primitives/arithmetic/fuzzer/src/per_thing_rational.rs index fc22eacc9e4..8ddbd0c6d59 100644 --- a/primitives/arithmetic/fuzzer/src/per_thing_rational.rs +++ b/primitives/arithmetic/fuzzer/src/per_thing_rational.rs @@ -118,6 +118,5 @@ fn assert_per_thing_equal_error(a: P, b: P, err: u128) { let a_abs = a.deconstruct().saturated_into::(); let b_abs = b.deconstruct().saturated_into::(); let diff = a_abs.max(b_abs) - a_abs.min(b_abs); - dbg!(&diff); assert!(diff <= err, "{:?} !~ {:?}", a, b); } diff --git a/primitives/npos-elections/compact/src/assignment.rs b/primitives/npos-elections/compact/src/assignment.rs index 96c68ece92a..218cd4f95a7 100644 --- a/primitives/npos-elections/compact/src/assignment.rs +++ b/primitives/npos-elections/compact/src/assignment.rs @@ -15,11 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Code generation for the ratio assignment type. +//! Code generation for the ratio assignment type' compact representation. use crate::field_name_for; use proc_macro2::TokenStream as TokenStream2; -use syn::GenericArgument; use quote::quote; fn from_impl(count: usize) -> TokenStream2 { @@ -27,8 +26,8 @@ fn from_impl(count: usize) -> TokenStream2 { let name = field_name_for(1); quote!(1 => compact.#name.push( ( - index_of_voter(&who).ok_or(_phragmen::Error::CompactInvalidIndex)?, - index_of_target(&distribution[0].0).ok_or(_phragmen::Error::CompactInvalidIndex)?, + index_of_voter(&who).or_invalid_index()?, + index_of_target(&distribution[0].0).or_invalid_index()?, ) ),) }; @@ -37,29 +36,29 @@ fn from_impl(count: usize) -> TokenStream2 { let name = field_name_for(2); quote!(2 => compact.#name.push( ( - index_of_voter(&who).ok_or(_phragmen::Error::CompactInvalidIndex)?, + index_of_voter(&who).or_invalid_index()?, ( - index_of_target(&distribution[0].0).ok_or(_phragmen::Error::CompactInvalidIndex)?, + index_of_target(&distribution[0].0).or_invalid_index()?, distribution[0].1, ), - index_of_target(&distribution[1].0).ok_or(_phragmen::Error::CompactInvalidIndex)?, + index_of_target(&distribution[1].0).or_invalid_index()?, ) ),) }; let from_impl_rest = (3..=count).map(|c| { let inner = (0..c-1).map(|i| - quote!((index_of_target(&distribution[#i].0).ok_or(_phragmen::Error::CompactInvalidIndex)?, distribution[#i].1),) + quote!((index_of_target(&distribution[#i].0).or_invalid_index()?, distribution[#i].1),) ).collect::(); let field_name = field_name_for(c); let last_index = c - 1; - let last = quote!(index_of_target(&distribution[#last_index].0).ok_or(_phragmen::Error::CompactInvalidIndex)?); + let last = quote!(index_of_target(&distribution[#last_index].0).or_invalid_index()?); quote!( #c => compact.#field_name.push( ( - index_of_voter(&who).ok_or(_phragmen::Error::CompactInvalidIndex)?, + index_of_voter(&who).or_invalid_index()?, [#inner], #last, ) @@ -74,15 +73,15 @@ fn from_impl(count: usize) -> TokenStream2 { ) } -fn into_impl(count: usize) -> TokenStream2 { +fn into_impl(count: usize, per_thing: syn::Type) -> TokenStream2 { let into_impl_single = { let name = field_name_for(1); quote!( for (voter_index, target_index) in self.#name { assignments.push(_phragmen::Assignment { - who: voter_at(voter_index).ok_or(_phragmen::Error::CompactInvalidIndex)?, + who: voter_at(voter_index).or_invalid_index()?, distribution: vec![ - (target_at(target_index).ok_or(_phragmen::Error::CompactInvalidIndex)?, Accuracy::one()) + 
(target_at(target_index).or_invalid_index()?, #per_thing::one()) ], }) } @@ -93,21 +92,21 @@ fn into_impl(count: usize) -> TokenStream2 { let name = field_name_for(2); quote!( for (voter_index, (t1_idx, p1), t2_idx) in self.#name { - if p1 >= Accuracy::one() { + if p1 >= #per_thing::one() { return Err(_phragmen::Error::CompactStakeOverflow); } // defensive only. Since Percent doesn't have `Sub`. let p2 = _phragmen::sp_arithmetic::traits::Saturating::saturating_sub( - Accuracy::one(), + #per_thing::one(), p1, ); assignments.push( _phragmen::Assignment { - who: voter_at(voter_index).ok_or(_phragmen::Error::CompactInvalidIndex)?, + who: voter_at(voter_index).or_invalid_index()?, distribution: vec![ - (target_at(t1_idx).ok_or(_phragmen::Error::CompactInvalidIndex)?, p1), - (target_at(t2_idx).ok_or(_phragmen::Error::CompactInvalidIndex)?, p2), + (target_at(t1_idx).or_invalid_index()?, p1), + (target_at(t2_idx).or_invalid_index()?, p2), ] }); } @@ -118,30 +117,30 @@ fn into_impl(count: usize) -> TokenStream2 { let name = field_name_for(c); quote!( for (voter_index, inners, t_last_idx) in self.#name { - let mut sum = Accuracy::zero(); + let mut sum = #per_thing::zero(); let mut inners_parsed = inners .iter() .map(|(ref t_idx, p)| { sum = _phragmen::sp_arithmetic::traits::Saturating::saturating_add(sum, *p); - let target = target_at(*t_idx).ok_or(_phragmen::Error::CompactInvalidIndex)?; + let target = target_at(*t_idx).or_invalid_index()?; Ok((target, *p)) }) - .collect::, _phragmen::Error>>()?; + .collect::, _phragmen::Error>>()?; - if sum >= Accuracy::one() { + if sum >= #per_thing::one() { return Err(_phragmen::Error::CompactStakeOverflow); } // defensive only. Since Percent doesn't have `Sub`. let p_last = _phragmen::sp_arithmetic::traits::Saturating::saturating_sub( - Accuracy::one(), + #per_thing::one(), sum, ); - inners_parsed.push((target_at(t_last_idx).ok_or(_phragmen::Error::CompactInvalidIndex)?, p_last)); + inners_parsed.push((target_at(t_last_idx).or_invalid_index()?, p_last)); assignments.push(_phragmen::Assignment { - who: voter_at(voter_index).ok_or(_phragmen::Error::CompactInvalidIndex)?, + who: voter_at(voter_index).or_invalid_index()?, distribution: inners_parsed, }); } @@ -157,39 +156,28 @@ fn into_impl(count: usize) -> TokenStream2 { pub(crate) fn assignment( ident: syn::Ident, - voter_type: GenericArgument, - target_type: GenericArgument, + voter_type: syn::Type, + target_type: syn::Type, + weight_type: syn::Type, count: usize, ) -> TokenStream2 { - let from_impl = from_impl(count); - let into_impl = into_impl(count); + let into_impl = into_impl(count, weight_type.clone()); quote!( - impl< - #voter_type: _phragmen::codec::Codec + Default + Copy, - #target_type: _phragmen::codec::Codec + Default + Copy, - Accuracy: - _phragmen::codec::Codec + Default + Clone + _phragmen::sp_arithmetic::PerThing + - PartialOrd, - > - #ident<#voter_type, #target_type, Accuracy> - { + use _phragmen::__OrInvalidIndex; + impl #ident { pub fn from_assignment( - assignments: Vec<_phragmen::Assignment>, + assignments: Vec<_phragmen::Assignment>, index_of_voter: FV, index_of_target: FT, ) -> Result where + A: _phragmen::IdentifierT, for<'r> FV: Fn(&'r A) -> Option<#voter_type>, for<'r> FT: Fn(&'r A) -> Option<#target_type>, - A: _phragmen::IdentifierT, { - let mut compact: #ident< - #voter_type, - #target_type, - Accuracy, - > = Default::default(); + let mut compact: #ident = Default::default(); for _phragmen::Assignment { who, distribution } in assignments { match distribution.len() { @@ -207,8 +195,8 
@@ pub(crate) fn assignment( self, voter_at: impl Fn(#voter_type) -> Option, target_at: impl Fn(#target_type) -> Option, - ) -> Result>, _phragmen::Error> { - let mut assignments: Vec<_phragmen::Assignment> = Default::default(); + ) -> Result>, _phragmen::Error> { + let mut assignments: Vec<_phragmen::Assignment> = Default::default(); #into_impl Ok(assignments) } diff --git a/primitives/npos-elections/compact/src/codec.rs b/primitives/npos-elections/compact/src/codec.rs new file mode 100644 index 00000000000..0a475bdddcd --- /dev/null +++ b/primitives/npos-elections/compact/src/codec.rs @@ -0,0 +1,203 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Code generation for the ratio assignment type' encode/decode impl. + +use crate::field_name_for; +use proc_macro2::TokenStream as TokenStream2; +use quote::quote; + +pub(crate) fn codec_impl( + ident: syn::Ident, + voter_type: syn::Type, + target_type: syn::Type, + weight_type: syn::Type, + count: usize, +) -> TokenStream2 { + let encode = encode_impl(ident.clone(), count); + let decode = decode_impl(ident, voter_type, target_type, weight_type, count); + + quote! { + #encode + #decode + } +} + +fn decode_impl( + ident: syn::Ident, + voter_type: syn::Type, + target_type: syn::Type, + weight_type: syn::Type, + count: usize, +) -> TokenStream2 { + let decode_impl_single = { + let name = field_name_for(1); + quote! { + let #name = + < + Vec<(_phragmen::codec::Compact<#voter_type>, _phragmen::codec::Compact<#target_type>)> + as + _phragmen::codec::Decode + >::decode(value)?; + let #name = #name + .into_iter() + .map(|(v, t)| (v.0, t.0)) + .collect::>(); + } + }; + + let decode_impl_double = { + let name = field_name_for(2); + quote! { + let #name = + < + Vec<( + _phragmen::codec::Compact<#voter_type>, + (_phragmen::codec::Compact<#target_type>, _phragmen::codec::Compact<#weight_type>), + _phragmen::codec::Compact<#target_type>, + )> + as + _phragmen::codec::Decode + >::decode(value)?; + let #name = #name + .into_iter() + .map(|(v, (t1, w), t2)| (v.0, (t1.0, w.0), t2.0)) + .collect::>(); + } + }; + + let decode_impl_rest = (3..=count).map(|c| { + let name = field_name_for(c); + + let inner_impl = (0..c-1).map(|i| + quote! { ( (inner[#i].0).0, (inner[#i].1).0 ), } + ).collect::(); + + quote! { + let #name = + < + Vec<( + _phragmen::codec::Compact<#voter_type>, + [(_phragmen::codec::Compact<#target_type>, _phragmen::codec::Compact<#weight_type>); #c-1], + _phragmen::codec::Compact<#target_type>, + )> + as _phragmen::codec::Decode + >::decode(value)?; + let #name = #name + .into_iter() + .map(|(v, inner, t_last)| ( + v.0, + [ #inner_impl ], + t_last.0, + )) + .collect::>(); + } + }).collect::(); + + + let all_field_names = (1..=count).map(|c| { + let name = field_name_for(c); + quote! 
{ #name, } + }).collect::(); + + quote!( + impl _phragmen::codec::Decode for #ident { + fn decode(value: &mut I) -> Result { + #decode_impl_single + #decode_impl_double + #decode_impl_rest + + // The above code generates variables with the decoded value with the same name as + // filed names of the struct, i.e. `let votes4 = decode_value_of_votes4`. All we + // have to do is collect them into the main struct now. + Ok(#ident { #all_field_names }) + } + } + ) +} + +// General attitude is that we will convert inner values to `Compact` and then use the normal +// `Encode` implementation. +fn encode_impl(ident: syn::Ident, count: usize) -> TokenStream2 { + let encode_impl_single = { + let name = field_name_for(1); + quote! { + let #name = self.#name + .iter() + .map(|(v, t)| ( + _phragmen::codec::Compact(v.clone()), + _phragmen::codec::Compact(t.clone()), + )) + .collect::>(); + #name.encode_to(&mut r); + } + }; + + let encode_impl_double = { + let name = field_name_for(2); + quote! { + let #name = self.#name + .iter() + .map(|(v, (t1, w), t2)| ( + _phragmen::codec::Compact(v.clone()), + ( + _phragmen::codec::Compact(t1.clone()), + _phragmen::codec::Compact(w.clone()) + ), + _phragmen::codec::Compact(t2.clone()), + )) + .collect::>(); + #name.encode_to(&mut r); + } + }; + + let encode_impl_rest = (3..=count).map(|c| { + let name = field_name_for(c); + + // we use the knowledge of the length to avoid copy_from_slice. + let inners_compact_array = (0..c-1).map(|i| + quote!{( + _phragmen::codec::Compact(inner[#i].0.clone()), + _phragmen::codec::Compact(inner[#i].1.clone()), + ),} + ).collect::(); + + quote! { + let #name = self.#name + .iter() + .map(|(v, inner, t_last)| ( + _phragmen::codec::Compact(v.clone()), + [ #inners_compact_array ], + _phragmen::codec::Compact(t_last.clone()), + )) + .collect::>(); + #name.encode_to(&mut r); + } + }).collect::(); + + quote!( + impl _phragmen::codec::Encode for #ident { + fn encode(&self) -> Vec { + let mut r = vec![]; + #encode_impl_single + #encode_impl_double + #encode_impl_rest + r + } + } + ) +} diff --git a/primitives/npos-elections/compact/src/lib.rs b/primitives/npos-elections/compact/src/lib.rs index 1b88ff65310..2852bdef250 100644 --- a/primitives/npos-elections/compact/src/lib.rs +++ b/primitives/npos-elections/compact/src/lib.rs @@ -21,103 +21,92 @@ use proc_macro::TokenStream; use proc_macro2::{TokenStream as TokenStream2, Span, Ident}; use proc_macro_crate::crate_name; use quote::quote; -use syn::{GenericArgument, Type, parse::{Parse, ParseStream, Result}}; +use syn::{parse::{Parse, ParseStream, Result}}; mod assignment; -mod staked; +mod codec; // prefix used for struct fields in compact. const PREFIX: &'static str = "votes"; -/// Generates a struct to store the election assignments in a compact way. The struct can only store -/// distributions up to the given input count. The given count must be greater than 2. +pub(crate) fn syn_err(message: &'static str) -> syn::Error { + syn::Error::new(Span::call_site(), message) +} + +/// Generates a struct to store the election result in a small way. This can encode a structure +/// which is the equivalent of a `sp_npos_elections::Assignment<_>`. /// -/// ```ignore -/// // generate a struct with nominator and edge weight u128, with maximum supported -/// // edge per voter of 16. -/// generate_compact_solution_type(pub TestCompact, 16) -/// ``` +/// The following data types can be configured by the macro. +/// +/// - The identifier of the voter. 
This can be any type that supports `parity-scale-codec`'s compact +/// encoding. +/// - The identifier of the target. This can be any type that supports `parity-scale-codec`'s +/// compact encoding. +/// - The accuracy of the ratios. This must be one of the `PerThing` types defined in +/// `sp-arithmetic`. +/// +/// Moreover, the maximum number of edges per voter (distribution per assignment) also need to be +/// specified. Attempting to convert from/to an assignment with more distributions will fail. /// -/// This generates: +/// +/// For example, the following generates a public struct with name `TestSolution` with `u16` voter +/// type, `u8` target type and `Perbill` accuracy with maximum of 8 edges per voter. /// /// ```ignore -/// pub struct TestCompact { -/// votes1: Vec<(V, T)>, -/// votes2: Vec<(V, (T, W), T)>, -/// votes3: Vec<(V, [(T, W); 2usize], T)>, -/// votes4: Vec<(V, [(T, W); 3usize], T)>, -/// votes5: Vec<(V, [(T, W); 4usize], T)>, -/// votes6: Vec<(V, [(T, W); 5usize], T)>, -/// votes7: Vec<(V, [(T, W); 6usize], T)>, -/// votes8: Vec<(V, [(T, W); 7usize], T)>, -/// votes9: Vec<(V, [(T, W); 8usize], T)>, -/// votes10: Vec<(V, [(T, W); 9usize], T)>, -/// votes11: Vec<(V, [(T, W); 10usize], T)>, -/// votes12: Vec<(V, [(T, W); 11usize], T)>, -/// votes13: Vec<(V, [(T, W); 12usize], T)>, -/// votes14: Vec<(V, [(T, W); 13usize], T)>, -/// votes15: Vec<(V, [(T, W); 14usize], T)>, -/// votes16: Vec<(V, [(T, W); 15usize], T)>, -/// } +/// generate_solution_type!(pub struct TestSolution::(8)) /// ``` /// -/// The generic arguments are: -/// - `V`: identifier/index for voter (nominator) types. -/// - `T` identifier/index for candidate (validator) types. -/// - `W` weight type. -/// -/// Some conversion implementations are provided by default if -/// - `W` is u128, or -/// - `W` is anything that implements `PerThing` (such as `Perbill`) +/// The given struct provides function to convert from/to Assignment: /// -/// The ideas behind the structure are as follows: +/// - [`from_assignment()`]. +/// - [`fn into_assignment()`]. /// -/// - For single distribution, no weight is stored. The weight is known to be 100%. -/// - For all the rest, the weight if the last distribution is omitted. This value can be computed -/// from the rest. +/// The generated struct is by default deriving both `Encode` and `Decode`. This is okay but could +/// lead to many 0s in the solution. If prefixed with `#[compact]`, then a custom compact encoding +/// for numbers will be used, similar to how `parity-scale-codec`'s `Compact` works. 
/// +/// ```ignore +/// generate_solution_type!( +/// #[compact] +/// pub struct TestSolutionCompact::(8) +/// ) +/// ``` #[proc_macro] -pub fn generate_compact_solution_type(item: TokenStream) -> TokenStream { - let CompactSolutionDef { +pub fn generate_solution_type(item: TokenStream) -> TokenStream { + let SolutionDef { vis, ident, count, - } = syn::parse_macro_input!(item as CompactSolutionDef); - - let voter_type = GenericArgument::Type(Type::Verbatim(quote!(V))); - let target_type = GenericArgument::Type(Type::Verbatim(quote!(T))); - let weight_type = GenericArgument::Type(Type::Verbatim(quote!(W))); + voter_type, + target_type, + weight_type, + compact_encoding, + } = syn::parse_macro_input!(item as SolutionDef); let imports = imports().unwrap_or_else(|e| e.to_compile_error()); - let compact_def = struct_def( + let solution_struct = struct_def( vis, ident.clone(), count, voter_type.clone(), target_type.clone(), - weight_type, + weight_type.clone(), + compact_encoding, ).unwrap_or_else(|e| e.to_compile_error()); let assignment_impls = assignment::assignment( ident.clone(), voter_type.clone(), target_type.clone(), - count, - ); - - let staked_impls = staked::staked( - ident, - voter_type, - target_type, + weight_type.clone(), count, ); quote!( #imports - #compact_def + #solution_struct #assignment_impls - #staked_impls ).into() } @@ -125,25 +114,27 @@ fn struct_def( vis: syn::Visibility, ident: syn::Ident, count: usize, - voter_type: GenericArgument, - target_type: GenericArgument, - weight_type: GenericArgument, + voter_type: syn::Type, + target_type: syn::Type, + weight_type: syn::Type, + compact_encoding: bool, ) -> Result { if count <= 2 { - Err(syn::Error::new( - Span::call_site(), - "cannot build compact solution struct with capacity less than 2." - ))? + Err(syn_err("cannot build compact solution struct with capacity less than 3."))? } let singles = { let name = field_name_for(1); - quote!(#name: Vec<(#voter_type, #target_type)>,) + quote!( + #name: Vec<(#voter_type, #target_type)>, + ) }; let doubles = { let name = field_name_for(2); - quote!(#name: Vec<(#voter_type, (#target_type, #weight_type), #target_type)>,) + quote!( + #name: Vec<(#voter_type, (#target_type, #weight_type), #target_type)>, + ) }; let rest = (3..=count).map(|c| { @@ -175,31 +166,34 @@ fn struct_def( ) }).collect::(); + let derives_and_maybe_compact_encoding = if compact_encoding { + // custom compact encoding. + let compact_impl = codec::codec_impl( + ident.clone(), + voter_type.clone(), + target_type.clone(), + weight_type.clone(), + count, + ); + quote!{ + #compact_impl + #[derive(Default, PartialEq, Eq, Clone, Debug)] + } + } else { + // automatically derived. + quote!(#[derive(Default, PartialEq, Eq, Clone, Debug, _phragmen::codec::Encode, _phragmen::codec::Decode)]) + }; + Ok(quote! ( /// A struct to encode a election assignment in a compact way. 
- #[derive( - Default, - PartialEq, - Eq, - Clone, - Debug, - _phragmen::codec::Encode, - _phragmen::codec::Decode, - )] - #vis struct #ident<#voter_type, #target_type, #weight_type> { - // _marker: sp_std::marker::PhantomData, - #singles - #doubles - #rest - } + #derives_and_maybe_compact_encoding + #vis struct #ident { #singles #doubles #rest } - impl<#voter_type, #target_type, #weight_type> _phragmen::VotingLimit - for #ident<#voter_type, #target_type, #weight_type> - { + impl _phragmen::VotingLimit for #ident { const LIMIT: usize = #count; } - impl<#voter_type, #target_type, #weight_type> #ident<#voter_type, #target_type, #weight_type> { + impl #ident { /// Get the length of all the assignments that this type is encoding. This is basically /// the same as the number of assignments, or the number of voters in total. pub fn len(&self) -> usize { @@ -239,20 +233,79 @@ fn imports() -> Result { } } -struct CompactSolutionDef { +struct SolutionDef { vis: syn::Visibility, ident: syn::Ident, + voter_type: syn::Type, + target_type: syn::Type, + weight_type: syn::Type, count: usize, + compact_encoding: bool, +} + +fn check_compact_attr(input: ParseStream) -> Result { + let mut attrs = input.call(syn::Attribute::parse_outer).unwrap_or_default(); + if attrs.len() == 1 { + let attr = attrs.pop().expect("Vec with len 1 can be popped."); + if attr.path.segments.len() == 1 { + let segment = attr.path.segments.first().expect("Vec with len 1 can be popped."); + if segment.ident == Ident::new("compact", Span::call_site()) { + Ok(true) + } else { + Err(syn_err("generate_solution_type macro can only accept #[compact] attribute.")) + } + } else { + Err(syn_err("generate_solution_type macro can only accept #[compact] attribute.")) + } + } else { + Ok(false) + } } -impl Parse for CompactSolutionDef { +/// #[compact] pub struct CompactName::() +impl Parse for SolutionDef { fn parse(input: ParseStream) -> syn::Result { + // optional #[compact] + let compact_encoding = check_compact_attr(input)?; + + // struct let vis: syn::Visibility = input.parse()?; + let _ = ::parse(input)?; let ident: syn::Ident = input.parse()?; - let _ = ::parse(input)?; - let count_literal: syn::LitInt = input.parse()?; - let count = count_literal.base10_parse::()?; - Ok(Self { vis, ident, count } ) + + // :: + let _ = ::parse(input)?; + let generics: syn::AngleBracketedGenericArguments = input.parse()?; + + if generics.args.len() != 3 { + return Err(syn_err("Must provide 3 generic args.")) + } + + let mut types: Vec = generics.args.iter().map(|t| + match t { + syn::GenericArgument::Type(ty) => Ok(ty.clone()), + _ => Err(syn_err("Wrong type of generic provided. 
Must be a `type`.")), + } + ).collect::>()?; + + let weight_type = types.pop().expect("Vector of length 3 can be popped; qed"); + let target_type = types.pop().expect("Vector of length 2 can be popped; qed"); + let voter_type = types.pop().expect("Vector of length 1 can be popped; qed"); + + // () + let count_expr: syn::ExprParen = input.parse()?; + let expr = count_expr.expr; + let expr_lit = match *expr { + syn::Expr::Lit(count_lit) => count_lit.lit, + _ => return Err(syn_err("Count must be literal.")) + }; + let int_lit = match expr_lit { + syn::Lit::Int(int_lit) => int_lit, + _ => return Err(syn_err("Count must be int literal.")) + }; + let count = int_lit.base10_parse::()?; + + Ok(Self { vis, ident, voter_type, target_type, weight_type, count, compact_encoding } ) } } diff --git a/primitives/npos-elections/compact/src/staked.rs b/primitives/npos-elections/compact/src/staked.rs deleted file mode 100644 index e2680e18b63..00000000000 --- a/primitives/npos-elections/compact/src/staked.rs +++ /dev/null @@ -1,212 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Code generation for the staked assignment type. 
- -use crate::field_name_for; -use proc_macro2::{TokenStream as TokenStream2}; -use syn::{GenericArgument}; -use quote::quote; - -fn from_impl(count: usize) -> TokenStream2 { - let from_impl_single = { - let name = field_name_for(1); - quote!(1 => compact.#name.push( - ( - index_of_voter(&who).ok_or(_phragmen::Error::CompactInvalidIndex)?, - index_of_target(&distribution[0].0).ok_or(_phragmen::Error::CompactInvalidIndex)?, - ) - ),) - }; - - let from_impl_double = { - let name = field_name_for(2); - quote!(2 => compact.#name.push( - ( - index_of_voter(&who).ok_or(_phragmen::Error::CompactInvalidIndex)?, - ( - index_of_target(&distribution[0].0).ok_or(_phragmen::Error::CompactInvalidIndex)?, - distribution[0].1, - ), - index_of_target(&distribution[1].0).ok_or(_phragmen::Error::CompactInvalidIndex)?, - ) - ),) - }; - - let from_impl_rest = (3..=count).map(|c| { - let inner = (0..c-1).map(|i| - quote!((index_of_target(&distribution[#i].0).ok_or(_phragmen::Error::CompactInvalidIndex)?, distribution[#i].1),) - ).collect::(); - - let field_name = field_name_for(c); - let last_index = c - 1; - let last = quote!(index_of_target(&distribution[#last_index].0).ok_or(_phragmen::Error::CompactInvalidIndex)?); - - quote!( - #c => compact.#field_name.push( - (index_of_voter(&who).ok_or(_phragmen::Error::CompactInvalidIndex)?, [#inner], #last) - ), - ) - }).collect::(); - - quote!( - #from_impl_single - #from_impl_double - #from_impl_rest - ) -} - -fn into_impl(count: usize) -> TokenStream2 { - let into_impl_single = { - let name = field_name_for(1); - quote!( - for (voter_index, target_index) in self.#name { - let who = voter_at(voter_index).ok_or(_phragmen::Error::CompactInvalidIndex)?; - let all_stake: u128 = max_of(&who).into(); - assignments.push(_phragmen::StakedAssignment { - who, - distribution: vec![(target_at(target_index).ok_or(_phragmen::Error::CompactInvalidIndex)?, all_stake)], - }) - } - ) - }; - - let into_impl_double = { - let name = field_name_for(2); - quote!( - for (voter_index, (t1_idx, w1), t2_idx) in self.#name { - let who = voter_at(voter_index).ok_or(_phragmen::Error::CompactInvalidIndex)?; - let all_stake: u128 = max_of(&who).into(); - - if w1 >= all_stake { - return Err(_phragmen::Error::CompactStakeOverflow); - } - - // w2 is ensured to be positive. - let w2 = all_stake - w1; - assignments.push( _phragmen::StakedAssignment { - who, - distribution: vec![ - (target_at(t1_idx).ok_or(_phragmen::Error::CompactInvalidIndex)?, w1), - (target_at(t2_idx).ok_or(_phragmen::Error::CompactInvalidIndex)?, w2), - ] - }); - } - ) - }; - - let into_impl_rest = (3..=count).map(|c| { - let name = field_name_for(c); - quote!( - for (voter_index, inners, t_last_idx) in self.#name { - let who = voter_at(voter_index).ok_or(_phragmen::Error::CompactInvalidIndex)?; - let mut sum = u128::min_value(); - let all_stake: u128 = max_of(&who).into(); - - let mut inners_parsed = inners - .iter() - .map(|(ref t_idx, w)| { - sum = sum.saturating_add(*w); - let target = target_at(*t_idx).ok_or(_phragmen::Error::CompactInvalidIndex)?; - Ok((target, *w)) - }).collect::, _phragmen::Error>>()?; - - if sum >= all_stake { - return Err(_phragmen::Error::CompactStakeOverflow); - } - // w_last is proved to be positive. 
- let w_last = all_stake - sum; - - inners_parsed.push((target_at(t_last_idx).ok_or(_phragmen::Error::CompactInvalidIndex)?, w_last)); - - assignments.push(_phragmen::StakedAssignment { - who, - distribution: inners_parsed, - }); - } - ) - }).collect::(); - - quote!( - #into_impl_single - #into_impl_double - #into_impl_rest - ) -} - -pub(crate) fn staked( - ident: syn::Ident, - voter_type: GenericArgument, - target_type: GenericArgument, - count: usize, -) -> TokenStream2 { - - let from_impl = from_impl(count); - let into_impl = into_impl(count); - - quote!( - impl< - #voter_type: _phragmen::codec::Codec + Default + Copy, - #target_type: _phragmen::codec::Codec + Default + Copy, - > - #ident<#voter_type, #target_type, u128> - { - /// Generate self from a vector of `StakedAssignment`. - pub fn from_staked( - assignments: Vec<_phragmen::StakedAssignment>, - index_of_voter: FV, - index_of_target: FT, - ) -> Result - where - for<'r> FV: Fn(&'r A) -> Option<#voter_type>, - for<'r> FT: Fn(&'r A) -> Option<#target_type>, - A: _phragmen::IdentifierT - { - let mut compact: #ident<#voter_type, #target_type, u128> = Default::default(); - for _phragmen::StakedAssignment { who, distribution } in assignments { - match distribution.len() { - 0 => continue, - #from_impl - _ => { - return Err(_phragmen::Error::CompactTargetOverflow); - } - } - }; - Ok(compact) - } - - /// Convert self into `StakedAssignment`. The given function should return the total - /// weight of a voter. It is used to subtract the sum of all the encoded weights to - /// infer the last one. - pub fn into_staked( - self, - max_of: FM, - voter_at: impl Fn(#voter_type) -> Option, - target_at: impl Fn(#target_type) -> Option, - ) - -> Result>, _phragmen::Error> - where - for<'r> FM: Fn(&'r A) -> u64, - A: _phragmen::IdentifierT, - { - let mut assignments: Vec<_phragmen::StakedAssignment> = Default::default(); - #into_impl - Ok(assignments) - } - } - ) -} diff --git a/primitives/npos-elections/src/lib.rs b/primitives/npos-elections/src/lib.rs index 2b767d7c79b..58a69a11691 100644 --- a/primitives/npos-elections/src/lib.rs +++ b/primitives/npos-elections/src/lib.rs @@ -60,8 +60,22 @@ pub use codec; #[doc(hidden)] pub use sp_arithmetic; +/// Simple Extension trait to easily convert `None` from index closures to `Err`. +/// +/// This is only generated and re-exported for the compact solution code to use. +#[doc(hidden)] +pub trait __OrInvalidIndex { + fn or_invalid_index(self) -> Result; +} + +impl __OrInvalidIndex for Option { + fn or_invalid_index(self) -> Result { + self.ok_or(Error::CompactInvalidIndex) + } +} + // re-export the compact solution type. -pub use sp_npos_elections_compact::generate_compact_solution_type; +pub use sp_npos_elections_compact::generate_solution_type; /// A trait to limit the number of votes per voter. The generated compact type will implement this. pub trait VotingLimit { diff --git a/primitives/npos-elections/src/tests.rs b/primitives/npos-elections/src/tests.rs index c630f0ae359..8e99d2222e8 100644 --- a/primitives/npos-elections/src/tests.rs +++ b/primitives/npos-elections/src/tests.rs @@ -920,28 +920,76 @@ mod score { } } -mod compact { +mod solution_type { use codec::{Decode, Encode}; use super::AccountId; // these need to come from the same dev-dependency `sp-npos-elections`, not from the crate. 
use crate::{ - generate_compact_solution_type, VoteWeight, Assignment, StakedAssignment, - Error as PhragmenError, ExtendedBalance, + generate_solution_type, Assignment, + Error as PhragmenError, }; - use sp_std::{convert::{TryInto, TryFrom}, fmt::Debug}; + use sp_std::{convert::TryInto, fmt::Debug}; use sp_arithmetic::Percent; - type Accuracy = Percent; + type TestAccuracy = Percent; - generate_compact_solution_type!(TestCompact, 16); + generate_solution_type!(pub struct TestSolutionCompact::(16)); + + #[allow(dead_code)] + mod __private { + // This is just to make sure that that the compact can be generated in a scope without any + // imports. + use crate::generate_solution_type; + use sp_arithmetic::Percent; + generate_solution_type!( + #[compact] + struct InnerTestSolutionCompact::(12) + ); + + } + + #[test] + fn solution_struct_works_with_and_without_compact() { + // we use u32 size to make sure compact is smaller. + let without_compact = { + generate_solution_type!(pub struct InnerTestSolution::(16)); + let compact = InnerTestSolution { + votes1: vec![(2, 20), (4, 40)], + votes2: vec![ + (1, (10, TestAccuracy::from_percent(80)), 11), + (5, (50, TestAccuracy::from_percent(85)), 51), + ], + ..Default::default() + }; + + compact.encode().len() + }; + + let with_compact = { + generate_solution_type!(#[compact] pub struct InnerTestSolutionCompact::(16)); + let compact = InnerTestSolutionCompact { + votes1: vec![(2, 20), (4, 40)], + votes2: vec![ + (1, (10, TestAccuracy::from_percent(80)), 11), + (5, (50, TestAccuracy::from_percent(85)), 51), + ], + ..Default::default() + }; + + compact.encode().len() + }; + + dbg!(with_compact, without_compact); + assert!(with_compact < without_compact); + } #[test] - fn compact_struct_is_codec() { - let compact = TestCompact::<_, _, _> { - votes1: vec![(2u64, 20), (4, 40)], + fn solution_struct_is_codec() { + let compact = TestSolutionCompact { + votes1: vec![(2, 20), (4, 40)], votes2: vec![ - (1, (10, Accuracy::from_percent(80)), 11), - (5, (50, Accuracy::from_percent(85)), 51), + (1, (10, TestAccuracy::from_percent(80)), 11), + (5, (50, TestAccuracy::from_percent(85)), 51), ], ..Default::default() }; @@ -956,14 +1004,8 @@ mod compact { assert_eq!(compact.edge_count(), 2 + 4); } - fn basic_ratio_test_with() where - V: codec::Codec + Copy + Default + PartialEq + Eq + TryInto + TryFrom + From + Debug, - T: codec::Codec + Copy + Default + PartialEq + Eq + TryInto + TryFrom + From + Debug, - >::Error: std::fmt::Debug, - >::Error: std::fmt::Debug, - >::Error: std::fmt::Debug, - >::Error: std::fmt::Debug, - { + #[test] + fn basic_from_and_into_compact_works_assignments() { let voters = vec![ 2 as AccountId, 4, @@ -986,44 +1028,44 @@ mod compact { let assignments = vec![ Assignment { who: 2 as AccountId, - distribution: vec![(20u64, Accuracy::from_percent(100))] + distribution: vec![(20u64, TestAccuracy::from_percent(100))] }, Assignment { who: 4, - distribution: vec![(40, Accuracy::from_percent(100))], + distribution: vec![(40, TestAccuracy::from_percent(100))], }, Assignment { who: 1, distribution: vec![ - (10, Accuracy::from_percent(80)), - (11, Accuracy::from_percent(20)) + (10, TestAccuracy::from_percent(80)), + (11, TestAccuracy::from_percent(20)) ], }, Assignment { who: 5, distribution: vec![ - (50, Accuracy::from_percent(85)), - (51, Accuracy::from_percent(15)), + (50, TestAccuracy::from_percent(85)), + (51, TestAccuracy::from_percent(15)), ] }, Assignment { who: 3, distribution: vec![ - (30, Accuracy::from_percent(50)), - (31, 
Accuracy::from_percent(25)), - (32, Accuracy::from_percent(25)), + (30, TestAccuracy::from_percent(50)), + (31, TestAccuracy::from_percent(25)), + (32, TestAccuracy::from_percent(25)), ], }, ]; - let voter_index = |a: &AccountId| -> Option { + let voter_index = |a: &AccountId| -> Option { voters.iter().position(|x| x == a).map(TryInto::try_into).unwrap().ok() }; - let target_index = |a: &AccountId| -> Option { + let target_index = |a: &AccountId| -> Option { targets.iter().position(|x| x == a).map(TryInto::try_into).unwrap().ok() }; - let compacted = >::from_assignment( + let compacted = TestSolutionCompact::from_assignment( assignments.clone(), voter_index, target_index, @@ -1038,266 +1080,89 @@ mod compact { assert_eq!( compacted, - TestCompact { - votes1: vec![(V::from(0u8), T::from(2u8)), (V::from(1u8), T::from(6u8))], + TestSolutionCompact { + votes1: vec![(0, 2), (1, 6)], votes2: vec![ - (V::from(2u8), (T::from(0u8), Accuracy::from_percent(80)), T::from(1u8)), - (V::from(3u8), (T::from(7u8), Accuracy::from_percent(85)), T::from(8u8)), + (2, (0, TestAccuracy::from_percent(80)), 1), + (3, (7, TestAccuracy::from_percent(85)), 8), ], votes3: vec![ ( - V::from(4), - [(T::from(3u8), Accuracy::from_percent(50)), (T::from(4u8), Accuracy::from_percent(25))], - T::from(5u8), + 4, + [(3, TestAccuracy::from_percent(50)), (4, TestAccuracy::from_percent(25))], + 5, ), ], ..Default::default() } ); - let voter_at = |a: V| -> Option { voters.get(>::try_into(a).unwrap()).cloned() }; - let target_at = |a: T| -> Option { targets.get(>::try_into(a).unwrap()).cloned() }; - - assert_eq!( - compacted.into_assignment(voter_at, target_at).unwrap(), - assignments, - ); - } - - #[test] - fn basic_from_and_into_compact_works_assignments() { - basic_ratio_test_with::(); - basic_ratio_test_with::(); - basic_ratio_test_with::(); - } - - #[test] - fn basic_from_and_into_compact_works_staked_assignments() { - let voters = vec![ - 2 as AccountId, - 4, - 1, - 5, - 3, - ]; - let targets = vec![ - 10 as AccountId, 11, - 20, - 30, 31, 32, - 40, - 50, 51, - ]; - - let assignments = vec![ - StakedAssignment { - who: 2 as AccountId, - distribution: vec![(20, 100 as ExtendedBalance)] - }, - StakedAssignment { - who: 4, - distribution: vec![(40, 100)], - }, - StakedAssignment { - who: 1, - distribution: vec![ - (10, 80), - (11, 20) - ], - }, - StakedAssignment { - who: 5, distribution: - vec![ - (50, 85), - (51, 15), - ] - }, - StakedAssignment { - who: 3, - distribution: vec![ - (30, 50), - (31, 25), - (32, 25), - ], - }, - ]; - - let voter_index = |a: &AccountId| -> Option { - voters.iter().position(|x| x == a).map(TryInto::try_into).unwrap().ok() + let voter_at = |a: u32| -> Option { + voters.get(>::try_into(a).unwrap()).cloned() }; - let target_index = |a: &AccountId| -> Option { - targets.iter().position(|x| x == a).map(TryInto::try_into).unwrap().ok() + let target_at = |a: u8| -> Option { + targets.get(>::try_into(a).unwrap()).cloned() }; - let compacted = >::from_staked( - assignments.clone(), - voter_index, - target_index, - ).unwrap(); - assert_eq!(compacted.len(), assignments.len()); - assert_eq!( - compacted.edge_count(), - assignments.iter().fold(0, |a, b| a + b.distribution.len()), - ); - - assert_eq!( - compacted, - TestCompact { - votes1: vec![(0, 2), (1, 6)], - votes2: vec![ - (2, (0, 80), 1), - (3, (7, 85), 8), - ], - votes3: vec![ - (4, [(3, 50), (4, 25)], 5), - ], - ..Default::default() - } - ); - - let max_of_fn = |_: &AccountId| -> VoteWeight { 100 }; - let voter_at = |a: u16| -> Option { voters.get(a as 
usize).cloned() }; - let target_at = |a: u16| -> Option { targets.get(a as usize).cloned() }; - assert_eq!( - compacted.into_staked( - max_of_fn, - voter_at, - target_at, - ).unwrap(), + compacted.into_assignment(voter_at, target_at).unwrap(), assignments, ); } - #[test] - fn compact_into_stake_must_report_overflow() { - // The last edge which is computed from the rest should ALWAYS be positive. - // in votes2 - let compact = TestCompact:: { - votes1: Default::default(), - votes2: vec![(0, (1, 10), 2)], - ..Default::default() - }; - - let entity_at = |a: u16| -> Option { Some(a as AccountId) }; - let max_of = |_: &AccountId| -> VoteWeight { 5 }; - - assert_eq!( - compact.into_staked(&max_of, &entity_at, &entity_at).unwrap_err(), - PhragmenError::CompactStakeOverflow, - ); - - // in votes3 onwards - let compact = TestCompact:: { - votes1: Default::default(), - votes2: Default::default(), - votes3: vec![(0, [(1, 7), (2, 8)], 3)], - ..Default::default() - }; - - assert_eq!( - compact.into_staked(&max_of, &entity_at, &entity_at).unwrap_err(), - PhragmenError::CompactStakeOverflow, - ); - - // Also if equal - let compact = TestCompact:: { - votes1: Default::default(), - votes2: Default::default(), - // 5 is total, we cannot leave none for 30 here. - votes3: vec![(0, [(1, 3), (2, 2)], 3)], - ..Default::default() - }; - - assert_eq!( - compact.into_staked(&max_of, &entity_at, &entity_at).unwrap_err(), - PhragmenError::CompactStakeOverflow, - ); - } - #[test] fn compact_into_assignment_must_report_overflow() { // in votes2 - let compact = TestCompact:: { + let compact = TestSolutionCompact { votes1: Default::default(), - votes2: vec![(0, (1, Accuracy::from_percent(100)), 2)], + votes2: vec![(0, (1, TestAccuracy::from_percent(100)), 2)], ..Default::default() }; - let entity_at = |a: u16| -> Option { Some(a as AccountId) }; + let voter_at = |a: u32| -> Option { Some(a as AccountId) }; + let target_at = |a: u8| -> Option { Some(a as AccountId) }; + assert_eq!( - compact.into_assignment(&entity_at, &entity_at).unwrap_err(), + compact.into_assignment(&voter_at, &target_at).unwrap_err(), PhragmenError::CompactStakeOverflow, ); // in votes3 onwards - let compact = TestCompact:: { + let compact = TestSolutionCompact { votes1: Default::default(), votes2: Default::default(), - votes3: vec![(0, [(1, Accuracy::from_percent(70)), (2, Accuracy::from_percent(80))], 3)], + votes3: vec![(0, [(1, TestAccuracy::from_percent(70)), (2, TestAccuracy::from_percent(80))], 3)], ..Default::default() }; assert_eq!( - compact.into_assignment(&entity_at, &entity_at).unwrap_err(), + compact.into_assignment(&voter_at, &target_at).unwrap_err(), PhragmenError::CompactStakeOverflow, ); } #[test] fn target_count_overflow_is_detected() { - let assignments = vec![ - StakedAssignment { - who: 1 as AccountId, - distribution: (10..26).map(|i| (i as AccountId, i as ExtendedBalance)).collect::>(), - }, - ]; - - let entity_index = |a: &AccountId| -> Option { Some(*a as u16) }; - - let compacted = >::from_staked( - assignments.clone(), - entity_index, - entity_index, - ); - - assert!(compacted.is_ok()); - - let assignments = vec![ - StakedAssignment { - who: 1 as AccountId, - distribution: (10..27).map(|i| (i as AccountId, i as ExtendedBalance)).collect::>(), - }, - ]; - - let compacted = >::from_staked( - assignments.clone(), - entity_index, - entity_index, - ); - - assert_eq!( - compacted.unwrap_err(), - PhragmenError::CompactTargetOverflow, - ); + let voter_index = |a: &AccountId| -> Option { Some(*a as u32) }; + let target_index = |a: 
&AccountId| -> Option { Some(*a as u8) }; let assignments = vec![ Assignment { who: 1 as AccountId, - distribution: (10..27).map(|i| (i as AccountId, Percent::from_parts(i as u8))).collect::>(), + distribution: + (10..27) + .map(|i| (i as AccountId, Percent::from_parts(i as u8))) + .collect::>(), }, ]; - let compacted = >::from_assignment( + let compacted = TestSolutionCompact::from_assignment( assignments.clone(), - entity_index, - entity_index, - ); - - assert_eq!( - compacted.unwrap_err(), - PhragmenError::CompactTargetOverflow, + voter_index, + target_index, ); + assert_eq!(compacted.unwrap_err(), PhragmenError::CompactTargetOverflow); } #[test] @@ -1306,24 +1171,24 @@ mod compact { let targets = vec![10 as AccountId, 11]; let assignments = vec![ - StakedAssignment { + Assignment { who: 1 as AccountId, - distribution: vec![(10, 100 as ExtendedBalance), (11, 100)] + distribution: vec![(10, Percent::from_percent(50)), (11, Percent::from_percent(50))], }, - StakedAssignment { + Assignment { who: 2, distribution: vec![], }, ]; - let voter_index = |a: &AccountId| -> Option { + let voter_index = |a: &AccountId| -> Option { voters.iter().position(|x| x == a).map(TryInto::try_into).unwrap().ok() }; - let target_index = |a: &AccountId| -> Option { + let target_index = |a: &AccountId| -> Option { targets.iter().position(|x| x == a).map(TryInto::try_into).unwrap().ok() }; - let compacted = >::from_staked( + let compacted = TestSolutionCompact::from_assignment( assignments.clone(), voter_index, target_index, @@ -1331,9 +1196,9 @@ mod compact { assert_eq!( compacted, - TestCompact { + TestSolutionCompact { votes1: Default::default(), - votes2: vec![(0, (0, 100), 1)], + votes2: vec![(0, (0, Percent::from_percent(50)), 1)], ..Default::default() } ); -- GitLab From 865321f7b980dba17809bcf192bd206443bc9258 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Tue, 11 Aug 2020 14:09:52 +0200 Subject: [PATCH 075/132] state_getSize RPC for storage maps (#6847) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fancy compact encode/decode impl for compact solution * Make it optional * Remove extra file * Update primitives/npos-elections/compact/src/lib.rs Co-authored-by: Guillaume Thiolliere * Final fixes. * getSize rpc should work for maps as well * Fix future types * Remove minimum_validator_count stale const * Update client/rpc/src/state/mod.rs Co-authored-by: Bastian Köcher * "Optimize" `storage_size` * Remove unused import * Update doc Co-authored-by: Guillaume Thiolliere Co-authored-by: Bastian Köcher Co-authored-by: Bastian Köcher --- client/rpc/src/state/mod.rs | 8 ++++---- client/rpc/src/state/state_full.rs | 30 +++++++++++++++++++++++++++++ client/rpc/src/state/state_light.rs | 8 ++++++++ client/rpc/src/state/tests.rs | 8 +++++++- 4 files changed, 49 insertions(+), 5 deletions(-) diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index ded87c73dc8..01c7c5f1eb4 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -97,14 +97,14 @@ pub trait StateBackend: Send + Sync + 'static ) -> FutureResult>; /// Returns the size of a storage entry at a block's state. + /// + /// If data is available at `key`, it is returned. Else, the sum of values who's key has `key` + /// prefix is returned, i.e. all the storage (double) maps that have this prefix. 
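A minimal sketch of the prefix-sum fallback that the new `storage_size` documentation describes, assuming the `(key, value)` pairs under the queried prefix have already been fetched; the free-standing helper and its name are illustrative only, while the real logic lives in `state_full.rs` further down:

```rust
// Illustrative only: mirrors the documented behaviour, not the actual RPC code.
fn prefix_storage_size(pairs: &[(Vec<u8>, Vec<u8>)]) -> Option<u64> {
    // Sum the encoded length of every value stored under the prefix.
    let item_sum: u64 = pairs.iter().map(|(_, value)| value.len() as u64).sum();
    // No entries under the prefix is reported as `None`, like a missing plain key.
    if item_sum > 0 { Some(item_sum) } else { None }
}
```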
fn storage_size( &self, block: Option, key: StorageKey, - ) -> FutureResult> { - Box::new(self.storage(block, key) - .map(|x| x.map(|x| x.0.len() as u64))) - } + ) -> FutureResult>; /// Returns the runtime metadata as an opaque blob. fn metadata(&self, block: Option) -> FutureResult; diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 3a5580c539a..fda73cea271 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -298,6 +298,36 @@ impl StateBackend for FullState, + key: StorageKey, + ) -> FutureResult> { + let block = match self.block_or_best(block) { + Ok(b) => b, + Err(e) => return Box::new(result(Err(client_err(e)))), + }; + + match self.client.storage(&BlockId::Hash(block), &key) { + Ok(Some(d)) => return Box::new(result(Ok(Some(d.0.len() as u64)))), + Err(e) => return Box::new(result(Err(client_err(e)))), + Ok(None) => {}, + } + + Box::new(result( + self.client.storage_pairs(&BlockId::Hash(block), &key) + .map(|kv| { + let item_sum = kv.iter().map(|(_, v)| v.0.len() as u64).sum::(); + if item_sum > 0 { + Some(item_sum) + } else { + None + } + }) + .map_err(client_err) + )) + } + fn storage_hash( &self, block: Option, diff --git a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index 01fd1c16018..8f4dce08b3f 100644 --- a/client/rpc/src/state/state_light.rs +++ b/client/rpc/src/state/state_light.rs @@ -214,6 +214,14 @@ impl StateBackend for LightState, + _: StorageKey, + ) -> FutureResult> { + Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) + } + fn storage( &self, block: Option, diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 0cc16ce8d5e..b6677a1f2ff 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -53,6 +53,9 @@ fn should_return_storage() { let client = TestClientBuilder::new() .add_extra_storage(KEY.to_vec(), VALUE.to_vec()) .add_extra_child_storage(&child_info, KEY.to_vec(), CHILD_VALUE.to_vec()) + // similar to a map with two keys + .add_extra_storage(b":map:acc1".to_vec(), vec![1, 2]) + .add_extra_storage(b":map:acc2".to_vec(), vec![1, 2, 3]) .build(); let genesis_hash = client.genesis_hash(); let (client, child) = new_full(Arc::new(client), SubscriptionManager::new(Arc::new(TaskExecutor))); @@ -72,6 +75,10 @@ fn should_return_storage() { client.storage_size(key.clone(), None).wait().unwrap().unwrap() as usize, VALUE.len(), ); + assert_eq!( + client.storage_size(StorageKey(b":map".to_vec()), None).wait().unwrap().unwrap() as usize, + 2 + 3, + ); assert_eq!( executor::block_on( child.storage(prefixed_storage_key(), key, Some(genesis_hash).into()) @@ -80,7 +87,6 @@ fn should_return_storage() { ).unwrap().unwrap() as usize, CHILD_VALUE.len(), ); - } #[test] -- GitLab From a362997e6c92fb454c40ab515446614ae14f72af Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Tue, 11 Aug 2020 16:12:30 +0200 Subject: [PATCH 076/132] Block packet size limit (#6398) * Block packet size limit * Update client/network/src/protocol.rs Co-authored-by: Pierre Krieger * Add block response limit Co-authored-by: Pierre Krieger --- client/network/src/block_requests.rs | 41 +++++++++++++++++++++------- client/network/src/protocol.rs | 7 ++++- 2 files changed, 37 insertions(+), 11 deletions(-) diff --git a/client/network/src/block_requests.rs b/client/network/src/block_requests.rs index 1aa557d6cdc..d7a12816dd4 100644 --- a/client/network/src/block_requests.rs +++ b/client/network/src/block_requests.rs @@ -119,6 +119,7 
@@ pub enum Event { #[derive(Debug, Clone)] pub struct Config { max_block_data_response: u32, + max_block_body_bytes: usize, max_request_len: usize, max_response_len: usize, inactivity_timeout: Duration, @@ -137,6 +138,7 @@ impl Config { pub fn new(id: &ProtocolId) -> Self { let mut c = Config { max_block_data_response: 128, + max_block_body_bytes: 8 * 1024 * 1024, max_request_len: 1024 * 1024, max_response_len: 16 * 1024 * 1024, inactivity_timeout: Duration::from_secs(15), @@ -171,6 +173,15 @@ impl Config { self } + /// Set the maximum total bytes of block bodies that are send in the response. + /// Note that at least one block is always sent regardless of the limit. + /// This should be lower than the value specified in `set_max_response_len` + /// accounting for headers, justifications and encoding overhead. + pub fn set_max_block_body_bytes(&mut self, v: usize) -> &mut Self { + self.max_block_body_bytes = v; + self + } + /// Set protocol to use for upgrade negotiation. pub fn set_protocol(&mut self, id: &ProtocolId) -> &mut Self { let mut v = Vec::new(); @@ -385,8 +396,11 @@ where let mut blocks = Vec::new(); let mut block_id = from_block_id; + let mut total_size = 0; while let Some(header) = self.chain.header(block_id).unwrap_or(None) { - if blocks.len() >= max_blocks as usize { + if blocks.len() >= max_blocks as usize + || (blocks.len() >= 1 && total_size > self.config.max_block_body_bytes) + { break } @@ -400,6 +414,20 @@ where }; let is_empty_justification = justification.as_ref().map(|j| j.is_empty()).unwrap_or(false); + let body = if get_body { + match self.chain.block_body(&BlockId::Hash(hash))? { + Some(mut extrinsics) => extrinsics.iter_mut() + .map(|extrinsic| extrinsic.encode()) + .collect(), + None => { + log::trace!(target: "sync", "Missing data for block request."); + break; + } + } + } else { + Vec::new() + }; + let block_data = schema::v1::BlockData { hash: hash.encode(), header: if get_header { @@ -407,21 +435,14 @@ where } else { Vec::new() }, - body: if get_body { - self.chain.block_body(&BlockId::Hash(hash))? - .unwrap_or_default() - .iter_mut() - .map(|extrinsic| extrinsic.encode()) - .collect() - } else { - Vec::new() - }, + body, receipt: Vec::new(), message_queue: Vec::new(), justification: justification.unwrap_or_default(), is_empty_justification, }; + total_size += block_data.body.len(); blocks.push(block_data); match direction { diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 630471414b2..f1ce6d2b560 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -95,6 +95,9 @@ pub(crate) const MIN_VERSION: u32 = 3; // Maximum allowed entries in `BlockResponse` const MAX_BLOCK_DATA_RESPONSE: u32 = 128; +// Maximum total bytes allowed for block bodies in `BlockResponse` +const MAX_BODIES_BYTES: usize = 8 * 1024 * 1024; + /// When light node connects to the full node and the full node is behind light node /// for at least `LIGHT_MAXIMAL_BLOCKS_DIFFERENCE` blocks, we consider it not useful /// and disconnect to free connection slot. 
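The same capping rule is applied in both `block_requests.rs` and `protocol.rs` (next hunk): at least one block is always returned, and collection stops once the accumulated body bytes exceed the limit. A self-contained sketch of that rule, with `pending_bodies` standing in for the real block iteration:

```rust
// The constant value and field names follow the surrounding diff; the function
// itself is only a sketch of the size-capping rule, not code from the patch.
const MAX_BODIES_BYTES: usize = 8 * 1024 * 1024;

fn cap_response(pending_bodies: Vec<Vec<u8>>, max_blocks: usize) -> Vec<Vec<u8>> {
    let mut blocks = Vec::new();
    let mut total_size = 0;
    for body in pending_bodies {
        // Always send at least one block, then stop once the byte budget is spent.
        if blocks.len() >= max_blocks || (blocks.len() >= 1 && total_size > MAX_BODIES_BYTES) {
            break;
        }
        total_size += body.len();
        blocks.push(body);
    }
    blocks
}
```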
@@ -756,8 +759,9 @@ impl Protocol { let get_justification = request .fields .contains(message::BlockAttributes::JUSTIFICATION); + let mut total_size = 0; while let Some(header) = self.context_data.chain.header(id).unwrap_or(None) { - if blocks.len() >= max { + if blocks.len() >= max || (blocks.len() >= 1 && total_size > MAX_BODIES_BYTES) { break; } let number = *header.number(); @@ -788,6 +792,7 @@ impl Protocol { trace!(target: "sync", "Missing data for block request."); break; } + total_size += block_data.body.as_ref().map_or(0, |b| b.len()); blocks.push(block_data); match request.direction { message::Direction::Ascending => id = BlockId::Number(number + One::one()), -- GitLab From 6f575828c69baf2b56719fd925630092cef519d6 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Tue, 11 Aug 2020 18:05:31 +0300 Subject: [PATCH 077/132] Move to upstream wasmtime, refactor globals snapshot (#6759) * refactor globals snapshot * ignore test * update pwasm-utils ref * line width * add doc comment for internal struct * add explanation for iteration * Demote rustdoc to a comment * use 0.14 Co-authored-by: Sergei Shulepov --- Cargo.lock | 57 ++++---- client/executor/wasmtime/Cargo.toml | 8 +- client/executor/wasmtime/src/imports.rs | 4 +- .../executor/wasmtime/src/instance_wrapper.rs | 20 ++- .../src/instance_wrapper/globals_snapshot.rs | 133 ++++++------------ client/network/src/service/tests.rs | 1 + 6 files changed, 83 insertions(+), 140 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e080fbcbaa1..781e5e14bad 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6552,20 +6552,17 @@ name = "sc-executor-wasmtime" version = "0.8.0-rc5" dependencies = [ "assert_matches", - "cranelift-codegen", - "cranelift-wasm", "log", "parity-scale-codec", "parity-wasm 0.41.0", + "pwasm-utils", "sc-executor-common", "scoped-tls", "sp-allocator", "sp-core", "sp-runtime-interface", "sp-wasm-interface", - "substrate-wasmtime", - "wasmtime-environ", - "wasmtime-runtime", + "wasmtime", ] [[package]] @@ -8609,31 +8606,6 @@ dependencies = [ name = "substrate-wasm-builder-runner" version = "1.0.6" -[[package]] -name = "substrate-wasmtime" -version = "0.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d75a69f5b3afef86e3e372529bf3fb1f7219b20287c4490e4cb4b4e91970f4f5" -dependencies = [ - "anyhow", - "backtrace", - "cfg-if", - "lazy_static", - "libc", - "log", - "region", - "rustc-demangle", - "smallvec 1.4.1", - "target-lexicon", - "wasmparser 0.59.0", - "wasmtime-environ", - "wasmtime-jit", - "wasmtime-profiling", - "wasmtime-runtime", - "wat", - "winapi 0.3.8", -] - [[package]] name = "subtle" version = "1.0.0" @@ -9641,6 +9613,31 @@ version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a950e6a618f62147fd514ff445b2a0b53120d382751960797f85f058c7eda9b9" +[[package]] +name = "wasmtime" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cd3c4f449382779ef6e0a7c3ec6752ae614e20a42e4100000c3efdc973100e2" +dependencies = [ + "anyhow", + "backtrace", + "cfg-if", + "lazy_static", + "libc", + "log", + "region", + "rustc-demangle", + "smallvec 1.4.1", + "target-lexicon", + "wasmparser 0.59.0", + "wasmtime-environ", + "wasmtime-jit", + "wasmtime-profiling", + "wasmtime-runtime", + "wat", + "winapi 0.3.8", +] + [[package]] name = "wasmtime-debug" version = "0.19.0" diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index 0267cf6efad..6eea4e6b14a 100644 --- 
a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -21,12 +21,8 @@ sp-wasm-interface = { version = "2.0.0-rc5", path = "../../../primitives/wasm-in sp-runtime-interface = { version = "2.0.0-rc5", path = "../../../primitives/runtime-interface" } sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } sp-allocator = { version = "2.0.0-rc5", path = "../../../primitives/allocator" } -wasmtime = { package = "substrate-wasmtime", version = "0.19.0" } -wasmtime-runtime = { version = "0.19.0" } -wasmtime-environ = { version = "0.19.0" } -cranelift-wasm = { version = "0.66.0" } -cranelift-codegen = { version = "0.66.0" } - +wasmtime = "0.19" +pwasm-utils = "0.14.0" [dev-dependencies] assert_matches = "1.3.0" diff --git a/client/executor/wasmtime/src/imports.rs b/client/executor/wasmtime/src/imports.rs index 41498e2b0fa..add62df5cef 100644 --- a/client/executor/wasmtime/src/imports.rs +++ b/client/executor/wasmtime/src/imports.rs @@ -294,7 +294,7 @@ fn into_wasmtime_val_type(val_ty: ValueType) -> wasmtime::ValType { /// Converts a `Val` into a substrate runtime interface `Value`. /// /// Panics if the given value doesn't have a corresponding variant in `Value`. -fn into_value(val: Val) -> Value { +pub fn into_value(val: Val) -> Value { match val { Val::I32(v) => Value::I32(v), Val::I64(v) => Value::I64(v), @@ -304,7 +304,7 @@ fn into_value(val: Val) -> Value { } } -fn into_wasmtime_val(value: Value) -> wasmtime::Val { +pub fn into_wasmtime_val(value: Value) -> wasmtime::Val { match value { Value::I32(v) => Val::I32(v), Value::I64(v) => Val::I64(v), diff --git a/client/executor/wasmtime/src/instance_wrapper.rs b/client/executor/wasmtime/src/instance_wrapper.rs index d31193688b9..9a4e44d3b10 100644 --- a/client/executor/wasmtime/src/instance_wrapper.rs +++ b/client/executor/wasmtime/src/instance_wrapper.rs @@ -29,36 +29,36 @@ use sc_executor_common::{ }; use sp_wasm_interface::{Pointer, WordSize, Value}; use wasmtime::{Engine, Instance, Module, Memory, Table, Val, Func, Extern, Global, Store}; +use parity_wasm::elements; mod globals_snapshot; pub use globals_snapshot::GlobalsSnapshot; pub struct ModuleWrapper { - imported_globals_count: u32, - globals_count: u32, module: Module, data_segments_snapshot: DataSegmentsSnapshot, } impl ModuleWrapper { pub fn new(engine: &Engine, code: &[u8]) -> Result { - let module = Module::new(engine, code) + let mut raw_module: elements::Module = elements::deserialize_buffer(code) + .map_err(|e| Error::from(format!("cannot decode module: {}", e)))?; + pwasm_utils::export_mutable_globals(&mut raw_module, "exported_internal_global"); + let instrumented_code = elements::serialize(raw_module) + .map_err(|e| Error::from(format!("cannot encode module: {}", e)))?; + + let module = Module::new(engine, &instrumented_code) .map_err(|e| Error::from(format!("cannot create module: {}", e)))?; let module_info = WasmModuleInfo::new(code) .ok_or_else(|| Error::from("cannot deserialize module".to_string()))?; - let declared_globals_count = module_info.declared_globals_count(); - let imported_globals_count = module_info.imported_globals_count(); - let globals_count = imported_globals_count + declared_globals_count; let data_segments_snapshot = DataSegmentsSnapshot::take(&module_info) .map_err(|e| Error::from(format!("cannot take data segments snapshot: {}", e)))?; Ok(Self { module, - imported_globals_count, - globals_count, data_segments_snapshot, }) } @@ -78,8 +78,6 @@ impl ModuleWrapper { /// routines. 
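The instrumentation step shown above in `ModuleWrapper::new` is the key to the new snapshot approach: every mutable global is re-exported under a well-known prefix so that it stays reachable through the public `wasmtime` API. A stand-alone sketch of that step, under the assumption that the `parity-wasm` error type is acceptable to the caller (the helper function itself is illustrative):

```rust
use parity_wasm::elements;

// Sketch: re-export mutable globals under the prefix used by the snapshot code.
fn instrument(code: &[u8]) -> Result<Vec<u8>, elements::Error> {
    let mut raw_module: elements::Module = elements::deserialize_buffer(code)?;
    pwasm_utils::export_mutable_globals(&mut raw_module, "exported_internal_global");
    elements::serialize(raw_module)
}
```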
pub struct InstanceWrapper { instance: Instance, - globals_count: u32, - imported_globals_count: u32, // The memory instance of the `instance`. // // It is important to make sure that we don't make any copies of this to make it easier to proof @@ -143,8 +141,6 @@ impl InstanceWrapper { Ok(Self { table: get_table(&instance), instance, - globals_count: module_wrapper.globals_count, - imported_globals_count: module_wrapper.imported_globals_count, memory, _not_send_nor_sync: marker::PhantomData, }) diff --git a/client/executor/wasmtime/src/instance_wrapper/globals_snapshot.rs b/client/executor/wasmtime/src/instance_wrapper/globals_snapshot.rs index dd99d63ae25..42935d851d9 100644 --- a/client/executor/wasmtime/src/instance_wrapper/globals_snapshot.rs +++ b/client/executor/wasmtime/src/instance_wrapper/globals_snapshot.rs @@ -17,115 +17,68 @@ // along with this program. If not, see . use super::InstanceWrapper; -use sc_executor_common::{ - error::{Error, Result}, -}; +use sc_executor_common::error::{Result, Error}; use sp_wasm_interface::Value; -use cranelift_codegen::ir; -use cranelift_wasm::GlobalIndex; -use wasmtime_runtime::{ExportGlobal, Export}; +use crate::imports::{into_value, into_wasmtime_val}; + +/// Saved value of particular exported global. +struct SavedValue { + /// Index of the export. + index: usize, + /// Global value. + value: Value, +} /// A snapshot of a global variables values. This snapshot can be used later for restoring the /// values to the preserved state. /// /// Technically, a snapshot stores only values of mutable global variables. This is because /// immutable global variables always have the same values. -pub struct GlobalsSnapshot { - handle: wasmtime_runtime::InstanceHandle, - preserved_mut_globals: Vec<(*mut wasmtime_runtime::VMGlobalDefinition, Value)>, -} +pub struct GlobalsSnapshot(Vec); impl GlobalsSnapshot { /// Take a snapshot of global variables for a given instance. pub fn take(instance_wrapper: &InstanceWrapper) -> Result { - // EVIL: - // Usage of an undocumented function. - let handle = instance_wrapper.instance.handle().clone().handle; - - let mut preserved_mut_globals = vec![]; - - for global_idx in instance_wrapper.imported_globals_count..instance_wrapper.globals_count { - let (def, global) = match handle.lookup_by_declaration( - &wasmtime_environ::EntityIndex::Global(GlobalIndex::from_u32(global_idx)), - ) { - Export::Global(ExportGlobal { definition, global, .. }) => (definition, global), - _ => unreachable!("only globals can be returned for a global request"), - }; - - // skip immutable globals. - if !global.mutability { - continue; - } - - let value = unsafe { - // Safety of this function solely depends on the correctness of the reference and - // the type information of the global. - read_global(def, global.ty)? - }; - preserved_mut_globals.push((def, value)); - } - - Ok(Self { - preserved_mut_globals, - handle, - }) + let data = instance_wrapper.instance + .exports() + .enumerate() + .filter_map(|(index, export)| { + if export.name().starts_with("exported_internal_global") { + export.into_global().map( + |g| SavedValue { index, value: into_value(g.get()) } + ) + } else { None } + }) + .collect::>(); + + Ok(Self(data)) } /// Apply the snapshot to the given instance. /// /// This instance must be the same that was used for creation of this snapshot. 
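With the globals exported this way, preserving state around a call into the runtime reduces to a take/apply pair. A usage sketch, assuming a prepared `InstanceWrapper` is at hand; the wrapper function is hypothetical, while `GlobalsSnapshot::take` and `apply` are the functions defined in the surrounding diff:

```rust
// Hypothetical caller showing the intended take/apply lifecycle.
fn with_preserved_globals(instance: &InstanceWrapper) -> sc_executor_common::error::Result<()> {
    let snapshot = GlobalsSnapshot::take(instance)?;
    // ... invoke the wasm instance here; the call may mutate exported globals ...
    snapshot.apply(instance)?; // restore every exported mutable global
    Ok(())
}
```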
pub fn apply(&self, instance_wrapper: &InstanceWrapper) -> Result<()> { - if instance_wrapper.instance.handle().handle != self.handle { - return Err(Error::from("unexpected instance handle".to_string())); - } - - for (def, value) in &self.preserved_mut_globals { - unsafe { - // The following writes are safe if the precondition that this is the same instance - // this snapshot was created with: - // - // 1. These pointers must be still not-NULL and allocated. - // 2. The set of global variables is fixed for the lifetime of the same instance. - // 3. We obviously assume that the wasmtime references are correct in the first place. - // 4. We write the data with the same type it was read in the first place. - write_global(*def, *value)?; + // This is a pointer over saved items, it moves forward when the loop value below takes over it's current value. + // Since both pointers (`current` and `index` below) are over ordered lists, they eventually hit all + // equal referenced values. + let mut current = 0; + for (index, export) in instance_wrapper.instance.exports().enumerate() { + if current >= self.0.len() { break; } + let current_saved = &self.0[current]; + if index < current_saved.index { continue; } + else if index > current_saved.index { current += 1; continue; } + else { + export.into_global() + .ok_or_else(|| Error::Other( + "Wrong instance in GlobalsSnapshot::apply: what should be global is not global.".to_string() + ))? + .set(into_wasmtime_val(current_saved.value)) + .map_err(|_e| Error::Other( + "Wrong instance in GlobalsSnapshot::apply: global saved type does not matched applied.".to_string() + ))?; } } - Ok(()) - } -} -unsafe fn read_global( - def: *const wasmtime_runtime::VMGlobalDefinition, - ty: ir::Type, -) -> Result { - let def = def - .as_ref() - .ok_or_else(|| Error::from("wasmtime global reference is null during read".to_string()))?; - let val = match ty { - ir::types::I32 => Value::I32(*def.as_i32()), - ir::types::I64 => Value::I64(*def.as_i64()), - ir::types::F32 => Value::F32(*def.as_u32()), - ir::types::F64 => Value::F64(*def.as_u64()), - _ => { - return Err(Error::from(format!( - "unsupported global variable type: {}", - ty - ))) - } - }; - Ok(val) -} - -unsafe fn write_global(def: *mut wasmtime_runtime::VMGlobalDefinition, value: Value) -> Result<()> { - let def = def - .as_mut() - .ok_or_else(|| Error::from("wasmtime global reference is null during write".to_string()))?; - match value { - Value::I32(v) => *def.as_i32_mut() = v, - Value::I64(v) => *def.as_i64_mut() = v, - Value::F32(v) => *def.as_u32_mut() = v, - Value::F64(v) => *def.as_u64_mut() = v, + Ok(()) } - Ok(()) } diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index 0bfe507599c..031d1641d23 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -151,6 +151,7 @@ fn build_nodes_one_proto() (node1, events_stream1, node2, events_stream2) } +#[ignore] #[test] fn notifications_state_consistent() { // Runs two nodes and ensures that events are propagated out of the API in a consistent -- GitLab From 40643789096cc922371d5fdec7fd98245601420b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Tue, 11 Aug 2020 16:05:59 +0100 Subject: [PATCH 078/132] grandpa: change some logging from trace to debug (#6872) * grandpa: change some logging from trace to debug * grandpa: cleanup unused import --- client/finality-grandpa/src/communication/gossip.rs | 4 ++-- 
client/finality-grandpa/src/import.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 7d9fe4e7f2d..276529d555f 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -922,7 +922,7 @@ impl Inner { PendingCatchUp::Processing { .. } => { self.pending_catch_up = PendingCatchUp::None; }, - state => trace!(target: "afg", + state => debug!(target: "afg", "Noted processed catch up message when state was: {:?}", state, ), @@ -1043,7 +1043,7 @@ impl Inner { let (catch_up_allowed, catch_up_report) = self.note_catch_up_request(who, &request); if catch_up_allowed { - trace!(target: "afg", "Sending catch-up request for round {} to {}", + debug!(target: "afg", "Sending catch-up request for round {} to {}", round, who, ); diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index c9f2d8d2f7b..d5b0a650096 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -18,7 +18,7 @@ use std::{sync::Arc, collections::HashMap}; -use log::{debug, trace}; +use log::debug; use parity_scale_codec::Encode; use parking_lot::RwLockWriteGuard; @@ -527,7 +527,7 @@ impl BlockImport }, None => { if needs_justification { - trace!( + debug!( target: "afg", "Imported unjustified block #{} that enacts authority set change, waiting for finality for enactment.", number, -- GitLab From 72addfaeed412a420ca90c2873d966d275c44171 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Tue, 11 Aug 2020 17:07:17 +0200 Subject: [PATCH 079/132] Fix wrong staking doc about transaction payment. (#6873) * Fx #4616 * Fix #4616 --- frame/staking/src/lib.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 33945c8cbdc..858bb279a85 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -606,7 +606,10 @@ pub struct Nominations { /// /// Except for initial nominations which are considered submitted at era 0. pub submitted_in: EraIndex, - /// Whether the nominations have been suppressed. + /// Whether the nominations have been suppressed. This can happen due to slashing of the + /// validators, or other events that might invalidate the nomination. + /// + /// NOTE: this for future proofing and is thus far not used. pub suppressed: bool, } @@ -2444,8 +2447,9 @@ impl Module { Ok(()) } - /// Update the ledger for a controller. This will also update the stash lock. The lock will - /// will lock the entire funds except paying for further transactions. + /// Update the ledger for a controller. + /// + /// This will also update the stash lock. 
fn update_ledger( controller: &T::AccountId, ledger: &StakingLedger> -- GitLab From fe3fc04672788c288a492cb96efcc28819a333f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Tue, 11 Aug 2020 20:55:15 +0100 Subject: [PATCH 080/132] docs: convert code of conduct to markdown (#6878) --- ...ODE_OF_CONDUCT.adoc => CODE_OF_CONDUCT.md} | 24 ++++++++++--------- 1 file changed, 13 insertions(+), 11 deletions(-) rename docs/{CODE_OF_CONDUCT.adoc => CODE_OF_CONDUCT.md} (77%) diff --git a/docs/CODE_OF_CONDUCT.adoc b/docs/CODE_OF_CONDUCT.md similarity index 77% rename from docs/CODE_OF_CONDUCT.adoc rename to docs/CODE_OF_CONDUCT.md index 0f7de7c7efe..400c9b3901e 100644 --- a/docs/CODE_OF_CONDUCT.adoc +++ b/docs/CODE_OF_CONDUCT.md @@ -1,10 +1,10 @@ -= Contributor Covenant Code of Conduct +# Contributor Covenant Code of Conduct -== Our Pledge +## Our Pledge In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. -== Our Standards +## Our Standards Examples of behavior that contributes to creating a positive environment include: @@ -22,29 +22,31 @@ Examples of unacceptable behavior by participants include: * Publishing others' private information, such as a physical or electronic address, without explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting -=== Facilitation, Not Strong Arming +### Facilitation, Not Strongarming -We recognize that this software is merely a tool for users to create and maintain their blockchain of preference. We see that blockchains are naturally community platforms with users being the ultimate decision makers. We assert that good software will maximize user agency by facilitate user-expression on the network. As such: +We recognise that this software is merely a tool for users to create and maintain their blockchain of preference. We see that blockchains are naturally community platforms with users being the ultimate decision makers. We assert that good software will maximise user agency by facilitate user-expression on the network. As such: * This project will strive to give users as much choice as is both reasonable and possible over what protocol they adhere to; but * use of the project's technical forums, commenting systems, pull requests and issue trackers as a means to express individual protocol preferences is forbidden. -== Our Responsibilities +## Our Responsibilities Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. -== Scope +## Scope This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. 
Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. -== Enforcement +## Enforcement -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at admin@parity.io. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at . The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. -== Attribution +## Attribution -This Code of Conduct is adapted from the http://contributor-covenant.org[Contributor Covenant], version 1.4, available at http://contributor-covenant.org/version/1/4 +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at https://contributor-covenant.org/version/1/4 + +[homepage]: https://contributor-covenant.org -- GitLab From e7cc59551bb84d7e47c2b6e2d90b5a36491ded5d Mon Sep 17 00:00:00 2001 From: h4x3rotab Date: Wed, 12 Aug 2020 04:12:34 +0800 Subject: [PATCH 081/132] Add Phala Network SS58 address type (#6758) --- primitives/core/src/crypto.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 2e71e676b3e..efacf0b2e76 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -448,6 +448,8 @@ ss58_address_format!( (20, "stafi", "Stafi mainnet, standard account (*25519).") SubsocialAccount => (28, "subsocial", "Subsocial network, standard account (*25519).") + PhalaAccount => + (30, "phala", "Phala Network, standard account (*25519).") RobonomicsAccount => (32, "robonomics", "Any Robonomics network standard account (*25519).") DataHighwayAccount => -- GitLab From a20fbd5cc4a4778b33059e924e0dc21edff3244c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Tue, 11 Aug 2020 22:21:45 +0100 Subject: [PATCH 082/132] docs: fix references to code of conduct document (#6879) --- README.md | 2 +- docs/README.adoc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 5764722373d..c586919a1dd 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ also try out one of the [tutorials](https://substrate.dev/en/tutorials). ## Contributions & Code of Conduct -Please follow the contributions guidelines as outlined in [`docs/CONTRIBUTING.adoc`](docs/CONTRIBUTING.adoc). In all communications and contributions, this project follows the [Contributor Covenant Code of Conduct](docs/CODE_OF_CONDUCT.adoc). 
+Please follow the contributions guidelines as outlined in [`docs/CONTRIBUTING.adoc`](docs/CONTRIBUTING.adoc). In all communications and contributions, this project follows the [Contributor Covenant Code of Conduct](docs/CODE_OF_CONDUCT.md). ## Security diff --git a/docs/README.adoc b/docs/README.adoc index 51e7748b67c..d1daeed07b5 100644 --- a/docs/README.adoc +++ b/docs/README.adoc @@ -523,7 +523,7 @@ include::CONTRIBUTING.adoc[] === Contributor Code of Conduct -include::CODE_OF_CONDUCT.adoc[] +include::CODE_OF_CONDUCT.md[] == License -- GitLab From d4efdf0a97ef8f669cbd8cfca2f7cad1972c3dea Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 12 Aug 2020 10:58:16 +0200 Subject: [PATCH 083/132] Fuse the import queue receiver (#6876) * Fix the import queue receiver * Add logging --- .../common/src/import_queue/basic_queue.rs | 28 ++++++++++++++++--- .../common/src/import_queue/buffered_link.rs | 18 ++++++------ 2 files changed, 34 insertions(+), 12 deletions(-) diff --git a/primitives/consensus/common/src/import_queue/basic_queue.rs b/primitives/consensus/common/src/import_queue/basic_queue.rs index dddc332f43e..e59f7ab5b60 100644 --- a/primitives/consensus/common/src/import_queue/basic_queue.rs +++ b/primitives/consensus/common/src/import_queue/basic_queue.rs @@ -96,7 +96,13 @@ impl ImportQueue for BasicQueue } trace!(target: "sync", "Scheduling {} blocks for import", blocks.len()); - let _ = self.sender.unbounded_send(ToWorkerMsg::ImportBlocks(origin, blocks)); + let res = self.sender.unbounded_send(ToWorkerMsg::ImportBlocks(origin, blocks)); + if res.is_err() { + log::error!( + target: "sync", + "import_blocks: Background import task is no longer alive" + ); + } } fn import_justification( @@ -106,10 +112,16 @@ impl ImportQueue for BasicQueue number: NumberFor, justification: Justification ) { - let _ = self.sender + let res = self.sender .unbounded_send( ToWorkerMsg::ImportJustification(who, hash, number, justification) ); + if res.is_err() { + log::error!( + target: "sync", + "import_justification: Background import task is no longer alive" + ); + } } fn import_finality_proof( @@ -120,14 +132,22 @@ impl ImportQueue for BasicQueue finality_proof: Vec, ) { trace!(target: "sync", "Scheduling finality proof of {}/{} for import", number, hash); - let _ = self.sender + let res = self.sender .unbounded_send( ToWorkerMsg::ImportFinalityProof(who, hash, number, finality_proof) ); + if res.is_err() { + log::error!( + target: "sync", + "import_finality_proof: Background import task is no longer alive" + ); + } } fn poll_actions(&mut self, cx: &mut Context, link: &mut dyn Link) { - self.result_port.poll_actions(cx, link); + if self.result_port.poll_actions(cx, link).is_err() { + log::error!(target: "sync", "poll_actions: Background import task is no longer alive"); + } } } diff --git a/primitives/consensus/common/src/import_queue/buffered_link.rs b/primitives/consensus/common/src/import_queue/buffered_link.rs index d85121a710e..a37d4c53c26 100644 --- a/primitives/consensus/common/src/import_queue/buffered_link.rs +++ b/primitives/consensus/common/src/import_queue/buffered_link.rs @@ -50,7 +50,7 @@ use crate::import_queue::{Origin, Link, BlockImportResult, BlockImportError}; pub fn buffered_link() -> (BufferedLinkSender, BufferedLinkReceiver) { let (tx, rx) = tracing_unbounded("mpsc_buffered_link"); let tx = BufferedLinkSender { tx }; - let rx = BufferedLinkReceiver { rx }; + let rx = BufferedLinkReceiver { rx: rx.fuse() }; (tx, rx) } @@ -127,7 +127,7 @@ impl Link for BufferedLinkSender { /// 
See [`buffered_link`]. pub struct BufferedLinkReceiver { - rx: TracingUnboundedReceiver>, + rx: stream::Fuse>>, } impl BufferedLinkReceiver { @@ -137,12 +137,14 @@ impl BufferedLinkReceiver { /// This method should behave in a way similar to `Future::poll`. It can register the current /// task and notify later when more actions are ready to be polled. To continue the comparison, /// it is as if this method always returned `Poll::Pending`. - pub fn poll_actions(&mut self, cx: &mut Context, link: &mut dyn Link) { + /// + /// Returns an error if the corresponding [`BufferedLinkSender`] has been closed. + pub fn poll_actions(&mut self, cx: &mut Context, link: &mut dyn Link) -> Result<(), ()> { loop { - let msg = if let Poll::Ready(Some(msg)) = Stream::poll_next(Pin::new(&mut self.rx), cx) { - msg - } else { - break + let msg = match Stream::poll_next(Pin::new(&mut self.rx), cx) { + Poll::Ready(Some(msg)) => msg, + Poll::Ready(None) => break Err(()), + Poll::Pending => break Ok(()), }; match msg { @@ -162,7 +164,7 @@ impl BufferedLinkReceiver { /// Close the channel. pub fn close(&mut self) { - self.rx.close() + self.rx.get_mut().close() } } -- GitLab From d7979d0cd3ccde46ad1f6d33fc0327322c2ddaf6 Mon Sep 17 00:00:00 2001 From: Shaopeng Wang Date: Wed, 12 Aug 2020 21:21:36 +1200 Subject: [PATCH 084/132] Implement 'transactional' annotation for runtime functions. (#6763) * Implement 'transactional' annotation for runtime functions. * Allow function attributes for dispatchable calls in decl_module. * decl_module docs: add transactional function example. * decl_module docs: add function attributes notes. * Fix license header. --- frame/support/procedural/src/lib.rs | 26 +++++++++ frame/support/procedural/src/transactional.rs | 40 +++++++++++++ frame/support/src/dispatch.rs | 46 +++++++++++++-- frame/support/src/lib.rs | 2 +- .../support/test/tests/storage_transaction.rs | 57 ++++++++++++++++++- 5 files changed, 164 insertions(+), 7 deletions(-) create mode 100644 frame/support/procedural/src/transactional.rs diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index 57c6080a90f..054d90d7bba 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -23,6 +23,7 @@ mod storage; mod construct_runtime; +mod transactional; use proc_macro::TokenStream; @@ -289,3 +290,28 @@ pub fn decl_storage(input: TokenStream) -> TokenStream { pub fn construct_runtime(input: TokenStream) -> TokenStream { construct_runtime::construct_runtime(input) } + +/// Execute the annotated function in a new storage transaction. +/// +/// The return type of the annotated function must be `Result`. All changes to storage performed +/// by the annotated function are discarded if it returns `Err`, or committed if `Ok`. +/// +/// #Example +/// +/// ```nocompile +/// #[transactional] +/// fn value_commits(v: u32) -> result::Result { +/// Value::set(v); +/// Ok(v) +/// } +/// +/// #[transactional] +/// fn value_rollbacks(v: u32) -> result::Result { +/// Value::set(v); +/// Err("nah") +/// } +/// ``` +#[proc_macro_attribute] +pub fn transactional(attr: TokenStream, input: TokenStream) -> TokenStream { + transactional::transactional(attr, input) +} diff --git a/frame/support/procedural/src/transactional.rs b/frame/support/procedural/src/transactional.rs new file mode 100644 index 00000000000..a001f44c4d4 --- /dev/null +++ b/frame/support/procedural/src/transactional.rs @@ -0,0 +1,40 @@ +// This file is part of Substrate. 
+ +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use proc_macro::TokenStream; +use quote::quote; +use syn::{parse_macro_input, ItemFn}; + +pub fn transactional(_attr: TokenStream, input: TokenStream) -> TokenStream { + let ItemFn { attrs, vis, sig, block } = parse_macro_input!(input as ItemFn); + + let output = quote! { + #(#attrs)* + #vis #sig { + use frame_support::storage::{with_transaction, TransactionOutcome}; + with_transaction(|| { + let r = #block; + if r.is_ok() { + TransactionOutcome::Commit(r) + } else { + TransactionOutcome::Rollback(r) + } + }) + } + }; + output.into() +} diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index 2d9e61323b8..442a99effad 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -167,6 +167,28 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # fn main() {} /// ``` /// +/// ### Transactional Function Example +/// +/// Transactional function discards all changes to storage if it returns `Err`, or commits if +/// `Ok`, via the #\[transactional\] attribute. Note the attribute must be after #\[weight\]. +/// +/// ``` +/// # #[macro_use] +/// # extern crate frame_support; +/// # use frame_support::transactional; +/// # use frame_system::Trait; +/// decl_module! { +/// pub struct Module for enum Call where origin: T::Origin { +/// #[weight = 0] +/// #[transactional] +/// fn my_short_function(origin) { +/// // Your implementation +/// } +/// } +/// } +/// # fn main() {} +/// ``` +/// /// ### Privileged Function Example /// /// A privileged function checks that the origin of the call is `ROOT`. @@ -189,6 +211,14 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # fn main() {} /// ``` /// +/// ### Attributes on Functions +/// +/// Attributes on functions are supported, but must be in the order of: +/// 1. Optional #\[doc\] attribute. +/// 2. #\[weight\] attribute. +/// 3. Optional function attributes, for instance #\[transactional\]. Those function attributes will be written +/// only on the dispatchable functions implemented on `Module`, not on the `Call` enum variant. +/// /// ## Multiple Module Instances Example /// /// A Substrate module can be built such that multiple instances of the same module can be used within a single @@ -1015,6 +1045,7 @@ macro_rules! decl_module { [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* #[weight = $weight:expr] + $(#[$fn_attr:meta])* $fn_vis:vis fn $fn_name:ident( $origin:ident $( , $(#[$codec_attr:ident])* $param_name:ident : $param:ty )* $(,)? ) $( -> $result:ty )* { $( $impl:tt )* } @@ -1039,6 +1070,7 @@ macro_rules! decl_module { $( $dispatchables )* $(#[doc = $doc_attr])* #[weight = $weight] + $(#[$fn_attr])* $fn_vis fn $fn_name( $origin $( , $(#[$codec_attr])* $param_name : $param )* ) $( -> $result )* { $( $impl )* } @@ -1066,6 +1098,7 @@ macro_rules! 
decl_module { { $( $integrity_test:tt )* } [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* + $(#[$fn_attr:meta])* $fn_vis:vis fn $fn_name:ident( $from:ident $( , $( #[$codec_attr:ident] )* $param_name:ident : $param:ty )* $(,)? ) $( -> $result:ty )* { $( $impl:tt )* } @@ -1094,6 +1127,7 @@ macro_rules! decl_module { [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* $(#[weight = $weight:expr])? + $(#[$fn_attr:meta])* $fn_vis:vis fn $fn_name:ident( $origin:ident : T::Origin $( , $( #[$codec_attr:ident] )* $param_name:ident : $param:ty )* $(,)? ) $( -> $result:ty )* { $( $impl:tt )* } @@ -1121,6 +1155,7 @@ macro_rules! decl_module { [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* $(#[weight = $weight:expr])? + $(#[$fn_attr:meta])* $fn_vis:vis fn $fn_name:ident( origin : $origin:ty $( , $( #[$codec_attr:ident] )* $param_name:ident : $param:ty )* $(,)? ) $( -> $result:ty )* { $( $impl:tt )* } @@ -1148,6 +1183,7 @@ macro_rules! decl_module { [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* $(#[weight = $weight:expr])? + $(#[$fn_attr:meta])* $fn_vis:vis fn $fn_name:ident( $( $(#[$codec_attr:ident])* $param_name:ident : $param:ty ),* $(,)? ) $( -> $result:ty )* { $( $impl:tt )* } @@ -1410,13 +1446,13 @@ macro_rules! decl_module { $origin_ty:ty; $error_type:ty; $ignore:ident; - $(#[doc = $doc_attr:tt])* + $(#[$fn_attr:meta])* $vis:vis fn $name:ident ( $origin:ident $(, $param:ident : $param_ty:ty )* ) { $( $impl:tt )* } ) => { - $(#[doc = $doc_attr])* #[allow(unreachable_code)] + $(#[$fn_attr])* $vis fn $name( $origin: $origin_ty $(, $param: $param_ty )* ) -> $crate::dispatch::DispatchResult { @@ -1432,12 +1468,12 @@ macro_rules! decl_module { $origin_ty:ty; $error_type:ty; $ignore:ident; - $(#[doc = $doc_attr:tt])* + $(#[$fn_attr:meta])* $vis:vis fn $name:ident ( $origin:ident $(, $param:ident : $param_ty:ty )* ) -> $result:ty { $( $impl:tt )* } ) => { - $(#[doc = $doc_attr])* + $(#[$fn_attr])* $vis fn $name($origin: $origin_ty $(, $param: $param_ty )* ) -> $result { $crate::sp_tracing::enter_span!(stringify!($name)); $( $impl )* @@ -1569,6 +1605,7 @@ macro_rules! decl_module { $( $(#[doc = $doc_attr:tt])* #[weight = $weight:expr] + $(#[$fn_attr:meta])* $fn_vis:vis fn $fn_name:ident( $from:ident $( , $(#[$codec_attr:ident])* $param_name:ident : $param:ty)* ) $( -> $result:ty )* { $( $impl:tt )* } @@ -1654,6 +1691,7 @@ macro_rules! decl_module { $(#[doc = $doc_attr])* /// /// NOTE: Calling this function will bypass origin filters. + $(#[$fn_attr])* $fn_vis fn $fn_name ( $from $(, $param_name : $param )* ) $( -> $result )* { $( $impl )* } diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index f0ffdc90a74..bdbdfc04a31 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -267,7 +267,7 @@ macro_rules! ord_parameter_types { } #[doc(inline)] -pub use frame_support_procedural::{decl_storage, construct_runtime}; +pub use frame_support_procedural::{decl_storage, construct_runtime, transactional}; /// Return Err of the expression: `return Err($expression);`. 
/// diff --git a/frame/support/test/tests/storage_transaction.rs b/frame/support/test/tests/storage_transaction.rs index bf6c70966b4..a9711ec267e 100644 --- a/frame/support/test/tests/storage_transaction.rs +++ b/frame/support/test/tests/storage_transaction.rs @@ -17,9 +17,11 @@ use codec::{Encode, Decode, EncodeLike}; use frame_support::{ - StorageMap, StorageValue, storage::{with_transaction, TransactionOutcome::*}, + assert_ok, assert_noop, dispatch::{DispatchError, DispatchResult}, transactional, StorageMap, StorageValue, + storage::{with_transaction, TransactionOutcome::*}, }; use sp_io::TestExternalities; +use sp_std::result; pub trait Trait { type Origin; @@ -27,7 +29,20 @@ pub trait Trait { } frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin { + #[weight = 0] + #[transactional] + fn value_commits(_origin, v: u32) { + Value::set(v); + } + + #[weight = 0] + #[transactional] + fn value_rollbacks(_origin, v: u32) -> DispatchResult { + Value::set(v); + Err(DispatchError::Other("nah")) + } + } } frame_support::decl_storage!{ @@ -37,6 +52,11 @@ frame_support::decl_storage!{ } } +struct Runtime; +impl Trait for Runtime { + type Origin = u32; + type BlockNumber = u32; +} #[test] fn storage_transaction_basic_commit() { @@ -157,3 +177,36 @@ fn storage_transaction_commit_then_rollback() { assert_eq!(Map::get("val3"), 0); }); } + +#[test] +fn transactional_annotation() { + #[transactional] + fn value_commits(v: u32) -> result::Result { + Value::set(v); + Ok(v) + } + + #[transactional] + fn value_rollbacks(v: u32) -> result::Result { + Value::set(v); + Err("nah") + } + + TestExternalities::default().execute_with(|| { + assert_ok!(value_commits(2), 2); + assert_eq!(Value::get(), 2); + + assert_noop!(value_rollbacks(3), "nah"); + }); +} + +#[test] +fn transactional_annotation_in_decl_module() { + TestExternalities::default().execute_with(|| { + let origin = 0; + assert_ok!(>::value_commits(origin, 2)); + assert_eq!(Value::get(), 2); + + assert_noop!(>::value_rollbacks(origin, 3), "nah"); + }); +} -- GitLab From f6d66db4a44596630943b4905e871125b9a490c6 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 12 Aug 2020 11:58:01 +0200 Subject: [PATCH 085/132] Add a warning if users pass --sentry or --sentry-nodes (#6779) * Add a warning if users pass --sentry or --sentry-nodes * Apply suggestions from code review Co-authored-by: Max Inden * Fix text Co-authored-by: parity-processbot <> Co-authored-by: Max Inden --- client/network/src/service.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 230af3fb8e1..11fdfc0beec 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -180,6 +180,21 @@ impl NetworkWorker { known_addresses.push((reserved.peer_id.clone(), reserved.multiaddr.clone())); } + let print_deprecated_message = match ¶ms.role { + Role::Sentry { .. } => true, + Role::Authority { sentry_nodes } if !sentry_nodes.is_empty() => true, + _ => false, + }; + if print_deprecated_message { + log::warn!( + "🙇 Sentry nodes are deprecated, and the `--sentry` and `--sentry-nodes` \ + CLI options will eventually be removed in a future version. The Substrate \ + and Polkadot networking protocol require validators to be \ + publicly-accessible. Please do not block access to your validator nodes. \ + For details, see https://github.com/paritytech/substrate/issues/6845." 
+ ); + } + let mut sentries_and_validators = HashSet::new(); match ¶ms.role { Role::Sentry { validators } => { -- GitLab From 5b809d2516e7297c2f2659de767088cdf2b4b7f5 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 12 Aug 2020 12:46:28 +0200 Subject: [PATCH 086/132] pallet-evm: fix wrong logic in mutate_account_basic (#6786) * pallet-evm: fix wrong logic in mutate_account_basic * Add test for mutate account --- frame/evm/src/lib.rs | 8 ++++---- frame/evm/src/tests.rs | 20 ++++++++++++++++++++ 2 files changed, 24 insertions(+), 4 deletions(-) diff --git a/frame/evm/src/lib.rs b/frame/evm/src/lib.rs index 013da0cca97..910030383e1 100644 --- a/frame/evm/src/lib.rs +++ b/frame/evm/src/lib.rs @@ -438,11 +438,11 @@ impl Module { } } - if current.balance < new.balance { - let diff = new.balance - current.balance; - T::Currency::slash(&account_id, diff.low_u128().unique_saturated_into()); - } else if current.balance > new.balance { + if current.balance > new.balance { let diff = current.balance - new.balance; + T::Currency::slash(&account_id, diff.low_u128().unique_saturated_into()); + } else if current.balance < new.balance { + let diff = new.balance - current.balance; T::Currency::deposit_creating(&account_id, diff.low_u128().unique_saturated_into()); } } diff --git a/frame/evm/src/tests.rs b/frame/evm/src/tests.rs index f818ee630b7..652d6c723b9 100644 --- a/frame/evm/src/tests.rs +++ b/frame/evm/src/tests.rs @@ -166,3 +166,23 @@ fn fail_call_return_ok() { )); }); } + +#[test] +fn mutate_account_works() { + new_test_ext().execute_with(|| { + EVM::mutate_account_basic( + &H160::from_str("1000000000000000000000000000000000000001").unwrap(), + Account { + nonce: U256::from(10), + balance: U256::from(1000), + }, + ); + + assert_eq!(EVM::account_basic( + &H160::from_str("1000000000000000000000000000000000000001").unwrap() + ), Account { + nonce: U256::from(10), + balance: U256::from(1000), + }); + }); +} -- GitLab From 0c3cdf16d48532969237d1aa0e5cf27d7ade8018 Mon Sep 17 00:00:00 2001 From: mattrutherford <44339188+mattrutherford@users.noreply.github.com> Date: Wed, 12 Aug 2020 12:53:21 +0100 Subject: [PATCH 087/132] Implement tracing::Event handling & parent_id for spans and events (#6672) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * implement events handling, implement parent_id for spans & events * add events to sp_io::storage * update test * add tests * adjust limit * let tracing crate handle parent_ids * re-enable current-id tracking * add test for threads with CurrentSpan * fix log level * remove redundant check for non wasm traces * remove duplicate definition in test * Adding conditional events API * prefer explicit parent_id over current, enhance test * limit changes to client::tracing event implementation * remove From impl due to fallback required on parent_id * implement SPAN_LIMIT change event log output * change version of tracing-core * update dependancies * revert limit * remove duplicate dependency * Apply suggestions from code review Co-authored-by: Bastian Köcher Co-authored-by: Matt Rutherford Co-authored-by: Benjamin Kampmann Co-authored-by: Bastian Köcher --- Cargo.lock | 74 +++++- bin/node/cli/Cargo.toml | 2 +- client/cli/src/lib.rs | 2 +- client/executor/Cargo.toml | 2 +- client/executor/src/integration_tests/mod.rs | 5 +- client/service/Cargo.toml | 2 +- client/tracing/Cargo.toml | 6 +- client/tracing/src/lib.rs | 240 ++++++++++++++++--- primitives/runtime-interface/test/Cargo.toml | 2 +- primitives/tracing/Cargo.toml | 2 +- 10 
files changed, 288 insertions(+), 49 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 781e5e14bad..d98b3bb9d95 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3222,6 +3222,15 @@ dependencies = [ "libc", ] +[[package]] +name = "matchers" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" +dependencies = [ + "regex-automata", +] + [[package]] name = "matches" version = "0.1.8" @@ -5818,6 +5827,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" dependencies = [ "byteorder", + "regex-syntax", ] [[package]] @@ -7087,7 +7097,7 @@ dependencies = [ "slog", "sp-tracing", "tracing", - "tracing-core", + "tracing-subscriber", ] [[package]] @@ -7372,6 +7382,15 @@ dependencies = [ "opaque-debug 0.2.3", ] +[[package]] +name = "sharded-slab" +version = "0.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06d5a3f5166fb5b42a5439f2eee8b9de149e235961e3eb21c5808fc3ea17ff3e" +dependencies = [ + "lazy_static", +] + [[package]] name = "shlex" version = "0.1.1" @@ -9116,9 +9135,9 @@ checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" [[package]] name = "tracing" -version = "0.1.14" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7c6b59d116d218cb2d990eb06b77b64043e0268ef7323aae63d8b30ae462923" +checksum = "f0aae59226cf195d8e74d4b34beae1859257efb4e5fed3f147d2dc2c7d372178" dependencies = [ "cfg-if", "tracing-attributes", @@ -9127,9 +9146,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99bbad0de3fd923c9c3232ead88510b783e5a4d16a6154adffa3d53308de984c" +checksum = "f0693bf8d6f2bf22c690fc61a9d21ac69efdbb894a17ed596b9af0f01e64b84b" dependencies = [ "proc-macro2", "quote", @@ -9138,11 +9157,52 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.10" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2734b5a028fa697686f16c6d18c2c6a3c7e41513f9a213abb6754c4acb3c8d7" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "tracing-log" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0aa83a9a47081cd522c09c81b31aec2c9273424976f922ad61c053b58350b715" +checksum = "5e0f8c7178e13481ff6765bd169b33e8d554c5d2bbede5e32c356194be02b9b9" dependencies = [ "lazy_static", + "log", + "tracing-core", +] + +[[package]] +name = "tracing-serde" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6ccba2f8f16e0ed268fc765d9b7ff22e965e7185d32f8f1ec8294fe17d86e79" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7b33f8b2ef2ab0c3778c12646d9c42a24f7772bee4cdafc72199644a9f58fdc" +dependencies = [ + "ansi_term 0.12.1", + "chrono", + "lazy_static", + "matchers", + "regex", + "serde", + "serde_json", + "sharded-slab", + "smallvec 1.4.1", + "tracing-core", + "tracing-log", + "tracing-serde", ] [[package]] diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 2f0124482e2..16ab9bbe806 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -43,7 +43,7 @@ jsonrpc-pubsub = "14.2.0" 
log = "0.4.8" rand = "0.7.2" structopt = { version = "0.3.8", optional = true } -tracing = "0.1.10" +tracing = "0.1.18" parking_lot = "0.10.0" # primitives diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index 7899e48b0a2..f940ab0b95d 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -240,7 +240,7 @@ pub fn init_logger(pattern: &str) { builder.filter(Some("hyper"), log::LevelFilter::Warn); builder.filter(Some("cranelift_wasm"), log::LevelFilter::Warn); // Always log the special target `sc_tracing`, overrides global level - builder.filter(Some("sc_tracing"), log::LevelFilter::Info); + builder.filter(Some("sc_tracing"), log::LevelFilter::Trace); // Enable info for others. builder.filter(None, log::LevelFilter::Info); diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index 2a6844c31f3..f59c89a9d70 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -46,7 +46,7 @@ test-case = "0.3.3" sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } sp-tracing = { version = "2.0.0-rc5", path = "../../primitives/tracing" } sc-tracing = { version = "2.0.0-rc5", path = "../tracing" } -tracing = "0.1.14" +tracing = "0.1.18" [features] default = [ "std" ] diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index 5276884e923..a9ac0d0f30c 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -661,7 +661,8 @@ fn parallel_execution(wasm_method: WasmExecutionMethod) { fn wasm_tracing_should_work(wasm_method: WasmExecutionMethod) { use std::sync::{Arc, Mutex}; - use sc_tracing::SpanDatum; + + use sc_tracing::{SpanDatum, TraceEvent}; struct TestTraceHandler(Arc>>); @@ -669,6 +670,8 @@ fn wasm_tracing_should_work(wasm_method: WasmExecutionMethod) { fn handle_span(&self, sd: SpanDatum) { self.0.lock().unwrap().push(sd); } + + fn handle_event(&self, _event: TraceEvent) {} } let traces = Arc::new(Mutex::new(Vec::new())); diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 212d4e4b59e..3ad91dc3ea3 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -72,7 +72,7 @@ sc-telemetry = { version = "2.0.0-rc5", path = "../telemetry" } sc-offchain = { version = "2.0.0-rc5", path = "../offchain" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc5"} sc-tracing = { version = "2.0.0-rc5", path = "../tracing" } -tracing = "0.1.10" +tracing = "0.1.18" parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } [target.'cfg(not(target_os = "unknown"))'.dependencies] diff --git a/client/tracing/Cargo.toml b/client/tracing/Cargo.toml index 998dfb94de0..0a692cbe57f 100644 --- a/client/tracing/Cargo.toml +++ b/client/tracing/Cargo.toml @@ -19,10 +19,8 @@ rustc-hash = "1.1.0" serde = "1.0.101" serde_json = "1.0.41" slog = { version = "2.5.2", features = ["nested-values"] } -tracing-core = "0.1.7" +tracing = "0.1.18" +tracing-subscriber = "0.2.10" sp-tracing = { version = "2.0.0-rc2", path = "../../primitives/tracing" } sc-telemetry = { version = "2.0.0-rc5", path = "../telemetry" } - -[dev-dependencies] -tracing = "0.1.10" diff --git a/client/tracing/src/lib.rs b/client/tracing/src/lib.rs index c2b036e218f..f642b00720f 100644 --- a/client/tracing/src/lib.rs +++ b/client/tracing/src/lib.rs @@ -31,7 +31,7 @@ use std::time::{Duration, Instant}; use parking_lot::Mutex; use serde::ser::{Serialize, 
Serializer, SerializeMap}; -use tracing_core::{ +use tracing::{ event::Event, field::{Visit, Field}, Level, @@ -39,6 +39,7 @@ use tracing_core::{ span::{Attributes, Id, Record}, subscriber::Subscriber, }; +use tracing_subscriber::CurrentSpan; use sc_telemetry::{telemetry, SUBSTRATE_INFO}; use sp_tracing::proxy::{WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER}; @@ -46,6 +47,15 @@ use sp_tracing::proxy::{WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER}; const ZERO_DURATION: Duration = Duration::from_nanos(0); const PROXY_TARGET: &'static str = "sp_tracing::proxy"; +/// Responsible for assigning ids to new spans, which are not re-used. +pub struct ProfilingSubscriber { + next_id: AtomicU64, + targets: Vec<(String, Level)>, + trace_handler: Box, + span_data: Mutex>, + current_span: CurrentSpan, +} + /// Used to configure how to receive the metrics #[derive(Debug, Clone)] pub enum TracingReceiver { @@ -65,14 +75,28 @@ impl Default for TracingReceiver { pub trait TraceHandler: Send + Sync { /// Process a `SpanDatum` fn handle_span(&self, span: SpanDatum); + /// Process a `TraceEvent` + fn handle_event(&self, event: TraceEvent); +} + +/// Represents a tracing event, complete with values +#[derive(Debug)] +pub struct TraceEvent { + pub name: &'static str, + pub target: String, + pub level: Level, + pub values: Values, + pub parent_id: Option, } /// Represents a single instance of a tracing span #[derive(Debug)] pub struct SpanDatum { /// id for this span - pub id: u64, - /// Name of the span + pub id: Id, + /// id of the parent span, if any + pub parent_id: Option, + /// Name of this span pub name: String, /// Target, typically module pub target: String, @@ -192,14 +216,6 @@ impl slog::Value for Values { } } -/// Responsible for assigning ids to new spans, which are not re-used. 
-pub struct ProfilingSubscriber { - next_id: AtomicU64, - targets: Vec<(String, Level)>, - trace_handler: Box, - span_data: Mutex>, -} - impl ProfilingSubscriber { /// Takes a `TracingReceiver` and a comma separated list of targets, /// either with a level: "pallet=trace,frame=debug" @@ -229,6 +245,7 @@ impl ProfilingSubscriber { targets, trace_handler, span_data: Mutex::new(FxHashMap::default()), + current_span: Default::default() } } @@ -271,17 +288,18 @@ impl Subscriber for ProfilingSubscriber { } fn new_span(&self, attrs: &Attributes<'_>) -> Id { - let id = self.next_id.fetch_add(1, Ordering::Relaxed); + let id = Id::from_u64(self.next_id.fetch_add(1, Ordering::Relaxed)); let mut values = Values::default(); attrs.record(&mut values); // If this is a wasm trace, check if target/level is enabled if let Some(wasm_target) = values.string_values.get(WASM_TARGET_KEY) { if !self.check_target(wasm_target, attrs.metadata().level()) { - return Id::from_u64(id); + return id } } let span_datum = SpanDatum { - id, + id: id.clone(), + parent_id: attrs.parent().cloned().or_else(|| self.current_span.id()), name: attrs.metadata().name().to_owned(), target: attrs.metadata().target().to_owned(), level: attrs.metadata().level().clone(), @@ -290,33 +308,46 @@ impl Subscriber for ProfilingSubscriber { overall_time: ZERO_DURATION, values, }; - self.span_data.lock().insert(id, span_datum); - Id::from_u64(id) + self.span_data.lock().insert(id.clone(), span_datum); + id } fn record(&self, span: &Id, values: &Record<'_>) { let mut span_data = self.span_data.lock(); - if let Some(s) = span_data.get_mut(&span.into_u64()) { + if let Some(s) = span_data.get_mut(span) { values.record(&mut s.values); } } fn record_follows_from(&self, _span: &Id, _follows: &Id) {} - fn event(&self, _event: &Event<'_>) {} + fn event(&self, event: &Event<'_>) { + let mut values = Values::default(); + event.record(&mut values); + let trace_event = TraceEvent { + name: event.metadata().name(), + target: event.metadata().target().to_owned(), + level: event.metadata().level().clone(), + values, + parent_id: event.parent().cloned().or_else(|| self.current_span.id()), + }; + self.trace_handler.handle_event(trace_event); + } fn enter(&self, span: &Id) { + self.current_span.enter(span.clone()); let mut span_data = self.span_data.lock(); let start_time = Instant::now(); - if let Some(mut s) = span_data.get_mut(&span.into_u64()) { + if let Some(mut s) = span_data.get_mut(&span) { s.start_time = start_time; } } fn exit(&self, span: &Id) { + self.current_span.exit(); let end_time = Instant::now(); let mut span_data = self.span_data.lock(); - if let Some(mut s) = span_data.get_mut(&span.into_u64()) { + if let Some(mut s) = span_data.get_mut(&span) { s.overall_time = end_time - s.start_time + s.overall_time; } } @@ -324,7 +355,7 @@ impl Subscriber for ProfilingSubscriber { fn try_close(&self, span: Id) -> bool { let span_datum = { let mut span_data = self.span_data.lock(); - span_data.remove(&span.into_u64()) + span_data.remove(&span) }; if let Some(mut span_datum) = span_datum { if span_datum.name == WASM_TRACE_IDENTIFIER { @@ -335,8 +366,10 @@ impl Subscriber for ProfilingSubscriber { if let Some(t) = span_datum.values.string_values.remove(WASM_TARGET_KEY) { span_datum.target = t; } - } - if self.check_target(&span_datum.target, &span_datum.level) { + if self.check_target(&span_datum.target, &span_datum.level) { + self.trace_handler.handle_span(span_datum); + } + } else { self.trace_handler.handle_span(span_datum); } }; @@ -361,23 +394,37 @@ impl 
TraceHandler for LogTraceHandler { fn handle_span(&self, span_datum: SpanDatum) { if span_datum.values.is_empty() { log::log!( - log_level(span_datum.level), - "{}: {}, time: {}", + log_level(span_datum.level), + "{}: {}, time: {}, id: {}, parent_id: {:?}", span_datum.target, span_datum.name, span_datum.overall_time.as_nanos(), + span_datum.id.into_u64(), + span_datum.parent_id.map(|s| s.into_u64()), ); } else { log::log!( log_level(span_datum.level), - "{}: {}, time: {}, {}", + "{}: {}, time: {}, id: {}, parent_id: {:?}, values: {}", span_datum.target, span_datum.name, span_datum.overall_time.as_nanos(), + span_datum.id.into_u64(), + span_datum.parent_id.map(|s| s.into_u64()), span_datum.values, ); } } + + fn handle_event(&self, event: TraceEvent) { + log::log!( + log_level(event.level), + "{}, parent_id: {:?}, {}", + event.target, + event.parent_id.map(|s| s.into_u64()), + event.values, + ); + } } /// TraceHandler for sending span data to telemetry, @@ -390,11 +437,21 @@ impl TraceHandler for TelemetryTraceHandler { telemetry!(SUBSTRATE_INFO; "tracing.profiling"; "name" => span_datum.name, "target" => span_datum.target, - "line" => span_datum.line, "time" => span_datum.overall_time.as_nanos(), + "id" => span_datum.id.into_u64(), + "parent_id" => span_datum.parent_id.map(|i| i.into_u64()), "values" => span_datum.values ); } + + fn handle_event(&self, event: TraceEvent) { + telemetry!(SUBSTRATE_INFO; "tracing.event"; + "name" => event.name, + "target" => event.target, + "parent_id" => event.parent_id.map(|i| i.into_u64()), + "values" => event.values + ); + } } #[cfg(test)] @@ -404,37 +461,47 @@ mod tests { struct TestTraceHandler { spans: Arc>>, + events: Arc>>, } impl TraceHandler for TestTraceHandler { fn handle_span(&self, sd: SpanDatum) { self.spans.lock().push(sd); } + + fn handle_event(&self, event: TraceEvent) { + self.events.lock().push(event); + } } - fn setup_subscriber() -> (ProfilingSubscriber, Arc>>) { + fn setup_subscriber() -> (ProfilingSubscriber, Arc>>, Arc>>) { let spans = Arc::new(Mutex::new(Vec::new())); + let events = Arc::new(Mutex::new(Vec::new())); let handler = TestTraceHandler { spans: spans.clone(), + events: events.clone(), }; let test_subscriber = ProfilingSubscriber::new_with_handler( Box::new(handler), "test_target" ); - (test_subscriber, spans) + (test_subscriber, spans, events) } #[test] fn test_span() { - let (sub, spans) = setup_subscriber(); + let (sub, spans, events) = setup_subscriber(); let _sub_guard = tracing::subscriber::set_default(sub); let span = tracing::info_span!(target: "test_target", "test_span1"); assert_eq!(spans.lock().len(), 0); + assert_eq!(events.lock().len(), 0); let _guard = span.enter(); assert_eq!(spans.lock().len(), 0); + assert_eq!(events.lock().len(), 0); drop(_guard); drop(span); assert_eq!(spans.lock().len(), 1); + assert_eq!(events.lock().len(), 0); let sd = spans.lock().remove(0); assert_eq!(sd.name, "test_span1"); assert_eq!(sd.target, "test_target"); @@ -442,9 +509,26 @@ mod tests { assert!(time > 0); } + #[test] + fn test_span_parent_id() { + let (sub, spans, _events) = setup_subscriber(); + let _sub_guard = tracing::subscriber::set_default(sub); + let span1 = tracing::info_span!(target: "test_target", "test_span1"); + let _guard1 = span1.enter(); + let span2 = tracing::info_span!(target: "test_target", "test_span2"); + let _guard2 = span2.enter(); + drop(_guard2); + drop(span2); + let sd2 = spans.lock().remove(0); + drop(_guard1); + drop(span1); + let sd1 = spans.lock().remove(0); + assert_eq!(sd1.id, 
sd2.parent_id.unwrap()) + } + #[test] fn test_span_values() { - let (sub, spans) = setup_subscriber(); + let (sub, spans, _events) = setup_subscriber(); let _sub_guard = tracing::subscriber::set_default(sub); let test_bool = true; let test_u64 = 1u64; @@ -470,4 +554,98 @@ mod tests { assert_eq!(values.i64_values.get("test_i64").unwrap(), &test_i64); assert_eq!(values.string_values.get("test_str").unwrap(), &test_str.to_owned()); } + + #[test] + fn test_event() { + let (sub, _spans, events) = setup_subscriber(); + let _sub_guard = tracing::subscriber::set_default(sub); + tracing::event!(target: "test_target", tracing::Level::INFO, "test_event"); + let mut te1 = events.lock().remove(0); + assert_eq!(te1.values.string_values.remove(&"message".to_owned()).unwrap(), "test_event".to_owned()); + } + + #[test] + fn test_event_parent_id() { + let (sub, spans, events) = setup_subscriber(); + let _sub_guard = tracing::subscriber::set_default(sub); + + // enter span + let span1 = tracing::info_span!(target: "test_target", "test_span1"); + let _guard1 = span1.enter(); + + // emit event + tracing::event!(target: "test_target", tracing::Level::INFO, "test_event"); + + //exit span + drop(_guard1); + drop(span1); + + let sd1 = spans.lock().remove(0); + let te1 = events.lock().remove(0); + + assert_eq!(sd1.id, te1.parent_id.unwrap()); + } + + #[test] + fn test_parent_id_with_threads() { + use std::sync::mpsc; + use std::thread; + + let (sub, spans, events) = setup_subscriber(); + let _sub_guard = tracing::subscriber::set_global_default(sub); + let span1 = tracing::info_span!(target: "test_target", "test_span1"); + let _guard1 = span1.enter(); + + let (tx, rx) = mpsc::channel(); + let handle = thread::spawn(move || { + let span2 = tracing::info_span!(target: "test_target", "test_span2"); + let _guard2 = span2.enter(); + // emit event + tracing::event!(target: "test_target", tracing::Level::INFO, "test_event1"); + for msg in rx.recv() { + if msg == false { + break; + } + } + // gard2 and span2 dropped / exited + }); + + // wait for Event to be dispatched and stored + while events.lock().is_empty() { + thread::sleep(Duration::from_millis(1)); + } + + // emit new event (will be second item in Vec) while span2 still active in other thread + tracing::event!(target: "test_target", tracing::Level::INFO, "test_event2"); + + // stop thread and drop span + let _ = tx.send(false); + let _ = handle.join(); + + // wait for Span to be dispatched and stored + while spans.lock().is_empty() { + thread::sleep(Duration::from_millis(1)); + } + let span2 = spans.lock().remove(0); + let event1 = events.lock().remove(0); + drop(_guard1); + drop(span1); + + // emit event with no parent + tracing::event!(target: "test_target", tracing::Level::INFO, "test_event3"); + + let span1 = spans.lock().remove(0); + let event2 = events.lock().remove(0); + + assert_eq!(event1.values.string_values.get("message").unwrap(), "test_event1"); + assert_eq!(event2.values.string_values.get("message").unwrap(), "test_event2"); + assert!(span1.parent_id.is_none()); + assert!(span2.parent_id.is_none()); + assert_eq!(span2.id, event1.parent_id.unwrap()); + assert_eq!(span1.id, event2.parent_id.unwrap()); + assert_ne!(span2.id, span1.id); + + let event3 = events.lock().remove(0); + assert!(event3.parent_id.is_none()); + } } diff --git a/primitives/runtime-interface/test/Cargo.toml b/primitives/runtime-interface/test/Cargo.toml index 5e2ea5a6234..48dbeedbdad 100644 --- a/primitives/runtime-interface/test/Cargo.toml +++ 
b/primitives/runtime-interface/test/Cargo.toml @@ -20,4 +20,4 @@ sp-state-machine = { version = "0.8.0-rc5", path = "../../../primitives/state-ma sp-runtime = { version = "2.0.0-rc5", path = "../../runtime" } sp-core = { version = "2.0.0-rc5", path = "../../core" } sp-io = { version = "2.0.0-rc5", path = "../../io" } -tracing = "0.1.13" +tracing = "0.1.18" diff --git a/primitives/tracing/Cargo.toml b/primitives/tracing/Cargo.toml index fc3d311298d..03bec79685e 100644 --- a/primitives/tracing/Cargo.toml +++ b/primitives/tracing/Cargo.toml @@ -12,7 +12,7 @@ description = "Instrumentation primitives and macros for Substrate." targets = ["x86_64-unknown-linux-gnu"] [dependencies] -tracing = { version = "0.1.13", optional = true } +tracing = { version = "0.1.18", optional = true } rental = { version = "0.5.5", optional = true } log = { version = "0.4.8", optional = true } -- GitLab From c495f89ca3483ab5972cf87c407191ac5fc91617 Mon Sep 17 00:00:00 2001 From: Cecile Tonglet Date: Wed, 12 Aug 2020 16:07:11 +0200 Subject: [PATCH 088/132] Add async test helper to timeout and provide a task_executor automatically (#6651) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Initial commit Forked at: 60e3a693b29789045614e2ed73126695bc8b0794 Parent branch: origin/master * Add async test helper to timeout and provide a task_executor automatically * simplify error message to avoid difference between CI and locally * forgot env var * Use runtime env var instead of build env var * Rename variable to SUBSTRATE_TEST_TIMEOUT * CLEANUP Forked at: 60e3a693b29789045614e2ed73126695bc8b0794 Parent branch: origin/master * Apply suggestions from code review Co-authored-by: Bastian Köcher * Re-export from test-utils * Default value to 120 * fix wrong crate in ci * Revert "Default value to 120" This reverts commit 8e458717078b242ffce7d3c4f66241d76f075125. * Fix version * WIP Forked at: 60e3a693b29789045614e2ed73126695bc8b0794 Parent branch: origin/master * WIP Forked at: 60e3a693b29789045614e2ed73126695bc8b0794 Parent branch: origin/master * WIP Forked at: 60e3a693b29789045614e2ed73126695bc8b0794 Parent branch: origin/master * remove feature flag * fix missing dependency * CLEANUP Forked at: 60e3a693b29789045614e2ed73126695bc8b0794 Parent branch: origin/master * fix test * Removed autotests=false * Some doc... 
* Apply suggestions from code review Co-authored-by: Bastian Köcher * WIP Forked at: 60e3a693b29789045614e2ed73126695bc8b0794 Parent branch: origin/master * WIP Forked at: 60e3a693b29789045614e2ed73126695bc8b0794 Parent branch: origin/master * Update test-utils/src/lib.rs Co-authored-by: Bastian Köcher --- .gitlab-ci.yml | 3 +- Cargo.lock | 32 ++++++ Cargo.toml | 2 + test-utils/Cargo.toml | 9 ++ test-utils/derive/Cargo.toml | 16 +++ test-utils/derive/src/lib.rs | 107 ++++++++++++++++++ test-utils/src/lib.rs | 23 ++++ test-utils/test-crate/Cargo.toml | 16 +++ test-utils/test-crate/src/main.rs | 25 ++++ test-utils/tests/basic.rs | 58 ++++++++++ test-utils/tests/ui.rs | 24 ++++ test-utils/tests/ui/missing-func-parameter.rs | 24 ++++ .../tests/ui/missing-func-parameter.stderr | 5 + .../tests/ui/too-many-func-parameters.rs | 27 +++++ .../tests/ui/too-many-func-parameters.stderr | 5 + 15 files changed, 375 insertions(+), 1 deletion(-) create mode 100644 test-utils/derive/Cargo.toml create mode 100644 test-utils/derive/src/lib.rs create mode 100644 test-utils/test-crate/Cargo.toml create mode 100644 test-utils/test-crate/src/main.rs create mode 100644 test-utils/tests/basic.rs create mode 100644 test-utils/tests/ui.rs create mode 100644 test-utils/tests/ui/missing-func-parameter.rs create mode 100644 test-utils/tests/ui/missing-func-parameter.stderr create mode 100644 test-utils/tests/ui/too-many-func-parameters.rs create mode 100644 test-utils/tests/ui/too-many-func-parameters.stderr diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 82368843d11..c97d68bab00 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -263,6 +263,7 @@ test-linux-stable: &test-linux script: # this job runs all tests in former runtime-benchmarks, frame-staking and wasmtime tests - time cargo test --workspace --locked --release --verbose --features runtime-benchmarks --manifest-path bin/node/cli/Cargo.toml + - WASM_BUILD_NO_COLOR=1 SUBSTRATE_TEST_TIMEOUT=1 time cargo test -p substrate-test-utils --release --verbose --locked -- --ignored timeout - sccache -s unleash-check: @@ -727,7 +728,7 @@ deploy-kubernetes-alerting-rules: RULES: .maintain/monitoring/alerting-rules/alerting-rules.yaml script: - echo "deploying prometheus alerting rules" - - kubectl -n ${NAMESPACE} patch prometheusrule ${PROMETHEUSRULE} + - kubectl -n ${NAMESPACE} patch prometheusrule ${PROMETHEUSRULE} --type=merge --patch "$(sed 's/^/ /;1s/^/spec:\n/' ${RULES})" only: refs: diff --git a/Cargo.lock b/Cargo.lock index d98b3bb9d95..dc664f97dde 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1186,6 +1186,12 @@ dependencies = [ "winapi 0.3.8", ] +[[package]] +name = "dissimilar" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc4b29f4b9bb94bf267d57269fd0706d343a160937108e9619fe380645428abb" + [[package]] name = "dns-parser" version = "0.8.0" @@ -8605,6 +8611,31 @@ dependencies = [ [[package]] name = "substrate-test-utils" version = "2.0.0-rc5" +dependencies = [ + "futures 0.3.5", + "sc-service", + "substrate-test-utils-derive", + "tokio 0.2.21", + "trybuild", +] + +[[package]] +name = "substrate-test-utils-derive" +version = "0.8.0-rc5" +dependencies = [ + "proc-macro-crate", + "quote 1.0.6", + "syn 1.0.33", +] + +[[package]] +name = "substrate-test-utils-test-crate" +version = "0.1.0" +dependencies = [ + "sc-service", + "substrate-test-utils", + "tokio 0.2.21", +] [[package]] name = "substrate-wasm-builder" @@ -9271,6 +9302,7 @@ version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "459186ab1afd6d93bd23c2269125f4f7694f8771fe0e64434b4bdc212b94034d" dependencies = [ + "dissimilar", "glob 0.3.0", "lazy_static", "serde", diff --git a/Cargo.toml b/Cargo.toml index ba146e55bca..f22e3427a70 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -171,9 +171,11 @@ members = [ "primitives/utils", "primitives/wasm-interface", "test-utils/client", + "test-utils/derive", "test-utils/runtime", "test-utils/runtime/client", "test-utils/runtime/transaction-pool", + "test-utils/test-crate", "utils/browser", "utils/build-script-utils", "utils/fork-tree", diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index 6d56de9ff99..3b2a3702430 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -9,3 +9,12 @@ repository = "https://github.com/paritytech/substrate/" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +futures = { version = "0.3.1", features = ["compat"] } +substrate-test-utils-derive = { path = "./derive" } +tokio = { version = "0.2.13", features = ["macros"] } + +[dev-dependencies] +sc-service = { path = "../client/service" } +trybuild = { version = "1.0", features = ["diff"] } diff --git a/test-utils/derive/Cargo.toml b/test-utils/derive/Cargo.toml new file mode 100644 index 00000000000..5ec3e10108c --- /dev/null +++ b/test-utils/derive/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "substrate-test-utils-derive" +version = "0.8.0-rc5" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" + +[dependencies] +quote = "1.0.6" +syn = { version = "1.0.33", features = ["full"] } +proc-macro-crate = "0.1.4" + +[lib] +proc-macro = true diff --git a/test-utils/derive/src/lib.rs b/test-utils/derive/src/lib.rs new file mode 100644 index 00000000000..f5d62706896 --- /dev/null +++ b/test-utils/derive/src/lib.rs @@ -0,0 +1,107 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use proc_macro::{Span, TokenStream}; +use proc_macro_crate::crate_name; +use quote::quote; +use std::env; + +#[proc_macro_attribute] +pub fn test(args: TokenStream, item: TokenStream) -> TokenStream { + impl_test(args, item) +} + +fn impl_test(args: TokenStream, item: TokenStream) -> TokenStream { + let input = syn::parse_macro_input!(item as syn::ItemFn); + let args = syn::parse_macro_input!(args as syn::AttributeArgs); + + parse_knobs(input, args).unwrap_or_else(|e| e.to_compile_error().into()) +} + +fn parse_knobs( + mut input: syn::ItemFn, + args: syn::AttributeArgs, +) -> Result { + let sig = &mut input.sig; + let body = &input.block; + let attrs = &input.attrs; + let vis = input.vis; + + if sig.inputs.len() != 1 { + let msg = "the test function accepts only one argument of type sc_service::TaskExecutor"; + return Err(syn::Error::new_spanned(&sig, msg)); + } + let (task_executor_name, task_executor_type) = match sig.inputs.pop().map(|x| x.into_value()) { + Some(syn::FnArg::Typed(x)) => (x.pat, x.ty), + _ => { + let msg = + "the test function accepts only one argument of type sc_service::TaskExecutor"; + return Err(syn::Error::new_spanned(&sig, msg)); + } + }; + + let crate_name = if env::var("CARGO_PKG_NAME").unwrap() == "substrate-test-utils" { + syn::Ident::new("substrate_test_utils", Span::call_site().into()) + } else { + let crate_name = crate_name("substrate-test-utils") + .map_err(|e| syn::Error::new_spanned(&sig, e))?; + + syn::Ident::new(&crate_name, Span::call_site().into()) + }; + + let header = { + quote! { + #[#crate_name::tokio::test(#(#args)*)] + } + }; + + let result = quote! { + #header + #(#attrs)* + #vis #sig { + use #crate_name::futures::future::FutureExt; + + let #task_executor_name: #task_executor_type = (|fut, _| { + #crate_name::tokio::spawn(fut).map(drop) + }) + .into(); + let timeout_task = #crate_name::tokio::time::delay_for( + std::time::Duration::from_secs( + std::env::var("SUBSTRATE_TEST_TIMEOUT") + .ok() + .and_then(|x| x.parse().ok()) + .unwrap_or(600)) + ).fuse(); + let actual_test_task = async move { + #body + } + .fuse(); + + #crate_name::futures::pin_mut!(timeout_task, actual_test_task); + + #crate_name::futures::select! { + _ = timeout_task => { + panic!("The test took too long!"); + }, + _ = actual_test_task => {}, + } + } + }; + + Ok(result.into()) +} diff --git a/test-utils/src/lib.rs b/test-utils/src/lib.rs index 8163460df74..224eacd5129 100644 --- a/test-utils/src/lib.rs +++ b/test-utils/src/lib.rs @@ -17,6 +17,29 @@ //! Test utils +#[doc(hidden)] +pub use futures; +/// Marks async function to be executed by an async runtime and provide a `TaskExecutor`, suitable +/// to test environment. +/// +/// # Requirements +/// +/// You must have tokio in the `[dev-dependencies]` of your crate to use this macro. +/// +/// # Example +/// +/// ``` +/// #[substrate_test_utils::test] +/// async fn basic_test(task_executor: TaskExecutor) { +/// assert!(true); +/// // create your node in here and use task_executor +/// // then don't forget to gracefully shutdown your node before exit +/// } +/// ``` +pub use substrate_test_utils_derive::test; +#[doc(hidden)] +pub use tokio; + /// Panic when the vectors are different, without taking the order into account. 
/// /// # Examples diff --git a/test-utils/test-crate/Cargo.toml b/test-utils/test-crate/Cargo.toml new file mode 100644 index 00000000000..6d16edde12c --- /dev/null +++ b/test-utils/test-crate/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "substrate-test-utils-test-crate" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dev-dependencies] +tokio = { version = "0.2.13", features = ["macros"] } +test-utils = { path = "..", package = "substrate-test-utils" } +sc-service = { path = "../../client/service" } diff --git a/test-utils/test-crate/src/main.rs b/test-utils/test-crate/src/main.rs new file mode 100644 index 00000000000..209f29f7613 --- /dev/null +++ b/test-utils/test-crate/src/main.rs @@ -0,0 +1,25 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#[cfg(test)] +#[test_utils::test] +async fn basic_test(_: sc_service::TaskExecutor) { + assert!(true); +} + +fn main() {} diff --git a/test-utils/tests/basic.rs b/test-utils/tests/basic.rs new file mode 100644 index 00000000000..3e96bfe83d3 --- /dev/null +++ b/test-utils/tests/basic.rs @@ -0,0 +1,58 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
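
For readers following the macro code in `test-utils/derive/src/lib.rs` above, the integration tests below exercise the attribute end to end. Roughly, the attribute rewrites an annotated test into a `#[tokio::test]` that rebuilds the `TaskExecutor` argument from a closure and races the test body against a configurable timeout. The following is an illustrative sketch of what the expansion for a trivial test could look like (paths spelled out for readability; it is not the literal output of the `quote!` block above):

```rust
// Approximate expansion of `#[substrate_test_utils::test] async fn basic_test(_: TaskExecutor)`.
// Sketch only: names and paths are written out for clarity, not taken verbatim from the macro.
#[substrate_test_utils::tokio::test]
async fn basic_test() {
    use substrate_test_utils::futures::future::FutureExt;

    // The single function argument is rebuilt from a closure that spawns futures on tokio.
    let _: sc_service::TaskExecutor =
        (|fut, _| substrate_test_utils::tokio::spawn(fut).map(drop)).into();

    // The test body races against a timeout read from `SUBSTRATE_TEST_TIMEOUT` (600s default).
    let timeout_task = substrate_test_utils::tokio::time::delay_for(
        std::time::Duration::from_secs(
            std::env::var("SUBSTRATE_TEST_TIMEOUT")
                .ok()
                .and_then(|x| x.parse().ok())
                .unwrap_or(600),
        ),
    )
    .fuse();
    let actual_test_task = async move { assert!(true); }.fuse();

    substrate_test_utils::futures::pin_mut!(timeout_task, actual_test_task);
    substrate_test_utils::futures::select! {
        _ = timeout_task => panic!("The test took too long!"),
        _ = actual_test_task => {},
    }
}
```

The `timeout` test further down relies on exactly this select, which is why it can be driven with `SUBSTRATE_TEST_TIMEOUT=1 cargo test -- --ignored timeout`.
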
+ +use sc_service::{TaskExecutor, TaskType}; + +#[substrate_test_utils::test] +async fn basic_test(_: TaskExecutor) { + assert!(true); +} + +#[substrate_test_utils::test] +#[should_panic(expected = "boo!")] +async fn panicking_test(_: TaskExecutor) { + panic!("boo!"); +} + +#[substrate_test_utils::test(max_threads = 2)] +async fn basic_test_with_args(_: TaskExecutor) { + assert!(true); +} + +#[substrate_test_utils::test] +async fn rename_argument(ex: TaskExecutor) { + let ex2 = ex.clone(); + ex2.spawn(Box::pin(async { () }), TaskType::Blocking); + assert!(true); +} + +#[substrate_test_utils::test] +#[should_panic(expected = "test took too long")] +// NOTE: enable this test only after setting SUBSTRATE_TEST_TIMEOUT to a smaller value +// +// SUBSTRATE_TEST_TIMEOUT=1 cargo test -- --ignored timeout +#[ignore] +async fn timeout(_: TaskExecutor) { + tokio::time::delay_for(std::time::Duration::from_secs( + std::env::var("SUBSTRATE_TEST_TIMEOUT") + .expect("env var SUBSTRATE_TEST_TIMEOUT has been provided by the user") + .parse::() + .unwrap() + 1, + )) + .await; +} diff --git a/test-utils/tests/ui.rs b/test-utils/tests/ui.rs new file mode 100644 index 00000000000..1f3b466c7dd --- /dev/null +++ b/test-utils/tests/ui.rs @@ -0,0 +1,24 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#[test] +fn substrate_test_utils_derive_trybuild() { + let t = trybuild::TestCases::new(); + t.compile_fail("tests/ui/missing-func-parameter.rs"); + t.compile_fail("tests/ui/too-many-func-parameters.rs"); +} diff --git a/test-utils/tests/ui/missing-func-parameter.rs b/test-utils/tests/ui/missing-func-parameter.rs new file mode 100644 index 00000000000..bd34a76902e --- /dev/null +++ b/test-utils/tests/ui/missing-func-parameter.rs @@ -0,0 +1,24 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +#[substrate_test_utils::test] +async fn missing_func_parameter() { + assert!(true); +} + +fn main() {} diff --git a/test-utils/tests/ui/missing-func-parameter.stderr b/test-utils/tests/ui/missing-func-parameter.stderr new file mode 100644 index 00000000000..fbe0bc69918 --- /dev/null +++ b/test-utils/tests/ui/missing-func-parameter.stderr @@ -0,0 +1,5 @@ +error: the test function accepts only one argument of type sc_service::TaskExecutor + --> $DIR/missing-func-parameter.rs:20:1 + | +20 | async fn missing_func_parameter() { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/test-utils/tests/ui/too-many-func-parameters.rs b/test-utils/tests/ui/too-many-func-parameters.rs new file mode 100644 index 00000000000..9aeadc2a884 --- /dev/null +++ b/test-utils/tests/ui/too-many-func-parameters.rs @@ -0,0 +1,27 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#[allow(unused_imports)] +use sc_service::TaskExecutor; + +#[substrate_test_utils::test] +async fn too_many_func_parameters(task_executor_1: TaskExecutor, task_executor_2: TaskExecutor) { + assert!(true); +} + +fn main() {} diff --git a/test-utils/tests/ui/too-many-func-parameters.stderr b/test-utils/tests/ui/too-many-func-parameters.stderr new file mode 100644 index 00000000000..e30bb4ed8ee --- /dev/null +++ b/test-utils/tests/ui/too-many-func-parameters.stderr @@ -0,0 +1,5 @@ +error: the test function accepts only one argument of type sc_service::TaskExecutor + --> $DIR/too-many-func-parameters.rs:23:1 + | +23 | async fn too_many_func_parameters(task_executor_1: TaskExecutor, task_executor_2: TaskExecutor) { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -- GitLab From 473a23f462c60fb5a063ea8dff9261e27ac0ea48 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 12 Aug 2020 16:16:40 +0200 Subject: [PATCH 089/132] client/authority-discovery: Introduce AuthorityDiscoveryService (#6760) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * client/authority-discovery: Rename AuthorityDiscovery to XXXWorker * client/authority-discovery: Introduce AuthorityDiscoveryService Add a basic `AuthorityDiscoveryService` implementation which enables callers to get the addresses for a given `AuthorityId` from the local cache. * client/authority-discovery: Split into worker and service mod Move `Service` and `Worker` to their own Rust modules resulting in the following file structure. 
├── build.rs ├── Cargo.toml └── src ├── error.rs ├── lib.rs ├── service.rs ├── tests.rs ├── worker │   ├── addr_cache.rs │   ├── schema │   │   └── dht.proto │   └── tests.rs └── worker.rs * client/authority-discovery: Cache PeerId -> AuthorityId mapping * client/authority-discovery: Update priority group on interval Instead of updating the authority discovery peerset priority group each time a new DHT value is found, update it regularly on an interval. This removes the need for deterministic random selection. Instead of trying to return a random stable set of `Multiaddr`s, the `AddrCache` now returns a random set on each call. * client/authority-discovery: Implement Service::get_authority_id * client/authority-discovery: Use HashMap instead of BTreeMap * client/authority-discovery: Rework priority group interval * client/authority-discovery: Fix comment * bin/node/cli: Update authority discovery constructor * client/authority-discovery: Fuse from_service receiver * client/authority-discovery: Remove Rng import * client/authority-discovery: Ignore Multiaddr without PeerId * client/authority-discovery/service: Add note on returned None * client/authority-discovery/addr_cache: Replace double clone with deref --- bin/node/cli/src/service.rs | 4 +- client/authority-discovery/build.rs | 2 +- client/authority-discovery/src/addr_cache.rs | 205 ----- client/authority-discovery/src/lib.rs | 708 +--------------- client/authority-discovery/src/service.rs | 70 ++ client/authority-discovery/src/tests.rs | 540 +----------- client/authority-discovery/src/worker.rs | 785 ++++++++++++++++++ .../src/worker/addr_cache.rs | 233 ++++++ .../src/{ => worker}/schema/dht.proto | 0 .../authority-discovery/src/worker/tests.rs | 693 ++++++++++++++++ 10 files changed, 1844 insertions(+), 1396 deletions(-) delete mode 100644 client/authority-discovery/src/addr_cache.rs create mode 100644 client/authority-discovery/src/service.rs create mode 100644 client/authority-discovery/src/worker.rs create mode 100644 client/authority-discovery/src/worker/addr_cache.rs rename client/authority-discovery/src/{ => worker}/schema/dht.proto (100%) create mode 100644 client/authority-discovery/src/worker/tests.rs diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index be95ed6de53..cd98c268096 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -266,7 +266,7 @@ pub fn new_full_base( Event::Dht(e) => Some(e), _ => None, }}).boxed(); - let authority_discovery = sc_authority_discovery::AuthorityDiscovery::new( + let (authority_discovery_worker, _service) = sc_authority_discovery::new_worker_and_service( client.clone(), network.clone(), sentries, @@ -275,7 +275,7 @@ pub fn new_full_base( prometheus_registry.clone(), ); - task_manager.spawn_handle().spawn("authority-discovery", authority_discovery); + task_manager.spawn_handle().spawn("authority-discovery-worker", authority_discovery_worker); } // if the node isn't actively participating in consensus then it doesn't diff --git a/client/authority-discovery/build.rs b/client/authority-discovery/build.rs index ed632575f3b..c44fe8578ba 100644 --- a/client/authority-discovery/build.rs +++ b/client/authority-discovery/build.rs @@ -1,3 +1,3 @@ fn main() { - prost_build::compile_protos(&["src/schema/dht.proto"], &["src/schema"]).unwrap(); + prost_build::compile_protos(&["src/worker/schema/dht.proto"], &["src/worker/schema"]).unwrap(); } diff --git a/client/authority-discovery/src/addr_cache.rs b/client/authority-discovery/src/addr_cache.rs deleted file 
mode 100644 index f108afce0a9..00000000000 --- a/client/authority-discovery/src/addr_cache.rs +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; -use std::{ - clone::Clone, - cmp::{Eq, Ord, PartialEq}, - collections::BTreeMap, - convert::AsRef, - hash::Hash, -}; - -/// The maximum number of authority connections initialized through the authority discovery module. -/// -/// In other words the maximum size of the `authority` peer set priority group. -const MAX_NUM_AUTHORITY_CONN: usize = 10; - -/// Cache of Multiaddresses of authority nodes or their sentry nodes. -// -// The network peerset interface for priority groups lets us only set an entire group, but we -// retrieve the addresses of other authorities one by one from the network. To use the peerset -// interface we need to cache the addresses and always overwrite the entire peerset priority -// group. To ensure this map doesn't grow indefinitely `purge_old_authorities_from_cache` -// function is called each time we add a new entry. -pub(super) struct AddrCache { - cache: BTreeMap>, - - /// Random number to seed address selection RNG. - /// - /// A node should only try to connect to a subset of all authorities. To choose this subset one - /// uses randomness. The choice should differ between nodes to prevent hot spots, but not within - /// each node between each update to prevent connection churn. Thus before each selection we - /// seed an RNG with the same seed. - rand_addr_selection_seed: u64, -} - -impl AddrCache -where - Id: Clone + Eq + Hash + Ord, - Addr: Clone + PartialEq + AsRef<[u8]>, -{ - pub fn new() -> Self { - AddrCache { - cache: BTreeMap::new(), - rand_addr_selection_seed: rand::thread_rng().gen(), - } - } - - pub fn insert(&mut self, id: Id, mut addresses: Vec) { - if addresses.is_empty() { - return; - } - - addresses.sort_by(|a, b| a.as_ref().cmp(b.as_ref())); - self.cache.insert(id, addresses); - } - - /// Returns the number of authority IDs in the cache. - pub fn num_ids(&self) -> usize { - self.cache.len() - } - - // Each node should connect to a subset of all authorities. In order to prevent hot spots, this - // selection is based on randomness. Selecting randomly each time we alter the address cache - // would result in connection churn. To reduce this churn a node generates a seed on startup and - // uses this seed for a new rng on each update. (One could as well use ones peer id as a seed. - // Given that the peer id is publicly known, it would make this process predictable by others, - // which might be used as an attack.) 
- pub fn get_subset(&self) -> Vec { - let mut rng = StdRng::seed_from_u64(self.rand_addr_selection_seed); - - let mut addresses = self - .cache - .iter() - .map(|(_peer_id, addresses)| { - addresses - .choose(&mut rng) - .expect("an empty address vector is never inserted into the cache") - }) - .cloned() - .collect::>(); - - addresses.dedup(); - addresses.sort_by(|a, b| a.as_ref().cmp(b.as_ref())); - - addresses - .choose_multiple(&mut rng, MAX_NUM_AUTHORITY_CONN) - .cloned() - .collect() - } - - pub fn retain_ids(&mut self, ids: &Vec) { - let to_remove = self - .cache - .iter() - .filter(|(id, _addresses)| !ids.contains(id)) - .map(|entry| entry.0) - .cloned() - .collect::>(); - - for key in to_remove { - self.cache.remove(&key); - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use quickcheck::{QuickCheck, TestResult}; - - #[test] - fn returns_addresses_of_same_authorities_on_repeated_calls() { - fn property(input: Vec<(u32, Vec)>) -> TestResult { - // Expect less than 1000 authorities. - if input.len() > 1000 { - return TestResult::discard(); - } - - // Expect less than 100 addresses per authority. - for i in &input { - if i.1.len() > 100 { - return TestResult::discard(); - } - } - - let mut c = AddrCache::new(); - - for (id, addresses) in input { - c.insert(id, addresses); - } - - let result = c.get_subset(); - assert!(result.len() <= MAX_NUM_AUTHORITY_CONN); - - for _ in 1..100 { - assert_eq!(c.get_subset(), result); - } - - TestResult::passed() - } - - QuickCheck::new() - .max_tests(10) - .quickcheck(property as fn(Vec<(u32, Vec)>) -> TestResult) - } - - #[test] - fn returns_same_addresses_of_first_authority_when_second_authority_changes() { - let mut c = AddrCache::new(); - - // Insert addresses of first authority. - let addresses = (1..100) - .map(|i| format!("{:?}", i)) - .collect::>(); - c.insert(1, addresses); - let first_subset = c.get_subset(); - assert_eq!(1, first_subset.len()); - - // Insert address of second authority. - c.insert(2, vec!["a".to_string()]); - let second_subset = c.get_subset(); - assert_eq!(2, second_subset.len()); - - // Expect same address of first authority. - assert!(second_subset.contains(&first_subset[0])); - - // Alter address of second authority. - c.insert(2, vec!["b".to_string()]); - let second_subset = c.get_subset(); - assert_eq!(2, second_subset.len()); - - // Expect same address of first authority. - assert!(second_subset.contains(&first_subset[0])); - } - - #[test] - fn retains_only_entries_of_provided_ids() { - let mut cache = AddrCache::new(); - - cache.insert(1, vec![vec![10]]); - cache.insert(2, vec![vec![20]]); - cache.insert(3, vec![vec![30]]); - - cache.retain_ids(&vec![1, 3]); - - let mut subset = cache.get_subset(); - subset.sort(); - - assert_eq!(vec![vec![10], vec![30]], subset); - } -} diff --git a/client/authority-discovery/src/lib.rs b/client/authority-discovery/src/lib.rs index 1a4473d665c..347deb8d9fc 100644 --- a/client/authority-discovery/src/lib.rs +++ b/client/authority-discovery/src/lib.rs @@ -18,705 +18,61 @@ //! Substrate authority discovery. //! -//! This crate enables Substrate authorities to directly connect to other authorities. -//! [`AuthorityDiscovery`] implements the Future trait. By polling [`AuthorityDiscovery`] an -//! authority: +//! This crate enables Substrate authorities to discover and directly connect to +//! other authorities. It is split into two components the [`Worker`] and the +//! [`Service`]. //! -//! -//! 1. **Makes itself discoverable** -//! -//! 1. 
Retrieves its external addresses (including peer id) or the ones of its sentry nodes. -//! -//! 2. Signs the above. -//! -//! 3. Puts the signature and the addresses on the libp2p Kademlia DHT. -//! -//! -//! 2. **Discovers other authorities** -//! -//! 1. Retrieves the current set of authorities. -//! -//! 2. Starts DHT queries for the ids of the authorities. -//! -//! 3. Validates the signatures of the retrieved key value pairs. -//! -//! 4. Adds the retrieved external addresses as priority nodes to the peerset. -//! -//! When run as a sentry node, the authority discovery module does not -//! publish any addresses to the DHT but still discovers validators and -//! sentry nodes of validators, i.e. only step 2 (Discovers other authorities) -//! is executed. +//! See [`Worker`] and [`Service`] for more documentation. + +pub use crate::{service::Service, worker::{NetworkProvider, Worker, Role}}; -use std::collections::{HashMap, HashSet}; -use std::convert::TryInto; -use std::marker::PhantomData; use std::pin::Pin; use std::sync::Arc; -use std::time::{Duration, Instant}; -use futures::task::{Context, Poll}; -use futures::{Future, FutureExt, ready, Stream, StreamExt}; -use futures_timer::Delay; +use futures::channel::{mpsc, oneshot}; +use futures::Stream; -use addr_cache::AddrCache; -use codec::Decode; -use error::{Error, Result}; -use libp2p::core::multiaddr; -use log::{debug, error, log_enabled}; -use prometheus_endpoint::{Counter, CounterVec, Gauge, Opts, U64, register}; -use prost::Message; use sc_client_api::blockchain::HeaderBackend; -use sc_network::{ - config::MultiaddrWithPeerId, - DhtEvent, - ExHashT, - Multiaddr, - NetworkStateInfo, - PeerId, -}; -use sp_authority_discovery::{AuthorityDiscoveryApi, AuthorityId, AuthoritySignature, AuthorityPair}; -use sp_core::crypto::{key_types, Pair}; -use sp_core::traits::BareCryptoStorePtr; -use sp_runtime::{traits::Block as BlockT, generic::BlockId}; +use sc_network::{config::MultiaddrWithPeerId, DhtEvent, Multiaddr, PeerId}; +use sp_authority_discovery::{AuthorityDiscoveryApi, AuthorityId}; +use sp_runtime::traits::Block as BlockT; use sp_api::ProvideRuntimeApi; +mod error; +mod service; #[cfg(test)] mod tests; +mod worker; -mod error; -mod addr_cache; -/// Dht payload schemas generated from Protobuf definitions via Prost crate in build.rs. -mod schema { - include!(concat!(env!("OUT_DIR"), "/authority_discovery.rs")); -} - -type Interval = Box + Unpin + Send + Sync>; - -const LOG_TARGET: &'static str = "sub-authority-discovery"; - -/// Upper bound estimation on how long one should wait before accessing the Kademlia DHT. -const LIBP2P_KADEMLIA_BOOTSTRAP_TIME: Duration = Duration::from_secs(30); - -/// Name of the Substrate peerset priority group for authorities discovered through the authority -/// discovery module. -const AUTHORITIES_PRIORITY_GROUP_NAME: &'static str = "authorities"; - -/// Role an authority discovery module can run as. -pub enum Role { - /// Actual authority as well as a reference to its key store. - Authority(BareCryptoStorePtr), - /// Sentry node that guards an authority. - /// - /// No reference to its key store needed, as sentry nodes don't have an identity to sign - /// addresses with in the first place. - Sentry, -} - -/// An `AuthorityDiscovery` makes a given authority discoverable and discovers other authorities. 
-pub struct AuthorityDiscovery -where - Block: BlockT + 'static, - Network: NetworkProvider, - Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, - >::Api: AuthorityDiscoveryApi, -{ +/// Create a new authority discovery [`Worker`] and [`Service`]. +pub fn new_worker_and_service( client: Arc, - network: Arc, - /// List of sentry node public addresses. - // - // There are 3 states: - // - None: No addresses were specified. - // - Some(vec![]): Addresses were specified, but none could be parsed as proper - // Multiaddresses. - // - Some(vec![a, b, c, ...]): Valid addresses were specified. - sentry_nodes: Option>, - /// Channel we receive Dht events on. + sentry_nodes: Vec, dht_event_rx: Pin + Send>>, - - /// Interval to be proactive, publishing own addresses. - publish_interval: Interval, - /// Interval on which to query for addresses of other authorities. - query_interval: Interval, - - addr_cache: addr_cache::AddrCache, - - metrics: Option, - role: Role, - - phantom: PhantomData, -} - -impl AuthorityDiscovery -where - Block: BlockT + Unpin + 'static, - Network: NetworkProvider, - Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, - >::Api: - AuthorityDiscoveryApi, - Self: Future, -{ - /// Return a new authority discovery. - /// - /// Note: When specifying `sentry_nodes` this module will not advertise the public addresses of - /// the node itself but only the public addresses of its sentry nodes. - pub fn new( - client: Arc, - network: Arc, - sentry_nodes: Vec, - dht_event_rx: Pin + Send>>, - role: Role, - prometheus_registry: Option, - ) -> Self { - // Kademlia's default time-to-live for Dht records is 36h, republishing records every 24h. - // Given that a node could restart at any point in time, one can not depend on the - // republishing process, thus publishing own external addresses should happen on an interval - // < 36h. - let publish_interval = interval_at( - Instant::now() + LIBP2P_KADEMLIA_BOOTSTRAP_TIME, - Duration::from_secs(12 * 60 * 60), - ); - - // External addresses of other authorities can change at any given point in time. The - // interval on which to query for external addresses of other authorities is a trade off - // between efficiency and performance. - let query_interval = interval_at( - Instant::now() + LIBP2P_KADEMLIA_BOOTSTRAP_TIME, - Duration::from_secs(10 * 60), - ); - - let sentry_nodes = if !sentry_nodes.is_empty() { - Some(sentry_nodes.into_iter().map(|ma| ma.concat()).collect::>()) - } else { - None - }; - - let addr_cache = AddrCache::new(); - - let metrics = match prometheus_registry { - Some(registry) => { - match Metrics::register(®istry) { - Ok(metrics) => Some(metrics), - Err(e) => { - error!(target: LOG_TARGET, "Failed to register metrics: {:?}", e); - None - }, - } - }, - None => None, - }; - - AuthorityDiscovery { - client, - network, - sentry_nodes, - dht_event_rx, - publish_interval, - query_interval, - addr_cache, - role, - metrics, - phantom: PhantomData, - } - } - - /// Publish either our own or if specified the public addresses of our sentry nodes. - fn publish_ext_addresses(&mut self) -> Result<()> { - let key_store = match &self.role { - Role::Authority(key_store) => key_store, - // Only authority nodes can put addresses (their own or the ones of their sentry nodes) - // on the Dht. Sentry nodes don't have a known identity to authenticate such addresses, - // thus `publish_ext_addresses` becomes a no-op. 
- Role::Sentry => return Ok(()), - }; - - if let Some(metrics) = &self.metrics { - metrics.publish.inc() - } - - let addresses: Vec<_> = match &self.sentry_nodes { - Some(addrs) => addrs.clone().into_iter() - .map(|a| a.to_vec()) - .collect(), - None => self.network.external_addresses() - .into_iter() - .map(|a| a.with(multiaddr::Protocol::P2p( - self.network.local_peer_id().into(), - ))) - .map(|a| a.to_vec()) - .collect(), - }; - - if let Some(metrics) = &self.metrics { - metrics.amount_last_published.set(addresses.len() as u64); - } - - let mut serialized_addresses = vec![]; - schema::AuthorityAddresses { addresses } - .encode(&mut serialized_addresses) - .map_err(Error::EncodingProto)?; - - let keys = AuthorityDiscovery::get_own_public_keys_within_authority_set( - &key_store, - &self.client, - )?.into_iter().map(Into::into).collect::>(); - - let signatures = key_store.read() - .sign_with_all( - key_types::AUTHORITY_DISCOVERY, - keys.clone(), - serialized_addresses.as_slice(), - ) - .map_err(|_| Error::Signing)?; - - for (sign_result, key) in signatures.into_iter().zip(keys) { - let mut signed_addresses = vec![]; - - // sign_with_all returns Result signature - // is generated for a public key that is supported. - // Verify that all signatures exist for all provided keys. - let signature = sign_result.map_err(|_| Error::MissingSignature(key.clone()))?; - schema::SignedAuthorityAddresses { - addresses: serialized_addresses.clone(), - signature, - } - .encode(&mut signed_addresses) - .map_err(Error::EncodingProto)?; - - self.network.put_value( - hash_authority_id(key.1.as_ref()), - signed_addresses, - ); - } - - Ok(()) - } - - fn request_addresses_of_others(&mut self) -> Result<()> { - let id = BlockId::hash(self.client.info().best_hash); - - let authorities = self - .client - .runtime_api() - .authorities(&id) - .map_err(Error::CallingRuntime)?; - - let local_keys = match &self.role { - Role::Authority(key_store) => { - key_store.read() - .sr25519_public_keys(key_types::AUTHORITY_DISCOVERY) - .into_iter() - .collect::>() - }, - Role::Sentry => HashSet::new(), - }; - - for authority_id in authorities.iter() { - // Make sure we don't look up our own keys. - if !local_keys.contains(authority_id.as_ref()) { - if let Some(metrics) = &self.metrics { - metrics.request.inc(); - } - - self.network - .get_value(&hash_authority_id(authority_id.as_ref())); - } - } - - Ok(()) - } - - /// Handle incoming Dht events. - /// - /// Returns either: - /// - Poll::Pending when there are no more events to handle or - /// - Poll::Ready(()) when the dht event stream terminated. 
- fn handle_dht_events(&mut self, cx: &mut Context) -> Poll<()>{ - loop { - match ready!(self.dht_event_rx.poll_next_unpin(cx)) { - Some(DhtEvent::ValueFound(v)) => { - if let Some(metrics) = &self.metrics { - metrics.dht_event_received.with_label_values(&["value_found"]).inc(); - } - - if log_enabled!(log::Level::Debug) { - let hashes = v.iter().map(|(hash, _value)| hash.clone()); - debug!( - target: LOG_TARGET, - "Value for hash '{:?}' found on Dht.", hashes, - ); - } - - if let Err(e) = self.handle_dht_value_found_event(v) { - if let Some(metrics) = &self.metrics { - metrics.handle_value_found_event_failure.inc(); - } - - debug!( - target: LOG_TARGET, - "Failed to handle Dht value found event: {:?}", e, - ); - } - } - Some(DhtEvent::ValueNotFound(hash)) => { - if let Some(metrics) = &self.metrics { - metrics.dht_event_received.with_label_values(&["value_not_found"]).inc(); - } - - debug!( - target: LOG_TARGET, - "Value for hash '{:?}' not found on Dht.", hash - ) - }, - Some(DhtEvent::ValuePut(hash)) => { - if let Some(metrics) = &self.metrics { - metrics.dht_event_received.with_label_values(&["value_put"]).inc(); - } - - debug!( - target: LOG_TARGET, - "Successfully put hash '{:?}' on Dht.", hash, - ) - }, - Some(DhtEvent::ValuePutFailed(hash)) => { - if let Some(metrics) = &self.metrics { - metrics.dht_event_received.with_label_values(&["value_put_failed"]).inc(); - } - - debug!( - target: LOG_TARGET, - "Failed to put hash '{:?}' on Dht.", hash - ) - }, - None => { - debug!(target: LOG_TARGET, "Dht event stream terminated."); - return Poll::Ready(()); - }, - } - } - } - - fn handle_dht_value_found_event( - &mut self, - values: Vec<(libp2p::kad::record::Key, Vec)>, - ) -> Result<()> { - // Ensure `values` is not empty and all its keys equal. - let remote_key = values.iter().fold(Ok(None), |acc, (key, _)| { - match acc { - Ok(None) => Ok(Some(key.clone())), - Ok(Some(ref prev_key)) if prev_key != key => Err( - Error::ReceivingDhtValueFoundEventWithDifferentKeys - ), - x @ Ok(_) => x, - Err(e) => Err(e), - } - })?.ok_or(Error::ReceivingDhtValueFoundEventWithNoRecords)?; - - let authorities = { - let block_id = BlockId::hash(self.client.info().best_hash); - // From the Dht we only get the hashed authority id. In order to retrieve the actual - // authority id and to ensure it is actually an authority, we match the hash against the - // hash of the authority id of all other authorities. - let authorities = self.client.runtime_api().authorities(&block_id)?; - self.addr_cache.retain_ids(&authorities); - authorities - .into_iter() - .map(|id| (hash_authority_id(id.as_ref()), id)) - .collect::>() - }; - - // Check if the event origins from an authority in the current authority set. - let authority_id: &AuthorityId = authorities - .get(&remote_key) - .ok_or(Error::MatchingHashedAuthorityIdWithAuthorityId)?; - - let local_peer_id = self.network.local_peer_id(); - - let remote_addresses: Vec = values.into_iter() - .map(|(_k, v)| { - let schema::SignedAuthorityAddresses { signature, addresses } = - schema::SignedAuthorityAddresses::decode(v.as_slice()) - .map_err(Error::DecodingProto)?; - - let signature = AuthoritySignature::decode(&mut &signature[..]) - .map_err(Error::EncodingDecodingScale)?; - - if !AuthorityPair::verify(&signature, &addresses, authority_id) { - return Err(Error::VerifyingDhtPayload); - } - - let addresses = schema::AuthorityAddresses::decode(addresses.as_slice()) - .map(|a| a.addresses) - .map_err(Error::DecodingProto)? 
- .into_iter() - .map(|a| a.try_into()) - .collect::>() - .map_err(Error::ParsingMultiaddress)?; - - Ok(addresses) - }) - .collect::>>>()? - .into_iter() - .flatten() - // Ignore own addresses. - .filter(|addr| !addr.iter().any(|protocol| { - // Parse to PeerId first as Multihashes of old and new PeerId - // representation don't equal. - // - // See https://github.com/libp2p/rust-libp2p/issues/555 for - // details. - if let multiaddr::Protocol::P2p(hash) = protocol { - let peer_id = match PeerId::from_multihash(hash) { - Ok(peer_id) => peer_id, - Err(_) => return true, // Discard address. - }; - - return peer_id == local_peer_id; - } - - false // Multiaddr does not contain a PeerId. - })) - .collect(); - - if !remote_addresses.is_empty() { - self.addr_cache.insert(authority_id.clone(), remote_addresses); - if let Some(metrics) = &self.metrics { - metrics.known_authorities_count.set( - self.addr_cache.num_ids().try_into().unwrap_or(std::u64::MAX) - ); - } - self.update_peer_set_priority_group()?; - } - - Ok(()) - } - - /// Retrieve our public keys within the current authority set. - // - // A node might have multiple authority discovery keys within its keystore, e.g. an old one and - // one for the upcoming session. In addition it could be participating in the current authority - // set with two keys. The function does not return all of the local authority discovery public - // keys, but only the ones intersecting with the current authority set. - fn get_own_public_keys_within_authority_set( - key_store: &BareCryptoStorePtr, - client: &Client, - ) -> Result> { - let local_pub_keys = key_store.read() - .sr25519_public_keys(key_types::AUTHORITY_DISCOVERY) - .into_iter() - .collect::>(); - - let id = BlockId::hash(client.info().best_hash); - let current_authorities = client.runtime_api() - .authorities(&id) - .map_err(Error::CallingRuntime)? - .into_iter() - .map(std::convert::Into::into) - .collect::>(); - - let intersection = local_pub_keys.intersection(¤t_authorities) - .cloned() - .map(std::convert::Into::into) - .collect(); - - Ok(intersection) - } - - /// Update the peer set 'authority' priority group. - fn update_peer_set_priority_group(&self) -> Result<()> { - let addresses = self.addr_cache.get_subset(); - - if let Some(metrics) = &self.metrics { - metrics.priority_group_size.set(addresses.len().try_into().unwrap_or(std::u64::MAX)); - } - - debug!( - target: LOG_TARGET, - "Applying priority group {:?} to peerset.", addresses, - ); - self.network - .set_priority_group( - AUTHORITIES_PRIORITY_GROUP_NAME.to_string(), - addresses.into_iter().collect(), - ) - .map_err(Error::SettingPeersetPriorityGroup)?; - - Ok(()) - } -} - -impl Future for AuthorityDiscovery + prometheus_registry: Option, +) -> (Worker, Service) where Block: BlockT + Unpin + 'static, Network: NetworkProvider, Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, - >::Api: - AuthorityDiscoveryApi, -{ - type Output = (); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { - // Process incoming events. - if let Poll::Ready(()) = self.handle_dht_events(cx) { - // `handle_dht_events` returns `Poll::Ready(())` when the Dht event stream terminated. - // Termination of the Dht event stream implies that the underlying network terminated, - // thus authority discovery should terminate as well. - return Poll::Ready(()); - } - - - // Publish own addresses. - if let Poll::Ready(_) = self.publish_interval.poll_next_unpin(cx) { - // Register waker of underlying task for next interval. 
- while let Poll::Ready(_) = self.publish_interval.poll_next_unpin(cx) {} - - if let Err(e) = self.publish_ext_addresses() { - error!( - target: LOG_TARGET, - "Failed to publish external addresses: {:?}", e, - ); - } - } - - // Request addresses of authorities. - if let Poll::Ready(_) = self.query_interval.poll_next_unpin(cx) { - // Register waker of underlying task for next interval. - while let Poll::Ready(_) = self.query_interval.poll_next_unpin(cx) {} - - if let Err(e) = self.request_addresses_of_others() { - error!( - target: LOG_TARGET, - "Failed to request addresses of authorities: {:?}", e, - ); - } - } - - Poll::Pending - } -} - -/// NetworkProvider provides AuthorityDiscovery with all necessary hooks into the underlying -/// Substrate networking. Using this trait abstraction instead of NetworkService directly is -/// necessary to unit test AuthorityDiscovery. -pub trait NetworkProvider: NetworkStateInfo { - /// Modify a peerset priority group. - fn set_priority_group( - &self, - group_id: String, - peers: HashSet, - ) -> std::result::Result<(), String>; - - /// Start putting a value in the Dht. - fn put_value(&self, key: libp2p::kad::record::Key, value: Vec); - - /// Start getting a value from the Dht. - fn get_value(&self, key: &libp2p::kad::record::Key); -} - -impl NetworkProvider for sc_network::NetworkService -where - B: BlockT + 'static, - H: ExHashT, + >::Api: AuthorityDiscoveryApi, { - fn set_priority_group( - &self, - group_id: String, - peers: HashSet, - ) -> std::result::Result<(), String> { - self.set_priority_group(group_id, peers) - } - fn put_value(&self, key: libp2p::kad::record::Key, value: Vec) { - self.put_value(key, value) - } - fn get_value(&self, key: &libp2p::kad::record::Key) { - self.get_value(key) - } -} - -fn hash_authority_id(id: &[u8]) -> libp2p::kad::record::Key { - libp2p::kad::record::Key::new(&libp2p::multihash::Sha2_256::digest(id)) -} + let (to_worker, from_service) = mpsc::channel(0); -fn interval_at(start: Instant, duration: Duration) -> Interval { - let stream = futures::stream::unfold(start, move |next| { - let time_until_next = next.saturating_duration_since(Instant::now()); - - Delay::new(time_until_next).map(move |_| Some(((), next + duration))) - }); - - Box::new(stream) -} + let worker = Worker::new( + from_service, client, network, sentry_nodes, dht_event_rx, role, prometheus_registry, + ); + let service = Service::new(to_worker); -/// Prometheus metrics for an `AuthorityDiscovery`. -#[derive(Clone)] -pub(crate) struct Metrics { - publish: Counter, - amount_last_published: Gauge, - request: Counter, - dht_event_received: CounterVec, - handle_value_found_event_failure: Counter, - known_authorities_count: Gauge, - priority_group_size: Gauge, + (worker, service) } -impl Metrics { - pub(crate) fn register(registry: &prometheus_endpoint::Registry) -> Result { - Ok(Self { - publish: register( - Counter::new( - "authority_discovery_times_published_total", - "Number of times authority discovery has published external addresses." - )?, - registry, - )?, - amount_last_published: register( - Gauge::new( - "authority_discovery_amount_external_addresses_last_published", - "Number of external addresses published when authority discovery last \ - published addresses." - )?, - registry, - )?, - request: register( - Counter::new( - "authority_discovery_authority_addresses_requested_total", - "Number of times authority discovery has requested external addresses of a \ - single authority." 
- )?, - registry, - )?, - dht_event_received: register( - CounterVec::new( - Opts::new( - "authority_discovery_dht_event_received", - "Number of dht events received by authority discovery." - ), - &["name"], - )?, - registry, - )?, - handle_value_found_event_failure: register( - Counter::new( - "authority_discovery_handle_value_found_event_failure", - "Number of times handling a dht value found event failed." - )?, - registry, - )?, - known_authorities_count: register( - Gauge::new( - "authority_discovery_known_authorities_count", - "Number of authorities known by authority discovery." - )?, - registry, - )?, - priority_group_size: register( - Gauge::new( - "authority_discovery_priority_group_size", - "Number of addresses passed to the peer set as a priority group." - )?, - registry, - )?, - }) - } +/// Message send from the [`Service`] to the [`Worker`]. +pub(crate) enum ServicetoWorkerMsg { + /// See [`Service::get_addresses_by_authority_id`]. + GetAddressesByAuthorityId(AuthorityId, oneshot::Sender>>), + /// See [`Service::get_authority_id_by_peer_id`]. + GetAuthorityIdByPeerId(PeerId, oneshot::Sender>) } diff --git a/client/authority-discovery/src/service.rs b/client/authority-discovery/src/service.rs new file mode 100644 index 00000000000..01fb7134fb5 --- /dev/null +++ b/client/authority-discovery/src/service.rs @@ -0,0 +1,70 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::ServicetoWorkerMsg; + +use futures::channel::{mpsc, oneshot}; +use futures::SinkExt; + +use sc_network::{Multiaddr, PeerId}; +use sp_authority_discovery::AuthorityId; + +/// Service to interact with the [`Worker`]. +#[derive(Clone)] +pub struct Service { + to_worker: mpsc::Sender, +} + +/// A [`Service`] allows to interact with a [`Worker`], e.g. by querying the +/// [`Worker`]'s local address cache for a given [`AuthorityId`]. +impl Service { + pub(crate) fn new(to_worker: mpsc::Sender) -> Self { + Self { + to_worker, + } + } + + /// Get the addresses for the given [`AuthorityId`] from the local address cache. + /// + /// Returns `None` if no entry was present or connection to the [`crate::Worker`] failed. + /// + /// [`Multiaddr`]s returned always include a [`libp2p::core::multiaddr:Protocol::P2p`] + /// component. + pub async fn get_addresses_by_authority_id(&mut self, authority: AuthorityId) -> Option> { + let (tx, rx) = oneshot::channel(); + + self.to_worker + .send(ServicetoWorkerMsg::GetAddressesByAuthorityId(authority, tx)) + .await + .ok()?; + + rx.await.ok().flatten() + } + + /// Get the [`AuthorityId`] for the given [`PeerId`] from the local address cache. + /// + /// Returns `None` if no entry was present or connection to the [`crate::Worker`] failed. 
+ pub async fn get_authority_id_by_peer_id(&mut self, peer_id: PeerId) -> Option { + let (tx, rx) = oneshot::channel(); + + self.to_worker + .send(ServicetoWorkerMsg::GetAuthorityIdByPeerId(peer_id, tx)) + .await + .ok()?; + + rx.await.ok().flatten() + } +} diff --git a/client/authority-discovery/src/tests.rs b/client/authority-discovery/src/tests.rs index 09a65fd138c..8e7367f2f78 100644 --- a/client/authority-discovery/src/tests.rs +++ b/client/authority-discovery/src/tests.rs @@ -16,315 +16,42 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::{iter::FromIterator, sync::{Arc, Mutex}}; +use crate::{new_worker_and_service, worker::{tests::{TestApi, TestNetwork}, Role}}; +use std::sync::Arc; + +use futures::prelude::*; use futures::channel::mpsc::channel; -use futures::executor::{block_on, LocalPool}; -use futures::future::{poll_fn, FutureExt}; -use futures::sink::SinkExt; +use futures::executor::LocalPool; use futures::task::LocalSpawn; -use futures::poll; -use libp2p::{kad, core::multiaddr, PeerId}; - -use sp_api::{ProvideRuntimeApi, ApiRef}; -use sp_core::{crypto::Public, testing::KeyStore}; -use sp_runtime::traits::{Zero, Block as BlockT, NumberFor}; -use substrate_test_runtime_client::runtime::Block; - -use super::*; - -#[test] -fn interval_at_with_start_now() { - let start = Instant::now(); - - let mut interval = interval_at( - std::time::Instant::now(), - std::time::Duration::from_secs(10), - ); - - futures::executor::block_on(async { - interval.next().await; - }); - - assert!( - Instant::now().saturating_duration_since(start) < Duration::from_secs(1), - "Expected low resolution instant interval to fire within less than a second.", - ); -} +use libp2p::core::{multiaddr::{Multiaddr, Protocol}, PeerId}; -#[test] -fn interval_at_is_queuing_ticks() { - let start = Instant::now(); - - let interval = interval_at(start, std::time::Duration::from_millis(100)); - - // Let's wait for 200ms, thus 3 elements should be queued up (1st at 0ms, 2nd at 100ms, 3rd - // at 200ms). - std::thread::sleep(Duration::from_millis(200)); - - futures::executor::block_on(async { - interval.take(3).collect::>().await; - }); - - // Make sure we did not wait for more than 300 ms, which would imply that `at_interval` is - // not queuing ticks. - assert!( - Instant::now().saturating_duration_since(start) < Duration::from_millis(300), - "Expect interval to /queue/ events when not polled for a while.", - ); -} +use sp_authority_discovery::AuthorityId; +use sp_core::crypto::key_types; +use sp_core::testing::KeyStore; #[test] -fn interval_at_with_initial_delay() { - let start = Instant::now(); - - let mut interval = interval_at( - std::time::Instant::now() + Duration::from_millis(100), - std::time::Duration::from_secs(10), - ); - - futures::executor::block_on(async { - interval.next().await; - }); - - assert!( - Instant::now().saturating_duration_since(start) > Duration::from_millis(100), - "Expected interval with initial delay not to fire right away.", - ); -} - -#[derive(Clone)] -struct TestApi { - authorities: Vec, -} - -impl ProvideRuntimeApi for TestApi { - type Api = RuntimeApi; - - fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> { - RuntimeApi { - authorities: self.authorities.clone(), - }.into() - } -} - -/// Blockchain database header backend. Does not perform any validation. 
-impl HeaderBackend for TestApi { - fn header( - &self, - _id: BlockId, - ) -> std::result::Result, sp_blockchain::Error> { - Ok(None) - } - - fn info(&self) -> sc_client_api::blockchain::Info { - sc_client_api::blockchain::Info { - best_hash: Default::default(), - best_number: Zero::zero(), - finalized_hash: Default::default(), - finalized_number: Zero::zero(), - genesis_hash: Default::default(), - number_leaves: Default::default(), - } - } - - fn status( - &self, - _id: BlockId, - ) -> std::result::Result { - Ok(sc_client_api::blockchain::BlockStatus::Unknown) - } - - fn number( - &self, - _hash: Block::Hash, - ) -> std::result::Result>, sp_blockchain::Error> { - Ok(None) - } - - fn hash( - &self, - _number: NumberFor, - ) -> std::result::Result, sp_blockchain::Error> { - Ok(None) - } -} - -struct RuntimeApi { - authorities: Vec, -} - -sp_api::mock_impl_runtime_apis! { - impl AuthorityDiscoveryApi for RuntimeApi { - type Error = sp_blockchain::Error; - - fn authorities(&self) -> Vec { - self.authorities.clone() - } - } -} - -struct TestNetwork { - peer_id: PeerId, - // Whenever functions on `TestNetwork` are called, the function arguments are added to the - // vectors below. - pub put_value_call: Arc)>>>, - pub get_value_call: Arc>>, - pub set_priority_group_call: Arc)>>>, -} - -impl Default for TestNetwork { - fn default() -> Self { - TestNetwork { - peer_id: PeerId::random(), - put_value_call: Default::default(), - get_value_call: Default::default(), - set_priority_group_call: Default::default(), - } - } -} - -impl NetworkProvider for TestNetwork { - fn set_priority_group( - &self, - group_id: String, - peers: HashSet, - ) -> std::result::Result<(), String> { - self.set_priority_group_call - .lock() - .unwrap() - .push((group_id, peers)); - Ok(()) - } - fn put_value(&self, key: kad::record::Key, value: Vec) { - self.put_value_call.lock().unwrap().push((key, value)); - } - fn get_value(&self, key: &kad::record::Key) { - self.get_value_call.lock().unwrap().push(key.clone()); - } -} - -impl NetworkStateInfo for TestNetwork { - fn local_peer_id(&self) -> PeerId { - self.peer_id.clone() - } - - fn external_addresses(&self) -> Vec { - vec!["/ip6/2001:db8::/tcp/30333".parse().unwrap()] - } -} - -#[test] -fn new_registers_metrics() { - let (_dht_event_tx, dht_event_rx) = channel(1000); +fn get_addresses_and_authority_id() { + let (_dht_event_tx, dht_event_rx) = channel(0); let network: Arc = Arc::new(Default::default()); - let key_store = KeyStore::new(); - let test_api = Arc::new(TestApi { - authorities: vec![], - }); - - let registry = prometheus_endpoint::Registry::new(); - - AuthorityDiscovery::new( - test_api, - network.clone(), - vec![], - dht_event_rx.boxed(), - Role::Authority(key_store), - Some(registry.clone()), - ); - - assert!(registry.gather().len() > 0); -} - -#[test] -fn request_addresses_of_others_triggers_dht_get_query() { - let _ = ::env_logger::try_init(); - let (_dht_event_tx, dht_event_rx) = channel(1000); - // Generate authority keys - let authority_1_key_pair = AuthorityPair::from_seed_slice(&[1; 32]).unwrap(); - let authority_2_key_pair = AuthorityPair::from_seed_slice(&[2; 32]).unwrap(); - - let test_api = Arc::new(TestApi { - authorities: vec![authority_1_key_pair.public(), authority_2_key_pair.public()], - }); - - let network: Arc = Arc::new(Default::default()); let key_store = KeyStore::new(); - - let mut authority_discovery = AuthorityDiscovery::new( - test_api, - network.clone(), - vec![], - dht_event_rx.boxed(), - Role::Authority(key_store), - None, - ); - - 
authority_discovery.request_addresses_of_others().unwrap(); - - // Expect authority discovery to request new records from the dht. - assert_eq!(network.get_value_call.lock().unwrap().len(), 2); -} - -#[test] -fn publish_discover_cycle() { - let _ = ::env_logger::try_init(); - - // Node A publishing its address. - - let (_dht_event_tx, dht_event_rx) = channel(1000); - - let network: Arc = Arc::new(Default::default()); - let node_a_multiaddr = { - let peer_id = network.local_peer_id(); - let address = network.external_addresses().pop().unwrap(); - - address.with(multiaddr::Protocol::P2p( - peer_id.into(), - )) - }; - - let key_store = KeyStore::new(); - let node_a_public = key_store + let remote_authority_id: AuthorityId = key_store .write() .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None) - .unwrap(); - let test_api = Arc::new(TestApi { - authorities: vec![node_a_public.into()], - }); - - let mut authority_discovery = AuthorityDiscovery::new( - test_api, - network.clone(), - vec![], - dht_event_rx.boxed(), - Role::Authority(key_store), - None, - ); + .unwrap() + .into(); - authority_discovery.publish_ext_addresses().unwrap(); + let remote_peer_id = PeerId::random(); + let remote_addr = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333".parse::() + .unwrap() + .with(Protocol::P2p(remote_peer_id.clone().into())); - // Expect authority discovery to put a new record onto the dht. - assert_eq!(network.put_value_call.lock().unwrap().len(), 1); - - let dht_event = { - let (key, value) = network.put_value_call.lock().unwrap().pop().unwrap(); - sc_network::DhtEvent::ValueFound(vec![(key, value)]) - }; - - // Node B discovering node A's address. - - let (mut dht_event_tx, dht_event_rx) = channel(1000); let test_api = Arc::new(TestApi { - // Make sure node B identifies node A as an authority. - authorities: vec![node_a_public.into()], + authorities: vec![], }); - let network: Arc = Arc::new(Default::default()); - let key_store = KeyStore::new(); - let mut authority_discovery = AuthorityDiscovery::new( + let (mut worker, mut service) = new_worker_and_service( test_api, network.clone(), vec![], @@ -333,230 +60,19 @@ fn publish_discover_cycle() { None, ); - dht_event_tx.try_send(dht_event).unwrap(); + worker.inject_addresses(remote_authority_id.clone(), vec![remote_addr.clone()]); - let f = |cx: &mut Context<'_>| -> Poll<()> { - // Make authority discovery handle the event. - if let Poll::Ready(e) = authority_discovery.handle_dht_events(cx) { - panic!("Unexpected error: {:?}", e); - } - - // Expect authority discovery to set the priority set. 
- assert_eq!(network.set_priority_group_call.lock().unwrap().len(), 1); + let mut pool = LocalPool::new(); + pool.spawner().spawn_local_obj(Box::pin(worker).into()).unwrap(); + pool.run_until(async { assert_eq!( - network.set_priority_group_call.lock().unwrap()[0], - ( - "authorities".to_string(), - HashSet::from_iter(vec![node_a_multiaddr.clone()].into_iter()) - ) + Some(vec![remote_addr]), + service.get_addresses_by_authority_id(remote_authority_id.clone()).await, ); - - Poll::Ready(()) - }; - - let _ = block_on(poll_fn(f)); -} - -#[test] -fn terminate_when_event_stream_terminates() { - let (dht_event_tx, dht_event_rx) = channel(1000); - let network: Arc = Arc::new(Default::default()); - let key_store = KeyStore::new(); - let test_api = Arc::new(TestApi { - authorities: vec![], - }); - - let mut authority_discovery = AuthorityDiscovery::new( - test_api, - network.clone(), - vec![], - dht_event_rx.boxed(), - Role::Authority(key_store), - None, - ); - - block_on(async { - assert_eq!(Poll::Pending, poll!(&mut authority_discovery)); - - // Simulate termination of the network through dropping the sender side of the dht event - // channel. - drop(dht_event_tx); - assert_eq!( - Poll::Ready(()), poll!(&mut authority_discovery), - "Expect the authority discovery module to terminate once the sending side of the dht \ - event channel is terminated.", + Some(remote_authority_id), + service.get_authority_id_by_peer_id(remote_peer_id).await, ); }); } - -#[test] -fn dont_stop_polling_when_error_is_returned() { - #[derive(PartialEq, Debug)] - enum Event { - Processed, - End, - }; - - let (mut dht_event_tx, dht_event_rx) = channel(1000); - let (mut discovery_update_tx, mut discovery_update_rx) = channel(1000); - let network: Arc = Arc::new(Default::default()); - let key_store = KeyStore::new(); - let test_api = Arc::new(TestApi { - authorities: vec![], - }); - let mut pool = LocalPool::new(); - - let mut authority_discovery = AuthorityDiscovery::new( - test_api, - network.clone(), - vec![], - dht_event_rx.boxed(), - Role::Authority(key_store), - None, - ); - - // Spawn the authority discovery to make sure it is polled independently. - // - // As this is a local pool, only one future at a time will have the CPU and - // can make progress until the future returns `Pending`. - pool.spawner().spawn_local_obj( - futures::future::poll_fn(move |ctx| { - match std::pin::Pin::new(&mut authority_discovery).poll(ctx) { - Poll::Ready(()) => {}, - Poll::Pending => { - discovery_update_tx.send(Event::Processed).now_or_never(); - return Poll::Pending; - }, - } - let _ = discovery_update_tx.send(Event::End).now_or_never().unwrap(); - Poll::Ready(()) - }).boxed_local().into(), - ).expect("Spawns authority discovery"); - - pool.run_until( - // The future that drives the event stream - async { - // Send an event that should generate an error - let _ = dht_event_tx.send(DhtEvent::ValueFound(Default::default())).now_or_never(); - // Send the same event again to make sure that the event stream needs to be polled twice - // to be woken up again. - let _ = dht_event_tx.send(DhtEvent::ValueFound(Default::default())).now_or_never(); - - // Now we call `await` and give the control to the authority discovery future. - assert_eq!(Some(Event::Processed), discovery_update_rx.next().await); - - // Drop the event rx to stop the authority discovery. If it was polled correctly, it - // should end properly. 
- drop(dht_event_tx); - - assert!( - discovery_update_rx.collect::>() - .await - .into_iter() - .any(|evt| evt == Event::End), - "The authority discovery should have ended", - ); - } - ); -} - -/// In the scenario of a validator publishing the address of its sentry node to -/// the DHT, said sentry node should not add its own Multiaddr to the -/// peerset "authority" priority group. -#[test] -fn never_add_own_address_to_priority_group() { - let validator_key_store = KeyStore::new(); - let validator_public = validator_key_store - .write() - .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None) - .unwrap(); - - let sentry_network: Arc = Arc::new(Default::default()); - - let sentry_multiaddr = { - let peer_id = sentry_network.local_peer_id(); - let address: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333".parse().unwrap(); - - address.with(multiaddr::Protocol::P2p( - peer_id.into(), - )) - }; - - // Address of some other sentry node of `validator`. - let random_multiaddr = { - let peer_id = PeerId::random(); - let address: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:1/tcp/30333".parse().unwrap(); - - address.with(multiaddr::Protocol::P2p( - peer_id.into(), - )) - }; - - let dht_event = { - let addresses = vec![ - sentry_multiaddr.to_vec(), - random_multiaddr.to_vec(), - ]; - - let mut serialized_addresses = vec![]; - schema::AuthorityAddresses { addresses } - .encode(&mut serialized_addresses) - .map_err(Error::EncodingProto) - .unwrap(); - - let signature = validator_key_store.read() - .sign_with( - key_types::AUTHORITY_DISCOVERY, - &validator_public.clone().into(), - serialized_addresses.as_slice(), - ) - .map_err(|_| Error::Signing) - .unwrap(); - - let mut signed_addresses = vec![]; - schema::SignedAuthorityAddresses { - addresses: serialized_addresses.clone(), - signature, - } - .encode(&mut signed_addresses) - .map_err(Error::EncodingProto) - .unwrap(); - - let key = hash_authority_id(&validator_public.to_raw_vec()); - let value = signed_addresses; - (key, value) - }; - - let (_dht_event_tx, dht_event_rx) = channel(1); - let sentry_test_api = Arc::new(TestApi { - // Make sure the sentry node identifies its validator as an authority. - authorities: vec![validator_public.into()], - }); - - let mut sentry_authority_discovery = AuthorityDiscovery::new( - sentry_test_api, - sentry_network.clone(), - vec![], - dht_event_rx.boxed(), - Role::Sentry, - None, - ); - - sentry_authority_discovery.handle_dht_value_found_event(vec![dht_event]).unwrap(); - - assert_eq!( - sentry_network.set_priority_group_call.lock().unwrap().len(), 1, - "Expect authority discovery to set the priority set.", - ); - - assert_eq!( - sentry_network.set_priority_group_call.lock().unwrap()[0], - ( - "authorities".to_string(), - HashSet::from_iter(vec![random_multiaddr.clone()].into_iter(),) - ), - "Expect authority discovery to only add `random_multiaddr`." - ); -} diff --git a/client/authority-discovery/src/worker.rs b/client/authority-discovery/src/worker.rs new file mode 100644 index 00000000000..dd13b89278e --- /dev/null +++ b/client/authority-discovery/src/worker.rs @@ -0,0 +1,785 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::{error::{Error, Result}, ServicetoWorkerMsg}; + +use std::collections::{HashMap, HashSet}; +use std::convert::TryInto; +use std::marker::PhantomData; +use std::pin::Pin; +use std::sync::Arc; +use std::time::{Duration, Instant}; + +use futures::channel::mpsc; +use futures::task::{Context, Poll}; +use futures::{Future, FutureExt, ready, Stream, StreamExt, stream::Fuse}; +use futures_timer::Delay; + +use addr_cache::AddrCache; +use codec::Decode; +use libp2p::core::multiaddr; +use log::{debug, error, log_enabled}; +use prometheus_endpoint::{Counter, CounterVec, Gauge, Opts, U64, register}; +use prost::Message; +use sc_client_api::blockchain::HeaderBackend; +use sc_network::{ + config::MultiaddrWithPeerId, + DhtEvent, + ExHashT, + Multiaddr, + NetworkStateInfo, + PeerId, +}; +use sp_authority_discovery::{AuthorityDiscoveryApi, AuthorityId, AuthoritySignature, AuthorityPair}; +use sp_core::crypto::{key_types, Pair}; +use sp_core::traits::BareCryptoStorePtr; +use sp_runtime::{traits::Block as BlockT, generic::BlockId}; +use sp_api::ProvideRuntimeApi; + +mod addr_cache; +/// Dht payload schemas generated from Protobuf definitions via Prost crate in build.rs. +mod schema { include!(concat!(env!("OUT_DIR"), "/authority_discovery.rs")); } +#[cfg(test)] +pub mod tests; + +type Interval = Box + Unpin + Send + Sync>; + +const LOG_TARGET: &'static str = "sub-authority-discovery"; + +/// Upper bound estimation on how long one should wait before accessing the Kademlia DHT. +const LIBP2P_KADEMLIA_BOOTSTRAP_TIME: Duration = Duration::from_secs(30); + +/// Name of the Substrate peerset priority group for authorities discovered through the authority +/// discovery module. +const AUTHORITIES_PRIORITY_GROUP_NAME: &'static str = "authorities"; + +/// Role an authority discovery module can run as. +pub enum Role { + /// Actual authority as well as a reference to its key store. + Authority(BareCryptoStorePtr), + /// Sentry node that guards an authority. + /// + /// No reference to its key store needed, as sentry nodes don't have an identity to sign + /// addresses with in the first place. + Sentry, +} + +/// A [`Worker`] makes a given authority discoverable and discovers other +/// authorities. +/// +/// The [`Worker`] implements the Future trait. By +/// polling [`Worker`] an authority: +/// +/// 1. **Makes itself discoverable** +/// +/// 1. Retrieves its external addresses (including peer id) or the ones of +/// its sentry nodes. +/// +/// 2. Signs the above. +/// +/// 3. Puts the signature and the addresses on the libp2p Kademlia DHT. +/// +/// +/// 2. **Discovers other authorities** +/// +/// 1. Retrieves the current set of authorities. +/// +/// 2. Starts DHT queries for the ids of the authorities. +/// +/// 3. Validates the signatures of the retrieved key value pairs. +/// +/// 4. Adds the retrieved external addresses as priority nodes to the +/// peerset. +/// +/// When run as a sentry node, the [`Worker`] does not publish +/// any addresses to the DHT but still discovers validators and sentry nodes of +/// validators, i.e. only step 2 (Discovers other authorities) is executed. 
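The cycle above relies on address payloads being signed with an authority-discovery key and verified against the on-chain authority id before anything is cached. Below is a minimal sketch of that sign/verify step using an `sr25519` pair directly; the real worker goes through the keystore and the Protobuf `schema` types, and the address bytes here are only placeholders.

```rust
use sp_core::{sr25519, Pair};

fn main() {
	// Placeholder for the Protobuf-encoded `AuthorityAddresses` payload.
	let serialized_addresses = b"/ip6/::1/tcp/30333".to_vec();

	// Step 1: the authority signs the encoded addresses before putting them on the DHT.
	let (pair, _seed) = sr25519::Pair::generate();
	let signature = pair.sign(&serialized_addresses);

	// Step 2: a discovering node verifies the payload against the authority's public key
	// before adding any of the contained addresses to its cache.
	assert!(sr25519::Pair::verify(&signature, &serialized_addresses, &pair.public()));
}
```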
+pub struct Worker +where + Block: BlockT + 'static, + Network: NetworkProvider, + Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, + >::Api: AuthorityDiscoveryApi, +{ + /// Channel receiver for messages send by an [`Service`]. + from_service: Fuse>, + + client: Arc, + + network: Arc, + /// List of sentry node public addresses. + // + // There are 3 states: + // - None: No addresses were specified. + // - Some(vec![]): Addresses were specified, but none could be parsed as proper + // Multiaddresses. + // - Some(vec![a, b, c, ...]): Valid addresses were specified. + sentry_nodes: Option>, + /// Channel we receive Dht events on. + dht_event_rx: Pin + Send>>, + + /// Interval to be proactive, publishing own addresses. + publish_interval: Interval, + /// Interval on which to query for addresses of other authorities. + query_interval: Interval, + /// Interval on which to set the peerset priority group to a new random + /// set of addresses. + priority_group_set_interval: Interval, + + addr_cache: addr_cache::AddrCache, + + metrics: Option, + + role: Role, + + phantom: PhantomData, +} + +impl Worker +where + Block: BlockT + Unpin + 'static, + Network: NetworkProvider, + Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, + >::Api: + AuthorityDiscoveryApi, + Self: Future, +{ + /// Return a new [`Worker`]. + /// + /// Note: When specifying `sentry_nodes` this module will not advertise the public addresses of + /// the node itself but only the public addresses of its sentry nodes. + pub(crate) fn new( + from_service: mpsc::Receiver, + client: Arc, + network: Arc, + sentry_nodes: Vec, + dht_event_rx: Pin + Send>>, + role: Role, + prometheus_registry: Option, + ) -> Self { + // Kademlia's default time-to-live for Dht records is 36h, republishing records every 24h. + // Given that a node could restart at any point in time, one can not depend on the + // republishing process, thus publishing own external addresses should happen on an interval + // < 36h. + let publish_interval = interval_at( + Instant::now() + LIBP2P_KADEMLIA_BOOTSTRAP_TIME, + Duration::from_secs(12 * 60 * 60), + ); + + // External addresses of other authorities can change at any given point in time. The + // interval on which to query for external addresses of other authorities is a trade off + // between efficiency and performance. + let query_interval_duration = Duration::from_secs(60); + let query_interval_start = Instant::now() + LIBP2P_KADEMLIA_BOOTSTRAP_TIME; + let query_interval = interval_at(query_interval_start, query_interval_duration); + + // Querying 500 [`AuthorityId`]s takes ~1m on the Kusama DHT (10th of August 2020) when + // comparing `authority_discovery_authority_addresses_requested_total` and + // `authority_discovery_dht_event_received`. With that in mind set the peerset priority + // group on the same interval as the [`query_interval`] above, just delayed by 2 minutes. 
+ let priority_group_set_interval = interval_at( + query_interval_start + Duration::from_secs(2 * 60), + query_interval_duration, + ); + + let sentry_nodes = if !sentry_nodes.is_empty() { + Some(sentry_nodes.into_iter().map(|ma| ma.concat()).collect::>()) + } else { + None + }; + + let addr_cache = AddrCache::new(); + + let metrics = match prometheus_registry { + Some(registry) => { + match Metrics::register(®istry) { + Ok(metrics) => Some(metrics), + Err(e) => { + error!(target: LOG_TARGET, "Failed to register metrics: {:?}", e); + None + }, + } + }, + None => None, + }; + + Worker { + from_service: from_service.fuse(), + client, + network, + sentry_nodes, + dht_event_rx, + publish_interval, + query_interval, + priority_group_set_interval, + addr_cache, + role, + metrics, + phantom: PhantomData, + } + } + + /// Publish either our own or if specified the public addresses of our sentry nodes. + fn publish_ext_addresses(&mut self) -> Result<()> { + let key_store = match &self.role { + Role::Authority(key_store) => key_store, + // Only authority nodes can put addresses (their own or the ones of their sentry nodes) + // on the Dht. Sentry nodes don't have a known identity to authenticate such addresses, + // thus `publish_ext_addresses` becomes a no-op. + Role::Sentry => return Ok(()), + }; + + if let Some(metrics) = &self.metrics { + metrics.publish.inc() + } + + let addresses: Vec<_> = match &self.sentry_nodes { + Some(addrs) => addrs.clone().into_iter() + .map(|a| a.to_vec()) + .collect(), + None => self.network.external_addresses() + .into_iter() + .map(|a| a.with(multiaddr::Protocol::P2p( + self.network.local_peer_id().into(), + ))) + .map(|a| a.to_vec()) + .collect(), + }; + + if let Some(metrics) = &self.metrics { + metrics.amount_last_published.set(addresses.len() as u64); + } + + let mut serialized_addresses = vec![]; + schema::AuthorityAddresses { addresses } + .encode(&mut serialized_addresses) + .map_err(Error::EncodingProto)?; + + let keys = Worker::get_own_public_keys_within_authority_set( + &key_store, + &self.client, + )?.into_iter().map(Into::into).collect::>(); + + let signatures = key_store.read() + .sign_with_all( + key_types::AUTHORITY_DISCOVERY, + keys.clone(), + serialized_addresses.as_slice(), + ) + .map_err(|_| Error::Signing)?; + + for (sign_result, key) in signatures.into_iter().zip(keys) { + let mut signed_addresses = vec![]; + + // sign_with_all returns Result signature + // is generated for a public key that is supported. + // Verify that all signatures exist for all provided keys. + let signature = sign_result.map_err(|_| Error::MissingSignature(key.clone()))?; + schema::SignedAuthorityAddresses { + addresses: serialized_addresses.clone(), + signature, + } + .encode(&mut signed_addresses) + .map_err(Error::EncodingProto)?; + + self.network.put_value( + hash_authority_id(key.1.as_ref()), + signed_addresses, + ); + } + + Ok(()) + } + + fn request_addresses_of_others(&mut self) -> Result<()> { + let id = BlockId::hash(self.client.info().best_hash); + + let authorities = self + .client + .runtime_api() + .authorities(&id) + .map_err(Error::CallingRuntime)?; + + let local_keys = match &self.role { + Role::Authority(key_store) => { + key_store.read() + .sr25519_public_keys(key_types::AUTHORITY_DISCOVERY) + .into_iter() + .collect::>() + }, + Role::Sentry => HashSet::new(), + }; + + for authority_id in authorities.iter() { + // Make sure we don't look up our own keys. 
+ if !local_keys.contains(authority_id.as_ref()) { + if let Some(metrics) = &self.metrics { + metrics.request.inc(); + } + + self.network + .get_value(&hash_authority_id(authority_id.as_ref())); + } + } + + Ok(()) + } + + /// Handle incoming Dht events. + /// + /// Returns either: + /// - Poll::Pending when there are no more events to handle or + /// - Poll::Ready(()) when the dht event stream terminated. + fn handle_dht_events(&mut self, cx: &mut Context) -> Poll<()>{ + loop { + match ready!(self.dht_event_rx.poll_next_unpin(cx)) { + Some(DhtEvent::ValueFound(v)) => { + if let Some(metrics) = &self.metrics { + metrics.dht_event_received.with_label_values(&["value_found"]).inc(); + } + + if log_enabled!(log::Level::Debug) { + let hashes = v.iter().map(|(hash, _value)| hash.clone()); + debug!( + target: LOG_TARGET, + "Value for hash '{:?}' found on Dht.", hashes, + ); + } + + if let Err(e) = self.handle_dht_value_found_event(v) { + if let Some(metrics) = &self.metrics { + metrics.handle_value_found_event_failure.inc(); + } + + debug!( + target: LOG_TARGET, + "Failed to handle Dht value found event: {:?}", e, + ); + } + } + Some(DhtEvent::ValueNotFound(hash)) => { + if let Some(metrics) = &self.metrics { + metrics.dht_event_received.with_label_values(&["value_not_found"]).inc(); + } + + debug!( + target: LOG_TARGET, + "Value for hash '{:?}' not found on Dht.", hash + ) + }, + Some(DhtEvent::ValuePut(hash)) => { + if let Some(metrics) = &self.metrics { + metrics.dht_event_received.with_label_values(&["value_put"]).inc(); + } + + debug!( + target: LOG_TARGET, + "Successfully put hash '{:?}' on Dht.", hash, + ) + }, + Some(DhtEvent::ValuePutFailed(hash)) => { + if let Some(metrics) = &self.metrics { + metrics.dht_event_received.with_label_values(&["value_put_failed"]).inc(); + } + + debug!( + target: LOG_TARGET, + "Failed to put hash '{:?}' on Dht.", hash + ) + }, + None => { + debug!(target: LOG_TARGET, "Dht event stream terminated."); + return Poll::Ready(()); + }, + } + } + } + + fn handle_dht_value_found_event( + &mut self, + values: Vec<(libp2p::kad::record::Key, Vec)>, + ) -> Result<()> { + // Ensure `values` is not empty and all its keys equal. + let remote_key = values.iter().fold(Ok(None), |acc, (key, _)| { + match acc { + Ok(None) => Ok(Some(key.clone())), + Ok(Some(ref prev_key)) if prev_key != key => Err( + Error::ReceivingDhtValueFoundEventWithDifferentKeys + ), + x @ Ok(_) => x, + Err(e) => Err(e), + } + })?.ok_or(Error::ReceivingDhtValueFoundEventWithNoRecords)?; + + let authorities = { + let block_id = BlockId::hash(self.client.info().best_hash); + // From the Dht we only get the hashed authority id. In order to retrieve the actual + // authority id and to ensure it is actually an authority, we match the hash against the + // hash of the authority id of all other authorities. + let authorities = self.client.runtime_api().authorities(&block_id)?; + self.addr_cache.retain_ids(&authorities); + authorities + .into_iter() + .map(|id| (hash_authority_id(id.as_ref()), id)) + .collect::>() + }; + + // Check if the event origins from an authority in the current authority set. 
+ let authority_id: &AuthorityId = authorities + .get(&remote_key) + .ok_or(Error::MatchingHashedAuthorityIdWithAuthorityId)?; + + let local_peer_id = self.network.local_peer_id(); + + let remote_addresses: Vec = values.into_iter() + .map(|(_k, v)| { + let schema::SignedAuthorityAddresses { signature, addresses } = + schema::SignedAuthorityAddresses::decode(v.as_slice()) + .map_err(Error::DecodingProto)?; + + let signature = AuthoritySignature::decode(&mut &signature[..]) + .map_err(Error::EncodingDecodingScale)?; + + if !AuthorityPair::verify(&signature, &addresses, authority_id) { + return Err(Error::VerifyingDhtPayload); + } + + let addresses = schema::AuthorityAddresses::decode(addresses.as_slice()) + .map(|a| a.addresses) + .map_err(Error::DecodingProto)? + .into_iter() + .map(|a| a.try_into()) + .collect::>() + .map_err(Error::ParsingMultiaddress)?; + + Ok(addresses) + }) + .collect::>>>()? + .into_iter() + .flatten() + // Ignore [`Multiaddr`]s without [`PeerId`] and own addresses. + .filter(|addr| addr.iter().any(|protocol| { + // Parse to PeerId first as Multihashes of old and new PeerId + // representation don't equal. + // + // See https://github.com/libp2p/rust-libp2p/issues/555 for + // details. + if let multiaddr::Protocol::P2p(hash) = protocol { + let peer_id = match PeerId::from_multihash(hash) { + Ok(peer_id) => peer_id, + Err(_) => return false, // Discard address. + }; + + // Discard if equal to local peer id, keep if it differs. + return !(peer_id == local_peer_id); + } + + false // `protocol` is not a [`Protocol::P2p`], let's keep looking. + })) + .collect(); + + if !remote_addresses.is_empty() { + self.addr_cache.insert(authority_id.clone(), remote_addresses); + if let Some(metrics) = &self.metrics { + metrics.known_authorities_count.set( + self.addr_cache.num_ids().try_into().unwrap_or(std::u64::MAX) + ); + } + } + + Ok(()) + } + + /// Retrieve our public keys within the current authority set. + // + // A node might have multiple authority discovery keys within its keystore, e.g. an old one and + // one for the upcoming session. In addition it could be participating in the current authority + // set with two keys. The function does not return all of the local authority discovery public + // keys, but only the ones intersecting with the current authority set. + fn get_own_public_keys_within_authority_set( + key_store: &BareCryptoStorePtr, + client: &Client, + ) -> Result> { + let local_pub_keys = key_store.read() + .sr25519_public_keys(key_types::AUTHORITY_DISCOVERY) + .into_iter() + .collect::>(); + + let id = BlockId::hash(client.info().best_hash); + let current_authorities = client.runtime_api() + .authorities(&id) + .map_err(Error::CallingRuntime)? + .into_iter() + .map(std::convert::Into::into) + .collect::>(); + + let intersection = local_pub_keys.intersection(¤t_authorities) + .cloned() + .map(std::convert::Into::into) + .collect(); + + Ok(intersection) + } + + /// Set the peer set 'authority' priority group to a new random set of + /// [`Multiaddr`]s. 
+ fn set_priority_group(&self) -> Result<()> { + let addresses = self.addr_cache.get_random_subset(); + + if addresses.is_empty() { + debug!( + target: LOG_TARGET, + "Got no addresses in cache for peerset priority group.", + ); + return Ok(()); + } + + if let Some(metrics) = &self.metrics { + metrics.priority_group_size.set(addresses.len().try_into().unwrap_or(std::u64::MAX)); + } + + debug!( + target: LOG_TARGET, + "Applying priority group {:?} to peerset.", addresses, + ); + + self.network + .set_priority_group( + AUTHORITIES_PRIORITY_GROUP_NAME.to_string(), + addresses.into_iter().collect(), + ) + .map_err(Error::SettingPeersetPriorityGroup)?; + + Ok(()) + } +} + +impl Future for Worker +where + Block: BlockT + Unpin + 'static, + Network: NetworkProvider, + Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, + >::Api: + AuthorityDiscoveryApi, +{ + type Output = (); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { + // Process incoming events. + if let Poll::Ready(()) = self.handle_dht_events(cx) { + // `handle_dht_events` returns `Poll::Ready(())` when the Dht event stream terminated. + // Termination of the Dht event stream implies that the underlying network terminated, + // thus authority discovery should terminate as well. + return Poll::Ready(()); + } + + // Publish own addresses. + if let Poll::Ready(_) = self.publish_interval.poll_next_unpin(cx) { + // Register waker of underlying task for next interval. + while let Poll::Ready(_) = self.publish_interval.poll_next_unpin(cx) {} + + if let Err(e) = self.publish_ext_addresses() { + error!( + target: LOG_TARGET, + "Failed to publish external addresses: {:?}", e, + ); + } + } + + // Request addresses of authorities. + if let Poll::Ready(_) = self.query_interval.poll_next_unpin(cx) { + // Register waker of underlying task for next interval. + while let Poll::Ready(_) = self.query_interval.poll_next_unpin(cx) {} + + if let Err(e) = self.request_addresses_of_others() { + error!( + target: LOG_TARGET, + "Failed to request addresses of authorities: {:?}", e, + ); + } + } + + // Set peerset priority group to a new random set of addresses. + if let Poll::Ready(_) = self.priority_group_set_interval.poll_next_unpin(cx) { + // Register waker of underlying task for next interval. + while let Poll::Ready(_) = self.priority_group_set_interval.poll_next_unpin(cx) {} + + if let Err(e) = self.set_priority_group() { + error!( + target: LOG_TARGET, + "Failed to set priority group: {:?}", e, + ); + } + } + + // Handle messages from [`Service`]. + while let Poll::Ready(Some(msg)) = self.from_service.poll_next_unpin(cx) { + match msg { + ServicetoWorkerMsg::GetAddressesByAuthorityId(authority, sender) => { + let _ = sender.send( + self.addr_cache.get_addresses_by_authority_id(&authority).map(Clone::clone), + ); + } + ServicetoWorkerMsg::GetAuthorityIdByPeerId(peer_id, sender) => { + let _ = sender.send( + self.addr_cache.get_authority_id_by_peer_id(&peer_id).map(Clone::clone), + ); + } + } + } + + Poll::Pending + } +} + +/// NetworkProvider provides [`Worker`] with all necessary hooks into the +/// underlying Substrate networking. Using this trait abstraction instead of [`NetworkService`] +/// directly is necessary to unit test [`Worker`]. +pub trait NetworkProvider: NetworkStateInfo { + /// Modify a peerset priority group. + fn set_priority_group( + &self, + group_id: String, + peers: HashSet, + ) -> std::result::Result<(), String>; + + /// Start putting a value in the Dht. 
+ fn put_value(&self, key: libp2p::kad::record::Key, value: Vec); + + /// Start getting a value from the Dht. + fn get_value(&self, key: &libp2p::kad::record::Key); +} + +impl NetworkProvider for sc_network::NetworkService +where + B: BlockT + 'static, + H: ExHashT, +{ + fn set_priority_group( + &self, + group_id: String, + peers: HashSet, + ) -> std::result::Result<(), String> { + self.set_priority_group(group_id, peers) + } + fn put_value(&self, key: libp2p::kad::record::Key, value: Vec) { + self.put_value(key, value) + } + fn get_value(&self, key: &libp2p::kad::record::Key) { + self.get_value(key) + } +} + +fn hash_authority_id(id: &[u8]) -> libp2p::kad::record::Key { + libp2p::kad::record::Key::new(&libp2p::multihash::Sha2_256::digest(id)) +} + +fn interval_at(start: Instant, duration: Duration) -> Interval { + let stream = futures::stream::unfold(start, move |next| { + let time_until_next = next.saturating_duration_since(Instant::now()); + + Delay::new(time_until_next).map(move |_| Some(((), next + duration))) + }); + + Box::new(stream) +} + +/// Prometheus metrics for a [`Worker`]. +#[derive(Clone)] +pub(crate) struct Metrics { + publish: Counter, + amount_last_published: Gauge, + request: Counter, + dht_event_received: CounterVec, + handle_value_found_event_failure: Counter, + known_authorities_count: Gauge, + priority_group_size: Gauge, +} + +impl Metrics { + pub(crate) fn register(registry: &prometheus_endpoint::Registry) -> Result { + Ok(Self { + publish: register( + Counter::new( + "authority_discovery_times_published_total", + "Number of times authority discovery has published external addresses." + )?, + registry, + )?, + amount_last_published: register( + Gauge::new( + "authority_discovery_amount_external_addresses_last_published", + "Number of external addresses published when authority discovery last \ + published addresses." + )?, + registry, + )?, + request: register( + Counter::new( + "authority_discovery_authority_addresses_requested_total", + "Number of times authority discovery has requested external addresses of a \ + single authority." + )?, + registry, + )?, + dht_event_received: register( + CounterVec::new( + Opts::new( + "authority_discovery_dht_event_received", + "Number of dht events received by authority discovery." + ), + &["name"], + )?, + registry, + )?, + handle_value_found_event_failure: register( + Counter::new( + "authority_discovery_handle_value_found_event_failure", + "Number of times handling a dht value found event failed." + )?, + registry, + )?, + known_authorities_count: register( + Gauge::new( + "authority_discovery_known_authorities_count", + "Number of authorities known by authority discovery." + )?, + registry, + )?, + priority_group_size: register( + Gauge::new( + "authority_discovery_priority_group_size", + "Number of addresses passed to the peer set as a priority group." + )?, + registry, + )?, + }) + } +} + +// Helper functions for unit testing. 
+#[cfg(test)] +impl Worker +where + Block: BlockT + 'static, + Network: NetworkProvider, + Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, + >::Api: AuthorityDiscoveryApi, +{ + pub(crate) fn inject_addresses(&mut self, authority: AuthorityId, addresses: Vec) { + self.addr_cache.insert(authority, addresses); + } +} diff --git a/client/authority-discovery/src/worker/addr_cache.rs b/client/authority-discovery/src/worker/addr_cache.rs new file mode 100644 index 00000000000..a2cd3f33e92 --- /dev/null +++ b/client/authority-discovery/src/worker/addr_cache.rs @@ -0,0 +1,233 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use libp2p::core::multiaddr::{Multiaddr, Protocol}; +use rand::seq::SliceRandom; +use std::collections::HashMap; + +use sp_authority_discovery::AuthorityId; +use sc_network::PeerId; + +/// The maximum number of authority connections initialized through the authority discovery module. +/// +/// In other words the maximum size of the `authority` peerset priority group. +const MAX_NUM_AUTHORITY_CONN: usize = 10; + +/// Cache for [`AuthorityId`] -> [`Vec`] and [`PeerId`] -> [`AuthorityId`] mappings. +pub(super) struct AddrCache { + authority_id_to_addresses: HashMap>, + peer_id_to_authority_id: HashMap, +} + +impl AddrCache { + pub fn new() -> Self { + AddrCache { + authority_id_to_addresses: HashMap::new(), + peer_id_to_authority_id: HashMap::new(), + } + } + + /// Inserts the given [`AuthorityId`] and [`Vec`] pair for future lookups by + /// [`AuthorityId`] or [`PeerId`]. + pub fn insert(&mut self, authority_id: AuthorityId, mut addresses: Vec) { + if addresses.is_empty() { + return; + } + + // Insert into `self.peer_id_to_authority_id`. + let peer_ids = addresses.iter() + .map(|a| peer_id_from_multiaddr(a)) + .filter_map(|peer_id| peer_id); + for peer_id in peer_ids { + self.peer_id_to_authority_id.insert(peer_id, authority_id.clone()); + } + + // Insert into `self.authority_id_to_addresses`. + addresses.sort_unstable_by(|a, b| a.as_ref().cmp(b.as_ref())); + self.authority_id_to_addresses.insert(authority_id, addresses); + } + + /// Returns the number of authority IDs in the cache. + pub fn num_ids(&self) -> usize { + self.authority_id_to_addresses.len() + } + + /// Returns the addresses for the given [`AuthorityId`]. + pub fn get_addresses_by_authority_id(&self, authority_id: &AuthorityId) -> Option<&Vec> { + self.authority_id_to_addresses.get(&authority_id) + } + + /// Returns the [`AuthorityId`] for the given [`PeerId`]. + pub fn get_authority_id_by_peer_id(&self, peer_id: &PeerId) -> Option<&AuthorityId> { + self.peer_id_to_authority_id.get(peer_id) + } + + /// Returns a single address for a random subset (maximum of [`MAX_NUM_AUTHORITY_CONN`]) of all + /// known authorities. 
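A rough usage sketch of the cache API around this point (`insert`, `get_addresses_by_authority_id`, `get_random_subset`), assuming it runs inside this module so that `AddrCache` and `MAX_NUM_AUTHORITY_CONN` are in scope; the seed and the address are arbitrary values chosen for illustration only.

```rust
use libp2p::core::multiaddr::{Multiaddr, Protocol};
use sc_network::PeerId;
use sp_authority_discovery::AuthorityPair;
use sp_core::crypto::Pair;

fn main() {
	// Arbitrary authority key and address, purely for illustration.
	let authority_id = AuthorityPair::from_seed_slice(&[1; 32]).unwrap().public();
	let addr = "/ip6/2001:db8::2/tcp/30333".parse::<Multiaddr>()
		.unwrap()
		.with(Protocol::P2p(PeerId::random().into()));

	let mut cache = AddrCache::new();
	cache.insert(authority_id.clone(), vec![addr.clone()]);

	// Lookup by authority id returns the cached (sorted) address list.
	assert_eq!(Some(&vec![addr]), cache.get_addresses_by_authority_id(&authority_id));

	// The peerset subset contains at most MAX_NUM_AUTHORITY_CONN (10) addresses,
	// one randomly chosen address per known authority.
	assert!(cache.get_random_subset().len() <= 10);
}
```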
+ pub fn get_random_subset(&self) -> Vec { + let mut rng = rand::thread_rng(); + + let mut addresses = self + .authority_id_to_addresses + .iter() + .filter_map(|(_authority_id, addresses)| { + debug_assert!(!addresses.is_empty()); + addresses + .choose(&mut rng) + }) + .collect::>(); + + addresses.sort_unstable_by(|a, b| a.as_ref().cmp(b.as_ref())); + addresses.dedup(); + + addresses + .choose_multiple(&mut rng, MAX_NUM_AUTHORITY_CONN) + .map(|a| (**a).clone()) + .collect() + } + + /// Removes all [`PeerId`]s and [`Multiaddr`]s from the cache that are not related to the given + /// [`AuthorityId`]s. + pub fn retain_ids(&mut self, authority_ids: &Vec) { + // The below logic could be replaced by `BtreeMap::drain_filter` once it stabilized. + let authority_ids_to_remove = self.authority_id_to_addresses.iter() + .filter(|(id, _addresses)| !authority_ids.contains(id)) + .map(|entry| entry.0) + .cloned() + .collect::>(); + + for authority_id_to_remove in authority_ids_to_remove { + // Remove other entries from `self.authority_id_to_addresses`. + let addresses = self.authority_id_to_addresses.remove(&authority_id_to_remove); + + // Remove other entries from `self.peer_id_to_authority_id`. + let peer_ids = addresses.iter() + .flatten() + .map(|a| peer_id_from_multiaddr(a)) + .filter_map(|peer_id| peer_id); + for peer_id in peer_ids { + if let Some(id) = self.peer_id_to_authority_id.remove(&peer_id) { + debug_assert_eq!(authority_id_to_remove, id); + } + } + } + } +} + +fn peer_id_from_multiaddr(addr: &Multiaddr) -> Option { + addr.iter().last().and_then(|protocol| if let Protocol::P2p(multihash) = protocol { + PeerId::from_multihash(multihash).ok() + } else { + None + }) +} + +#[cfg(test)] +mod tests { + use super::*; + + use libp2p::multihash; + use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult}; + use rand::Rng; + + use sp_authority_discovery::{AuthorityId, AuthorityPair}; + use sp_core::crypto::Pair; + + #[derive(Clone, Debug)] + struct TestAuthorityId(AuthorityId); + + impl Arbitrary for TestAuthorityId { + fn arbitrary(g: &mut G) -> Self { + let seed: [u8; 32] = g.gen(); + TestAuthorityId(AuthorityPair::from_seed_slice(&seed).unwrap().public()) + } + } + + #[derive(Clone, Debug)] + struct TestMultiaddr(Multiaddr); + + impl Arbitrary for TestMultiaddr { + fn arbitrary(g: &mut G) -> Self { + let seed: [u8; 32] = g.gen(); + let peer_id = PeerId::from_multihash( + multihash::wrap(multihash::Code::Sha2_256, &seed) + ).unwrap(); + let multiaddr = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333".parse::() + .unwrap() + .with(Protocol::P2p(peer_id.into())); + + TestMultiaddr(multiaddr) + } + } + + #[test] + fn retains_only_entries_of_provided_authority_ids() { + fn property( + first: (TestAuthorityId, TestMultiaddr), + second: (TestAuthorityId, TestMultiaddr), + third: (TestAuthorityId, TestMultiaddr), + ) -> TestResult { + let first: (AuthorityId, Multiaddr) = ((first.0).0, (first.1).0); + let second: (AuthorityId, Multiaddr) = ((second.0).0, (second.1).0); + let third: (AuthorityId, Multiaddr) = ((third.0).0, (third.1).0); + + let mut cache = AddrCache::new(); + + cache.insert(first.0.clone(), vec![first.1.clone()]); + cache.insert(second.0.clone(), vec![second.1.clone()]); + cache.insert(third.0.clone(), vec![third.1.clone()]); + + let subset = cache.get_random_subset(); + assert!( + subset.contains(&first.1) && subset.contains(&second.1) && subset.contains(&third.1), + "Expect initial subset to contain all authorities.", + ); + assert_eq!( + Some(&vec![third.1.clone()]), + 
cache.get_addresses_by_authority_id(&third.0), + "Expect `get_addresses_by_authority_id` to return addresses of third authority." + ); + assert_eq!( + Some(&third.0), + cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&third.1).unwrap()), + "Expect `get_authority_id_by_peer_id` to return `AuthorityId` of third authority." + ); + + cache.retain_ids(&vec![first.0, second.0]); + + let subset = cache.get_random_subset(); + assert!( + subset.contains(&first.1) || subset.contains(&second.1), + "Expected both first and second authority." + ); + assert!(!subset.contains(&third.1), "Did not expect address from third authority"); + assert_eq!( + None, cache.get_addresses_by_authority_id(&third.0), + "Expect `get_addresses_by_authority_id` to not return `None` for third authority." + ); + assert_eq!( + None, cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&third.1).unwrap()), + "Expect `get_authority_id_by_peer_id` to return `None` for third authority." + ); + + TestResult::passed() + } + + QuickCheck::new() + .max_tests(10) + .quickcheck(property as fn(_, _, _) -> TestResult) + } +} diff --git a/client/authority-discovery/src/schema/dht.proto b/client/authority-discovery/src/worker/schema/dht.proto similarity index 100% rename from client/authority-discovery/src/schema/dht.proto rename to client/authority-discovery/src/worker/schema/dht.proto diff --git a/client/authority-discovery/src/worker/tests.rs b/client/authority-discovery/src/worker/tests.rs new file mode 100644 index 00000000000..68aadca7a7f --- /dev/null +++ b/client/authority-discovery/src/worker/tests.rs @@ -0,0 +1,693 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use crate::worker::schema; + +use std::{iter::FromIterator, sync::{Arc, Mutex}}; + +use futures::channel::mpsc::channel; +use futures::executor::{block_on, LocalPool}; +use futures::future::{poll_fn, FutureExt}; +use futures::sink::SinkExt; +use futures::task::LocalSpawn; +use futures::poll; +use libp2p::{kad, core::multiaddr, PeerId}; + +use sp_api::{ProvideRuntimeApi, ApiRef}; +use sp_core::{crypto::Public, testing::KeyStore}; +use sp_runtime::traits::{Zero, Block as BlockT, NumberFor}; +use substrate_test_runtime_client::runtime::Block; + +use super::*; + +#[test] +fn interval_at_with_start_now() { + let start = Instant::now(); + + let mut interval = interval_at( + std::time::Instant::now(), + std::time::Duration::from_secs(10), + ); + + futures::executor::block_on(async { + interval.next().await; + }); + + assert!( + Instant::now().saturating_duration_since(start) < Duration::from_secs(1), + "Expected low resolution instant interval to fire within less than a second.", + ); +} + +#[test] +fn interval_at_is_queuing_ticks() { + let start = Instant::now(); + + let interval = interval_at(start, std::time::Duration::from_millis(100)); + + // Let's wait for 200ms, thus 3 elements should be queued up (1st at 0ms, 2nd at 100ms, 3rd + // at 200ms). + std::thread::sleep(Duration::from_millis(200)); + + futures::executor::block_on(async { + interval.take(3).collect::>().await; + }); + + // Make sure we did not wait for more than 300 ms, which would imply that `at_interval` is + // not queuing ticks. + assert!( + Instant::now().saturating_duration_since(start) < Duration::from_millis(300), + "Expect interval to /queue/ events when not polled for a while.", + ); +} + +#[test] +fn interval_at_with_initial_delay() { + let start = Instant::now(); + + let mut interval = interval_at( + std::time::Instant::now() + Duration::from_millis(100), + std::time::Duration::from_secs(10), + ); + + futures::executor::block_on(async { + interval.next().await; + }); + + assert!( + Instant::now().saturating_duration_since(start) > Duration::from_millis(100), + "Expected interval with initial delay not to fire right away.", + ); +} + +#[derive(Clone)] +pub(crate) struct TestApi { + pub(crate) authorities: Vec, +} + +impl ProvideRuntimeApi for TestApi { + type Api = RuntimeApi; + + fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> { + RuntimeApi { + authorities: self.authorities.clone(), + }.into() + } +} + +/// Blockchain database header backend. Does not perform any validation. +impl HeaderBackend for TestApi { + fn header( + &self, + _id: BlockId, + ) -> std::result::Result, sp_blockchain::Error> { + Ok(None) + } + + fn info(&self) -> sc_client_api::blockchain::Info { + sc_client_api::blockchain::Info { + best_hash: Default::default(), + best_number: Zero::zero(), + finalized_hash: Default::default(), + finalized_number: Zero::zero(), + genesis_hash: Default::default(), + number_leaves: Default::default(), + } + } + + fn status( + &self, + _id: BlockId, + ) -> std::result::Result { + Ok(sc_client_api::blockchain::BlockStatus::Unknown) + } + + fn number( + &self, + _hash: Block::Hash, + ) -> std::result::Result>, sp_blockchain::Error> { + Ok(None) + } + + fn hash( + &self, + _number: NumberFor, + ) -> std::result::Result, sp_blockchain::Error> { + Ok(None) + } +} + +pub(crate) struct RuntimeApi { + authorities: Vec, +} + +sp_api::mock_impl_runtime_apis! 
{ + impl AuthorityDiscoveryApi for RuntimeApi { + type Error = sp_blockchain::Error; + + fn authorities(&self) -> Vec { + self.authorities.clone() + } + } +} + +pub struct TestNetwork { + peer_id: PeerId, + // Whenever functions on `TestNetwork` are called, the function arguments are added to the + // vectors below. + pub put_value_call: Arc)>>>, + pub get_value_call: Arc>>, + pub set_priority_group_call: Arc)>>>, +} + +impl Default for TestNetwork { + fn default() -> Self { + TestNetwork { + peer_id: PeerId::random(), + put_value_call: Default::default(), + get_value_call: Default::default(), + set_priority_group_call: Default::default(), + } + } +} + +impl NetworkProvider for TestNetwork { + fn set_priority_group( + &self, + group_id: String, + peers: HashSet, + ) -> std::result::Result<(), String> { + self.set_priority_group_call + .lock() + .unwrap() + .push((group_id, peers)); + Ok(()) + } + fn put_value(&self, key: kad::record::Key, value: Vec) { + self.put_value_call.lock().unwrap().push((key, value)); + } + fn get_value(&self, key: &kad::record::Key) { + self.get_value_call.lock().unwrap().push(key.clone()); + } +} + +impl NetworkStateInfo for TestNetwork { + fn local_peer_id(&self) -> PeerId { + self.peer_id.clone() + } + + fn external_addresses(&self) -> Vec { + vec!["/ip6/2001:db8::/tcp/30333".parse().unwrap()] + } +} + +#[test] +fn new_registers_metrics() { + let (_dht_event_tx, dht_event_rx) = channel(1000); + let network: Arc = Arc::new(Default::default()); + let key_store = KeyStore::new(); + let test_api = Arc::new(TestApi { + authorities: vec![], + }); + + let registry = prometheus_endpoint::Registry::new(); + + let (_to_worker, from_service) = mpsc::channel(0); + Worker::new( + from_service, + test_api, + network.clone(), + vec![], + dht_event_rx.boxed(), + Role::Authority(key_store), + Some(registry.clone()), + ); + + assert!(registry.gather().len() > 0); +} + +#[test] +fn request_addresses_of_others_triggers_dht_get_query() { + let _ = ::env_logger::try_init(); + let (_dht_event_tx, dht_event_rx) = channel(1000); + + // Generate authority keys + let authority_1_key_pair = AuthorityPair::from_seed_slice(&[1; 32]).unwrap(); + let authority_2_key_pair = AuthorityPair::from_seed_slice(&[2; 32]).unwrap(); + + let test_api = Arc::new(TestApi { + authorities: vec![authority_1_key_pair.public(), authority_2_key_pair.public()], + }); + + let network: Arc = Arc::new(Default::default()); + let key_store = KeyStore::new(); + + + let (_to_worker, from_service) = mpsc::channel(0); + let mut worker = Worker::new( + from_service, + test_api, + network.clone(), + vec![], + dht_event_rx.boxed(), + Role::Authority(key_store), + None, + ); + + worker.request_addresses_of_others().unwrap(); + + // Expect authority discovery to request new records from the dht. + assert_eq!(network.get_value_call.lock().unwrap().len(), 2); +} + +#[test] +fn publish_discover_cycle() { + let _ = ::env_logger::try_init(); + + // Node A publishing its address. 
+ + let (_dht_event_tx, dht_event_rx) = channel(1000); + + let network: Arc = Arc::new(Default::default()); + let node_a_multiaddr = { + let peer_id = network.local_peer_id(); + let address = network.external_addresses().pop().unwrap(); + + address.with(multiaddr::Protocol::P2p( + peer_id.into(), + )) + }; + + let key_store = KeyStore::new(); + let node_a_public = key_store + .write() + .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None) + .unwrap(); + let test_api = Arc::new(TestApi { + authorities: vec![node_a_public.into()], + }); + + let (_to_worker, from_service) = mpsc::channel(0); + let mut worker = Worker::new( + from_service, + test_api, + network.clone(), + vec![], + dht_event_rx.boxed(), + Role::Authority(key_store), + None, + ); + + worker.publish_ext_addresses().unwrap(); + + // Expect authority discovery to put a new record onto the dht. + assert_eq!(network.put_value_call.lock().unwrap().len(), 1); + + let dht_event = { + let (key, value) = network.put_value_call.lock().unwrap().pop().unwrap(); + sc_network::DhtEvent::ValueFound(vec![(key, value)]) + }; + + // Node B discovering node A's address. + + let (mut dht_event_tx, dht_event_rx) = channel(1000); + let test_api = Arc::new(TestApi { + // Make sure node B identifies node A as an authority. + authorities: vec![node_a_public.into()], + }); + let network: Arc = Arc::new(Default::default()); + let key_store = KeyStore::new(); + + let (_to_worker, from_service) = mpsc::channel(0); + let mut worker = Worker::new( + from_service, + test_api, + network.clone(), + vec![], + dht_event_rx.boxed(), + Role::Authority(key_store), + None, + ); + + dht_event_tx.try_send(dht_event).unwrap(); + + let f = |cx: &mut Context<'_>| -> Poll<()> { + // Make authority discovery handle the event. + if let Poll::Ready(e) = worker.handle_dht_events(cx) { + panic!("Unexpected error: {:?}", e); + } + worker.set_priority_group().unwrap(); + + // Expect authority discovery to set the priority set. + assert_eq!(network.set_priority_group_call.lock().unwrap().len(), 1); + + assert_eq!( + network.set_priority_group_call.lock().unwrap()[0], + ( + "authorities".to_string(), + HashSet::from_iter(vec![node_a_multiaddr.clone()].into_iter()) + ) + ); + + Poll::Ready(()) + }; + + let _ = block_on(poll_fn(f)); +} + +#[test] +fn terminate_when_event_stream_terminates() { + let (dht_event_tx, dht_event_rx) = channel(1000); + let network: Arc = Arc::new(Default::default()); + let key_store = KeyStore::new(); + let test_api = Arc::new(TestApi { + authorities: vec![], + }); + + let (_to_worker, from_service) = mpsc::channel(0); + let mut worker = Worker::new( + from_service, + test_api, + network.clone(), + vec![], + dht_event_rx.boxed(), + Role::Authority(key_store), + None, + ); + + block_on(async { + assert_eq!(Poll::Pending, poll!(&mut worker)); + + // Simulate termination of the network through dropping the sender side of the dht event + // channel. 
+ drop(dht_event_tx); + + assert_eq!( + Poll::Ready(()), poll!(&mut worker), + "Expect the authority discovery module to terminate once the sending side of the dht \ + event channel is terminated.", + ); + }); +} + +#[test] +fn continue_operating_when_service_channel_is_dropped() { + let (_dht_event_tx, dht_event_rx) = channel(0); + let network: Arc = Arc::new(Default::default()); + let key_store = KeyStore::new(); + let test_api = Arc::new(TestApi { + authorities: vec![], + }); + + let (to_worker, from_service) = mpsc::channel(0); + let mut worker = Worker::new( + from_service, + test_api, + network.clone(), + vec![], + dht_event_rx.boxed(), + Role::Authority(key_store), + None, + ); + + block_on(async { + assert_eq!(Poll::Pending, poll!(&mut worker)); + + drop(to_worker); + + for _ in 0..100 { + assert_eq!( + Poll::Pending, poll!(&mut worker), + "Expect authority discovery `Worker` not to panic when service channel is dropped.", + ); + } + }); +} + +#[test] +fn dont_stop_polling_when_error_is_returned() { + #[derive(PartialEq, Debug)] + enum Event { + Processed, + End, + }; + + let (mut dht_event_tx, dht_event_rx) = channel(1000); + let (mut discovery_update_tx, mut discovery_update_rx) = channel(1000); + let network: Arc = Arc::new(Default::default()); + let key_store = KeyStore::new(); + let test_api = Arc::new(TestApi { + authorities: vec![], + }); + let mut pool = LocalPool::new(); + + let (_to_worker, from_service) = mpsc::channel(0); + let mut worker = Worker::new( + from_service, + test_api, + network.clone(), + vec![], + dht_event_rx.boxed(), + Role::Authority(key_store), + None, + ); + + // Spawn the authority discovery to make sure it is polled independently. + // + // As this is a local pool, only one future at a time will have the CPU and + // can make progress until the future returns `Pending`. + pool.spawner().spawn_local_obj( + futures::future::poll_fn(move |ctx| { + match std::pin::Pin::new(&mut worker).poll(ctx) { + Poll::Ready(()) => {}, + Poll::Pending => { + discovery_update_tx.send(Event::Processed).now_or_never(); + return Poll::Pending; + }, + } + let _ = discovery_update_tx.send(Event::End).now_or_never().unwrap(); + Poll::Ready(()) + }).boxed_local().into(), + ).expect("Spawns authority discovery"); + + pool.run_until( + // The future that drives the event stream + async { + // Send an event that should generate an error + let _ = dht_event_tx.send(DhtEvent::ValueFound(Default::default())).now_or_never(); + // Send the same event again to make sure that the event stream needs to be polled twice + // to be woken up again. + let _ = dht_event_tx.send(DhtEvent::ValueFound(Default::default())).now_or_never(); + + // Now we call `await` and give the control to the authority discovery future. + assert_eq!(Some(Event::Processed), discovery_update_rx.next().await); + + // Drop the event rx to stop the authority discovery. If it was polled correctly, it + // should end properly. + drop(dht_event_tx); + + assert!( + discovery_update_rx.collect::>() + .await + .into_iter() + .any(|evt| evt == Event::End), + "The authority discovery should have ended", + ); + } + ); +} + +/// In the scenario of a validator publishing the address of its sentry node to +/// the DHT, said sentry node should not add its own Multiaddr to the +/// peerset "authority" priority group. 
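The guarantee described above comes down to comparing the peer id embedded in each received `Multiaddr` with the local peer id, as in this reduced sketch of the own-address check performed while handling found DHT values; the `keep` helper and the addresses are made up for illustration.

```rust
use libp2p::core::multiaddr::{Multiaddr, Protocol};
use sc_network::PeerId;

/// Keep only addresses that carry a peer id different from our own.
fn keep(addr: &Multiaddr, local: &PeerId) -> bool {
	addr.iter().any(|protocol| match protocol {
		Protocol::P2p(hash) => PeerId::from_multihash(hash)
			.map(|peer_id| peer_id != *local)
			.unwrap_or(false),
		_ => false,
	})
}

fn main() {
	let local = PeerId::random();
	let own_addr = "/ip6/::1/tcp/30333".parse::<Multiaddr>().unwrap()
		.with(Protocol::P2p(local.clone().into()));
	let other_addr = "/ip6/::2/tcp/30333".parse::<Multiaddr>().unwrap()
		.with(Protocol::P2p(PeerId::random().into()));

	assert!(!keep(&own_addr, &local));
	assert!(keep(&other_addr, &local));
}
```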
+#[test] +fn never_add_own_address_to_priority_group() { + let validator_key_store = KeyStore::new(); + let validator_public = validator_key_store + .write() + .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None) + .unwrap(); + + let sentry_network: Arc = Arc::new(Default::default()); + + let sentry_multiaddr = { + let peer_id = sentry_network.local_peer_id(); + let address: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333".parse().unwrap(); + + address.with(multiaddr::Protocol::P2p(peer_id.into())) + }; + + // Address of some other sentry node of `validator`. + let random_multiaddr = { + let peer_id = PeerId::random(); + let address: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:1/tcp/30333".parse().unwrap(); + + address.with(multiaddr::Protocol::P2p( + peer_id.into(), + )) + }; + + let dht_event = { + let addresses = vec![ + sentry_multiaddr.to_vec(), + random_multiaddr.to_vec(), + ]; + + let mut serialized_addresses = vec![]; + schema::AuthorityAddresses { addresses } + .encode(&mut serialized_addresses) + .map_err(Error::EncodingProto) + .unwrap(); + + let signature = validator_key_store.read() + .sign_with( + key_types::AUTHORITY_DISCOVERY, + &validator_public.clone().into(), + serialized_addresses.as_slice(), + ) + .map_err(|_| Error::Signing) + .unwrap(); + + let mut signed_addresses = vec![]; + schema::SignedAuthorityAddresses { + addresses: serialized_addresses.clone(), + signature, + } + .encode(&mut signed_addresses) + .map_err(Error::EncodingProto) + .unwrap(); + + let key = hash_authority_id(&validator_public.to_raw_vec()); + let value = signed_addresses; + (key, value) + }; + + let (_dht_event_tx, dht_event_rx) = channel(1); + let sentry_test_api = Arc::new(TestApi { + // Make sure the sentry node identifies its validator as an authority. + authorities: vec![validator_public.into()], + }); + + let (_to_worker, from_service) = mpsc::channel(0); + let mut sentry_worker = Worker::new( + from_service, + sentry_test_api, + sentry_network.clone(), + vec![], + dht_event_rx.boxed(), + Role::Sentry, + None, + ); + + sentry_worker.handle_dht_value_found_event(vec![dht_event]).unwrap(); + sentry_worker.set_priority_group().unwrap(); + + assert_eq!( + sentry_network.set_priority_group_call.lock().unwrap().len(), 1, + "Expect authority discovery to set the priority set.", + ); + + assert_eq!( + sentry_network.set_priority_group_call.lock().unwrap()[0], + ( + "authorities".to_string(), + HashSet::from_iter(vec![random_multiaddr.clone()].into_iter(),) + ), + "Expect authority discovery to only add `random_multiaddr`." 
+ ); +} + +#[test] +fn do_not_cache_addresses_without_peer_id() { + let remote_key_store = KeyStore::new(); + let remote_public = remote_key_store + .write() + .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None) + .unwrap(); + + let multiaddr_with_peer_id = { + let peer_id = PeerId::random(); + let address: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333".parse().unwrap(); + + address.with(multiaddr::Protocol::P2p(peer_id.into())) + }; + + let multiaddr_without_peer_id: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:1/tcp/30333".parse().unwrap(); + + let dht_event = { + let addresses = vec![ + multiaddr_with_peer_id.to_vec(), + multiaddr_without_peer_id.to_vec(), + ]; + + let mut serialized_addresses = vec![]; + schema::AuthorityAddresses { addresses } + .encode(&mut serialized_addresses) + .map_err(Error::EncodingProto) + .unwrap(); + + let signature = remote_key_store.read() + .sign_with( + key_types::AUTHORITY_DISCOVERY, + &remote_public.clone().into(), + serialized_addresses.as_slice(), + ) + .map_err(|_| Error::Signing) + .unwrap(); + + let mut signed_addresses = vec![]; + schema::SignedAuthorityAddresses { + addresses: serialized_addresses.clone(), + signature, + } + .encode(&mut signed_addresses) + .map_err(Error::EncodingProto) + .unwrap(); + + let key = hash_authority_id(&remote_public.to_raw_vec()); + let value = signed_addresses; + (key, value) + }; + + let (_dht_event_tx, dht_event_rx) = channel(1); + let local_test_api = Arc::new(TestApi { + // Make sure the sentry node identifies its validator as an authority. + authorities: vec![remote_public.into()], + }); + let local_network: Arc = Arc::new(Default::default()); + let local_key_store = KeyStore::new(); + + let (_to_worker, from_service) = mpsc::channel(0); + let mut local_worker = Worker::new( + from_service, + local_test_api, + local_network.clone(), + vec![], + dht_event_rx.boxed(), + Role::Authority(local_key_store), + None, + ); + + local_worker.handle_dht_value_found_event(vec![dht_event]).unwrap(); + + assert_eq!( + Some(&vec![multiaddr_with_peer_id]), + local_worker.addr_cache.get_addresses_by_authority_id(&remote_public.into()), + "Expect worker to only cache `Multiaddr`s with `PeerId`s.", + ); +} -- GitLab From ed4f7a113073c08cba9020c3f0dd6c8f0441f857 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 12 Aug 2020 21:35:10 +0200 Subject: [PATCH 090/132] Make `HexDisplay` useable in `no_std` (#6883) Actually I use this quite often when debugging some WASM bugs and there is no harm in enabling it by default. Before I just always copied it everytime I needed it. --- primitives/core/src/hexdisplay.rs | 3 ++- primitives/core/src/lib.rs | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/primitives/core/src/hexdisplay.rs b/primitives/core/src/hexdisplay.rs index f4c8fea8f22..9d2b7a12d03 100644 --- a/primitives/core/src/hexdisplay.rs +++ b/primitives/core/src/hexdisplay.rs @@ -67,7 +67,7 @@ impl AsBytesRef for [u8] { fn as_bytes_ref(&self) -> &[u8] { &self } } -impl AsBytesRef for Vec { +impl AsBytesRef for sp_std::vec::Vec { fn as_bytes_ref(&self) -> &[u8] { &self } } @@ -84,6 +84,7 @@ impl_non_endians!([u8; 1], [u8; 2], [u8; 3], [u8; 4], [u8; 5], [u8; 6], [u8; 7], [u8; 48], [u8; 56], [u8; 64], [u8; 65], [u8; 80], [u8; 96], [u8; 112], [u8; 128]); /// Format into ASCII + # + hex, suitable for storage key preimages. 
+#[cfg(feature = "std")] pub fn ascii_format(asciish: &[u8]) -> String { let mut r = String::new(); let mut latch = false; diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index 7e52efd52e9..c8a289639d4 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -52,7 +52,6 @@ pub use impl_serde::serialize as bytes; pub mod hashing; #[cfg(feature = "full_crypto")] pub use hashing::{blake2_128, blake2_256, twox_64, twox_128, twox_256, keccak_256}; -#[cfg(feature = "std")] pub mod hexdisplay; pub mod crypto; -- GitLab From d019a66c863ebcf27b338cd29d5bff1c9275b794 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Thu, 13 Aug 2020 14:53:42 +0200 Subject: [PATCH 091/132] pallet-evm: avoid double fee payment (#6858) * pallet-evm: avoid double fee payment * Only skip fee payment for successful calls --- frame/evm/src/lib.rs | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/frame/evm/src/lib.rs b/frame/evm/src/lib.rs index 910030383e1..c837f448fca 100644 --- a/frame/evm/src/lib.rs +++ b/frame/evm/src/lib.rs @@ -33,13 +33,12 @@ use codec::{Encode, Decode}; #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; use frame_support::{ensure, decl_module, decl_storage, decl_event, decl_error}; -use frame_support::weights::Weight; +use frame_support::weights::{Weight, Pays}; use frame_support::traits::{Currency, ExistenceRequirement, Get}; +use frame_support::dispatch::DispatchResultWithPostInfo; use frame_system::RawOrigin; use sp_core::{U256, H256, H160, Hasher}; -use sp_runtime::{ - DispatchResult, AccountId32, traits::{UniqueSaturatedInto, SaturatedConversion, BadOrigin}, -}; +use sp_runtime::{AccountId32, traits::{UniqueSaturatedInto, SaturatedConversion, BadOrigin}}; use sha3::{Digest, Keccak256}; pub use evm::{ExitReason, ExitSucceed, ExitError, ExitRevert, ExitFatal}; use evm::Config; @@ -325,7 +324,7 @@ decl_module! { gas_limit: u32, gas_price: U256, nonce: Option, - ) -> DispatchResult { + ) -> DispatchResultWithPostInfo { T::CallOrigin::ensure_address_origin(&source, origin)?; match Self::execute_call( @@ -346,7 +345,7 @@ decl_module! { }, } - Ok(()) + Ok(Pays::No.into()) } /// Issue an EVM create operation. This is similar to a contract creation transaction in @@ -360,7 +359,7 @@ decl_module! { gas_limit: u32, gas_price: U256, nonce: Option, - ) -> DispatchResult { + ) -> DispatchResultWithPostInfo { T::CallOrigin::ensure_address_origin(&source, origin)?; match Self::execute_create( @@ -380,7 +379,7 @@ decl_module! { }, } - Ok(()) + Ok(Pays::No.into()) } /// Issue an EVM create2 operation. @@ -394,7 +393,7 @@ decl_module! { gas_limit: u32, gas_price: U256, nonce: Option, - ) -> DispatchResult { + ) -> DispatchResultWithPostInfo { T::CallOrigin::ensure_address_origin(&source, origin)?; match Self::execute_create2( @@ -415,7 +414,7 @@ decl_module! 
{ }, } - Ok(()) + Ok(Pays::No.into()) } } } -- GitLab From 4d3c948a2fc1bd074939f3c05d63cca85c00e345 Mon Sep 17 00:00:00 2001 From: Alexander Popiak Date: Thu, 13 Aug 2020 18:54:05 +0200 Subject: [PATCH 092/132] add runtime migrations to release notes/changelog (#6875) --- .maintain/gitlab/generate_changelog.sh | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/.maintain/gitlab/generate_changelog.sh b/.maintain/gitlab/generate_changelog.sh index b872d324438..c13871f50ee 100755 --- a/.maintain/gitlab/generate_changelog.sh +++ b/.maintain/gitlab/generate_changelog.sh @@ -11,6 +11,7 @@ runtime_changes="" api_changes="" client_changes="" changes="" +migrations="" while IFS= read -r line; do pr_id=$(echo "$line" | sed -E 's/.*#([0-9]+)\)$/\1/') @@ -29,6 +30,10 @@ $line" fi if has_label 'paritytech/substrate' "$pr_id" 'B7-runtimenoteworthy'; then runtime_changes="$runtime_changes +$line" + fi + if has_label 'paritytech/substrate' "$pr_id" 'D1-runtime-migration'; then + migrations="$migrations $line" fi done <<< "$all_changes" @@ -36,7 +41,8 @@ done <<< "$all_changes" # Make the substrate section if there are any substrate changes if [ -n "$runtime_changes" ] || [ -n "$api_changes" ] || - [ -n "$client_changes" ]; then + [ -n "$client_changes" ] || + [ -n "$migrations" ]; then changes=$(cat << EOF Substrate changes ----------------- @@ -68,5 +74,12 @@ $api_changes" $changes" fi +if [ -n "$migrations" ]; then + changes="$changes + +Runtime Migrations +------------------ +$migrations" +fi echo "$changes" -- GitLab From 8993a755c764627e1568e7fabb4013936e81b872 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Thu, 13 Aug 2020 19:38:14 +0100 Subject: [PATCH 093/132] network: don't log re-discovered addresses (#6881) * network: move LruHashSet to network crate utils * network: don't log re-discovered external addresses * Update client/network/src/utils.rs Co-authored-by: mattrutherford <44339188+mattrutherford@users.noreply.github.com> Co-authored-by: mattrutherford <44339188+mattrutherford@users.noreply.github.com> --- client/network/src/discovery.rs | 29 +++++++++-- client/network/src/protocol.rs | 4 +- client/network/src/protocol/util.rs | 76 ----------------------------- client/network/src/utils.rs | 74 +++++++++++++++++++++++++--- 4 files changed, 94 insertions(+), 89 deletions(-) delete mode 100644 client/network/src/protocol/util.rs diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index 7cb977e8e1a..e349b08c41d 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -46,6 +46,7 @@ //! use crate::config::ProtocolId; +use crate::utils::LruHashSet; use futures::prelude::*; use futures_timer::Delay; use ip_network::IpNetwork; @@ -63,10 +64,15 @@ use libp2p::swarm::toggle::Toggle; use libp2p::mdns::{Mdns, MdnsEvent}; use libp2p::multiaddr::Protocol; use log::{debug, info, trace, warn}; -use std::{cmp, collections::{HashMap, HashSet, VecDeque}, io, time::Duration}; +use std::{cmp, collections::{HashMap, HashSet, VecDeque}, io, num::NonZeroUsize, time::Duration}; use std::task::{Context, Poll}; use sp_core::hexdisplay::HexDisplay; +/// Maximum number of known external addresses that we will cache. +/// This only affects whether we will log whenever we (re-)discover +/// a given address. +const MAX_KNOWN_EXTERNAL_ADDRESSES: usize = 32; + /// `DiscoveryBehaviour` configuration. 
/// /// Note: In order to discover nodes or load and store values via Kademlia one has to add at least @@ -190,7 +196,11 @@ impl DiscoveryConfig { } else { None.into() }, - allow_non_globals_in_dht: self.allow_non_globals_in_dht + allow_non_globals_in_dht: self.allow_non_globals_in_dht, + known_external_addresses: LruHashSet::new( + NonZeroUsize::new(MAX_KNOWN_EXTERNAL_ADDRESSES) + .expect("value is a constant; constant is non-zero; qed.") + ), } } } @@ -221,7 +231,9 @@ pub struct DiscoveryBehaviour { /// Number of active connections over which we interrupt the discovery process. discovery_only_if_under_num: u64, /// Should non-global addresses be added to the DHT? - allow_non_globals_in_dht: bool + allow_non_globals_in_dht: bool, + /// A cache of discovered external addresses. Only used for logging purposes. + known_external_addresses: LruHashSet, } impl DiscoveryBehaviour { @@ -507,7 +519,16 @@ impl NetworkBehaviour for DiscoveryBehaviour { fn inject_new_external_addr(&mut self, addr: &Multiaddr) { let new_addr = addr.clone() .with(Protocol::P2p(self.local_peer_id.clone().into())); - info!(target: "sub-libp2p", "🔍 Discovered new external address for our node: {}", new_addr); + + // NOTE: we might re-discover the same address multiple times + // in which case we just want to refrain from logging. + if self.known_external_addresses.insert(new_addr.clone()) { + info!(target: "sub-libp2p", + "🔍 Discovered new external address for our node: {}", + new_addr, + ); + } + for k in self.kademlias.values_mut() { NetworkBehaviour::inject_new_external_addr(k, addr) } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index f1ce6d2b560..c1c9ef02ea6 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -21,7 +21,7 @@ use crate::{ chain::{Client, FinalityProofProvider}, config::{BoxFinalityProofRequestBuilder, ProtocolId, TransactionPool, TransactionImportFuture, TransactionImport}, error, - utils::interval + utils::{interval, LruHashSet}, }; use bytes::{Bytes, BytesMut}; @@ -60,11 +60,9 @@ use std::fmt::Write; use std::{cmp, io, num::NonZeroUsize, pin::Pin, task::Poll, time}; use log::{log, Level, trace, debug, warn, error}; use sc_client_api::{ChangesProof, StorageProof}; -use util::LruHashSet; use wasm_timer::Instant; mod generic_proto; -mod util; pub mod message; pub mod event; diff --git a/client/network/src/protocol/util.rs b/client/network/src/protocol/util.rs deleted file mode 100644 index 9ba9bf6ae89..00000000000 --- a/client/network/src/protocol/util.rs +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -use linked_hash_set::LinkedHashSet; -use std::{hash::Hash, num::NonZeroUsize}; - -/// Wrapper around `LinkedHashSet` which grows bounded. -/// -/// In the limit, for each element inserted the oldest existing element will be removed. 
-#[derive(Debug, Clone)] -pub(crate) struct LruHashSet { - set: LinkedHashSet, - limit: NonZeroUsize -} - -impl LruHashSet { - /// Create a new `LruHashSet` with the given (exclusive) limit. - pub(crate) fn new(limit: NonZeroUsize) -> Self { - Self { set: LinkedHashSet::new(), limit } - } - - /// Insert element into the set. - /// - /// Returns `true` if this is a new element to the set, `false` otherwise. - /// Maintains the limit of the set by removing the oldest entry if necessary. - /// Inserting the same element will update its LRU position. - pub(crate) fn insert(&mut self, e: T) -> bool { - if self.set.insert(e) { - if self.set.len() == usize::from(self.limit) { - self.set.pop_front(); // remove oldest entry - } - return true - } - false - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn maintains_limit() { - let three = NonZeroUsize::new(3).unwrap(); - let mut set = LruHashSet::::new(three); - - // First element. - assert!(set.insert(1)); - assert_eq!(vec![&1], set.set.iter().collect::>()); - - // Second element. - assert!(set.insert(2)); - assert_eq!(vec![&1, &2], set.set.iter().collect::>()); - - // Inserting the same element updates its LRU position. - assert!(!set.insert(1)); - assert_eq!(vec![&2, &1], set.set.iter().collect::>()); - - // We reached the limit. The next element forces the oldest one out. - assert!(set.insert(3)); - assert_eq!(vec![&1, &3], set.set.iter().collect::>()); - } -} diff --git a/client/network/src/utils.rs b/client/network/src/utils.rs index f13505d0124..490e2ced382 100644 --- a/client/network/src/utils.rs +++ b/client/network/src/utils.rs @@ -14,12 +14,74 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use std::time::Duration; -use futures::{FutureExt, Stream, StreamExt, stream::unfold}; +use futures::{stream::unfold, FutureExt, Stream, StreamExt}; use futures_timer::Delay; +use linked_hash_set::LinkedHashSet; +use std::time::Duration; +use std::{hash::Hash, num::NonZeroUsize}; + +/// Creates a stream that returns a new value every `duration`. +pub fn interval(duration: Duration) -> impl Stream + Unpin { + unfold((), move |_| Delay::new(duration).map(|_| Some(((), ())))).map(drop) +} + +/// Wrapper around `LinkedHashSet` with bounded growth. +/// +/// In the limit, for each element inserted the oldest existing element will be removed. +#[derive(Debug, Clone)] +pub struct LruHashSet { + set: LinkedHashSet, + limit: NonZeroUsize, +} + +impl LruHashSet { + /// Create a new `LruHashSet` with the given (exclusive) limit. + pub fn new(limit: NonZeroUsize) -> Self { + Self { + set: LinkedHashSet::new(), + limit, + } + } + + /// Insert element into the set. + /// + /// Returns `true` if this is a new element to the set, `false` otherwise. + /// Maintains the limit of the set by removing the oldest entry if necessary. + /// Inserting the same element will update its LRU position. + pub fn insert(&mut self, e: T) -> bool { + if self.set.insert(e) { + if self.set.len() == usize::from(self.limit) { + self.set.pop_front(); // remove oldest entry + } + return true; + } + false + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn maintains_limit() { + let three = NonZeroUsize::new(3).unwrap(); + let mut set = LruHashSet::::new(three); + + // First element. + assert!(set.insert(1)); + assert_eq!(vec![&1], set.set.iter().collect::>()); + + // Second element. 
+ assert!(set.insert(2)); + assert_eq!(vec![&1, &2], set.set.iter().collect::>()); + + // Inserting the same element updates its LRU position. + assert!(!set.insert(1)); + assert_eq!(vec![&2, &1], set.set.iter().collect::>()); -pub fn interval(duration: Duration) -> impl Stream + Unpin { - unfold((), move |_| { - Delay::new(duration).map(|_| Some(((), ()))) - }).map(drop) + // We reached the limit. The next element forces the oldest one out. + assert!(set.insert(3)); + assert_eq!(vec![&1, &3], set.set.iter().collect::>()); + } } -- GitLab From f16cbc16c3f0f5673b44951cc4933e52c9fd89ac Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Thu, 13 Aug 2020 23:30:22 +0200 Subject: [PATCH 094/132] More renaming to move away from phragmen. (#6886) --- Cargo.lock | 4 +- frame/staking/src/lib.rs | 56 +++---- frame/staking/src/mock.rs | 4 +- frame/staking/src/tests.rs | 158 +++++++++--------- .../npos-elections/compact/src/assignment.rs | 36 ++-- .../npos-elections/compact/src/codec.rs | 46 ++--- primitives/npos-elections/compact/src/lib.rs | 8 +- primitives/npos-elections/src/mock.rs | 20 +-- 8 files changed, 166 insertions(+), 166 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dc664f97dde..3ec07bb080a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8624,8 +8624,8 @@ name = "substrate-test-utils-derive" version = "0.8.0-rc5" dependencies = [ "proc-macro-crate", - "quote 1.0.6", - "syn 1.0.33", + "quote", + "syn", ] [[package]] diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 858bb279a85..8f5b8561eb4 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -1300,34 +1300,34 @@ decl_error! { /// Rewards for this era have already been claimed for this validator. AlreadyClaimed, /// The submitted result is received out of the open window. - PhragmenEarlySubmission, + OffchainElectionEarlySubmission, /// The submitted result is not as good as the one stored on chain. - PhragmenWeakSubmission, + OffchainElectionWeakSubmission, /// The snapshot data of the current window is missing. SnapshotUnavailable, /// Incorrect number of winners were presented. - PhragmenBogusWinnerCount, + OffchainElectionBogusWinnerCount, /// One of the submitted winners is not an active candidate on chain (index is out of range /// in snapshot). - PhragmenBogusWinner, + OffchainElectionBogusWinner, /// Error while building the assignment type from the compact. This can happen if an index /// is invalid, or if the weights _overflow_. - PhragmenBogusCompact, + OffchainElectionBogusCompact, /// One of the submitted nominators is not an active nominator on chain. - PhragmenBogusNominator, + OffchainElectionBogusNominator, /// One of the submitted nominators has an edge to which they have not voted on chain. - PhragmenBogusNomination, + OffchainElectionBogusNomination, /// One of the submitted nominators has an edge which is submitted before the last non-zero /// slash of the target. - PhragmenSlashedNomination, + OffchainElectionSlashedNomination, /// A self vote must only be originated from a validator to ONLY themselves. - PhragmenBogusSelfVote, + OffchainElectionBogusSelfVote, /// The submitted result has unknown edges that are not among the presented winners. - PhragmenBogusEdge, + OffchainElectionBogusEdge, /// The claimed score does not match with the one computed from the data. - PhragmenBogusScore, + OffchainElectionBogusScore, /// The election size is invalid. 
- PhragmenBogusElectionSize, + OffchainElectionBogusElectionSize, /// The call is not allowed at the given time due to restrictions of election period. CallNotAllowed, /// Incorrect previous history depth input provided. @@ -2542,14 +2542,14 @@ impl Module { // check window open ensure!( Self::era_election_status().is_open(), - Error::::PhragmenEarlySubmission.with_weight(T::DbWeight::get().reads(1)), + Error::::OffchainElectionEarlySubmission.with_weight(T::DbWeight::get().reads(1)), ); // check current era. if let Some(current_era) = Self::current_era() { ensure!( current_era == era, - Error::::PhragmenEarlySubmission.with_weight(T::DbWeight::get().reads(2)), + Error::::OffchainElectionEarlySubmission.with_weight(T::DbWeight::get().reads(2)), ) } @@ -2557,7 +2557,7 @@ impl Module { if let Some(queued_score) = Self::queued_score() { ensure!( is_score_better(score, queued_score, T::MinSolutionScoreBump::get()), - Error::::PhragmenWeakSubmission.with_weight(T::DbWeight::get().reads(3)), + Error::::OffchainElectionWeakSubmission.with_weight(T::DbWeight::get().reads(3)), ) } @@ -2594,13 +2594,13 @@ impl Module { // size of the solution must be correct. ensure!( snapshot_validators_length == u32::from(election_size.validators), - Error::::PhragmenBogusElectionSize, + Error::::OffchainElectionBogusElectionSize, ); // check the winner length only here and when we know the length of the snapshot validators // length. let desired_winners = Self::validator_count().min(snapshot_validators_length); - ensure!(winners.len() as u32 == desired_winners, Error::::PhragmenBogusWinnerCount); + ensure!(winners.len() as u32 == desired_winners, Error::::OffchainElectionBogusWinnerCount); let snapshot_nominators_len = >::decode_len() .map(|l| l as u32) @@ -2609,7 +2609,7 @@ impl Module { // rest of the size of the solution must be correct. ensure!( snapshot_nominators_len == election_size.nominators, - Error::::PhragmenBogusElectionSize, + Error::::OffchainElectionBogusElectionSize, ); // decode snapshot validators. @@ -2621,7 +2621,7 @@ impl Module { // NOTE: at the moment, since staking is explicitly blocking any offence until election // is closed, we don't check here if the account id at `snapshot_validators[widx]` is // actually a validator. If this ever changes, this loop needs to also check this. - snapshot_validators.get(widx as usize).cloned().ok_or(Error::::PhragmenBogusWinner) + snapshot_validators.get(widx as usize).cloned().ok_or(Error::::OffchainElectionBogusWinner) }).collect::, Error>>()?; // decode the rest of the snapshot. @@ -2643,7 +2643,7 @@ impl Module { ).map_err(|e| { // log the error since it is not propagated into the runtime error. log!(warn, "💸 un-compacting solution failed due to {:?}", e); - Error::::PhragmenBogusCompact + Error::::OffchainElectionBogusCompact })?; // check all nominators actually including the claimed vote. Also check correct self votes. @@ -2659,7 +2659,7 @@ impl Module { // have bigger problems. log!(error, "💸 detected an error in the staking locking and snapshot."); // abort. - return Err(Error::::PhragmenBogusNominator.into()); + return Err(Error::::OffchainElectionBogusNominator.into()); } if !is_validator { @@ -2676,25 +2676,25 @@ impl Module { // each target in the provided distribution must be actually nominated by the // nominator after the last non-zero slash. 
if nomination.targets.iter().find(|&tt| tt == t).is_none() { - return Err(Error::::PhragmenBogusNomination.into()); + return Err(Error::::OffchainElectionBogusNomination.into()); } if ::SlashingSpans::get(&t).map_or( false, |spans| nomination.submitted_in < spans.last_nonzero_slash(), ) { - return Err(Error::::PhragmenSlashedNomination.into()); + return Err(Error::::OffchainElectionSlashedNomination.into()); } } } else { // a self vote - ensure!(distribution.len() == 1, Error::::PhragmenBogusSelfVote); - ensure!(distribution[0].0 == *who, Error::::PhragmenBogusSelfVote); + ensure!(distribution.len() == 1, Error::::OffchainElectionBogusSelfVote); + ensure!(distribution[0].0 == *who, Error::::OffchainElectionBogusSelfVote); // defensive only. A compact assignment of length one does NOT encode the weight and // it is always created to be 100%. ensure!( distribution[0].1 == OffchainAccuracy::one(), - Error::::PhragmenBogusSelfVote, + Error::::OffchainElectionBogusSelfVote, ); } } @@ -2713,11 +2713,11 @@ impl Module { &staked_assignments, ); // This technically checks that all targets in all nominators were among the winners. - ensure!(num_error == 0, Error::::PhragmenBogusEdge); + ensure!(num_error == 0, Error::::OffchainElectionBogusEdge); // Check if the score is the same as the claimed one. let submitted_score = evaluate_support(&supports); - ensure!(submitted_score == claimed_score, Error::::PhragmenBogusScore); + ensure!(submitted_score == claimed_score, Error::::OffchainElectionBogusScore); // At last, alles Ok. Exposures and store the result. let exposures = Self::collect_exposure(supports); diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 84201827704..dcdacfbaacb 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -437,7 +437,7 @@ impl ExtBuilder { self.max_offchain_iterations = iterations; self } - pub fn offchain_phragmen_ext(self) -> Self { + pub fn offchain_election_ext(self) -> Self { self.session_per_era(4) .session_length(5) .election_lookahead(3) @@ -787,7 +787,7 @@ pub(crate) fn add_slash(who: &AccountId) { // winners will be chosen by simply their unweighted total backing stake. Nominator stake is // distributed evenly. -pub(crate) fn horrible_phragmen_with_post_processing( +pub(crate) fn horrible_npos_solution( do_reduce: bool, ) -> (CompactAssignments, Vec, ElectionScore) { let mut backing_stake_of: BTreeMap = BTreeMap::new(); diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index e5015cbdc92..278e5323876 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -2810,7 +2810,7 @@ fn remove_multi_deferred() { }) } -mod offchain_phragmen { +mod offchain_election { use crate::*; use codec::Encode; use frame_support::{ @@ -2836,7 +2836,7 @@ mod offchain_phragmen { /// setup a new set of validators and nominator storage items independent of the parent mock /// file. This produces a edge graph that can be reduced. - pub fn build_offchain_phragmen_test_ext() { + pub fn build_offchain_election_test_ext() { for i in (10..=40).step_by(10) { // Note: we respect the convention of the mock (10, 11 pairs etc.) 
since these accounts // have corresponding keys in session which makes everything more ergonomic and @@ -3104,7 +3104,7 @@ mod offchain_phragmen { #[ignore] // This takes a few mins fn offchain_wont_work_if_snapshot_fails() { ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .build() .execute_with(|| { run_to_block(12); @@ -3128,7 +3128,7 @@ mod offchain_phragmen { #[test] fn staking_is_locked_when_election_window_open() { ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .election_lookahead(3) .build() .execute_with(|| { @@ -3150,7 +3150,7 @@ mod offchain_phragmen { // should check that we have a new validator set normally, event says that it comes from // offchain. ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .build() .execute_with(|| { run_to_block(12); @@ -3208,7 +3208,7 @@ mod offchain_phragmen { fn signed_result_can_be_submitted_later() { // same as `signed_result_can_be_submitted` but at a later block. ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .build() .execute_with(|| { run_to_block(14); @@ -3246,7 +3246,7 @@ mod offchain_phragmen { // should check that we have a new validator set normally, event says that it comes from // offchain. ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .build() .execute_with(|| { run_to_block(11); @@ -3267,7 +3267,7 @@ mod offchain_phragmen { current_era(), ElectionSize::default(), ), - Error::::PhragmenEarlySubmission, + Error::::OffchainElectionEarlySubmission, Some(::DbWeight::get().reads(1)), ); }) @@ -3277,12 +3277,12 @@ mod offchain_phragmen { fn weak_solution_is_rejected() { // A solution which is weaker than what we currently have on-chain is rejected. ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .has_stakers(false) .validator_count(4) .build() .execute_with(|| { - build_offchain_phragmen_test_ext(); + build_offchain_election_test_ext(); run_to_block(12); // a good solution @@ -3295,7 +3295,7 @@ mod offchain_phragmen { )); // a bad solution - let (compact, winners, score) = horrible_phragmen_with_post_processing(false); + let (compact, winners, score) = horrible_npos_solution(false); assert_err_with_weight!( submit_solution( Origin::signed(10), @@ -3303,7 +3303,7 @@ mod offchain_phragmen { compact.clone(), score, ), - Error::::PhragmenWeakSubmission, + Error::::OffchainElectionWeakSubmission, Some(::DbWeight::get().reads(3)) ); }) @@ -3313,16 +3313,16 @@ mod offchain_phragmen { fn better_solution_is_accepted() { // A solution which is better than what we currently have on-chain is accepted. ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .validator_count(4) .has_stakers(false) .build() .execute_with(|| { - build_offchain_phragmen_test_ext(); + build_offchain_election_test_ext(); run_to_block(12); // a meeeeh solution - let (compact, winners, score) = horrible_phragmen_with_post_processing(false); + let (compact, winners, score) = horrible_npos_solution(false); assert_ok!(submit_solution( Origin::signed(10), winners, @@ -3345,7 +3345,7 @@ mod offchain_phragmen { fn offchain_worker_runs_when_window_open() { // at the end of the first finalized block with ElectionStatus::open(_), it should execute. 
let mut ext = ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .validator_count(2) .build(); let state = offchainify(&mut ext, 0); @@ -3387,7 +3387,7 @@ mod offchain_phragmen { // Offchain worker equalises based on the number provided by randomness. See the difference // in the priority, which comes from the computed score. let mut ext = ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .validator_count(2) .max_offchain_iterations(2) .build(); @@ -3429,7 +3429,7 @@ mod offchain_phragmen { #[test] fn mediocre_submission_from_authority_is_early_rejected() { let mut ext = ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .validator_count(4) .build(); let state = offchainify(&mut ext, 0); @@ -3463,21 +3463,21 @@ mod offchain_phragmen { &inner, ), TransactionValidity::Err( - InvalidTransaction::Custom(>::PhragmenWeakSubmission.as_u8()).into(), + InvalidTransaction::Custom(>::OffchainElectionWeakSubmission.as_u8()).into(), ), ) }) } #[test] - fn invalid_phragmen_result_correct_number_of_winners() { + fn invalid_election_correct_number_of_winners() { ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .validator_count(4) .has_stakers(false) .build() .execute_with(|| { - build_offchain_phragmen_test_ext(); + build_offchain_election_test_ext(); run_to_block(12); ValidatorCount::put(3); @@ -3493,15 +3493,15 @@ mod offchain_phragmen { compact, score, ), - Error::::PhragmenBogusWinnerCount, + Error::::OffchainElectionBogusWinnerCount, ); }) } #[test] - fn invalid_phragmen_result_solution_size() { + fn invalid_election_solution_size() { ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .build() .execute_with(|| { run_to_block(12); @@ -3517,21 +3517,21 @@ mod offchain_phragmen { current_era(), ElectionSize::default(), ), - Error::::PhragmenBogusElectionSize, + Error::::OffchainElectionBogusElectionSize, ); }) } #[test] - fn invalid_phragmen_result_correct_number_of_winners_1() { + fn invalid_election_correct_number_of_winners_1() { // if we have too little validators, then the number of candidates is the bound. ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .validator_count(8) // we simply cannot elect 8 .has_stakers(false) .build() .execute_with(|| { - build_offchain_phragmen_test_ext(); + build_offchain_election_test_ext(); run_to_block(12); ValidatorCount::put(3); @@ -3547,21 +3547,21 @@ mod offchain_phragmen { compact, score, ), - Error::::PhragmenBogusWinnerCount, + Error::::OffchainElectionBogusWinnerCount, ); }) } #[test] - fn invalid_phragmen_result_correct_number_of_winners_2() { + fn invalid_election_correct_number_of_winners_2() { // if we have too little validators, then the number of candidates is the bound. 
ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .validator_count(8) // we simply cannot elect 8 .has_stakers(false) .build() .execute_with(|| { - build_offchain_phragmen_test_ext(); + build_offchain_election_test_ext(); run_to_block(12); let (compact, winners, score) = prepare_submission_with(true, 2, |_| {}); @@ -3579,15 +3579,15 @@ mod offchain_phragmen { } #[test] - fn invalid_phragmen_result_out_of_bound_nominator_index() { + fn invalid_election_out_of_bound_nominator_index() { // A nominator index which is simply invalid ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .validator_count(4) .has_stakers(false) .build() .execute_with(|| { - build_offchain_phragmen_test_ext(); + build_offchain_election_test_ext(); run_to_block(12); assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); @@ -3605,21 +3605,21 @@ mod offchain_phragmen { compact, score, ), - Error::::PhragmenBogusCompact, + Error::::OffchainElectionBogusCompact, ); }) } #[test] - fn invalid_phragmen_result_out_of_bound_validator_index() { + fn invalid_election_out_of_bound_validator_index() { // A validator index which is out of bound ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .validator_count(4) .has_stakers(false) .build() .execute_with(|| { - build_offchain_phragmen_test_ext(); + build_offchain_election_test_ext(); run_to_block(12); assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); @@ -3637,21 +3637,21 @@ mod offchain_phragmen { compact, score, ), - Error::::PhragmenBogusCompact, + Error::::OffchainElectionBogusCompact, ); }) } #[test] - fn invalid_phragmen_result_out_of_bound_winner_index() { + fn invalid_election_out_of_bound_winner_index() { // A winner index which is simply invalid ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .validator_count(4) .has_stakers(false) .build() .execute_with(|| { - build_offchain_phragmen_test_ext(); + build_offchain_election_test_ext(); run_to_block(12); assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); @@ -3668,22 +3668,22 @@ mod offchain_phragmen { compact, score, ), - Error::::PhragmenBogusWinner, + Error::::OffchainElectionBogusWinner, ); }) } #[test] - fn invalid_phragmen_result_non_winner_validator_index() { + fn invalid_election_non_winner_validator_index() { // An edge that points to a correct validator index who is NOT a winner. This is very - // similar to the test that raises `PhragmenBogusNomination`. + // similar to the test that raises `OffchainElectionBogusNomination`. ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .validator_count(2) // we select only 2. .has_stakers(false) .build() .execute_with(|| { - build_offchain_phragmen_test_ext(); + build_offchain_election_test_ext(); run_to_block(12); assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); @@ -3703,21 +3703,21 @@ mod offchain_phragmen { compact, score, ), - Error::::PhragmenBogusEdge, + Error::::OffchainElectionBogusEdge, ); }) } #[test] - fn invalid_phragmen_result_wrong_self_vote() { + fn invalid_election_wrong_self_vote() { // A self vote for someone else. 
ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .validator_count(4) .has_stakers(false) .build() .execute_with(|| { - build_offchain_phragmen_test_ext(); + build_offchain_election_test_ext(); run_to_block(12); let (compact, winners, score) = prepare_submission_with(true, 2, |a| { @@ -3738,21 +3738,21 @@ mod offchain_phragmen { compact, score, ), - Error::::PhragmenBogusSelfVote, + Error::::OffchainElectionBogusSelfVote, ); }) } #[test] - fn invalid_phragmen_result_wrong_self_vote_2() { + fn invalid_election_wrong_self_vote_2() { // A self validator voting for someone else next to self vote. ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .validator_count(4) .has_stakers(false) .build() .execute_with(|| { - build_offchain_phragmen_test_ext(); + build_offchain_election_test_ext(); run_to_block(12); let (compact, winners, score) = prepare_submission_with(true, 2, |a| { @@ -3773,21 +3773,21 @@ mod offchain_phragmen { compact, score, ), - Error::::PhragmenBogusSelfVote, + Error::::OffchainElectionBogusSelfVote, ); }) } #[test] - fn invalid_phragmen_result_over_stake() { + fn invalid_election_over_stake() { // Someone's edge ratios sums to more than 100%. ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .validator_count(4) .has_stakers(false) .build() .execute_with(|| { - build_offchain_phragmen_test_ext(); + build_offchain_election_test_ext(); run_to_block(12); // Note: we don't reduce here to be able to tweak votes3. votes3 will vanish if you @@ -3807,13 +3807,13 @@ mod offchain_phragmen { compact, score, ), - Error::::PhragmenBogusCompact, + Error::::OffchainElectionBogusCompact, ); }) } #[test] - fn invalid_phragmen_result_under_stake() { + fn invalid_election_under_stake() { // at the time of this writing, we cannot under stake someone. The compact assignment works // in a way that some of the stakes are presented by the submitter, and the last one is read // from chain by subtracting the rest from total. Hence, the sum is always correct. @@ -3821,16 +3821,16 @@ mod offchain_phragmen { } #[test] - fn invalid_phragmen_result_invalid_target_stealing() { + fn invalid_election_invalid_target_stealing() { // A valid voter who voted for someone who is a candidate, and is a correct winner, but is // actually NOT nominated by this nominator. ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .validator_count(4) .has_stakers(false) .build() .execute_with(|| { - build_offchain_phragmen_test_ext(); + build_offchain_election_test_ext(); run_to_block(12); let (compact, winners, score) = prepare_submission_with(false, 0, |a| { @@ -3848,7 +3848,7 @@ mod offchain_phragmen { compact, score, ), - Error::::PhragmenBogusNomination, + Error::::OffchainElectionBogusNomination, ); }) } @@ -3859,12 +3859,12 @@ mod offchain_phragmen { // nomination should be disabled for the upcoming election. A solution must respect this // rule. ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .validator_count(4) .has_stakers(false) .build() .execute_with(|| { - build_offchain_phragmen_test_ext(); + build_offchain_election_test_ext(); // finalize the round with fallback. This is needed since all nominator submission // are in era zero and we want this one to pass with no problems. 
@@ -3928,21 +3928,21 @@ mod offchain_phragmen { compact, score, ), - Error::::PhragmenSlashedNomination, + Error::::OffchainElectionSlashedNomination, ); }) } #[test] - fn invalid_phragmen_result_wrong_score() { + fn invalid_election_wrong_score() { // A valid voter who's total distributed stake is more than what they bond ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .validator_count(4) .has_stakers(false) .build() .execute_with(|| { - build_offchain_phragmen_test_ext(); + build_offchain_election_test_ext(); run_to_block(12); let (compact, winners, mut score) = prepare_submission_with(true, 2, |_| {}); @@ -3955,7 +3955,7 @@ mod offchain_phragmen { compact, score, ), - Error::::PhragmenBogusScore, + Error::::OffchainElectionBogusScore, ); }) } @@ -3963,7 +3963,7 @@ mod offchain_phragmen { #[test] fn offchain_storage_is_set() { let mut ext = ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .validator_count(4) .build(); let state = offchainify(&mut ext, 0); @@ -3987,7 +3987,7 @@ mod offchain_phragmen { #[test] fn offchain_storage_prevents_duplicate() { let mut ext = ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .validator_count(4) .build(); let _ = offchainify(&mut ext, 0); @@ -4032,7 +4032,7 @@ mod offchain_phragmen { #[should_panic] fn offence_is_blocked_when_window_open() { ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .validator_count(4) .has_stakers(false) .build() @@ -4575,12 +4575,12 @@ fn on_initialize_weight_is_correct() { }); ExtBuilder::default() - .offchain_phragmen_ext() + .offchain_election_ext() .validator_count(4) .has_stakers(false) .build() .execute_with(|| { - crate::tests::offchain_phragmen::build_offchain_phragmen_test_ext(); + crate::tests::offchain_election::build_offchain_election_test_ext(); run_to_block(11); Staking::on_finalize(System::block_number()); System::set_block_number((System::block_number() + 1).into()); diff --git a/primitives/npos-elections/compact/src/assignment.rs b/primitives/npos-elections/compact/src/assignment.rs index 218cd4f95a7..8b61076521d 100644 --- a/primitives/npos-elections/compact/src/assignment.rs +++ b/primitives/npos-elections/compact/src/assignment.rs @@ -78,7 +78,7 @@ fn into_impl(count: usize, per_thing: syn::Type) -> TokenStream2 { let name = field_name_for(1); quote!( for (voter_index, target_index) in self.#name { - assignments.push(_phragmen::Assignment { + assignments.push(_npos::Assignment { who: voter_at(voter_index).or_invalid_index()?, distribution: vec![ (target_at(target_index).or_invalid_index()?, #per_thing::one()) @@ -93,16 +93,16 @@ fn into_impl(count: usize, per_thing: syn::Type) -> TokenStream2 { quote!( for (voter_index, (t1_idx, p1), t2_idx) in self.#name { if p1 >= #per_thing::one() { - return Err(_phragmen::Error::CompactStakeOverflow); + return Err(_npos::Error::CompactStakeOverflow); } // defensive only. Since Percent doesn't have `Sub`. 
- let p2 = _phragmen::sp_arithmetic::traits::Saturating::saturating_sub( + let p2 = _npos::sp_arithmetic::traits::Saturating::saturating_sub( #per_thing::one(), p1, ); - assignments.push( _phragmen::Assignment { + assignments.push( _npos::Assignment { who: voter_at(voter_index).or_invalid_index()?, distribution: vec![ (target_at(t1_idx).or_invalid_index()?, p1), @@ -121,25 +121,25 @@ fn into_impl(count: usize, per_thing: syn::Type) -> TokenStream2 { let mut inners_parsed = inners .iter() .map(|(ref t_idx, p)| { - sum = _phragmen::sp_arithmetic::traits::Saturating::saturating_add(sum, *p); + sum = _npos::sp_arithmetic::traits::Saturating::saturating_add(sum, *p); let target = target_at(*t_idx).or_invalid_index()?; Ok((target, *p)) }) - .collect::, _phragmen::Error>>()?; + .collect::, _npos::Error>>()?; if sum >= #per_thing::one() { - return Err(_phragmen::Error::CompactStakeOverflow); + return Err(_npos::Error::CompactStakeOverflow); } // defensive only. Since Percent doesn't have `Sub`. - let p_last = _phragmen::sp_arithmetic::traits::Saturating::saturating_sub( + let p_last = _npos::sp_arithmetic::traits::Saturating::saturating_sub( #per_thing::one(), sum, ); inners_parsed.push((target_at(t_last_idx).or_invalid_index()?, p_last)); - assignments.push(_phragmen::Assignment { + assignments.push(_npos::Assignment { who: voter_at(voter_index).or_invalid_index()?, distribution: inners_parsed, }); @@ -165,38 +165,38 @@ pub(crate) fn assignment( let into_impl = into_impl(count, weight_type.clone()); quote!( - use _phragmen::__OrInvalidIndex; + use _npos::__OrInvalidIndex; impl #ident { pub fn from_assignment( - assignments: Vec<_phragmen::Assignment>, + assignments: Vec<_npos::Assignment>, index_of_voter: FV, index_of_target: FT, - ) -> Result + ) -> Result where - A: _phragmen::IdentifierT, + A: _npos::IdentifierT, for<'r> FV: Fn(&'r A) -> Option<#voter_type>, for<'r> FT: Fn(&'r A) -> Option<#target_type>, { let mut compact: #ident = Default::default(); - for _phragmen::Assignment { who, distribution } in assignments { + for _npos::Assignment { who, distribution } in assignments { match distribution.len() { 0 => continue, #from_impl _ => { - return Err(_phragmen::Error::CompactTargetOverflow); + return Err(_npos::Error::CompactTargetOverflow); } } }; Ok(compact) } - pub fn into_assignment( + pub fn into_assignment( self, voter_at: impl Fn(#voter_type) -> Option, target_at: impl Fn(#target_type) -> Option, - ) -> Result>, _phragmen::Error> { - let mut assignments: Vec<_phragmen::Assignment> = Default::default(); + ) -> Result>, _npos::Error> { + let mut assignments: Vec<_npos::Assignment> = Default::default(); #into_impl Ok(assignments) } diff --git a/primitives/npos-elections/compact/src/codec.rs b/primitives/npos-elections/compact/src/codec.rs index 0a475bdddcd..6c5a3bc2134 100644 --- a/primitives/npos-elections/compact/src/codec.rs +++ b/primitives/npos-elections/compact/src/codec.rs @@ -49,9 +49,9 @@ fn decode_impl( quote! 
{ let #name = < - Vec<(_phragmen::codec::Compact<#voter_type>, _phragmen::codec::Compact<#target_type>)> + Vec<(_npos::codec::Compact<#voter_type>, _npos::codec::Compact<#target_type>)> as - _phragmen::codec::Decode + _npos::codec::Decode >::decode(value)?; let #name = #name .into_iter() @@ -66,12 +66,12 @@ fn decode_impl( let #name = < Vec<( - _phragmen::codec::Compact<#voter_type>, - (_phragmen::codec::Compact<#target_type>, _phragmen::codec::Compact<#weight_type>), - _phragmen::codec::Compact<#target_type>, + _npos::codec::Compact<#voter_type>, + (_npos::codec::Compact<#target_type>, _npos::codec::Compact<#weight_type>), + _npos::codec::Compact<#target_type>, )> as - _phragmen::codec::Decode + _npos::codec::Decode >::decode(value)?; let #name = #name .into_iter() @@ -91,11 +91,11 @@ fn decode_impl( let #name = < Vec<( - _phragmen::codec::Compact<#voter_type>, - [(_phragmen::codec::Compact<#target_type>, _phragmen::codec::Compact<#weight_type>); #c-1], - _phragmen::codec::Compact<#target_type>, + _npos::codec::Compact<#voter_type>, + [(_npos::codec::Compact<#target_type>, _npos::codec::Compact<#weight_type>); #c-1], + _npos::codec::Compact<#target_type>, )> - as _phragmen::codec::Decode + as _npos::codec::Decode >::decode(value)?; let #name = #name .into_iter() @@ -115,8 +115,8 @@ fn decode_impl( }).collect::(); quote!( - impl _phragmen::codec::Decode for #ident { - fn decode(value: &mut I) -> Result { + impl _npos::codec::Decode for #ident { + fn decode(value: &mut I) -> Result { #decode_impl_single #decode_impl_double #decode_impl_rest @@ -139,8 +139,8 @@ fn encode_impl(ident: syn::Ident, count: usize) -> TokenStream2 { let #name = self.#name .iter() .map(|(v, t)| ( - _phragmen::codec::Compact(v.clone()), - _phragmen::codec::Compact(t.clone()), + _npos::codec::Compact(v.clone()), + _npos::codec::Compact(t.clone()), )) .collect::>(); #name.encode_to(&mut r); @@ -153,12 +153,12 @@ fn encode_impl(ident: syn::Ident, count: usize) -> TokenStream2 { let #name = self.#name .iter() .map(|(v, (t1, w), t2)| ( - _phragmen::codec::Compact(v.clone()), + _npos::codec::Compact(v.clone()), ( - _phragmen::codec::Compact(t1.clone()), - _phragmen::codec::Compact(w.clone()) + _npos::codec::Compact(t1.clone()), + _npos::codec::Compact(w.clone()) ), - _phragmen::codec::Compact(t2.clone()), + _npos::codec::Compact(t2.clone()), )) .collect::>(); #name.encode_to(&mut r); @@ -171,8 +171,8 @@ fn encode_impl(ident: syn::Ident, count: usize) -> TokenStream2 { // we use the knowledge of the length to avoid copy_from_slice. 
let inners_compact_array = (0..c-1).map(|i| quote!{( - _phragmen::codec::Compact(inner[#i].0.clone()), - _phragmen::codec::Compact(inner[#i].1.clone()), + _npos::codec::Compact(inner[#i].0.clone()), + _npos::codec::Compact(inner[#i].1.clone()), ),} ).collect::(); @@ -180,9 +180,9 @@ fn encode_impl(ident: syn::Ident, count: usize) -> TokenStream2 { let #name = self.#name .iter() .map(|(v, inner, t_last)| ( - _phragmen::codec::Compact(v.clone()), + _npos::codec::Compact(v.clone()), [ #inners_compact_array ], - _phragmen::codec::Compact(t_last.clone()), + _npos::codec::Compact(t_last.clone()), )) .collect::>(); #name.encode_to(&mut r); @@ -190,7 +190,7 @@ fn encode_impl(ident: syn::Ident, count: usize) -> TokenStream2 { }).collect::(); quote!( - impl _phragmen::codec::Encode for #ident { + impl _npos::codec::Encode for #ident { fn encode(&self) -> Vec { let mut r = vec![]; #encode_impl_single diff --git a/primitives/npos-elections/compact/src/lib.rs b/primitives/npos-elections/compact/src/lib.rs index 2852bdef250..03526d17981 100644 --- a/primitives/npos-elections/compact/src/lib.rs +++ b/primitives/npos-elections/compact/src/lib.rs @@ -181,7 +181,7 @@ fn struct_def( } } else { // automatically derived. - quote!(#[derive(Default, PartialEq, Eq, Clone, Debug, _phragmen::codec::Encode, _phragmen::codec::Decode)]) + quote!(#[derive(Default, PartialEq, Eq, Clone, Debug, _npos::codec::Encode, _npos::codec::Decode)]) }; Ok(quote! ( @@ -189,7 +189,7 @@ fn struct_def( #derives_and_maybe_compact_encoding #vis struct #ident { #singles #doubles #rest } - impl _phragmen::VotingLimit for #ident { + impl _npos::VotingLimit for #ident { const LIMIT: usize = #count; } @@ -220,13 +220,13 @@ fn struct_def( fn imports() -> Result { if std::env::var("CARGO_PKG_NAME").unwrap() == "sp-npos-elections" { Ok(quote! 
{ - use crate as _phragmen; + use crate as _npos; }) } else { match crate_name("sp-npos-elections") { Ok(sp_npos_elections) => { let ident = syn::Ident::new(&sp_npos_elections, Span::call_site()); - Ok(quote!( extern crate #ident as _phragmen; )) + Ok(quote!( extern crate #ident as _npos; )) }, Err(e) => Err(syn::Error::new(Span::call_site(), &e)), } diff --git a/primitives/npos-elections/src/mock.rs b/primitives/npos-elections/src/mock.rs index 8898c2d8b40..9b25f6f5f2e 100644 --- a/primitives/npos-elections/src/mock.rs +++ b/primitives/npos-elections/src/mock.rs @@ -51,18 +51,18 @@ pub(crate) struct _Edge { pub(crate) struct _Support { pub own: f64, pub total: f64, - pub others: Vec<_PhragmenAssignment>, + pub others: Vec<_Assignment>, } -pub(crate) type _PhragmenAssignment = (A, f64); +pub(crate) type _Assignment = (A, f64); pub(crate) type _SupportMap = BTreeMap>; pub(crate) type AccountId = u64; #[derive(Debug, Clone)] -pub(crate) struct _PhragmenResult { +pub(crate) struct _ElectionResult { pub winners: Vec<(A, ExtendedBalance)>, - pub assignments: Vec<(A, Vec<_PhragmenAssignment>)> + pub assignments: Vec<(A, Vec<_Assignment>)> } pub(crate) fn auto_generate_self_voters(candidates: &[A]) -> Vec<(A, Vec)> { @@ -75,12 +75,12 @@ pub(crate) fn elect_float( initial_candidates: Vec, initial_voters: Vec<(A, Vec)>, stake_of: FS, -) -> Option<_PhragmenResult> where +) -> Option<_ElectionResult> where A: Default + Ord + Copy, for<'r> FS: Fn(&'r A) -> VoteWeight, { let mut elected_candidates: Vec<(A, ExtendedBalance)>; - let mut assigned: Vec<(A, Vec<_PhragmenAssignment>)>; + let mut assigned: Vec<(A, Vec<_Assignment>)>; let mut c_idx_cache = BTreeMap::::new(); let num_voters = initial_candidates.len() + initial_voters.len(); let mut voters: Vec<_Voter> = Vec::with_capacity(num_voters); @@ -172,14 +172,14 @@ pub(crate) fn elect_float( } } - Some(_PhragmenResult { + Some(_ElectionResult { winners: elected_candidates, assignments: assigned, }) } pub(crate) fn equalize_float( - mut assignments: Vec<(A, Vec<_PhragmenAssignment>)>, + mut assignments: Vec<(A, Vec<_Assignment>)>, supports: &mut _SupportMap, tolerance: f64, iterations: usize, @@ -211,7 +211,7 @@ pub(crate) fn equalize_float( pub(crate) fn do_equalize_float( voter: &A, budget_balance: VoteWeight, - elected_edges: &mut Vec<_PhragmenAssignment>, + elected_edges: &mut Vec<_Assignment>, support_map: &mut _SupportMap, tolerance: f64 ) -> f64 where @@ -366,7 +366,7 @@ pub(crate) fn run_and_compare( } pub(crate) fn build_support_map_float( - result: &mut _PhragmenResult, + result: &mut _ElectionResult, stake_of: FS, ) -> _SupportMap where for<'r> FS: Fn(&'r AccountId) -> VoteWeight -- GitLab From 0e703a505db814cd5b2c6da12bcc508fdfaf8d47 Mon Sep 17 00:00:00 2001 From: Alan Sapede Date: Fri, 14 Aug 2020 04:15:59 -0400 Subject: [PATCH 095/132] Adds debug logs to EVM frame (#6887) --- frame/evm/src/backend.rs | 35 ++++++++++++++++++++++++++++++++++- frame/evm/src/lib.rs | 12 +++++++++++- 2 files changed, 45 insertions(+), 2 deletions(-) diff --git a/frame/evm/src/backend.rs b/frame/evm/src/backend.rs index 495034b2c17..df8ef9b85c4 100644 --- a/frame/evm/src/backend.rs +++ b/frame/evm/src/backend.rs @@ -6,7 +6,7 @@ use codec::{Encode, Decode}; use sp_core::{U256, H256, H160}; use sp_runtime::traits::UniqueSaturatedInto; use frame_support::traits::Get; -use frame_support::storage::{StorageMap, StorageDoubleMap}; +use frame_support::{debug, storage::{StorageMap, StorageDoubleMap}}; use sha3::{Keccak256, Digest}; use evm::backend::{Backend as BackendT, 
ApplyBackend, Apply}; use crate::{Trait, AccountStorages, AccountCodes, Module, Event}; @@ -147,6 +147,12 @@ impl<'vicinity, T: Trait> ApplyBackend for Backend<'vicinity, T> { }); if let Some(code) = code { + debug::debug!( + target: "evm", + "Inserting code ({} bytes) at {:?}", + code.len(), + address + ); AccountCodes::insert(address, code); } @@ -156,8 +162,21 @@ impl<'vicinity, T: Trait> ApplyBackend for Backend<'vicinity, T> { for (index, value) in storage { if value == H256::default() { + debug::debug!( + target: "evm", + "Removing storage for {:?} [index: {:?}]", + address, + index + ); AccountStorages::remove(address, index); } else { + debug::debug!( + target: "evm", + "Updating storage for {:?} [index: {:?}, value: {:?}]", + address, + index, + value + ); AccountStorages::insert(address, index, value); } } @@ -167,12 +186,26 @@ impl<'vicinity, T: Trait> ApplyBackend for Backend<'vicinity, T> { } }, Apply::Delete { address } => { + debug::debug!( + target: "evm", + "Deleting account at {:?}", + address + ); Module::::remove_account(&address) }, } } for log in logs { + debug::trace!( + target: "evm", + "Inserting log for {:?}, topics ({}) {:?}, data ({}): {:?}]", + log.address, + log.topics.len(), + log.topics, + log.data.len(), + log.data + ); Module::::deposit_event(Event::::Log(Log { address: log.address, topics: log.topics, diff --git a/frame/evm/src/lib.rs b/frame/evm/src/lib.rs index c837f448fca..211946bed0e 100644 --- a/frame/evm/src/lib.rs +++ b/frame/evm/src/lib.rs @@ -32,7 +32,7 @@ use sp_std::vec::Vec; use codec::{Encode, Decode}; #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; -use frame_support::{ensure, decl_module, decl_storage, decl_event, decl_error}; +use frame_support::{debug, ensure, decl_module, decl_storage, decl_event, decl_error}; use frame_support::weights::{Weight, Pays}; use frame_support::traits::{Currency, ExistenceRequirement, Get}; use frame_support::dispatch::DispatchResultWithPostInfo; @@ -617,6 +617,16 @@ impl Module { let used_gas = U256::from(executor.used_gas()); let actual_fee = executor.fee(gas_price); + debug::debug!( + target: "evm", + "Execution {:?} [source: {:?}, value: {}, gas_limit: {}, used_gas: {}, actual_fee: {}]", + retv, + source, + value, + gas_limit, + used_gas, + actual_fee + ); executor.deposit(source, total_fee.saturating_sub(actual_fee)); if apply_state { -- GitLab From 13b0650c45c4994f7fde3614fb8d09f5719eaeb1 Mon Sep 17 00:00:00 2001 From: Roman Borschel Date: Fri, 14 Aug 2020 10:41:47 +0200 Subject: [PATCH 096/132] Update to libp2p-0.23. (#6870) * Update to libp2p-0.23. Thereby incorporate bandwidth measurement along the lines previously done by libp2p itself. * Tweak dependencies for wasm32 compilation. For wasm32 we need to enable unstable features to make `task::Builder::local` available. * Simplify dependencies. * Simplify. Leave the calculation of bytes sent/received per second to the outer layers of the code, subject to their own individual update intervals. * Cleanup * Re-add lost dev dependency. * Avoid division by zero. * Remove redundant metric. * Enable sending of noise legacy handshakes. * Add comment about monotonic gauge. 
* CI --- Cargo.lock | 15 +++++-- bin/node/browser-testing/Cargo.toml | 2 +- bin/utils/subkey/Cargo.toml | 2 +- client/authority-discovery/Cargo.toml | 2 +- client/informant/src/display.rs | 32 ++++++++++++--- client/network-gossip/Cargo.toml | 2 +- client/network/Cargo.toml | 6 +-- client/network/src/lib.rs | 8 ++-- client/network/src/network_state.rs | 8 ++-- client/network/src/service.rs | 30 +++++++------- client/network/src/transport.rs | 23 +++++++---- client/network/test/Cargo.toml | 2 +- client/peerset/Cargo.toml | 2 +- client/rpc/src/system/tests.rs | 8 ++-- client/service/src/lib.rs | 4 +- client/service/src/metrics.rs | 54 +++++++++++++++++--------- client/telemetry/Cargo.toml | 2 +- primitives/consensus/common/Cargo.toml | 2 +- 18 files changed, 129 insertions(+), 75 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3ec07bb080a..197751bedb1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -300,6 +300,12 @@ dependencies = [ "webpki-roots 0.19.0", ] +[[package]] +name = "atomic" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64f46ca51dca4837f1520754d1c8c36636356b81553d928dc9c177025369a06e" + [[package]] name = "atty" version = "0.2.14" @@ -2797,10 +2803,11 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] name = "libp2p" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0306a49ee6a89468f96089906f36b0eef82c988dcfc8acf3e2dcd6ad1c859f85" +checksum = "b1ebb6c031584a5af181fe3a1e4b074af5d0b1a3b31663200f0251f4bcff6b5c" dependencies = [ + "atomic", "bytes 0.5.4", "futures 0.3.5", "lazy_static", @@ -2965,9 +2972,9 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f353f8966bbaaf7456535fffd3f366f153148773a0cf04b2ec3860955cb720e" +checksum = "1e594f2de0c23c2b7ad14802c991a2e68e95315c6a6c7715e53801506f20135d" dependencies = [ "bytes 0.5.4", "curve25519-dalek", diff --git a/bin/node/browser-testing/Cargo.toml b/bin/node/browser-testing/Cargo.toml index c3136ab74a4..1f5db1053d7 100644 --- a/bin/node/browser-testing/Cargo.toml +++ b/bin/node/browser-testing/Cargo.toml @@ -8,7 +8,7 @@ license = "Apache-2.0" [dependencies] futures-timer = "3.0.2" -libp2p = { version = "0.22.0", default-features = false } +libp2p = { version = "0.23.0", default-features = false } jsonrpc-core = "14.2.0" serde = "1.0.106" serde_json = "1.0.48" diff --git a/bin/utils/subkey/Cargo.toml b/bin/utils/subkey/Cargo.toml index 5338b360807..459df884d68 100644 --- a/bin/utils/subkey/Cargo.toml +++ b/bin/utils/subkey/Cargo.toml @@ -33,7 +33,7 @@ derive_more = { version = "0.99.2" } sc-rpc = { version = "2.0.0-rc5", path = "../../../client/rpc" } jsonrpc-core-client = { version = "14.2.0", features = ["http"] } hyper = "0.12.35" -libp2p = { version = "0.22.0", default-features = false } +libp2p = { version = "0.23.0", default-features = false } serde_json = "1.0" [features] diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index 2866fc141a2..3f3e5b55894 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -21,7 +21,7 @@ codec = { package = "parity-scale-codec", default-features = false, version = "1 derive_more = "0.99.2" futures = "0.3.4" futures-timer = "3.0.1" -libp2p = { version = "0.22.0", default-features = false, features = ["kad"] } +libp2p = { version = 
"0.23.0", default-features = false, features = ["kad"] } log = "0.4.8" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc5"} prost = "0.6.1" diff --git a/client/informant/src/display.rs b/client/informant/src/display.rs index 4491eb61d69..ce7fb4fc4b1 100644 --- a/client/informant/src/display.rs +++ b/client/informant/src/display.rs @@ -45,6 +45,10 @@ pub struct InformantDisplay { last_number: Option>, /// The last time `display` or `new` has been called. last_update: Instant, + /// The last seen total of bytes received. + last_total_bytes_inbound: u64, + /// The last seen total of bytes sent. + last_total_bytes_outbound: u64, /// The format to print output in. format: OutputFormat, } @@ -55,6 +59,8 @@ impl InformantDisplay { InformantDisplay { last_number: None, last_update: Instant::now(), + last_total_bytes_inbound: 0, + last_total_bytes_outbound: 0, format, } } @@ -66,9 +72,25 @@ impl InformantDisplay { let finalized_number = info.chain.finalized_number; let num_connected_peers = net_status.num_connected_peers; let speed = speed::(best_number, self.last_number, self.last_update); - self.last_update = Instant::now(); + let total_bytes_inbound = net_status.total_bytes_inbound; + let total_bytes_outbound = net_status.total_bytes_outbound; + + let now = Instant::now(); + let elapsed = (now - self.last_update).as_secs(); + self.last_update = now; self.last_number = Some(best_number); + let diff_bytes_inbound = total_bytes_inbound - self.last_total_bytes_inbound; + let diff_bytes_outbound = total_bytes_outbound - self.last_total_bytes_outbound; + let (avg_bytes_per_sec_inbound, avg_bytes_per_sec_outbound) = + if elapsed > 0 { + self.last_total_bytes_inbound = total_bytes_inbound; + self.last_total_bytes_outbound = total_bytes_outbound; + (diff_bytes_inbound / elapsed, diff_bytes_outbound / elapsed) + } else { + (diff_bytes_inbound, diff_bytes_outbound) + }; + let (level, status, target) = match (net_status.sync_state, net_status.best_seen_block) { (SyncState::Idle, _) => ("💤", "Idle".into(), "".into()), (SyncState::Downloading, None) => ("⚙️ ", format!("Preparing{}", speed), "".into()), @@ -92,8 +114,8 @@ impl InformantDisplay { best_hash, Colour::White.bold().paint(format!("{}", finalized_number)), info.chain.finalized_hash, - Colour::Green.paint(format!("⬇ {}", TransferRateFormat(net_status.average_download_per_sec))), - Colour::Red.paint(format!("⬆ {}", TransferRateFormat(net_status.average_upload_per_sec))), + Colour::Green.paint(format!("⬇ {}", TransferRateFormat(avg_bytes_per_sec_inbound))), + Colour::Red.paint(format!("⬆ {}", TransferRateFormat(avg_bytes_per_sec_outbound))), ) } else { info!( @@ -108,8 +130,8 @@ impl InformantDisplay { best_hash, finalized_number, info.chain.finalized_hash, - TransferRateFormat(net_status.average_download_per_sec), - TransferRateFormat(net_status.average_upload_per_sec), + TransferRateFormat(avg_bytes_per_sec_inbound), + TransferRateFormat(avg_bytes_per_sec_outbound), ) } } diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index 95c0840dc8c..90d606238b6 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.4" futures-timer = "3.0.1" -libp2p = { version = "0.22.0", default-features = false } +libp2p = { version = "0.23.0", default-features = false } log = "0.4.8" lru = "0.4.3" sc-network = { version = "0.8.0-rc5", path = "../network" } 
diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 11346fdd3ff..39598e2a887 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -26,7 +26,7 @@ erased-serde = "0.3.9" fnv = "1.0.6" fork-tree = { version = "2.0.0-rc5", path = "../../utils/fork-tree" } futures = "0.3.4" -futures-timer = "3.0.1" +futures-timer = "3.0.2" futures_codec = "0.4.0" hex = "0.4.0" ip_network = "0.3.4" @@ -61,7 +61,7 @@ wasm-timer = "0.2" zeroize = "1.0.0" [dependencies.libp2p] -version = "0.22.0" +version = "0.23.0" default-features = false features = ["identify", "kad", "mdns", "mplex", "noise", "ping", "tcp-async-std", "websocket", "yamux"] @@ -69,7 +69,7 @@ features = ["identify", "kad", "mdns", "mplex", "noise", "ping", "tcp-async-std" async-std = "1.6.2" assert_matches = "1.3" env_logger = "0.7.0" -libp2p = { version = "0.22.0", default-features = false, features = ["secio"] } +libp2p = { version = "0.23.0", default-features = false, features = ["secio"] } quickcheck = "0.9.0" rand = "0.7.2" sp-keyring = { version = "2.0.0-rc5", path = "../../primitives/keyring" } diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index fc5cab321d1..69635fce11f 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -309,8 +309,8 @@ pub struct NetworkStatus { pub num_connected_peers: usize, /// Total number of active peers. pub num_active_peers: usize, - /// Downloaded bytes per second averaged over the past few seconds. - pub average_download_per_sec: u64, - /// Uploaded bytes per second averaged over the past few seconds. - pub average_upload_per_sec: u64, + /// The total number of bytes received. + pub total_bytes_inbound: u64, + /// The total number of bytes sent. + pub total_bytes_outbound: u64, } diff --git a/client/network/src/network_state.rs b/client/network/src/network_state.rs index 970a63faed4..2e24e9c5a9f 100644 --- a/client/network/src/network_state.rs +++ b/client/network/src/network_state.rs @@ -43,10 +43,10 @@ pub struct NetworkState { pub connected_peers: HashMap, /// List of node that we know of but that we're not connected to. pub not_connected_peers: HashMap, - /// Downloaded bytes per second averaged over the past few seconds. - pub average_download_per_sec: u64, - /// Uploaded bytes per second averaged over the past few seconds. - pub average_upload_per_sec: u64, + /// The total number of bytes received. + pub total_bytes_inbound: u64, + /// The total number of bytes sent. + pub total_bytes_outbound: u64, /// State of the peerset manager. pub peerset: serde_json::Value, } diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 11fdfc0beec..cc3821a455e 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -402,14 +402,14 @@ impl NetworkWorker { }) } - /// Returns the downloaded bytes per second averaged over the past few seconds. - pub fn average_download_per_sec(&self) -> u64 { - self.service.bandwidth.average_download_per_sec() + /// Returns the total number of bytes received so far. + pub fn total_bytes_inbound(&self) -> u64 { + self.service.bandwidth.total_inbound() } - /// Returns the uploaded bytes per second averaged over the past few seconds. - pub fn average_upload_per_sec(&self) -> u64 { - self.service.bandwidth.average_upload_per_sec() + /// Returns the total number of bytes sent so far. + pub fn total_bytes_outbound(&self) -> u64 { + self.service.bandwidth.total_outbound() } /// Returns the number of peers we're connected to. 
@@ -541,8 +541,8 @@ impl NetworkWorker { peer_id: Swarm::::local_peer_id(&swarm).to_base58(), listened_addresses: Swarm::::listeners(&swarm).cloned().collect(), external_addresses: Swarm::::external_addresses(&swarm).cloned().collect(), - average_download_per_sec: self.service.bandwidth.average_download_per_sec(), - average_upload_per_sec: self.service.bandwidth.average_upload_per_sec(), + total_bytes_inbound: self.service.bandwidth.total_inbound(), + total_bytes_outbound: self.service.bandwidth.total_outbound(), connected_peers, not_connected_peers, peerset: swarm.user_protocol_mut().peerset_debug_info(), @@ -1148,7 +1148,9 @@ struct Metrics { kbuckets_num_nodes: GaugeVec, listeners_local_addresses: Gauge, listeners_errors_total: Counter, - network_per_sec_bytes: GaugeVec, + // Note: `network_bytes_total` is a monotonic gauge obtained by + // sampling an existing counter. + network_bytes_total: GaugeVec, notifications_sizes: HistogramVec, notifications_streams_closed_total: CounterVec, notifications_streams_opened_total: CounterVec, @@ -1265,10 +1267,10 @@ impl Metrics { "sub_libp2p_listeners_errors_total", "Total number of non-fatal errors reported by a listener" )?, registry)?, - network_per_sec_bytes: register(GaugeVec::new( + network_bytes_total: register(GaugeVec::new( Opts::new( - "sub_libp2p_network_per_sec_bytes", - "Average bandwidth usage per second" + "sub_libp2p_network_bytes_total", + "Total bandwidth usage" ), &["direction"] )?, registry)?, @@ -1719,8 +1721,8 @@ impl Future for NetworkWorker { this.is_major_syncing.store(is_major_syncing, Ordering::Relaxed); if let Some(metrics) = this.metrics.as_ref() { - metrics.network_per_sec_bytes.with_label_values(&["in"]).set(this.service.bandwidth.average_download_per_sec()); - metrics.network_per_sec_bytes.with_label_values(&["out"]).set(this.service.bandwidth.average_upload_per_sec()); + metrics.network_bytes_total.with_label_values(&["in"]).set(this.service.bandwidth.total_inbound()); + metrics.network_bytes_total.with_label_values(&["out"]).set(this.service.bandwidth.total_outbound()); metrics.is_major_syncing.set(is_major_syncing as u64); for (proto, num_entries) in this.network_service.num_kbuckets_entries() { let proto = maybe_utf8_bytes_to_string(proto.as_bytes()); diff --git a/client/network/src/transport.rs b/client/network/src/transport.rs index 0c9a809384e..e8836c4c269 100644 --- a/client/network/src/transport.rs +++ b/client/network/src/transport.rs @@ -27,7 +27,7 @@ use libp2p::{ }; #[cfg(not(target_os = "unknown"))] use libp2p::{tcp, dns, websocket}; -use std::{io, sync::Arc, time::Duration, usize}; +use std::{io, sync::Arc, time::Duration}; pub use self::bandwidth::BandwidthSinks; @@ -43,7 +43,11 @@ pub fn build_transport( memory_only: bool, wasm_external_transport: Option, use_yamux_flow_control: bool -) -> (Boxed<(PeerId, StreamMuxerBox), io::Error>, Arc) { +) -> (Boxed<(PeerId, StreamMuxerBox), io::Error>, Arc) { + // Legacy noise configurations for backward compatibility. + let mut noise_legacy = noise::LegacyConfig::default(); + noise_legacy.send_legacy_handshake = true; + // Build configuration objects for encryption mechanisms. 
let noise_config = { // For more information about these two panics, see in "On the Importance of @@ -58,10 +62,12 @@ pub fn build_transport( once and at initialization, we're taking the bet that the inconvenience of a very \ rare panic here is basically zero"); - core::upgrade::SelectUpgrade::new( - noise::NoiseConfig::xx(noise_keypair_spec), - noise::NoiseConfig::ix(noise_keypair_legacy) - ) + let mut xx_config = noise::NoiseConfig::xx(noise_keypair_spec); + xx_config.set_legacy_config(noise_legacy.clone()); + let mut ix_config = noise::NoiseConfig::ix(noise_keypair_legacy); + ix_config.set_legacy_config(noise_legacy); + + core::upgrade::SelectUpgrade::new(xx_config, ix_config) }; // Build configuration objects for multiplexing mechanisms. @@ -104,7 +110,7 @@ pub fn build_transport( OptionalTransport::none() }); - let (transport, sinks) = bandwidth::BandwidthLogging::new(transport, Duration::from_secs(5)); + let (transport, bandwidth) = bandwidth::BandwidthLogging::new(transport); // Encryption let transport = transport.and_then(move |stream, endpoint| { @@ -145,5 +151,6 @@ pub fn build_transport( .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) .boxed(); - (transport, sinks) + (transport, bandwidth) } + diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index eb7788f5416..7c157ce1c60 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -19,7 +19,7 @@ parking_lot = "0.10.0" futures = "0.3.4" futures-timer = "3.0.1" rand = "0.7.2" -libp2p = { version = "0.22.0", default-features = false } +libp2p = { version = "0.23.0", default-features = false } sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } sc-consensus = { version = "0.8.0-rc5", path = "../../../client/consensus/common" } sc-client-api = { version = "2.0.0-rc5", path = "../../api" } diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index 36654098067..8a9aa0adb18 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.4" -libp2p = { version = "0.22.0", default-features = false } +libp2p = { version = "0.23.0", default-features = false } sp-utils = { version = "2.0.0-rc5", path = "../../primitives/utils"} log = "0.4.8" serde_json = "1.0.41" diff --git a/client/rpc/src/system/tests.rs b/client/rpc/src/system/tests.rs index 7fe5cdc752a..dfe1fcc4151 100644 --- a/client/rpc/src/system/tests.rs +++ b/client/rpc/src/system/tests.rs @@ -87,8 +87,8 @@ fn api>>(sync: T) -> System { external_addresses: Default::default(), connected_peers: Default::default(), not_connected_peers: Default::default(), - average_download_per_sec: 0, - average_upload_per_sec: 0, + total_bytes_inbound: 0, + total_bytes_outbound: 0, peerset: serde_json::Value::Null, }).unwrap()); }, @@ -282,8 +282,8 @@ fn system_network_state() { external_addresses: Default::default(), connected_peers: Default::default(), not_connected_peers: Default::default(), - average_download_per_sec: 0, - average_upload_per_sec: 0, + total_bytes_inbound: 0, + total_bytes_outbound: 0, peerset: serde_json::Value::Null, } ); diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 415a5de4f93..e8d2ab8285c 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -322,8 +322,8 @@ async fn build_network_future< num_sync_peers: network.num_sync_peers(), num_connected_peers: network.num_connected_peers(), num_active_peers: network.num_active_peers(), - 
average_download_per_sec: network.average_download_per_sec(), - average_upload_per_sec: network.average_upload_per_sec(), + total_bytes_inbound: network.total_bytes_inbound(), + total_bytes_outbound: network.total_bytes_outbound(), }; let state = network.network_state(); ready_sink.send((status, state)); diff --git a/client/service/src/metrics.rs b/client/service/src/metrics.rs index 7336d3862a6..90a77667581 100644 --- a/client/service/src/metrics.rs +++ b/client/service/src/metrics.rs @@ -26,6 +26,7 @@ use sp_transaction_pool::PoolStatus; use sp_utils::metrics::register_globals; use sc_client_api::ClientInfo; use sc_network::config::Role; +use wasm_timer::Instant; struct PrometheusMetrics { // generic info @@ -34,7 +35,6 @@ struct PrometheusMetrics { ready_transactions_number: Gauge, // I/O - network_per_sec_bytes: GaugeVec, database_cache: Gauge, state_cache: Gauge, state_db: GaugeVec, @@ -85,10 +85,6 @@ impl PrometheusMetrics { )?, registry)?, // I/ O - network_per_sec_bytes: register(GaugeVec::new( - Opts::new("network_per_sec_bytes", "Networking bytes per second"), - &["direction"] - )?, registry)?, database_cache: register(Gauge::new( "database_cache_bytes", "RocksDB cache size in bytes", )?, registry)?, @@ -105,11 +101,19 @@ impl PrometheusMetrics { pub struct MetricsService { metrics: Option, + last_update: Instant, + last_total_bytes_inbound: u64, + last_total_bytes_outbound: u64, } impl MetricsService { pub fn new() -> Self { - MetricsService { metrics: None } + MetricsService { + metrics: None, + last_total_bytes_inbound: 0, + last_total_bytes_outbound: 0, + last_update: Instant::now(), + } } pub fn with_prometheus( @@ -129,7 +133,12 @@ impl MetricsService { &config.impl_version, role_bits, ) - .map(|p| MetricsService { metrics: Some(p) }) + .map(|p| MetricsService { + metrics: Some(p), + last_total_bytes_inbound: 0, + last_total_bytes_outbound: 0, + last_update: Instant::now(), + }) } pub fn tick( @@ -138,16 +147,31 @@ impl MetricsService { txpool_status: &PoolStatus, net_status: &NetworkStatus, ) { + let now = Instant::now(); + let elapsed = (now - self.last_update).as_secs(); + let best_number = info.chain.best_number.saturated_into::(); let best_hash = info.chain.best_hash; let num_peers = net_status.num_connected_peers; let finalized_number: u64 = info.chain.finalized_number.saturated_into::(); - let bandwidth_download = net_status.average_download_per_sec; - let bandwidth_upload = net_status.average_upload_per_sec; + let total_bytes_inbound = net_status.total_bytes_inbound; + let total_bytes_outbound = net_status.total_bytes_outbound; let best_seen_block = net_status .best_seen_block .map(|num: NumberFor| num.unique_saturated_into() as u64); + let diff_bytes_inbound = total_bytes_inbound - self.last_total_bytes_inbound; + let diff_bytes_outbound = total_bytes_outbound - self.last_total_bytes_outbound; + let (avg_bytes_per_sec_inbound, avg_bytes_per_sec_outbound) = + if elapsed > 0 { + self.last_total_bytes_inbound = total_bytes_inbound; + self.last_total_bytes_outbound = total_bytes_outbound; + (diff_bytes_inbound / elapsed, diff_bytes_outbound / elapsed) + } else { + (diff_bytes_inbound, diff_bytes_outbound) + }; + self.last_update = now; + telemetry!( SUBSTRATE_INFO; "system.interval"; @@ -157,8 +181,8 @@ impl MetricsService { "txcount" => txpool_status.ready, "finalized_height" => finalized_number, "finalized_hash" => ?info.chain.finalized_hash, - "bandwidth_download" => bandwidth_download, - "bandwidth_upload" => bandwidth_upload, + "bandwidth_download" => 
avg_bytes_per_sec_inbound, + "bandwidth_upload" => avg_bytes_per_sec_outbound, "used_state_cache_size" => info.usage.as_ref() .map(|usage| usage.memory.state_cache.as_bytes()) .unwrap_or(0), @@ -174,14 +198,6 @@ impl MetricsService { ); if let Some(metrics) = self.metrics.as_ref() { - metrics - .network_per_sec_bytes - .with_label_values(&["download"]) - .set(net_status.average_download_per_sec); - metrics - .network_per_sec_bytes - .with_label_values(&["upload"]) - .set(net_status.average_upload_per_sec); metrics .block_height .with_label_values(&["finalized"]) diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index 09ec9a0910d..4c9cc05b07f 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -18,7 +18,7 @@ parking_lot = "0.10.0" futures = "0.3.4" futures-timer = "3.0.1" wasm-timer = "0.2.0" -libp2p = { version = "0.22.0", default-features = false, features = ["dns", "tcp-async-std", "wasm-ext", "websocket"] } +libp2p = { version = "0.23.0", default-features = false, features = ["dns", "tcp-async-std", "wasm-ext", "websocket"] } log = "0.4.8" pin-project = "0.4.6" rand = "0.7.2" diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index 67d5603fdc5..d8c5073274d 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" -libp2p = { version = "0.22.0", default-features = false } +libp2p = { version = "0.23.0", default-features = false } log = "0.4.8" sp-core = { path= "../../core", version = "2.0.0-rc5"} sp-inherents = { version = "2.0.0-rc5", path = "../../inherents" } -- GitLab From eec7d7135a56b1198c084413d2dab0a9e92ad90b Mon Sep 17 00:00:00 2001 From: Max Inden Date: Fri, 14 Aug 2020 18:15:45 +0200 Subject: [PATCH 097/132] client/authority-discovery: Revert query interval change (#6897) Revert the accidental query interval change from every one minute back to every 10 minutes. --- client/authority-discovery/src/worker.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/authority-discovery/src/worker.rs b/client/authority-discovery/src/worker.rs index dd13b89278e..16f19489f94 100644 --- a/client/authority-discovery/src/worker.rs +++ b/client/authority-discovery/src/worker.rs @@ -182,8 +182,8 @@ where // External addresses of other authorities can change at any given point in time. The // interval on which to query for external addresses of other authorities is a trade off // between efficiency and performance. - let query_interval_duration = Duration::from_secs(60); let query_interval_start = Instant::now() + LIBP2P_KADEMLIA_BOOTSTRAP_TIME; + let query_interval_duration = Duration::from_secs(10 * 60); let query_interval = interval_at(query_interval_start, query_interval_duration); // Querying 500 [`AuthorityId`]s takes ~1m on the Kusama DHT (10th of August 2020) when -- GitLab From cd3b62b74aa416028bb654543b96856f5846e3a7 Mon Sep 17 00:00:00 2001 From: Seun Lanlege Date: Sat, 15 Aug 2020 10:08:31 +0100 Subject: [PATCH 098/132] RpcHandlers Refactorings (#6846) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * allow access to the underlying Pubsub instance from RpcHandlers * bump Cargo.lock * no more Arc * bump Cargo.lock * Debug,. 
* Arc * RpcHandler * RpcHandlers::io_handler * remove chain spec from cli * address pr comments * remove stray newline Co-authored-by: Ashley Co-authored-by: Tomasz Drwięga Co-authored-by: Ashley --- Cargo.lock | 1 + bin/node/cli/src/service.rs | 2 +- client/service/Cargo.toml | 3 ++- client/service/src/builder.rs | 4 ++-- client/service/src/lib.rs | 8 +++++++- client/service/src/task_manager/mod.rs | 9 ++++++--- utils/browser/src/lib.rs | 4 ++-- 7 files changed, 21 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 197751bedb1..ec5af8aca4e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6973,6 +6973,7 @@ dependencies = [ "futures 0.3.5", "futures-timer 3.0.2", "hash-db", + "jsonrpc-core", "jsonrpc-pubsub", "lazy_static", "log", diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index cd98c268096..f9ff096ad4b 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -341,7 +341,7 @@ pub fn new_full(config: Configuration) } pub fn new_light_base(config: Configuration) -> Result<( - TaskManager, Arc, Arc, + TaskManager, RpcHandlers, Arc, Arc::Hash>>, Arc>> ), ServiceError> { diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 3ad91dc3ea3..6462549403b 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -26,7 +26,8 @@ test-helpers = [] derive_more = "0.99.2" futures01 = { package = "futures", version = "0.1.29" } futures = { version = "0.3.4", features = ["compat"] } -jsonrpc-pubsub = "14.2.0" +jsonrpc-pubsub = "14.2" +jsonrpc-core = "14.2" rand = "0.7.3" parking_lot = "0.10.0" lazy_static = "1.4.0" diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index eedc4582299..8ad95511f77 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -442,7 +442,7 @@ pub fn build_offchain_workers( /// Spawn the tasks that are required to run a node. pub fn spawn_tasks( params: SpawnTasksParams, -) -> Result, Error> +) -> Result where TCl: ProvideRuntimeApi + HeaderMetadata + Chain + BlockBackend + BlockIdTo + ProofProvider + @@ -540,7 +540,7 @@ pub fn spawn_tasks( ); let rpc = start_rpc_servers(&config, gen_handler)?; // This is used internally, so don't restrict access to unsafe RPC - let rpc_handlers = Arc::new(RpcHandlers(gen_handler(sc_rpc::DenyUnsafe::No))); + let rpc_handlers = RpcHandlers(Arc::new(gen_handler(sc_rpc::DenyUnsafe::No).into())); // Telemetry let telemetry = config.telemetry_endpoints.clone().and_then(|endpoints| { diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index e8d2ab8285c..d19b9f5ea24 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -96,7 +96,8 @@ impl MallocSizeOfWasm for T {} impl MallocSizeOfWasm for T {} /// RPC handlers that can perform RPC queries. -pub struct RpcHandlers(sc_rpc_server::RpcHandler); +#[derive(Clone)] +pub struct RpcHandlers(Arc>); impl RpcHandlers { /// Starts an RPC query. @@ -115,6 +116,11 @@ impl RpcHandlers { .map(|res| res.expect("this should never fail")) .boxed() } + + /// Provides access to the underlying `MetaIoHandler` + pub fn io_handler(&self) -> Arc> { + self.0.clone() + } } /// Sinks to propagate network status updates. diff --git a/client/service/src/task_manager/mod.rs b/client/service/src/task_manager/mod.rs index 6925d27f4e5..88a44e1360d 100644 --- a/client/service/src/task_manager/mod.rs +++ b/client/service/src/task_manager/mod.rs @@ -336,9 +336,12 @@ impl TaskManager { } } - /// Set what the task manager should keep alive. 
- pub(super) fn keep_alive(&mut self, to_keep_alive: T) { - self.keep_alive = Box::new(to_keep_alive); + /// Set what the task manager should keep alive, can be called multiple times. + pub fn keep_alive(&mut self, to_keep_alive: T) { + // allows this fn to safely called multiple times. + use std::mem; + let old = mem::replace(&mut self.keep_alive, Box::new(())); + self.keep_alive = Box::new((to_keep_alive, old)); } /// Register another TaskManager to terminate and gracefully shutdown when the parent diff --git a/utils/browser/src/lib.rs b/utils/browser/src/lib.rs index 718a9b97511..ffd0a134be1 100644 --- a/utils/browser/src/lib.rs +++ b/utils/browser/src/lib.rs @@ -27,7 +27,7 @@ use wasm_bindgen::prelude::*; use futures::{ prelude::*, channel::{oneshot, mpsc}, compat::*, future::{ready, ok, select} }; -use std::{sync::Arc, pin::Pin}; +use std::pin::Pin; use sc_chain_spec::Extension; use libp2p_wasm_ext::{ExtTransport, ffi}; @@ -124,7 +124,7 @@ struct RpcMessage { } /// Create a Client object that connects to a service. -pub fn start_client(mut task_manager: TaskManager, rpc_handlers: Arc) -> Client { +pub fn start_client(mut task_manager: TaskManager, rpc_handlers: RpcHandlers) -> Client { // We dispatch a background task responsible for processing the service. // // The main action performed by the code below consists in polling the service with -- GitLab From 00791403e55a9d4fc6d6a1ab8aa2fe8bb383546f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sun, 16 Aug 2020 00:05:36 +0200 Subject: [PATCH 099/132] Don't take the origin in `can_set_code` (#6899) It makes no sense that `can_set_code` takes the origin for checking it. Everybody reusing this function is only interested in the other checks that are done by this function. The origin should be checked by every dispatchable individually. --- frame/system/src/lib.rs | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 0852bdd253f..30d5d019fc5 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -595,7 +595,8 @@ decl_module! { /// # #[weight = (T::MaximumBlockWeight::get(), DispatchClass::Operational)] pub fn set_code(origin, code: Vec) { - Self::can_set_code(origin, &code)?; + ensure_root(origin)?; + Self::can_set_code(&code)?; storage::unhashed::put_raw(well_known_keys::CODE, &code); Self::deposit_event(RawEvent::CodeUpdated); @@ -1234,14 +1235,10 @@ impl Module { /// Determine whether or not it is possible to update the code. /// - /// This function has no side effects and is idempotent, but is fairly - /// heavy. It is automatically called by `set_code`; in most cases, - /// a direct call to `set_code` is preferable. It is useful to call - /// `can_set_code` when it is desirable to perform the appropriate - /// runtime checks without actually changing the code yet. - pub fn can_set_code(origin: T::Origin, code: &[u8]) -> Result<(), sp_runtime::DispatchError> { - ensure_root(origin)?; - + /// Checks the given code if it is a valid runtime wasm blob by instantianting + /// it and extracting the runtime version of it. It checks that the runtime version + /// of the old and new runtime has the same spec name and that the spec version is increasing. 
+ pub fn can_set_code(code: &[u8]) -> Result<(), sp_runtime::DispatchError> { let current_version = T::Version::get(); let new_version = sp_io::misc::runtime_version(&code) .and_then(|v| RuntimeVersion::decode(&mut &v[..]).ok()) -- GitLab From fc743dac54c3fd0a7da3db13a00f90dd99614747 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Mon, 17 Aug 2020 11:19:16 +0200 Subject: [PATCH 100/132] Add a DirectedGossip struct (#6803) * Add a DirectedGossip struct * Move protocol from prototype::new to biuld * More traits impls * Explain ordering * Apply suggestions from code review Co-authored-by: Toralf Wittner * Address concerns * Add basic test * Concerns * More concerns * Remove QueueSenderPrototype * Rename * Apply suggestions from code review Co-authored-by: Max Inden Co-authored-by: Toralf Wittner Co-authored-by: parity-processbot <> Co-authored-by: Max Inden --- client/network/Cargo.toml | 2 +- client/network/src/gossip.rs | 245 +++++++++++++++++++++++++++++ client/network/src/gossip/tests.rs | 201 +++++++++++++++++++++++ client/network/src/lib.rs | 2 + client/network/src/service.rs | 4 + 5 files changed, 453 insertions(+), 1 deletion(-) create mode 100644 client/network/src/gossip.rs create mode 100644 client/network/src/gossip/tests.rs diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 39598e2a887..de4f484535e 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -16,6 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.6.1" [dependencies] +async-std = { version = "1.6.2", features = ["unstable"] } bitflags = "1.2.0" bs58 = "0.3.1" bytes = "0.5.0" @@ -66,7 +67,6 @@ default-features = false features = ["identify", "kad", "mdns", "mplex", "noise", "ping", "tcp-async-std", "websocket", "yamux"] [dev-dependencies] -async-std = "1.6.2" assert_matches = "1.3" env_logger = "0.7.0" libp2p = { version = "0.23.0", default-features = false, features = ["secio"] } diff --git a/client/network/src/gossip.rs b/client/network/src/gossip.rs new file mode 100644 index 00000000000..0650e7a2f81 --- /dev/null +++ b/client/network/src/gossip.rs @@ -0,0 +1,245 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Helper for sending rate-limited gossip messages. +//! +//! # Context +//! +//! The [`NetworkService`] struct provides a way to send notifications to a certain peer through +//! the [`NetworkService::notification_sender`] method. This method is quite low level and isn't +//! expected to be used directly. +//! +//! The [`QueuedSender`] struct provided by this module is built on top of +//! [`NetworkService::notification_sender`] and provides a cleaner way to send notifications. +//! +//! # Behaviour +//! +//! An instance of [`QueuedSender`] is specific to a certain combination of `PeerId` and +//! protocol name. 
It maintains a buffer of messages waiting to be sent out. The user of this API +//! is able to manipulate that queue, adding or removing obsolete messages. +//! +//! Creating a [`QueuedSender`] also returns a opaque `Future` whose responsibility it to +//! drain that queue and actually send the messages. If the substream with the given combination +//! of peer and protocol is closed, the queue is silently discarded. It is the role of the user +//! to track which peers we are connected to. +//! +//! In normal situations, messages sent through a [`QueuedSender`] will arrive in the same +//! order as they have been sent. +//! It is possible, in the situation of disconnects and reconnects, that messages arrive in a +//! different order. See also https://github.com/paritytech/substrate/issues/6756. +//! However, if multiple instances of [`QueuedSender`] exist for the same peer and protocol, or +//! if some other code uses the [`NetworkService`] to send notifications to this combination or +//! peer and protocol, then the notifications will be interleaved in an unpredictable way. +//! + +use crate::{ExHashT, NetworkService}; + +use async_std::sync::{Condvar, Mutex, MutexGuard}; +use futures::prelude::*; +use libp2p::PeerId; +use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; +use std::{ + collections::VecDeque, + fmt, + sync::{atomic, Arc}, + time::Duration, +}; + +#[cfg(test)] +mod tests; + +/// Notifications sender for a specific combination of network service, peer, and protocol. +pub struct QueuedSender { + /// Shared between the front and the back task. + shared: Arc>, +} + +impl QueuedSender { + /// Returns a new [`QueuedSender`] containing a queue of message for this specific + /// combination of peer and protocol. + /// + /// In addition to the [`QueuedSender`], also returns a `Future` whose role is to drive + /// the messages sending forward. + pub fn new( + service: Arc>, + peer_id: PeerId, + protocol: ConsensusEngineId, + queue_size_limit: usize, + messages_encode: F + ) -> (Self, impl Future + Send + 'static) + where + M: Send + 'static, + B: BlockT + 'static, + H: ExHashT, + F: Fn(M) -> Vec + Send + 'static, + { + let shared = Arc::new(Shared { + stop_task: atomic::AtomicBool::new(false), + condvar: Condvar::new(), + queue_size_limit, + messages_queue: Mutex::new(VecDeque::with_capacity(queue_size_limit)), + }); + + let task = spawn_task( + service, + peer_id, + protocol, + shared.clone(), + messages_encode + ); + + (QueuedSender { shared }, task) + } + + /// Locks the queue of messages towards this peer. + /// + /// The returned `Future` is expected to be ready quite quickly. + pub async fn lock_queue<'a>(&'a self) -> QueueGuard<'a, M> { + QueueGuard { + messages_queue: self.shared.messages_queue.lock().await, + condvar: &self.shared.condvar, + queue_size_limit: self.shared.queue_size_limit, + } + } + + /// Pushes a message to the queue, or discards it if the queue is full. + /// + /// The returned `Future` is expected to be ready quite quickly. + pub async fn queue_or_discard(&self, message: M) + where + M: Send + 'static + { + self.lock_queue().await.push_or_discard(message); + } +} + +impl fmt::Debug for QueuedSender { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("QueuedSender").finish() + } +} + +impl Drop for QueuedSender { + fn drop(&mut self) { + // The "clean" way to notify the `Condvar` here is normally to first lock the `Mutex`, + // then notify the `Condvar` while the `Mutex` is locked. 
Unfortunately, the `Mutex` + // being asynchronous, it can't reasonably be locked from within a destructor. + // See also the corresponding code in the background task. + self.shared.stop_task.store(true, atomic::Ordering::Release); + self.shared.condvar.notify_all(); + } +} + +/// Locked queue of messages to the given peer. +/// +/// As long as this struct exists, the background task is asleep and the owner of the [`QueueGuard`] +/// is in total control of the buffer. Messages can only ever be sent out after the [`QueueGuard`] +/// is dropped. +#[must_use] +pub struct QueueGuard<'a, M> { + messages_queue: MutexGuard<'a, VecDeque>, + condvar: &'a Condvar, + /// Same as [`Shared::queue_size_limit`]. + queue_size_limit: usize, +} + +impl<'a, M: Send + 'static> QueueGuard<'a, M> { + /// Pushes a message to the queue, or discards it if the queue is full. + /// + /// The message will only start being sent out after the [`QueueGuard`] is dropped. + pub fn push_or_discard(&mut self, message: M) { + if self.messages_queue.len() < self.queue_size_limit { + self.messages_queue.push_back(message); + } + } + + /// Calls `filter` for each message in the queue, and removes the ones for which `false` is + /// returned. + /// + /// > **Note**: The parameter of `filter` is a `&M` and not a `&mut M` (which would be + /// > better) because the underlying implementation relies on `VecDeque::retain`. + pub fn retain(&mut self, filter: impl FnMut(&M) -> bool) { + self.messages_queue.retain(filter); + } +} + +impl<'a, M> Drop for QueueGuard<'a, M> { + fn drop(&mut self) { + // We notify the `Condvar` in the destructor in order to be able to push multiple + // messages and wake up the background task only once afterwards. + self.condvar.notify_one(); + } +} + +#[derive(Debug)] +struct Shared { + /// Read by the background task after locking `locked`. If true, the task stops. + stop_task: atomic::AtomicBool, + /// Queue of messages waiting to be sent out. + messages_queue: Mutex>, + /// Must be notified every time the content of `locked` changes. + condvar: Condvar, + /// Maximum number of elements in `messages_queue`. + queue_size_limit: usize, +} + +async fn spawn_task Vec>( + service: Arc>, + peer_id: PeerId, + protocol: ConsensusEngineId, + shared: Arc>, + messages_encode: F, +) { + loop { + let next_message = 'next_msg: loop { + let mut queue = shared.messages_queue.lock().await; + + loop { + if shared.stop_task.load(atomic::Ordering::Acquire) { + return; + } + + if let Some(msg) = queue.pop_front() { + break 'next_msg msg; + } + + // It is possible that the destructor of `QueuedSender` sets `stop_task` to + // true and notifies the `Condvar` after the background task loads `stop_task` + // and before it calls `Condvar::wait`. + // See also the corresponding comment in `QueuedSender::drop`. + // For this reason, we use `wait_timeout`. In the worst case scenario, + // `stop_task` will always be checked again after the timeout is reached. + queue = shared.condvar.wait_timeout(queue, Duration::from_secs(10)).await.0; + } + }; + + // Starting from below, we try to send the message. If an error happens when sending, + // the only sane option we have is to silently discard the message. 
+ let sender = match service.notification_sender(peer_id.clone(), protocol) { + Ok(s) => s, + Err(_) => continue, + }; + + let ready = match sender.ready().await { + Ok(r) => r, + Err(_) => continue, + }; + + let _ = ready.send(messages_encode(next_message)); + } +} diff --git a/client/network/src/gossip/tests.rs b/client/network/src/gossip/tests.rs new file mode 100644 index 00000000000..9b16e057461 --- /dev/null +++ b/client/network/src/gossip/tests.rs @@ -0,0 +1,201 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::{config, gossip::QueuedSender, Event, NetworkService, NetworkWorker}; + +use futures::prelude::*; +use sp_runtime::traits::{Block as BlockT, Header as _}; +use std::{sync::Arc, time::Duration}; +use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; + +type TestNetworkService = NetworkService< + substrate_test_runtime_client::runtime::Block, + substrate_test_runtime_client::runtime::Hash, +>; + +/// Builds a full node to be used for testing. Returns the node service and its associated events +/// stream. +/// +/// > **Note**: We return the events stream in order to not possibly lose events between the +/// > construction of the service and the moment the events stream is grabbed. 
+fn build_test_full_node(config: config::NetworkConfiguration) + -> (Arc, impl Stream) +{ + let client = Arc::new( + TestClientBuilder::with_default_backend() + .build_with_longest_chain() + .0, + ); + + #[derive(Clone)] + struct PassThroughVerifier(bool); + impl sp_consensus::import_queue::Verifier for PassThroughVerifier { + fn verify( + &mut self, + origin: sp_consensus::BlockOrigin, + header: B::Header, + justification: Option, + body: Option>, + ) -> Result< + ( + sp_consensus::BlockImportParams, + Option)>>, + ), + String, + > { + let maybe_keys = header + .digest() + .log(|l| { + l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"aura")) + .or_else(|| { + l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"babe")) + }) + }) + .map(|blob| { + vec![( + sp_blockchain::well_known_cache_keys::AUTHORITIES, + blob.to_vec(), + )] + }); + + let mut import = sp_consensus::BlockImportParams::new(origin, header); + import.body = body; + import.finalized = self.0; + import.justification = justification; + import.fork_choice = Some(sp_consensus::ForkChoiceStrategy::LongestChain); + Ok((import, maybe_keys)) + } + } + + let import_queue = Box::new(sp_consensus::import_queue::BasicQueue::new( + PassThroughVerifier(false), + Box::new(client.clone()), + None, + None, + &sp_core::testing::TaskExecutor::new(), + None, + )); + + let worker = NetworkWorker::new(config::Params { + role: config::Role::Full, + executor: None, + network_config: config, + chain: client.clone(), + finality_proof_provider: None, + finality_proof_request_builder: None, + on_demand: None, + transaction_pool: Arc::new(crate::config::EmptyTransactionPool), + protocol_id: config::ProtocolId::from(&b"/test-protocol-name"[..]), + import_queue, + block_announce_validator: Box::new( + sp_consensus::block_validation::DefaultBlockAnnounceValidator, + ), + metrics_registry: None, + }) + .unwrap(); + + let service = worker.service().clone(); + let event_stream = service.event_stream("test"); + + async_std::task::spawn(async move { + futures::pin_mut!(worker); + let _ = worker.await; + }); + + (service, event_stream) +} + +const ENGINE_ID: sp_runtime::ConsensusEngineId = *b"foo\0"; + +/// Builds two nodes and their associated events stream. +/// The nodes are connected together and have the `ENGINE_ID` protocol registered. +fn build_nodes_one_proto() + -> (Arc, impl Stream, Arc, impl Stream) +{ + let listen_addr = config::build_multiaddr![Memory(rand::random::())]; + + let (node1, events_stream1) = build_test_full_node(config::NetworkConfiguration { + notifications_protocols: vec![(ENGINE_ID, From::from(&b"/foo"[..]))], + listen_addresses: vec![listen_addr.clone()], + transport: config::TransportConfig::MemoryOnly, + .. config::NetworkConfiguration::new_local() + }); + + let (node2, events_stream2) = build_test_full_node(config::NetworkConfiguration { + notifications_protocols: vec![(ENGINE_ID, From::from(&b"/foo"[..]))], + listen_addresses: vec![], + reserved_nodes: vec![config::MultiaddrWithPeerId { + multiaddr: listen_addr, + peer_id: node1.local_peer_id().clone(), + }], + transport: config::TransportConfig::MemoryOnly, + .. 
config::NetworkConfiguration::new_local() + }); + + (node1, events_stream1, node2, events_stream2) +} + +#[test] +fn basic_works() { + const NUM_NOTIFS: usize = 256; + + let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto(); + let node2_id = node2.local_peer_id().clone(); + + let receiver = async_std::task::spawn(async move { + let mut received_notifications = 0; + + while received_notifications < NUM_NOTIFS { + match events_stream2.next().await.unwrap() { + Event::NotificationStreamClosed { .. } => panic!(), + Event::NotificationsReceived { messages, .. } => { + for message in messages { + assert_eq!(message.0, ENGINE_ID); + assert_eq!(message.1, &b"message"[..]); + received_notifications += 1; + } + } + _ => {} + }; + + if rand::random::() < 2 { + async_std::task::sleep(Duration::from_millis(rand::random::() % 750)).await; + } + } + }); + + async_std::task::block_on(async move { + let (sender, bg_future) = + QueuedSender::new(node1, node2_id, ENGINE_ID, NUM_NOTIFS, |msg| msg); + async_std::task::spawn(bg_future); + + // Wait for the `NotificationStreamOpened`. + loop { + match events_stream1.next().await.unwrap() { + Event::NotificationStreamOpened { .. } => break, + _ => {} + }; + } + + for _ in 0..NUM_NOTIFS { + sender.queue_or_discard(b"message".to_vec()).await; + } + + receiver.await; + }); +} diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 69635fce11f..e01b2602635 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -15,6 +15,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . + #![warn(unused_extern_crates)] #![warn(missing_docs)] @@ -259,6 +260,7 @@ mod utils; pub mod config; pub mod error; +pub mod gossip; pub mod network_state; pub use service::{NetworkService, NetworkWorker}; diff --git a/client/network/src/service.rs b/client/network/src/service.rs index cc3821a455e..d42af16f1d2 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -701,6 +701,10 @@ impl NetworkService { /// Notifications should be dropped /// if buffer is full /// + /// + /// See also the [`gossip`](crate::gossip) module for a higher-level way to send + /// notifications. 
+ /// pub fn notification_sender( &self, target: PeerId, -- GitLab From 488b7c7286d4ca9cedd047a5e2c2381736a3f088 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 17 Aug 2020 13:41:09 +0200 Subject: [PATCH 101/132] babe, aura, pow: only call check_inherents if authoring version is compatible (#6862) * pow: check can_author_with before calling check_inherents * babe: check can_author_with before calling check_inherents * aura: check can_author_with before calling check_inherents * Fix node and node template compile * Add missing comma * Put each parameter on its own line * Add debug print * Fix line width too long * Fix pow line width issue --- bin/node-template/node/src/service.rs | 6 ++-- bin/node/cli/src/service.rs | 2 ++ client/consensus/aura/src/lib.rs | 36 ++++++++++++++++++------ client/consensus/babe/src/lib.rs | 25 ++++++++++++++--- client/consensus/babe/src/tests.rs | 5 ++-- client/consensus/pow/src/lib.rs | 38 +++++++++++++++++++++----- primitives/consensus/common/src/lib.rs | 9 ++++++ 7 files changed, 97 insertions(+), 24 deletions(-) diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 4eba4fdd093..5984d673223 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -54,7 +54,7 @@ pub fn new_partial(config: &Configuration) -> Result( + let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _, _>( sc_consensus_aura::slot_duration(&*client)?, aura_block_import, Some(Box::new(grandpa_block_import.clone())), @@ -63,6 +63,7 @@ pub fn new_partial(config: &Configuration) -> Result Result { let finality_proof_request_builder = finality_proof_import.create_finality_proof_request_builder(); - let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _>( + let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _, _>( sc_consensus_aura::slot_duration(&*client)?, grandpa_block_import, None, @@ -249,6 +250,7 @@ pub fn new_light(config: Configuration) -> Result { InherentDataProviders::new(), &task_manager.spawn_handle(), config.prometheus_registry(), + sp_consensus::NeverCanAuthor, )?; let finality_proof_provider = diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index f9ff096ad4b..d91696ab7d6 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -97,6 +97,7 @@ pub fn new_partial(config: &Configuration) -> Result Result<( inherent_data_providers.clone(), &task_manager.spawn_handle(), config.prometheus_registry(), + sp_consensus::NeverCanAuthor, )?; let finality_proof_provider = diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 4e6cb49f112..42040287113 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -492,14 +492,16 @@ fn check_header( } /// A verifier for Aura blocks. -pub struct AuraVerifier { +pub struct AuraVerifier { client: Arc, phantom: PhantomData
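The gating described in the last commit message (check `can_author_with` before calling `check_inherents`) can be sketched as follows, assuming the `sp_consensus::CanAuthorWith` trait of this release and the `sp-runtime` and `log` crates; the helper function and its name are illustrative, not code from the patch. Light clients pass `sp_consensus::NeverCanAuthor`, as the `new_light` hunks above show, so they never call into the runtime for inherent checks.

```rust
use sp_consensus::CanAuthorWith;
use sp_runtime::{generic::BlockId, traits::Block as BlockT};

/// Runs the inherent checks only when the node could author at `at`,
/// i.e. when the native runtime version is compatible with the on-chain
/// runtime; otherwise the checks are skipped instead of failing import.
fn check_inherents_if_compatible<B, CAW>(
    can_author_with: &CAW,
    at: &BlockId<B>,
    check_inherents: impl FnOnce() -> Result<(), String>,
) -> Result<(), String>
where
    B: BlockT,
    CAW: CanAuthorWith<B>,
{
    if let Err(why) = can_author_with.can_author_with(at) {
        // Calling into an incompatible runtime to validate inherents would
        // not be meaningful, so the block is accepted without these checks.
        log::debug!("Skipping `check_inherents`: {}", why);
        return Ok(());
    }
    check_inherents()
}
```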