From 288ead00e65959297c38a43b10bd862e22aa6353 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 16 Jun 2020 18:51:21 +0200 Subject: [PATCH 001/144] client/authority-discovery: Don't add own address to priority group (#6370) * client/authority-discovery: Don't add own address to priority group In the scenario of a validator publishing the address of its sentry node to the DHT, said sentry node should not add its own Multiaddr to the peerset "authority" priority group. Related to 70cfeff. * client/authority-discovery: Remove unused import PeerId * client/authority-discovery/tests: Add tcp protocol to multiaddresses --- client/authority-discovery/src/lib.rs | 23 ++++- client/authority-discovery/src/tests.rs | 107 +++++++++++++++++++++++- 2 files changed, 122 insertions(+), 8 deletions(-) diff --git a/client/authority-discovery/src/lib.rs b/client/authority-discovery/src/lib.rs index de98e6a4a3..e816600b7c 100644 --- a/client/authority-discovery/src/lib.rs +++ b/client/authority-discovery/src/lib.rs @@ -58,19 +58,26 @@ use futures::task::{Context, Poll}; use futures::{Future, FutureExt, ready, Stream, StreamExt}; use futures_timer::Delay; +use addr_cache::AddrCache; use codec::Decode; use error::{Error, Result}; +use libp2p::core::multiaddr; use log::{debug, error, log_enabled}; use prometheus_endpoint::{Counter, CounterVec, Gauge, Opts, U64, register}; use prost::Message; use sc_client_api::blockchain::HeaderBackend; -use sc_network::{Multiaddr, config::MultiaddrWithPeerId, DhtEvent, ExHashT, NetworkStateInfo}; +use sc_network::{ + config::MultiaddrWithPeerId, + DhtEvent, + ExHashT, + Multiaddr, + NetworkStateInfo, +}; use sp_authority_discovery::{AuthorityDiscoveryApi, AuthorityId, AuthoritySignature, AuthorityPair}; use sp_core::crypto::{key_types, Pair}; use sp_core::traits::BareCryptoStorePtr; use sp_runtime::{traits::Block as BlockT, generic::BlockId}; use sp_api::ProvideRuntimeApi; -use addr_cache::AddrCache; #[cfg(test)] mod tests; @@ -233,7 +240,7 @@ where .collect(), None => self.network.external_addresses() .into_iter() - .map(|a| a.with(libp2p::core::multiaddr::Protocol::P2p( + .map(|a| a.with(multiaddr::Protocol::P2p( self.network.local_peer_id().into(), ))) .map(|a| a.to_vec()) @@ -423,6 +430,8 @@ where .get(&remote_key) .ok_or(Error::MatchingHashedAuthorityIdWithAuthorityId)?; + let local_peer_id = multiaddr::Protocol::P2p(self.network.local_peer_id().into()); + let remote_addresses: Vec = values.into_iter() .map(|(_k, v)| { let schema::SignedAuthorityAddresses { signature, addresses } = @@ -447,7 +456,13 @@ where Ok(addresses) }) .collect::>>>()? - .into_iter().flatten().collect(); + .into_iter() + .flatten() + // Ignore own addresses. 
+ .filter(|addr| !addr.iter().any(|protocol| + protocol == local_peer_id + )) + .collect(); if !remote_addresses.is_empty() { self.addr_cache.insert(authority_id.clone(), remote_addresses); diff --git a/client/authority-discovery/src/tests.rs b/client/authority-discovery/src/tests.rs index 12edcf5fc9..09a65fd138 100644 --- a/client/authority-discovery/src/tests.rs +++ b/client/authority-discovery/src/tests.rs @@ -24,10 +24,10 @@ use futures::future::{poll_fn, FutureExt}; use futures::sink::SinkExt; use futures::task::LocalSpawn; use futures::poll; -use libp2p::{kad, PeerId}; +use libp2p::{kad, core::multiaddr, PeerId}; use sp_api::{ProvideRuntimeApi, ApiRef}; -use sp_core::testing::KeyStore; +use sp_core::{crypto::Public, testing::KeyStore}; use sp_runtime::traits::{Zero, Block as BlockT, NumberFor}; use substrate_test_runtime_client::runtime::Block; @@ -210,7 +210,7 @@ impl NetworkStateInfo for TestNetwork { } fn external_addresses(&self) -> Vec { - vec!["/ip6/2001:db8::".parse().unwrap()] + vec!["/ip6/2001:db8::/tcp/30333".parse().unwrap()] } } @@ -281,7 +281,7 @@ fn publish_discover_cycle() { let peer_id = network.local_peer_id(); let address = network.external_addresses().pop().unwrap(); - address.with(libp2p::core::multiaddr::Protocol::P2p( + address.with(multiaddr::Protocol::P2p( peer_id.into(), )) }; @@ -461,3 +461,102 @@ fn dont_stop_polling_when_error_is_returned() { } ); } + +/// In the scenario of a validator publishing the address of its sentry node to +/// the DHT, said sentry node should not add its own Multiaddr to the +/// peerset "authority" priority group. +#[test] +fn never_add_own_address_to_priority_group() { + let validator_key_store = KeyStore::new(); + let validator_public = validator_key_store + .write() + .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None) + .unwrap(); + + let sentry_network: Arc = Arc::new(Default::default()); + + let sentry_multiaddr = { + let peer_id = sentry_network.local_peer_id(); + let address: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333".parse().unwrap(); + + address.with(multiaddr::Protocol::P2p( + peer_id.into(), + )) + }; + + // Address of some other sentry node of `validator`. + let random_multiaddr = { + let peer_id = PeerId::random(); + let address: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:1/tcp/30333".parse().unwrap(); + + address.with(multiaddr::Protocol::P2p( + peer_id.into(), + )) + }; + + let dht_event = { + let addresses = vec![ + sentry_multiaddr.to_vec(), + random_multiaddr.to_vec(), + ]; + + let mut serialized_addresses = vec![]; + schema::AuthorityAddresses { addresses } + .encode(&mut serialized_addresses) + .map_err(Error::EncodingProto) + .unwrap(); + + let signature = validator_key_store.read() + .sign_with( + key_types::AUTHORITY_DISCOVERY, + &validator_public.clone().into(), + serialized_addresses.as_slice(), + ) + .map_err(|_| Error::Signing) + .unwrap(); + + let mut signed_addresses = vec![]; + schema::SignedAuthorityAddresses { + addresses: serialized_addresses.clone(), + signature, + } + .encode(&mut signed_addresses) + .map_err(Error::EncodingProto) + .unwrap(); + + let key = hash_authority_id(&validator_public.to_raw_vec()); + let value = signed_addresses; + (key, value) + }; + + let (_dht_event_tx, dht_event_rx) = channel(1); + let sentry_test_api = Arc::new(TestApi { + // Make sure the sentry node identifies its validator as an authority. 
+ authorities: vec![validator_public.into()], + }); + + let mut sentry_authority_discovery = AuthorityDiscovery::new( + sentry_test_api, + sentry_network.clone(), + vec![], + dht_event_rx.boxed(), + Role::Sentry, + None, + ); + + sentry_authority_discovery.handle_dht_value_found_event(vec![dht_event]).unwrap(); + + assert_eq!( + sentry_network.set_priority_group_call.lock().unwrap().len(), 1, + "Expect authority discovery to set the priority set.", + ); + + assert_eq!( + sentry_network.set_priority_group_call.lock().unwrap()[0], + ( + "authorities".to_string(), + HashSet::from_iter(vec![random_multiaddr.clone()].into_iter(),) + ), + "Expect authority discovery to only add `random_multiaddr`." + ); +} -- GitLab From 74efab4049c0637b405efba4dac5269b40e5da25 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 16 Jun 2020 22:49:01 +0200 Subject: [PATCH 002/144] .gitlab-ci.yml: Run promtool on Prometheus alerting rules (#6344) * .gitlab-ci.yml: Run promtool on Prometheus alerting rules Add a CI stage to test the Prometheus alerting rules within `.maintain/monitoring`. * .gitlab-ci.yml: Switch Prometheus stage to paritytech/tools image * .gitlab-ci.yml: Follow http redirects in Prometheus stage * .gitlab-ci.yml: Fix Prometheus stage promtool folder name --- .gitlab-ci.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index c4442dece9..e146d40ee6 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -359,6 +359,15 @@ cargo-check-macos: tags: - osx +test-prometheus-alerting-rules: + stage: test + image: paritytech/tools:latest + <<: *kubernetes-build + script: + - curl -L https://github.com/prometheus/prometheus/releases/download/v2.19.0/prometheus-2.19.0.linux-amd64.tar.gz --output prometheus.tar.gz + - tar -xzf prometheus.tar.gz + - ./prometheus-*/promtool check rules .maintain/monitoring/alerting-rules/alerting-rules.yaml + #### stage: build check-polkadot-companion-status: -- GitLab From 1823782590e51f53bb5bdc28ce198bfac3d91bbf Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 16 Jun 2020 22:49:44 +0200 Subject: [PATCH 003/144] Use /dns/ instead of /dns4/ (#6369) --- .maintain/sentry-node/docker-compose.yml | 14 +++++++------- bin/node/cli/res/flaming-fir.json | 2 +- client/network/src/discovery.rs | 3 ++- client/network/src/lib.rs | 2 +- 4 files changed, 11 insertions(+), 10 deletions(-) diff --git a/.maintain/sentry-node/docker-compose.yml b/.maintain/sentry-node/docker-compose.yml index 376538dde5..235f2c4963 100644 --- a/.maintain/sentry-node/docker-compose.yml +++ b/.maintain/sentry-node/docker-compose.yml @@ -47,9 +47,9 @@ services: - "--validator" - "--alice" - "--sentry-nodes" - - "/dns4/sentry-a/tcp/30333/p2p/QmV7EhW6J6KgmNdr558RH1mPx2xGGznW7At4BhXzntRFsi" + - "/dns/sentry-a/tcp/30333/p2p/QmV7EhW6J6KgmNdr558RH1mPx2xGGznW7At4BhXzntRFsi" - "--reserved-nodes" - - "/dns4/sentry-a/tcp/30333/p2p/QmV7EhW6J6KgmNdr558RH1mPx2xGGznW7At4BhXzntRFsi" + - "/dns/sentry-a/tcp/30333/p2p/QmV7EhW6J6KgmNdr558RH1mPx2xGGznW7At4BhXzntRFsi" # Not only bind to localhost. 
- "--unsafe-ws-external" - "--unsafe-rpc-external" @@ -83,11 +83,11 @@ services: - "--port" - "30333" - "--sentry" - - "/dns4/validator-a/tcp/30333/p2p/QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR" + - "/dns/validator-a/tcp/30333/p2p/QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR" - "--reserved-nodes" - - "/dns4/validator-a/tcp/30333/p2p/QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR" + - "/dns/validator-a/tcp/30333/p2p/QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR" - "--bootnodes" - - "/dns4/validator-b/tcp/30333/p2p/QmSVnNf9HwVMT1Y4cK1P6aoJcEZjmoTXpjKBmAABLMnZEk" + - "/dns/validator-b/tcp/30333/p2p/QmSVnNf9HwVMT1Y4cK1P6aoJcEZjmoTXpjKBmAABLMnZEk" - "--no-telemetry" - "--rpc-cors" - "all" @@ -118,9 +118,9 @@ services: - "--validator" - "--bob" - "--bootnodes" - - "/dns4/validator-a/tcp/30333/p2p/QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR" + - "/dns/validator-a/tcp/30333/p2p/QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR" - "--bootnodes" - - "/dns4/sentry-a/tcp/30333/p2p/QmV7EhW6J6KgmNdr558RH1mPx2xGGznW7At4BhXzntRFsi" + - "/dns/sentry-a/tcp/30333/p2p/QmV7EhW6J6KgmNdr558RH1mPx2xGGznW7At4BhXzntRFsi" - "--no-telemetry" - "--rpc-cors" - "all" diff --git a/bin/node/cli/res/flaming-fir.json b/bin/node/cli/res/flaming-fir.json index 7cc2c11c32..5f2eb26588 100644 --- a/bin/node/cli/res/flaming-fir.json +++ b/bin/node/cli/res/flaming-fir.json @@ -14,7 +14,7 @@ ], "telemetryEndpoints": [ [ - "/dns4/telemetry.polkadot.io/tcp/443/x-parity-wss/%2Fsubmit%2F", + "/dns/telemetry.polkadot.io/tcp/443/x-parity-wss/%2Fsubmit%2F", 0 ] ], diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index f5c293b251..73a5916947 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -324,7 +324,8 @@ impl DiscoveryBehaviour { let ip = match addr.iter().next() { Some(Protocol::Ip4(ip)) => IpNetwork::from(ip), Some(Protocol::Ip6(ip)) => IpNetwork::from(ip), - Some(Protocol::Dns4(_)) | Some(Protocol::Dns6(_)) => return true, + Some(Protocol::Dns(_)) | Some(Protocol::Dns4(_)) | Some(Protocol::Dns6(_)) + => return true, _ => return false }; ip.is_global() diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 73e0b525a1..6106616d99 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -77,7 +77,7 @@ //! - WebSockets for addresses of the form `/ip4/1.2.3.4/tcp/5/ws`. A TCP/IP connection is open and //! the WebSockets protocol is negotiated on top. Communications then happen inside WebSockets data //! frames. Encryption and multiplexing are additionally negotiated again inside this channel. -//! - DNS for addresses of the form `/dns4/example.com/tcp/5` or `/dns4/example.com/tcp/5/ws`. A +//! - DNS for addresses of the form `/dns/example.com/tcp/5` or `/dns/example.com/tcp/5/ws`. A //! node's address can contain a domain name. //! - (All of the above using IPv6 instead of IPv4.) //! 
-- GitLab From 02e77d20b7949f89c4fb12b1ba22fd22acd7aeb4 Mon Sep 17 00:00:00 2001 From: Xiliang Chen Date: Wed, 17 Jun 2020 08:51:03 +1200 Subject: [PATCH 004/144] add system_dryRun (#6300) * add system_dryRun * fix build error * delete unneeded code * return ApplyExtrinsicResult directly * line width * mark dry run unsafe * line width * fix test * add test * update comment --- Cargo.lock | 3 + bin/node/rpc/Cargo.toml | 1 + bin/node/rpc/src/lib.rs | 9 +- client/consensus/babe/rpc/src/lib.rs | 3 +- utils/frame/rpc/system/Cargo.toml | 2 + utils/frame/rpc/system/src/lib.rs | 182 ++++++++++++++++++++++++--- 6 files changed, 180 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7cd16427c5..4761c859f8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3477,6 +3477,7 @@ dependencies = [ "sc-keystore", "sc-rpc-api", "sp-api", + "sp-block-builder", "sp-blockchain", "sp-consensus", "sp-consensus-babe", @@ -8116,9 +8117,11 @@ dependencies = [ "log", "parity-scale-codec", "sc-client-api", + "sc-rpc-api", "sc-transaction-pool", "serde", "sp-api", + "sp-block-builder", "sp-blockchain", "sp-core", "sp-runtime", diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index 0c6c913b13..2bac8b6740 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -31,3 +31,4 @@ sp-blockchain = { version = "2.0.0-rc3", path = "../../../primitives/blockchain" sc-finality-grandpa = { version = "0.8.0-rc3", path = "../../../client/finality-grandpa" } sc-finality-grandpa-rpc = { version = "0.8.0-rc3", path = "../../../client/finality-grandpa/rpc" } sc-rpc-api = { version = "0.8.0-rc3", path = "../../../client/rpc-api" } +sp-block-builder = { version = "2.0.0-rc3", path = "../../../primitives/block-builder" } diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index 259a792441..9b6b599174 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -30,7 +30,7 @@ #![warn(missing_docs)] -use std::{sync::Arc, fmt}; +use std::sync::Arc; use node_primitives::{Block, BlockNumber, AccountId, Index, Balance, Hash}; use node_runtime::UncheckedExtrinsic; @@ -46,6 +46,7 @@ use sc_consensus_babe_rpc::BabeRpcHandler; use sc_finality_grandpa::{SharedVoterState, SharedAuthoritySet}; use sc_finality_grandpa_rpc::GrandpaRpcHandler; use sc_rpc_api::DenyUnsafe; +use sp_block_builder::BlockBuilder; /// Light client extra dependencies. 
pub struct LightDeps { @@ -104,7 +105,7 @@ pub fn create_full( C::Api: pallet_contracts_rpc::ContractsRuntimeApi, C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, C::Api: BabeApi, - ::Error: fmt::Debug, + C::Api: BlockBuilder, P: TransactionPool + 'static, M: jsonrpc_core::Metadata + Default, SC: SelectChain +'static, @@ -133,7 +134,7 @@ pub fn create_full( } = grandpa; io.extend_with( - SystemApi::to_delegate(FullSystem::new(client.clone(), pool)) + SystemApi::to_delegate(FullSystem::new(client.clone(), pool, deny_unsafe)) ); // Making synchronous calls in light client freezes the browser currently, // more context: https://github.com/paritytech/substrate/pull/3480 @@ -185,7 +186,7 @@ pub fn create_light( } = deps; let mut io = jsonrpc_core::IoHandler::default(); io.extend_with( - SystemApi::::to_delegate(LightSystem::new(client, remote_blockchain, fetcher, pool)) + SystemApi::::to_delegate(LightSystem::new(client, remote_blockchain, fetcher, pool)) ); io diff --git a/client/consensus/babe/rpc/src/lib.rs b/client/consensus/babe/rpc/src/lib.rs index 8e1282a8d7..35000770d4 100644 --- a/client/consensus/babe/rpc/src/lib.rs +++ b/client/consensus/babe/rpc/src/lib.rs @@ -38,7 +38,7 @@ use sp_api::{ProvideRuntimeApi, BlockId}; use sp_runtime::traits::{Block as BlockT, Header as _}; use sp_consensus::{SelectChain, Error as ConsensusError}; use sp_blockchain::{HeaderBackend, HeaderMetadata, Error as BlockChainError}; -use std::{collections::HashMap, fmt, sync::Arc}; +use std::{collections::HashMap, sync::Arc}; type FutureResult = Box + Send>; @@ -93,7 +93,6 @@ impl BabeApi for BabeRpcHandler B: BlockT, C: ProvideRuntimeApi + HeaderBackend + HeaderMetadata + 'static, C::Api: BabeRuntimeApi, - ::Error: fmt::Debug, SC: SelectChain + Clone + 'static, { fn epoch_authorship(&self) -> FutureResult> { diff --git a/utils/frame/rpc/system/Cargo.toml b/utils/frame/rpc/system/Cargo.toml index 11afd3b841..21cd00ebd4 100644 --- a/utils/frame/rpc/system/Cargo.toml +++ b/utils/frame/rpc/system/Cargo.toml @@ -26,6 +26,8 @@ frame-system-rpc-runtime-api = { version = "2.0.0-rc3", path = "../../../../fram sp-core = { version = "2.0.0-rc3", path = "../../../../primitives/core" } sp-blockchain = { version = "2.0.0-rc3", path = "../../../../primitives/blockchain" } sp-transaction-pool = { version = "2.0.0-rc3", path = "../../../../primitives/transaction-pool" } +sp-block-builder = { version = "2.0.0-rc3", path = "../../../../primitives/block-builder" } +sc-rpc-api = { version = "0.8.0-rc3", path = "../../../../client/rpc-api" } [dev-dependencies] substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../../../../test-utils/runtime/client" } diff --git a/utils/frame/rpc/system/src/lib.rs b/utils/frame/rpc/system/src/lib.rs index a3ce1466f6..6927f05b4f 100644 --- a/utils/frame/rpc/system/src/lib.rs +++ b/utils/frame/rpc/system/src/lib.rs @@ -22,8 +22,8 @@ use std::sync::Arc; use codec::{self, Codec, Decode, Encode}; use sc_client_api::light::{future_header, RemoteBlockchain, Fetcher, RemoteCallRequest}; use jsonrpc_core::{ - Error, ErrorCode, - futures::future::{result, Future}, + Error as RpcError, ErrorCode, + futures::future::{self as rpc_future,result, Future}, }; use jsonrpc_derive::rpc; use futures::future::{ready, TryFutureExt}; @@ -35,18 +35,20 @@ use sp_runtime::{ generic::BlockId, traits, }; -use sp_core::hexdisplay::HexDisplay; +use sp_core::{hexdisplay::HexDisplay, Bytes}; use sp_transaction_pool::{TransactionPool, InPoolTransaction}; +use sp_block_builder::BlockBuilder; +use 
sc_rpc_api::DenyUnsafe; pub use frame_system_rpc_runtime_api::AccountNonceApi; pub use self::gen_client::Client as SystemClient; /// Future that resolves to account nonce. -pub type FutureResult = Box + Send>; +pub type FutureResult = Box + Send>; /// System RPC methods. #[rpc] -pub trait SystemApi { +pub trait SystemApi { /// Returns the next valid index (aka nonce) for given account. /// /// This method takes into consideration all pending transactions @@ -54,34 +56,57 @@ pub trait SystemApi { /// it fallbacks to query the index from the runtime (aka. state nonce). #[rpc(name = "system_accountNextIndex", alias("account_nextIndex"))] fn nonce(&self, account: AccountId) -> FutureResult; + + /// Dry run an extrinsic at a given block. Return SCALE encoded ApplyExtrinsicResult. + #[rpc(name = "system_dryRun", alias("system_dryRunAt"))] + fn dry_run(&self, extrinsic: Bytes, at: Option) -> FutureResult; +} + +/// Error type of this RPC api. +pub enum Error { + /// The transaction was not decodable. + DecodeError, + /// The call to runtime failed. + RuntimeError, } -const RUNTIME_ERROR: i64 = 1; +impl From for i64 { + fn from(e: Error) -> i64 { + match e { + Error::RuntimeError => 1, + Error::DecodeError => 2, + } + } +} /// An implementation of System-specific RPC methods on full client. pub struct FullSystem { client: Arc, pool: Arc
<P>
, + deny_unsafe: DenyUnsafe, _marker: std::marker::PhantomData, } impl FullSystem { /// Create new `FullSystem` given client and transaction pool. - pub fn new(client: Arc, pool: Arc
<P>
) -> Self { + pub fn new(client: Arc<C>, pool: Arc
<P>
, deny_unsafe: DenyUnsafe,) -> Self { FullSystem { client, pool, + deny_unsafe, _marker: Default::default(), } } } -impl SystemApi for FullSystem +impl SystemApi<::Hash, AccountId, Index> + for FullSystem where C: sp_api::ProvideRuntimeApi, C: HeaderBackend, C: Send + Sync + 'static, C::Api: AccountNonceApi, + C::Api: BlockBuilder, P: TransactionPool + 'static, Block: traits::Block, AccountId: Clone + std::fmt::Display + Codec, @@ -93,8 +118,8 @@ where let best = self.client.info().best_hash; let at = BlockId::hash(best); - let nonce = api.account_nonce(&at, account.clone()).map_err(|e| Error { - code: ErrorCode::ServerError(RUNTIME_ERROR), + let nonce = api.account_nonce(&at, account.clone()).map_err(|e| RpcError { + code: ErrorCode::ServerError(Error::RuntimeError.into()), message: "Unable to query nonce.".into(), data: Some(format!("{:?}", e).into()), })?; @@ -104,6 +129,38 @@ where Box::new(result(get_nonce())) } + + fn dry_run(&self, extrinsic: Bytes, at: Option<::Hash>) -> FutureResult { + if let Err(err) = self.deny_unsafe.check_if_safe() { + return Box::new(rpc_future::err(err.into())); + } + + let dry_run = || { + let api = self.client.runtime_api(); + let at = BlockId::::hash(at.unwrap_or_else(|| + // If the block hash is not supplied assume the best block. + self.client.info().best_hash + )); + + let uxt: ::Extrinsic = Decode::decode(&mut &*extrinsic).map_err(|e| RpcError { + code: ErrorCode::ServerError(Error::DecodeError.into()), + message: "Unable to dry run extrinsic.".into(), + data: Some(format!("{:?}", e).into()), + })?; + + let result = api.apply_extrinsic(&at, uxt) + .map_err(|e| RpcError { + code: ErrorCode::ServerError(Error::RuntimeError.into()), + message: "Unable to dry run extrinsic.".into(), + data: Some(format!("{:?}", e).into()), + })?; + + Ok(Encode::encode(&result).into()) + }; + + + Box::new(result(dry_run())) + } } /// An implementation of System-specific RPC methods on light client. 
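Since `dry_run` returns the `ApplyExtrinsicResult` as SCALE-encoded `Bytes`, a caller has to decode the payload to learn whether the extrinsic would apply. A minimal, illustrative sketch of that decoding step follows, assuming the same `codec` and `sp_runtime` crates used above; the helper name is made up, and the tests at the end of this file do the equivalent inline.

use codec::Decode;
use sp_runtime::ApplyExtrinsicResult;

// Interpret the raw bytes returned by the `system_dryRun` RPC.
fn interpret_dry_run(raw: &[u8]) -> Result<(), String> {
    let outcome = ApplyExtrinsicResult::decode(&mut &*raw)
        .map_err(|e| format!("undecodable dry-run response: {:?}", e))?;
    match outcome {
        // Valid transaction and the call itself would dispatch successfully.
        Ok(Ok(())) => Ok(()),
        // Valid transaction, but the dispatched call would fail.
        Ok(Err(dispatch_error)) => Err(format!("dispatch error: {:?}", dispatch_error)),
        // The transaction would be rejected outright, e.g. a stale nonce.
        Err(validity_error) => Err(format!("invalid transaction: {:?}", validity_error)),
    }
}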
@@ -131,7 +188,8 @@ impl LightSystem { } } -impl SystemApi for LightSystem +impl SystemApi<::Hash, AccountId, Index> + for LightSystem where P: TransactionPool + 'static, C: HeaderBackend, @@ -165,8 +223,8 @@ where ).compat(); let future_nonce = future_nonce.and_then(|nonce| Decode::decode(&mut &nonce[..]) .map_err(|e| ClientError::CallResultDecode("Cannot decode account nonce", e))); - let future_nonce = future_nonce.map_err(|e| Error { - code: ErrorCode::ServerError(RUNTIME_ERROR), + let future_nonce = future_nonce.map_err(|e| RpcError { + code: ErrorCode::ServerError(Error::RuntimeError.into()), message: "Unable to query nonce.".into(), data: Some(format!("{:?}", e).into()), }); @@ -176,6 +234,14 @@ where Box::new(future_nonce) } + + fn dry_run(&self, _extrinsic: Bytes, _at: Option<::Hash>) -> FutureResult { + Box::new(result(Err(RpcError { + code: ErrorCode::MethodNotFound, + message: "Unable to dry run extrinsic.".into(), + data: None, + }))) + } } /// Adjust account nonce from state, so that tx with the nonce will be @@ -224,6 +290,7 @@ mod tests { use futures::executor::block_on; use substrate_test_runtime_client::{runtime::Transfer, AccountKeyring}; use sc_transaction_pool::{BasicPool, FullChainApi}; + use sp_runtime::{ApplyExtrinsicResult, transaction_validity::{TransactionValidityError, InvalidTransaction}}; #[test] fn should_return_next_nonce_for_some_account() { @@ -255,7 +322,7 @@ mod tests { let ext1 = new_transaction(1); block_on(pool.submit_one(&BlockId::number(0), source, ext1)).unwrap(); - let accounts = FullSystem::new(client, pool); + let accounts = FullSystem::new(client, pool, DenyUnsafe::Yes); // when let nonce = accounts.nonce(AccountKeyring::Alice.into()); @@ -263,4 +330,91 @@ mod tests { // then assert_eq!(nonce.wait().unwrap(), 2); } + + #[test] + fn dry_run_should_deny_unsafe() { + let _ = env_logger::try_init(); + + // given + let client = Arc::new(substrate_test_runtime_client::new()); + let pool = Arc::new( + BasicPool::new( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + ).0 + ); + + let accounts = FullSystem::new(client, pool, DenyUnsafe::Yes); + + // when + let res = accounts.dry_run(vec![].into(), None); + + // then + assert_eq!(res.wait(), Err(RpcError::method_not_found())); + } + + #[test] + fn dry_run_should_work() { + let _ = env_logger::try_init(); + + // given + let client = Arc::new(substrate_test_runtime_client::new()); + let pool = Arc::new( + BasicPool::new( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + ).0 + ); + + let accounts = FullSystem::new(client, pool, DenyUnsafe::No); + + let tx = Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Bob.into(), + amount: 5, + nonce: 0, + }.into_signed_tx(); + + // when + let res = accounts.dry_run(tx.encode().into(), None); + + // then + let bytes = res.wait().unwrap().0; + let apply_res: ApplyExtrinsicResult = Decode::decode(&mut bytes.as_slice()).unwrap(); + assert_eq!(apply_res, Ok(Ok(()))); + } + + #[test] + fn dry_run_should_indicate_error() { + let _ = env_logger::try_init(); + + // given + let client = Arc::new(substrate_test_runtime_client::new()); + let pool = Arc::new( + BasicPool::new( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + ).0 + ); + + let accounts = FullSystem::new(client, pool, DenyUnsafe::No); + + let tx = Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Bob.into(), + amount: 5, + nonce: 100, + }.into_signed_tx(); + + // when + let res = 
accounts.dry_run(tx.encode().into(), None); + + // then + let bytes = res.wait().unwrap().0; + let apply_res: ApplyExtrinsicResult = Decode::decode(&mut bytes.as_slice()).unwrap(); + assert_eq!(apply_res, Err(TransactionValidityError::Invalid(InvalidTransaction::Stale))); + } } -- GitLab From db8916a48e2bfc9ae9c18c3fa617f7302432c685 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 16 Jun 2020 23:51:45 +0300 Subject: [PATCH 005/144] fix BlockAttributes encoding (#6281) --- client/network/src/block_requests.rs | 49 +++++++++++++++------- client/network/src/light_client_handler.rs | 35 ++++++++++++---- client/network/src/protocol/message.rs | 14 +++++++ 3 files changed, 74 insertions(+), 24 deletions(-) diff --git a/client/network/src/block_requests.rs b/client/network/src/block_requests.rs index ae5a3a0b4e..6d698a7300 100644 --- a/client/network/src/block_requests.rs +++ b/client/network/src/block_requests.rs @@ -277,21 +277,13 @@ where return SendRequestOutcome::NotConnected; }; - let protobuf_rq = schema::v1::BlockRequest { - fields: u32::from_be_bytes([req.fields.bits(), 0, 0, 0]), - from_block: match req.from { - message::FromBlock::Hash(h) => - Some(schema::v1::block_request::FromBlock::Hash(h.encode())), - message::FromBlock::Number(n) => - Some(schema::v1::block_request::FromBlock::Number(n.encode())), - }, - to_block: req.to.map(|h| h.encode()).unwrap_or_default(), - direction: match req.direction { - message::Direction::Ascending => schema::v1::Direction::Ascending as i32, - message::Direction::Descending => schema::v1::Direction::Descending as i32, - }, - max_blocks: req.max.unwrap_or(0), - }; + let protobuf_rq = build_protobuf_block_request( + req.fields, + req.from.clone(), + req.to.clone(), + req.direction, + req.max, + ); let mut buf = Vec::with_capacity(protobuf_rq.encoded_len()); if let Err(err) = protobuf_rq.encode(&mut buf) { @@ -386,7 +378,7 @@ where return Err(io::Error::new(io::ErrorKind::Other, msg).into()) }; - let attributes = BlockAttributes::decode(&mut request.fields.to_be_bytes().as_ref())?; + let attributes = BlockAttributes::from_be_u32(request.fields)?; let get_header = attributes.contains(BlockAttributes::HEADER); let get_body = attributes.contains(BlockAttributes::BODY); let get_justification = attributes.contains(BlockAttributes::JUSTIFICATION); @@ -826,3 +818,28 @@ where }.boxed() } } + +/// Build protobuf block request message. 
+pub(crate) fn build_protobuf_block_request( + attributes: BlockAttributes, + from_block: message::FromBlock, + to_block: Option, + direction: message::Direction, + max_blocks: Option, +) -> schema::v1::BlockRequest { + schema::v1::BlockRequest { + fields: attributes.to_be_u32(), + from_block: match from_block { + message::FromBlock::Hash(h) => + Some(schema::v1::block_request::FromBlock::Hash(h.encode())), + message::FromBlock::Number(n) => + Some(schema::v1::block_request::FromBlock::Number(n.encode())), + }, + to_block: to_block.map(|h| h.encode()).unwrap_or_default(), + direction: match direction { + message::Direction::Ascending => schema::v1::Direction::Ascending as i32, + message::Direction::Descending => schema::v1::Direction::Descending as i32, + }, + max_blocks: max_blocks.unwrap_or(0), + } +} diff --git a/client/network/src/light_client_handler.rs b/client/network/src/light_client_handler.rs index 236ae81747..ab6bea8761 100644 --- a/client/network/src/light_client_handler.rs +++ b/client/network/src/light_client_handler.rs @@ -27,9 +27,10 @@ use bytes::Bytes; use codec::{self, Encode, Decode}; use crate::{ + block_requests::build_protobuf_block_request, chain::Client, config::ProtocolId, - protocol::message::BlockAttributes, + protocol::message::{BlockAttributes, Direction, FromBlock}, schema, }; use futures::{channel::oneshot, future::BoxFuture, prelude::*, stream::FuturesUnordered}; @@ -1062,13 +1063,13 @@ fn retries(request: &Request) -> usize { fn serialize_request(request: &Request) -> Result, prost::EncodeError> { let request = match request { Request::Body { request, .. } => { - let rq = schema::v1::BlockRequest { - fields: u32::from(BlockAttributes::BODY.bits()), - from_block: Some(schema::v1::block_request::FromBlock::Hash(request.header.hash().encode())), - to_block: Vec::new(), - direction: schema::v1::Direction::Ascending as i32, - max_blocks: 1, - }; + let rq = build_protobuf_block_request::<_, NumberFor>( + BlockAttributes::BODY, + FromBlock::Hash(request.header.hash()), + None, + Direction::Ascending, + Some(1), + ); let mut buf = Vec::with_capacity(rq.encoded_len()); rq.encode(&mut buf)?; return Ok(buf); @@ -2036,4 +2037,22 @@ mod tests { assert_eq!(vec![(100, 2)], task::block_on(chan.1).unwrap().unwrap()); // ^--- from `DummyFetchChecker::check_changes_proof` } + + #[test] + fn body_request_fields_encoded_properly() { + let (sender, _) = oneshot::channel(); + let serialized_request = serialize_request::(&Request::Body { + request: RemoteBodyRequest { + header: dummy_header(), + retry_count: None, + }, + sender, + }).unwrap(); + let deserialized_request = schema::v1::BlockRequest::decode(&serialized_request[..]).unwrap(); + assert!( + BlockAttributes::from_be_u32(deserialized_request.fields) + .unwrap() + .contains(BlockAttributes::BODY) + ); + } } diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index bb2253b733..a7fbb92387 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -87,6 +87,20 @@ bitflags! { } } +impl BlockAttributes { + /// Encodes attributes as big endian u32, compatible with SCALE-encoding (i.e the + /// significant byte has zero index). + pub fn to_be_u32(&self) -> u32 { + u32::from_be_bytes([self.bits(), 0, 0, 0]) + } + + /// Decodes attributes, encoded with the `encode_to_be_u32()` call. 
+ pub fn from_be_u32(encoded: u32) -> Result { + BlockAttributes::from_bits(encoded.to_be_bytes()[0]) + .ok_or_else(|| Error::from("Invalid BlockAttributes")) + } +} + impl Encode for BlockAttributes { fn encode_to(&self, dest: &mut T) { dest.push_byte(self.bits()) -- GitLab From bdae39fb52e1e5394cba2a429ad10c665353e96b Mon Sep 17 00:00:00 2001 From: Gavin Wood Date: Wed, 17 Jun 2020 12:22:57 +0200 Subject: [PATCH 006/144] Allow Sudo to do anything (#6375) * All Sudo to do anything. * Rename old labels. --- .maintain/gitlab/check_runtime.sh | 2 +- docs/CONTRIBUTING.adoc | 10 +++++----- frame/sudo/src/lib.rs | 12 ++++++------ frame/sudo/src/mock.rs | 18 +++++++++++++----- 4 files changed, 25 insertions(+), 17 deletions(-) diff --git a/.maintain/gitlab/check_runtime.sh b/.maintain/gitlab/check_runtime.sh index 5b7e25e3af..6d009c5aaf 100755 --- a/.maintain/gitlab/check_runtime.sh +++ b/.maintain/gitlab/check_runtime.sh @@ -67,7 +67,7 @@ sub_spec_version="$(git diff tags/release...${CI_COMMIT_SHA} ${VERSIONS_FILE} \ if [ "${add_spec_version}" != "${sub_spec_version}" ] then - github_label "B2-breaksapi" + github_label "D2-breaksapi" boldcat <<-EOT diff --git a/docs/CONTRIBUTING.adoc b/docs/CONTRIBUTING.adoc index 3dca7432c0..ec747d6693 100644 --- a/docs/CONTRIBUTING.adoc +++ b/docs/CONTRIBUTING.adoc @@ -37,9 +37,9 @@ A PR needs to be reviewed and approved by project maintainers unless: . Please tag each PR with exactly one `A`, `B` and `C` label at the minimum. . Once a PR is ready for review please add the https://github.com/paritytech/substrate/pulls?q=is%3Apr+is%3Aopen+label%3AA0-pleasereview[`A0-pleasereview`] label. Generally PRs should sit with this label for 48 hours in order to garner feedback. It may be merged before if all relevant parties had a look at it. -. If the first review is not an approval, swap `A0-pleasereview` to any label `[A3, A7]` to indicate that the PR has received some feedback, but needs further work. For example. https://github.com/paritytech/substrate/labels/A3-inprogress[`A3-inprogress`] is a general indicator that the PR is work in progress and https://github.com/paritytech/substrate/labels/A4-gotissues[`A4-gotissues`] means that it has significant problems that need fixing. Once the work is done, change the label back to `A0-pleasereview`. You might end up swapping a few times back and forth to climb up the A label group. Once a PR is https://github.com/paritytech/substrate/labels/A8-mergeoncegreen[`A8-mergeoncegreen`], it is ready to merge. +. If the first review is not an approval, swap `A0-pleasereview` to any label `[A3, A7]` to indicate that the PR has received some feedback, but needs further work. For example. https://github.com/paritytech/substrate/labels/A3-inprogress[`A3-inprogress`] is a general indicator that the PR is work in progress and https://github.com/paritytech/substrate/labels/A4-gotissues[`A4-gotissues`] means that it has significant problems that need fixing. Once the work is done, change the label back to `A0-pleasereview`. You might end up swapping a few times back and forth to climb up the A label group. Once a PR is https://github.com/paritytech/substrate/labels/A8-mergeoncegreen[`A8-mergeoncegreen`], it is ready to merge. . PRs must be tagged with respect to _release notes_ with https://github.com/paritytech/substrate/labels/B0-silent[`B0-silent`] and `B1-..`. The former indicates that no changes should be mentioned in any release notes. The latter indicates that the changes should be reported in the corresponding release note -. 
PRs that break the external API must be tagged with https://github.com/paritytech/substrate/labels/B2-breaksapi[`B2-breaksapi`], when it changes the FRAME or consensus of running system with https://github.com/paritytech/substrate/labels/B3-breaksconsensus[`B3-breaksconsensus`]. +. PRs that break the external API must be tagged with https://github.com/paritytech/substrate/labels/D2-breaksapi[`D2-breaksapi`], when it changes the FRAME or consensus of running system with https://github.com/paritytech/substrate/labels/B3-breaksconsensus[`B3-breaksconsensus`]. . PRs should be labeled with their release importance via the `C1-C9`. . PRs should be categorized into projects. . No PR should be merged until all reviews' comments are addressed and CI is successful. @@ -69,12 +69,12 @@ To create a Polkadot companion PR: . Pull latest Polkadot master (or clone it, if you haven't yet). . Override your local cargo config to point to your local substrate (pointing to your WIP branch): place `paths = ["path/to/substrate"]` in `~/.cargo/config`. . Make the changes required and build polkadot locally. -. Submit all this as a PR against the Polkadot Repo. Link to your Polkadot PR in the _description_ of your Substrate PR as "polkadot companion: [URL]" OR use the same name for your Polkdadot branch as the Substrate branch. +. Submit all this as a PR against the Polkadot Repo. Link to your Polkadot PR in the _description_ of your Substrate PR as "polkadot companion: [URL]" OR use the same name for your Polkdadot branch as the Substrate branch. . Now you should see that the `check_polkadot` CI job will build your Substrate PR agains the mentioned Polkadot branch in your PR description. . Wait for reviews on both -. Once both PRs have been green lit, they can both be merged 🍻. +. Once both PRs have been green lit, they can both be merged 🍻. -If your PR is reviewed well, but a Polkadot PR is missing, signal it with https://github.com/paritytech/substrate/labels/A7-needspolkadotpr[`A7-needspolkadotpr`] to prevent it from getting automatically merged. +If your PR is reviewed well, but a Polkadot PR is missing, signal it with https://github.com/paritytech/substrate/labels/A7-needspolkadotpr[`A7-needspolkadotpr`] to prevent it from getting automatically merged. As there might be multiple pending PRs that might conflict with one another, a) you should not merge the substrate PR until the Polkadot PR has also been reviewed and b) both should be merged pretty quickly after another to not block others. diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index 55c2c97d12..233e75e869 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -88,12 +88,12 @@ #![cfg_attr(not(feature = "std"), no_std)] use sp_std::prelude::*; -use sp_runtime::{DispatchResult, traits::{StaticLookup, Dispatchable}}; +use sp_runtime::{DispatchResult, traits::StaticLookup}; use frame_support::{ Parameter, decl_module, decl_event, decl_storage, decl_error, ensure, }; -use frame_support::weights::{Weight, GetDispatchInfo}; +use frame_support::{weights::{Weight, GetDispatchInfo}, traits::UnfilteredDispatchable}; use frame_system::{self as system, ensure_signed}; #[cfg(test)] @@ -106,7 +106,7 @@ pub trait Trait: frame_system::Trait { type Event: From> + Into<::Event>; /// A sudo-able call. - type Call: Parameter + Dispatchable + GetDispatchInfo; + type Call: Parameter + UnfilteredDispatchable + GetDispatchInfo; } decl_module! { @@ -132,7 +132,7 @@ decl_module! 
{ let sender = ensure_signed(origin)?; ensure!(sender == Self::key(), Error::::RequireSudo); - let res = call.dispatch(frame_system::RawOrigin::Root.into()); + let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); Self::deposit_event(RawEvent::Sudid(res.map(|_| ()).map_err(|e| e.error))); } @@ -152,7 +152,7 @@ decl_module! { let sender = ensure_signed(origin)?; ensure!(sender == Self::key(), Error::::RequireSudo); - let res = call.dispatch(frame_system::RawOrigin::Root.into()); + let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); Self::deposit_event(RawEvent::Sudid(res.map(|_| ()).map_err(|e| e.error))); } @@ -195,7 +195,7 @@ decl_module! { let who = T::Lookup::lookup(who)?; - let res = match call.dispatch(frame_system::RawOrigin::Signed(who).into()) { + let res = match call.dispatch_bypass_filter(frame_system::RawOrigin::Signed(who).into()) { Ok(_) => true, Err(e) => { sp_runtime::print(e); diff --git a/frame/sudo/src/mock.rs b/frame/sudo/src/mock.rs index 73c3609d3f..3bf67f581b 100644 --- a/frame/sudo/src/mock.rs +++ b/frame/sudo/src/mock.rs @@ -24,10 +24,11 @@ use frame_support::{ }; use sp_core::H256; // The testing primitives are very useful for avoiding having to work with signatures -// or public keys. +// or public keys. use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; use sp_io; use crate as sudo; +use frame_support::traits::Filter; // Logger module to track execution. pub mod logger { @@ -58,7 +59,7 @@ pub mod logger { #[weight = *weight] fn privileged_i32_log(origin, i: i32, weight: Weight){ - // Ensure that the `origin` is `Root`. + // Ensure that the `origin` is `Root`. ensure_root(origin)?; ::append(i); Self::deposit_event(RawEvent::AppendI32(i, weight)); @@ -66,7 +67,7 @@ pub mod logger { #[weight = *weight] fn non_privileged_log(origin, i: i32, weight: Weight){ - // Ensure that the `origin` is some signed account. + // Ensure that the `origin` is some signed account. let sender = ensure_signed(origin)?; ::append(i); >::append(sender.clone()); @@ -112,8 +113,15 @@ parameter_types! { pub const AvailableBlockRatio: Perbill = Perbill::one(); } +pub struct BlockEverything; +impl Filter for BlockEverything { + fn filter(_: &Call) -> bool { + false + } +} + impl frame_system::Trait for Test { - type BaseCallFilter = (); + type BaseCallFilter = BlockEverything; type Origin = Origin; type Call = Call; type Index = u64; @@ -121,7 +129,7 @@ impl frame_system::Trait for Test { type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; - type Lookup = IdentityLookup; + type Lookup = IdentityLookup; type Header = Header; type Event = TestEvent; type BlockHashCount = BlockHashCount; -- GitLab From 17be6fd5e5dec2b358dbffa9ce07e4bc1d3e01e2 Mon Sep 17 00:00:00 2001 From: Gavin Wood Date: Wed, 17 Jun 2020 12:24:32 +0200 Subject: [PATCH 007/144] Stored call in multisig (#6319) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Stored call in multisig * Docs. * Benchmarks. * Fix * Update frame/multisig/src/lib.rs Co-authored-by: Bastian Köcher * patch benchmarks * Minor grumbles. * Update as_multi weight * Fixes and refactoring. * Split out threshold=1 and opaquify Call. 
* Compiles, tests pass, weights are broken * Update benchmarks, add working tests * Add benchmark to threshold 1, add event too * suppress warning for now * @xlc improvment nit * Update weight and tests * Test for weight check * Fix line width * one more line width error * Apply suggestions from code review Co-authored-by: Alexander Popiak * fix merge * more @apopiak feedback * Multisig handles no preimage * Optimize return weight after dispatch * Error on failed deposit. Co-authored-by: Bastian Köcher Co-authored-by: Shawn Tabrizi Co-authored-by: Alexander Popiak --- frame/multisig/src/benchmarking.rs | 176 +++++++++-- frame/multisig/src/lib.rs | 475 +++++++++++++++++++---------- frame/multisig/src/tests.rs | 295 ++++++++++++++---- 3 files changed, 708 insertions(+), 238 deletions(-) diff --git a/frame/multisig/src/benchmarking.rs b/frame/multisig/src/benchmarking.rs index fa2ec52e6b..9479c16cb2 100644 --- a/frame/multisig/src/benchmarking.rs +++ b/frame/multisig/src/benchmarking.rs @@ -22,14 +22,15 @@ use super::*; use frame_system::RawOrigin; use frame_benchmarking::{benchmarks, account}; -use sp_runtime::traits::Saturating; +use sp_runtime::traits::{Bounded, Saturating}; +use core::convert::TryInto; use crate::Module as Multisig; const SEED: u32 = 0; fn setup_multi(s: u32, z: u32) - -> Result<(Vec, Box<::Call>), &'static str> + -> Result<(Vec, Vec), &'static str> { let mut signatories: Vec = Vec::new(); for i in 0 .. s { @@ -41,36 +42,79 @@ fn setup_multi(s: u32, z: u32) signatories.push(signatory); } signatories.sort(); - let call: Box<::Call> = Box::new(frame_system::Call::remark(vec![0; z as usize]).into()); - return Ok((signatories, call)) + // Must first convert to outer call type. + let call: ::Call = frame_system::Call::::remark(vec![0; z as usize]).into(); + let call_data = call.encode(); + return Ok((signatories, call_data)) } benchmarks! { _ { } + as_multi_threshold_1 { + // Transaction Length + let z in 0 .. 10_000; + let max_signatories = T::MaxSignatories::get().into(); + let (mut signatories, _) = setup_multi::(max_signatories, z)?; + let call: ::Call = frame_system::Call::::remark(vec![0; z as usize]).into(); + let call_hash = call.using_encoded(blake2_256); + let multi_account_id = Multisig::::multi_account_id(&signatories, 1); + let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; + }: _(RawOrigin::Signed(caller.clone()), signatories, Box::new(call)) + verify { + // If the benchmark resolves, then the call was dispatched successfully. + } + as_multi_create { // Signatories, need at least 2 total people let s in 2 .. T::MaxSignatories::get() as u32; // Transaction Length let z in 0 .. 10_000; let (mut signatories, call) = setup_multi::(s, z)?; + let call_hash = blake2_256(&call); + let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - }: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call) + }: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call, false, 0) + verify { + assert!(Multisigs::::contains_key(multi_account_id, call_hash)); + } - as_multi_approve { - // Signatories, need at least 2 people + as_multi_create_store { + // Signatories, need at least 2 total people let s in 2 .. T::MaxSignatories::get() as u32; // Transaction Length let z in 0 .. 
10_000; let (mut signatories, call) = setup_multi::(s, z)?; + let call_hash = blake2_256(&call); + let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); + let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + }: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call, true, 0) + verify { + assert!(Multisigs::::contains_key(multi_account_id, call_hash)); + assert!(Calls::::contains_key(call_hash)); + } + + as_multi_approve { + // Signatories, need at least 3 people (so we don't complete the multisig) + let s in 3 .. T::MaxSignatories::get() as u32; + // Transaction Length + let z in 0 .. 10_000; + let (mut signatories, call) = setup_multi::(s, z)?; + let call_hash = blake2_256(&call); + let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let mut signatories2 = signatories.clone(); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; // before the call, get the timepoint let timepoint = Multisig::::timepoint(); - // Create the multi - Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone())?; + // Create the multi, storing for worst case + Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), true, 0)?; let caller2 = signatories2.remove(0); - }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call) + }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, false, 0) + verify { + let multisig = Multisigs::::get(multi_account_id, call_hash).ok_or("multisig not created")?; + assert_eq!(multisig.approvals.len(), 2); + } as_multi_complete { // Signatories, need at least 2 people @@ -78,21 +122,27 @@ benchmarks! { // Transaction Length let z in 0 .. 10_000; let (mut signatories, call) = setup_multi::(s, z)?; + let call_hash = blake2_256(&call); + let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let mut signatories2 = signatories.clone(); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; // before the call, get the timepoint let timepoint = Multisig::::timepoint(); - // Create the multi - Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone())?; + // Create the multi, storing it for worst case + Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), true, 0)?; // Everyone except the first person approves for i in 1 .. s - 1 { let mut signatories_loop = signatories2.clone(); let caller_loop = signatories_loop.remove(i as usize); let o = RawOrigin::Signed(caller_loop).into(); - Multisig::::as_multi(o, s as u16, signatories_loop, Some(timepoint), call.clone())?; + Multisig::::as_multi(o, s as u16, signatories_loop, Some(timepoint), call.clone(), false, 0)?; } let caller2 = signatories2.remove(0); - }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call) + assert!(Multisigs::::contains_key(&multi_account_id, call_hash)); + }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, false, Weight::max_value()) + verify { + assert!(!Multisigs::::contains_key(&multi_account_id, call_hash)); + } approve_as_multi_create { // Signatories, need at least 2 people @@ -100,10 +150,14 @@ benchmarks! { // Transaction Length let z in 0 .. 
10_000; let (mut signatories, call) = setup_multi::(s, z)?; + let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - let call_hash = call.using_encoded(blake2_256); + let call_hash = blake2_256(&call); // Create the multi - }: approve_as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call_hash) + }: approve_as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call_hash, 0) + verify { + assert!(Multisigs::::contains_key(multi_account_id, call_hash)); + } approve_as_multi_approve { // Signatories, need at least 2 people @@ -112,14 +166,63 @@ benchmarks! { let z in 0 .. 10_000; let (mut signatories, call) = setup_multi::(s, z)?; let mut signatories2 = signatories.clone(); + let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - let call_hash = call.using_encoded(blake2_256); + let call_hash = blake2_256(&call); // before the call, get the timepoint let timepoint = Multisig::::timepoint(); // Create the multi - Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone())?; + Multisig::::as_multi( + RawOrigin::Signed(caller.clone()).into(), + s as u16, + signatories, + None, + call.clone(), + false, + 0 + )?; let caller2 = signatories2.remove(0); - }: approve_as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call_hash) + }: approve_as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call_hash, 0) + verify { + let multisig = Multisigs::::get(multi_account_id, call_hash).ok_or("multisig not created")?; + assert_eq!(multisig.approvals.len(), 2); + } + + approve_as_multi_complete { + // Signatories, need at least 2 people + let s in 2 .. T::MaxSignatories::get() as u32; + // Transaction Length + let z in 0 .. 10_000; + let (mut signatories, call) = setup_multi::(s, z)?; + let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); + let mut signatories2 = signatories.clone(); + let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let call_hash = blake2_256(&call); + // before the call, get the timepoint + let timepoint = Multisig::::timepoint(); + // Create the multi + Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), true, 0)?; + // Everyone except the first person approves + for i in 1 .. s - 1 { + let mut signatories_loop = signatories2.clone(); + let caller_loop = signatories_loop.remove(i as usize); + let o = RawOrigin::Signed(caller_loop).into(); + Multisig::::as_multi(o, s as u16, signatories_loop, Some(timepoint), call.clone(), false, 0)?; + } + let caller2 = signatories2.remove(0); + assert!(Multisigs::::contains_key(&multi_account_id, call_hash)); + }: approve_as_multi( + RawOrigin::Signed(caller2), + s as u16, + signatories2, + Some(timepoint), + call_hash, + Weight::max_value() + ) + verify { + assert!(!Multisigs::::contains_key(multi_account_id, call_hash)); + } cancel_as_multi { // Signatories, need at least 2 people @@ -127,13 +230,40 @@ benchmarks! { // Transaction Length let z in 0 .. 
10_000; let (mut signatories, call) = setup_multi::(s, z)?; + let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - let call_hash = call.using_encoded(blake2_256); + let call_hash = blake2_256(&call); let timepoint = Multisig::::timepoint(); // Create the multi let o = RawOrigin::Signed(caller.clone()).into(); - Multisig::::as_multi(o, s as u16, signatories.clone(), None, call.clone())?; + Multisig::::as_multi(o, s as u16, signatories.clone(), None, call.clone(), true, 0)?; + assert!(Multisigs::::contains_key(&multi_account_id, call_hash)); }: _(RawOrigin::Signed(caller), s as u16, signatories, timepoint, call_hash) + verify { + assert!(!Multisigs::::contains_key(multi_account_id, call_hash)); + } + + cancel_as_multi_store { + // Signatories, need at least 2 people + let s in 2 .. T::MaxSignatories::get() as u32; + // Transaction Length + let z in 0 .. 10_000; + let (mut signatories, call) = setup_multi::(s, z)?; + let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); + let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let call_hash = blake2_256(&call); + let timepoint = Multisig::::timepoint(); + // Create the multi + let o = RawOrigin::Signed(caller.clone()).into(); + Multisig::::as_multi(o, s as u16, signatories.clone(), None, call.clone(), true, 0)?; + assert!(Multisigs::::contains_key(&multi_account_id, call_hash)); + assert!(Calls::::contains_key(call_hash)); + }: cancel_as_multi(RawOrigin::Signed(caller), s as u16, signatories, timepoint, call_hash) + verify { + assert!(!Multisigs::::contains_key(&multi_account_id, call_hash)); + assert!(!Calls::::contains_key(call_hash)); + } } #[cfg(test)] @@ -145,12 +275,16 @@ mod tests { #[test] fn test_benchmarks() { new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_as_multi_threshold_1::()); assert_ok!(test_benchmark_as_multi_create::()); + assert_ok!(test_benchmark_as_multi_create_store::()); assert_ok!(test_benchmark_as_multi_approve::()); assert_ok!(test_benchmark_as_multi_complete::()); assert_ok!(test_benchmark_approve_as_multi_create::()); assert_ok!(test_benchmark_approve_as_multi_approve::()); + assert_ok!(test_benchmark_approve_as_multi_complete::()); assert_ok!(test_benchmark_cancel_as_multi::()); + assert_ok!(test_benchmark_cancel_as_multi_store::()); }); } } diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs index 672e6bed20..50bd96aca3 100644 --- a/frame/multisig/src/lib.rs +++ b/frame/multisig/src/lib.rs @@ -51,11 +51,11 @@ use codec::{Encode, Decode}; use sp_io::hashing::blake2_256; use frame_support::{decl_module, decl_event, decl_error, decl_storage, Parameter, ensure, RuntimeDebug}; use frame_support::{traits::{Get, ReservableCurrency, Currency}, - weights::{Weight, GetDispatchInfo, DispatchClass, Pays}, + weights::{Weight, GetDispatchInfo, constants::{WEIGHT_PER_NANOS, WEIGHT_PER_MICROS}}, dispatch::{DispatchResultWithPostInfo, DispatchErrorWithPostInfo, PostDispatchInfo}, }; -use frame_system::{self as system, ensure_signed}; -use sp_runtime::{DispatchError, DispatchResult, traits::Dispatchable}; +use frame_system::{self as system, ensure_signed, RawOrigin}; +use sp_runtime::{DispatchError, DispatchResult, traits::{Dispatchable, Zero}}; mod tests; mod benchmarking; @@ -74,10 +74,12 @@ pub trait Trait: frame_system::Trait { /// The currency 
mechanism. type Currency: ReservableCurrency; - /// The base amount of currency needed to reserve for creating a multisig execution. + /// The base amount of currency needed to reserve for creating a multisig execution or to store + /// a dispatch call for later. /// /// This is held for an additional storage item whose value size is - /// `4 + sizeof((BlockNumber, Balance, AccountId))` bytes. + /// `4 + sizeof((BlockNumber, Balance, AccountId))` bytes and whose key size is + /// `32 + sizeof(AccountId)` bytes. type DepositBase: Get>; /// The amount of currency needed per unit threshold when creating a multisig execution. @@ -119,13 +121,15 @@ decl_storage! { pub Multisigs: double_map hasher(twox_64_concat) T::AccountId, hasher(blake2_128_concat) [u8; 32] => Option, T::AccountId>>; + + pub Calls: map hasher(identity) [u8; 32] => Option<(Vec, T::AccountId, BalanceOf)>; } } decl_error! { pub enum Error for Module { - /// Threshold is too low (zero). - ZeroThreshold, + /// Threshold must be 2 or greater. + MinimumThreshold, /// Call is already approved by this signatory. AlreadyApproved, /// Call doesn't need any (more) approvals. @@ -148,6 +152,10 @@ decl_error! { WrongTimepoint, /// A timepoint was given, yet no multisig operation is underway. UnexpectedTimepoint, + /// The maximum weight information provided was too low. + WeightTooLow, + /// The data to be stored is already stored. + AlreadyStored, } } @@ -176,22 +184,50 @@ decl_event! { mod weight_of { use super::*; + /// - Base Weight: 33.72 + 0.002 * Z µs + /// - DB Weight: None + /// - Plus Call Weight + pub fn as_multi_threshold_1( + call_len: usize, + call_weight: Weight, + ) -> Weight { + (34 * WEIGHT_PER_MICROS) + .saturating_add((2 * WEIGHT_PER_NANOS).saturating_mul(call_len as Weight)) + .saturating_add(call_weight) + } + /// - Base Weight: - /// - Create: 46.55 + 0.089 * S µs - /// - Approve: 34.03 + .112 * S µs - /// - Complete: 40.36 + .225 * S µs + /// - Create: 38.82 + 0.121 * S + .001 * Z µs + /// - Create w/ Store: 54.22 + 0.120 * S + .003 * Z µs + /// - Approve: 29.86 + 0.143 * S + .001 * Z µs + /// - Complete: 39.55 + 0.267 * S + .002 * Z µs /// - DB Weight: - /// - Reads: Multisig Storage, [Caller Account] - /// - Writes: Multisig Storage, [Caller Account] + /// - Reads: Multisig Storage, [Caller Account], Calls, Depositor Account + /// - Writes: Multisig Storage, [Caller Account], Calls, Depositor Account /// - Plus Call Weight - pub fn as_multi(other_sig_len: usize, call_weight: Weight) -> Weight { + pub fn as_multi( + sig_len: usize, + call_len: usize, + call_weight: Weight, + calls_write: bool, + refunded: bool, + ) -> Weight { call_weight - .saturating_add(45_000_000) - .saturating_add((other_sig_len as Weight).saturating_mul(250_000)) - .saturating_add(T::DbWeight::get().reads_writes(1, 1)) + .saturating_add(55 * WEIGHT_PER_MICROS) + .saturating_add((250 * WEIGHT_PER_NANOS).saturating_mul(sig_len as Weight)) + .saturating_add((3 * WEIGHT_PER_NANOS).saturating_mul(call_len as Weight)) + .saturating_add(T::DbWeight::get().reads_writes(1, 1)) // Multisig read/write + .saturating_add(T::DbWeight::get().reads(1)) // Calls read + .saturating_add(T::DbWeight::get().writes(calls_write.into())) // Calls write + .saturating_add(T::DbWeight::get().reads_writes(refunded.into(), refunded.into())) // Deposit refunded } } +enum CallOrHash { + Call(Vec, bool), + Hash([u8; 32]), +} + decl_module! { pub struct Module for enum Call where origin: T::Origin { type Error = Error; @@ -210,6 +246,66 @@ decl_module! 
{ 1_000_000_000 } + /// Immediately dispatch a multi-signature call using a single approval from the caller. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// - `other_signatories`: The accounts (other than the sender) who are part of the + /// multi-signature, but do not participate in the approval process. + /// - `call`: The call to be executed. + /// + /// Result is equivalent to the dispatched result. + /// + /// # + /// O(Z + C) where Z is the length of the call and C its execution weight. + /// ------------------------------- + /// - Base Weight: 33.72 + 0.002 * Z µs + /// - DB Weight: None + /// - Plus Call Weight + /// # + #[weight = ( + weight_of::as_multi_threshold_1::( + call.using_encoded(|c| c.len()), + call.get_dispatch_info().weight + ), + call.get_dispatch_info().class, + )] + fn as_multi_threshold_1(origin, + other_signatories: Vec, + call: Box<::Call>, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + let max_sigs = T::MaxSignatories::get() as usize; + ensure!(!other_signatories.is_empty(), Error::::TooFewSignatories); + let other_signatories_len = other_signatories.len(); + ensure!(other_signatories_len < max_sigs, Error::::TooManySignatories); + let signatories = Self::ensure_sorted_and_insert(other_signatories, who.clone())?; + + let id = Self::multi_account_id(&signatories, 1); + + let call_len = call.using_encoded(|c| c.len()); + let result = call.dispatch(RawOrigin::Signed(id.clone()).into()); + + result.map(|post_dispatch_info| post_dispatch_info.actual_weight + .map(|actual_weight| weight_of::as_multi_threshold_1::( + call_len, + actual_weight, + )) + .into() + ).map_err(|err| match err.post_info.actual_weight { + Some(actual_weight) => { + let weight_used = weight_of::as_multi_threshold_1::( + call_len, + actual_weight, + ); + let post_info = Some(weight_used).into(); + let error = err.error.into(); + DispatchErrorWithPostInfo { post_info, error } + }, + None => err, + }) + } + /// Register approval for a dispatch to be made from a deterministic composite account if /// approved by a total of `threshold - 1` of `other_signatories`. /// @@ -252,99 +348,32 @@ decl_module! { /// `DepositBase + threshold * DepositFactor`. 
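// A tiny worked example of the `DepositBase + threshold * DepositFactor` reservation
// described above; the two constants are made up for illustration and are set per
// runtime in practice.
fn main() {
    let deposit_base: u128 = 1_000_000_000;  // hypothetical DepositBase
    let deposit_factor: u128 = 100_000_000;  // hypothetical DepositFactor
    let threshold: u128 = 3;
    assert_eq!(deposit_base + threshold * deposit_factor, 1_300_000_000);
}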
/// ------------------------------- /// - Base Weight: - /// - Create: 46.55 + 0.089 * S µs - /// - Approve: 34.03 + .112 * S µs - /// - Complete: 40.36 + .225 * S µs + /// - Create: 41.89 + 0.118 * S + .002 * Z µs + /// - Create w/ Store: 53.57 + 0.119 * S + .003 * Z µs + /// - Approve: 31.39 + 0.136 * S + .002 * Z µs + /// - Complete: 39.94 + 0.26 * S + .002 * Z µs /// - DB Weight: - /// - Reads: Multisig Storage, [Caller Account] - /// - Writes: Multisig Storage, [Caller Account] + /// - Reads: Multisig Storage, [Caller Account], Calls (if `store_call`) + /// - Writes: Multisig Storage, [Caller Account], Calls (if `store_call`) /// - Plus Call Weight /// # - #[weight = ( - weight_of::as_multi::(other_signatories.len(), call.get_dispatch_info().weight), - call.get_dispatch_info().class, - Pays::Yes, + #[weight = weight_of::as_multi::( + other_signatories.len(), + call.len(), + *max_weight, + true, // assume worst case: calls write + true, // assume worst case: refunded )] fn as_multi(origin, threshold: u16, other_signatories: Vec, maybe_timepoint: Option>, - call: Box<::Call>, + call: Vec, + store_call: bool, + max_weight: Weight, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; - ensure!(threshold >= 1, Error::::ZeroThreshold); - let max_sigs = T::MaxSignatories::get() as usize; - ensure!(!other_signatories.is_empty(), Error::::TooFewSignatories); - let other_signatories_len = other_signatories.len(); - ensure!(other_signatories_len < max_sigs, Error::::TooManySignatories); - let signatories = Self::ensure_sorted_and_insert(other_signatories, who.clone())?; - - let id = Self::multi_account_id(&signatories, threshold); - let call_hash = call.using_encoded(blake2_256); - - if let Some(mut m) = >::get(&id, call_hash) { - let timepoint = maybe_timepoint.ok_or(Error::::NoTimepoint)?; - ensure!(m.when == timepoint, Error::::WrongTimepoint); - if let Err(pos) = m.approvals.binary_search(&who) { - // we know threshold is greater than zero from the above ensure. - if (m.approvals.len() as u16) < threshold - 1 { - m.approvals.insert(pos, who.clone()); - >::insert(&id, call_hash, m); - Self::deposit_event(RawEvent::MultisigApproval(who, timepoint, id, call_hash)); - // Call is not made, so the actual weight does not include call - return Ok(Some(weight_of::as_multi::(other_signatories_len, 0)).into()) - } - } else { - if (m.approvals.len() as u16) < threshold { - Err(Error::::AlreadyApproved)? 
- } - } - - let result = call.dispatch(frame_system::RawOrigin::Signed(id.clone()).into()); - let _ = T::Currency::unreserve(&m.depositor, m.deposit); - >::remove(&id, call_hash); - Self::deposit_event(RawEvent::MultisigExecuted( - who, timepoint, id, call_hash, result.map(|_| ()).map_err(|e| e.error) - )); - return Ok(None.into()) - } else { - ensure!(maybe_timepoint.is_none(), Error::::UnexpectedTimepoint); - if threshold > 1 { - let deposit = T::DepositBase::get() - + T::DepositFactor::get() * threshold.into(); - T::Currency::reserve(&who, deposit)?; - >::insert(&id, call_hash, Multisig { - when: Self::timepoint(), - deposit, - depositor: who.clone(), - approvals: vec![who.clone()], - }); - Self::deposit_event(RawEvent::NewMultisig(who, id, call_hash)); - // Call is not made, so we can return that weight - return Ok(Some(weight_of::as_multi::(other_signatories_len, 0)).into()) - } else { - let result = call.dispatch(frame_system::RawOrigin::Signed(id).into()); - match result { - Ok(post_dispatch_info) => { - match post_dispatch_info.actual_weight { - Some(actual_weight) => return Ok(Some(weight_of::as_multi::(other_signatories_len, actual_weight)).into()), - None => return Ok(None.into()), - } - }, - Err(err) => { - match err.post_info.actual_weight { - Some(actual_weight) => { - let weight_used = weight_of::as_multi::(other_signatories_len, actual_weight); - return Err(DispatchErrorWithPostInfo { post_info: Some(weight_used).into(), error: err.error.into() }) - }, - None => { - return Err(err) - } - } - } - } - } - } + Self::operate(who, threshold, other_signatories, maybe_timepoint, CallOrHash::Call(call, store_call), max_weight) } /// Register approval for a dispatch to be made from a deterministic composite account if @@ -386,57 +415,22 @@ decl_module! { /// - Read: Multisig Storage, [Caller Account] /// - Write: Multisig Storage, [Caller Account] /// # - #[weight = ( - T::DbWeight::get().reads_writes(1, 1) - .saturating_add(45_000_000) - .saturating_add((other_signatories.len() as Weight).saturating_mul(120_000)), - DispatchClass::Normal, - Pays::Yes, + #[weight = weight_of::as_multi::( + other_signatories.len(), + 0, // call_len is zero in this case + *max_weight, + true, // assume worst case: calls write + true, // assume worst case: refunded )] fn approve_as_multi(origin, threshold: u16, other_signatories: Vec, maybe_timepoint: Option>, call_hash: [u8; 32], - ) -> DispatchResult { + max_weight: Weight, + ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; - ensure!(threshold >= 1, Error::::ZeroThreshold); - let max_sigs = T::MaxSignatories::get() as usize; - ensure!(!other_signatories.is_empty(), Error::::TooFewSignatories); - ensure!(other_signatories.len() < max_sigs, Error::::TooManySignatories); - let signatories = Self::ensure_sorted_and_insert(other_signatories, who.clone())?; - - let id = Self::multi_account_id(&signatories, threshold); - - if let Some(mut m) = >::get(&id, call_hash) { - let timepoint = maybe_timepoint.ok_or(Error::::NoTimepoint)?; - ensure!(m.when == timepoint, Error::::WrongTimepoint); - ensure!(m.approvals.len() < threshold as usize, Error::::NoApprovalsNeeded); - if let Err(pos) = m.approvals.binary_search(&who) { - m.approvals.insert(pos, who.clone()); - >::insert(&id, call_hash, m); - Self::deposit_event(RawEvent::MultisigApproval(who, timepoint, id, call_hash)); - } else { - Err(Error::::AlreadyApproved)? 
- } - } else { - if threshold > 1 { - ensure!(maybe_timepoint.is_none(), Error::::UnexpectedTimepoint); - let deposit = T::DepositBase::get() - + T::DepositFactor::get() * threshold.into(); - T::Currency::reserve(&who, deposit)?; - >::insert(&id, call_hash, Multisig { - when: Self::timepoint(), - deposit, - depositor: who.clone(), - approvals: vec![who.clone()], - }); - Self::deposit_event(RawEvent::NewMultisig(who, id, call_hash)); - } else { - Err(Error::::NoApprovalsNeeded)? - } - } - Ok(()) + Self::operate(who, threshold, other_signatories, maybe_timepoint, CallOrHash::Hash(call_hash), max_weight) } /// Cancel a pre-existing, on-going multisig transaction. Any deposit reserved previously @@ -461,18 +455,15 @@ decl_module! { /// - I/O: 1 read `O(S)`, one remove. /// - Storage: removes one item. /// ---------------------------------- - /// - Base Weight: 37.6 + 0.084 * S + /// - Base Weight: 36.07 + 0.124 * S /// - DB Weight: - /// - Read: Multisig Storage, [Caller Account] - /// - Write: Multisig Storage, [Caller Account] + /// - Read: Multisig Storage, [Caller Account], Refund Account, Calls + /// - Write: Multisig Storage, [Caller Account], Refund Account, Calls /// # - #[weight = ( - T::DbWeight::get().reads_writes(1, 1) - .saturating_add(40_000_000) - .saturating_add((other_signatories.len() as Weight).saturating_mul(100_000)), - DispatchClass::Normal, - Pays::Yes, - )] + #[weight = T::DbWeight::get().reads_writes(3, 3) + .saturating_add(36 * WEIGHT_PER_MICROS) + .saturating_add((other_signatories.len() as Weight).saturating_mul(100 * WEIGHT_PER_NANOS)) + ] fn cancel_as_multi(origin, threshold: u16, other_signatories: Vec, @@ -480,7 +471,7 @@ decl_module! { call_hash: [u8; 32], ) -> DispatchResult { let who = ensure_signed(origin)?; - ensure!(threshold >= 1, Error::::ZeroThreshold); + ensure!(threshold >= 2, Error::::MinimumThreshold); let max_sigs = T::MaxSignatories::get() as usize; ensure!(!other_signatories.is_empty(), Error::::TooFewSignatories); ensure!(other_signatories.len() < max_sigs, Error::::TooManySignatories); @@ -494,7 +485,8 @@ decl_module! { ensure!(m.depositor == who, Error::::NotOwner); let _ = T::Currency::unreserve(&m.depositor, m.deposit); - >::remove(&id, call_hash); + >::remove(&id, &call_hash); + Self::clear_call(&call_hash); Self::deposit_event(RawEvent::MultisigCancelled(who, timepoint, id, call_hash)); Ok(()) @@ -512,6 +504,169 @@ impl Module { T::AccountId::decode(&mut &entropy[..]).unwrap_or_default() } + fn operate( + who: T::AccountId, + threshold: u16, + other_signatories: Vec, + maybe_timepoint: Option>, + call_or_hash: CallOrHash, + max_weight: Weight, + ) -> DispatchResultWithPostInfo { + ensure!(threshold >= 2, Error::::MinimumThreshold); + let max_sigs = T::MaxSignatories::get() as usize; + ensure!(!other_signatories.is_empty(), Error::::TooFewSignatories); + let other_signatories_len = other_signatories.len(); + ensure!(other_signatories_len < max_sigs, Error::::TooManySignatories); + let signatories = Self::ensure_sorted_and_insert(other_signatories, who.clone())?; + + let id = Self::multi_account_id(&signatories, threshold); + + // Threshold > 1; this means it's a multi-step operation. We extract the `call_hash`. 
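// A standalone illustration of the sorted-Vec approval bookkeeping used just below:
// `binary_search` on the already-sorted approvals either finds the signer (already
// approved) or yields the insertion position, and the position is only kept while
// further approvals are still needed.
fn main() {
    let approvals: Vec<u8> = vec![1, 3]; // signatories that already approved, kept sorted
    let threshold: u16 = 3;

    let maybe_pos = approvals
        .binary_search(&2)
        .err()                                        // Err(pos) => has not approved yet
        .filter(|_| (approvals.len() as u16) < threshold);
    assert_eq!(maybe_pos, Some(1));                   // 2 would be inserted between 1 and 3

    // An existing approver gets no position, which later surfaces as `AlreadyApproved`
    // unless this call also stored the call data.
    assert_eq!(approvals.binary_search(&1).err(), None);
}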
+ let (call_hash, call_len, maybe_call, store) = match call_or_hash { + CallOrHash::Call(call, should_store) => { + let call_hash = blake2_256(&call); + let call_len = call.len(); + (call_hash, call_len, Some(call), should_store) + } + CallOrHash::Hash(h) => (h, 0, None, false), + }; + + // Branch on whether the operation has already started or not. + if let Some(mut m) = >::get(&id, call_hash) { + // Yes; ensure that the timepoint exists and agrees. + let timepoint = maybe_timepoint.ok_or(Error::::NoTimepoint)?; + ensure!(m.when == timepoint, Error::::WrongTimepoint); + + // Ensure that either we have not yet signed or that it is at threshold. + let mut approvals = m.approvals.len() as u16; + // We only bother with the approval if we're below threshold. + let maybe_pos = m.approvals.binary_search(&who).err().filter(|_| approvals < threshold); + // Bump approvals if not yet voted and the vote is needed. + if maybe_pos.is_some() { approvals += 1; } + + // We only bother fetching/decoding call if we know that we're ready to execute. + let maybe_approved_call = if approvals >= threshold { + Self::get_call(&call_hash, maybe_call.as_ref().map(|c| c.as_ref())) + } else { None }; + + if let Some(call) = maybe_approved_call { + // verify weight + ensure!(call.get_dispatch_info().weight <= max_weight, Error::::WeightTooLow); + + let result = call.dispatch(RawOrigin::Signed(id.clone()).into()); + T::Currency::unreserve(&m.depositor, m.deposit); + >::remove(&id, call_hash); + Self::clear_call(&call_hash); + Self::deposit_event(RawEvent::MultisigExecuted( + who, timepoint, id, call_hash, result.map(|_| ()).map_err(|e| e.error) + )); + Ok(get_result_weight(result).map(|actual_weight| weight_of::as_multi::( + other_signatories_len, + call_len, + actual_weight, + true, // Call is removed + true, // User is refunded + )).into()) + } else { + // We cannot dispatch the call now; either it isn't available, or it is, but we + // don't have threshold approvals even with our signature. + + // Store the call if desired. + let stored = if let Some(data) = maybe_call.filter(|_| store) { + Self::store_call_and_reserve(who.clone(), &call_hash, data, BalanceOf::::zero())?; + true + } else { + false + }; + + if let Some(pos) = maybe_pos { + // Record approval. + m.approvals.insert(pos, who.clone()); + >::insert(&id, call_hash, m); + Self::deposit_event(RawEvent::MultisigApproval(who, timepoint, id, call_hash)); + } else { + // If we already approved and didn't store the Call, then this was useless and + // we report an error. + ensure!(stored, Error::::AlreadyApproved); + } + + // Call is not made, so the actual weight does not include call + Ok(Some(weight_of::as_multi::( + other_signatories_len, + call_len, + 0, + stored, // Call stored? + false, // No refund + )).into()) + } + } else { + // Not yet started; there should be no timepoint given. + ensure!(maybe_timepoint.is_none(), Error::::UnexpectedTimepoint); + + // Just start the operation by recording it in storage. + let deposit = T::DepositBase::get() + T::DepositFactor::get() * threshold.into(); + + // Store the call if desired. 
+ let stored = if let Some(data) = maybe_call.filter(|_| store) { + Self::store_call_and_reserve(who.clone(), &call_hash, data, deposit)?; + true + } else { + T::Currency::reserve(&who, deposit)?; + false + }; + + >::insert(&id, call_hash, Multisig { + when: Self::timepoint(), + deposit, + depositor: who.clone(), + approvals: vec![who.clone()], + }); + Self::deposit_event(RawEvent::NewMultisig(who, id, call_hash)); + // Call is not made, so we can return that weight + return Ok(Some(weight_of::as_multi::( + other_signatories_len, + call_len, + 0, + stored, // Call stored? + false, // No refund + )).into()) + } + } + + /// Place a call's encoded data in storage, reserving funds as appropriate. + /// + /// We store `data` here because storing `call` would result in needing another `.encode`. + /// + /// Returns a `bool` indicating whether the data did end up being stored. + fn store_call_and_reserve(who: T::AccountId, hash: &[u8; 32], data: Vec, other_deposit: BalanceOf) + -> DispatchResult + { + ensure!(!Calls::::contains_key(hash), Error::::AlreadyStored); + let deposit = other_deposit + T::DepositBase::get() + + T::DepositFactor::get() * BalanceOf::::from(((data.len() + 31) / 32) as u32); + T::Currency::reserve(&who, deposit)?; + Calls::::insert(&hash, (data, who, deposit)); + Ok(()) + } + + /// Attempt to decode and return the call, provided by the user or from storage. + fn get_call(hash: &[u8; 32], maybe_known: Option<&[u8]>) -> Option<::Call> { + maybe_known.map_or_else(|| { + Calls::::get(hash).and_then(|(data, ..)| { + Decode::decode(&mut &data[..]).ok() + }) + }, |data| { + Decode::decode(&mut &data[..]).ok() + }) + } + + /// Attempt to remove a call from storage, returning any deposit on it to the owner. + fn clear_call(hash: &[u8; 32]) { + if let Some((_, who, deposit)) = Calls::::take(hash) { + T::Currency::unreserve(&who, deposit); + } + } + /// The current `Timepoint`. pub fn timepoint() -> Timepoint { Timepoint { @@ -541,3 +696,13 @@ impl Module { Ok(signatories) } } + +/// Return the weight of a dispatch call result as an `Option`. +/// +/// Will return the weight regardless of what the state of the result is. 
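// A worked example of the storage deposit taken by `store_call_and_reserve` above:
// one `DepositFactor` unit per started 32-byte chunk of the encoded call, on top of
// `DepositBase` (constants again hypothetical).
fn main() {
    let deposit_base: u128 = 1_000_000_000;  // hypothetical DepositBase
    let deposit_factor: u128 = 100_000_000;  // hypothetical DepositFactor
    let other_deposit: u128 = 0;
    let data_len: usize = 100;               // a 100-byte encoded call
    let chunks = ((data_len + 31) / 32) as u128;
    assert_eq!(chunks, 4);                   // 100 bytes round up to 4 chunks
    assert_eq!(other_deposit + deposit_base + deposit_factor * chunks, 1_400_000_000);
}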
+fn get_result_weight(result: DispatchResultWithPostInfo) -> Option { + match result { + Ok(post_info) => post_info.actual_weight, + Err(err) => err.post_info.actual_weight, + } +} diff --git a/frame/multisig/src/tests.rs b/frame/multisig/src/tests.rs index 4b1395476d..4911ca90cf 100644 --- a/frame/multisig/src/tests.rs +++ b/frame/multisig/src/tests.rs @@ -156,24 +156,79 @@ fn multisig_deposit_is_taken_and_returned() { assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, call.clone())); + let call = Call::Balances(BalancesCall::transfer(6, 15)); + let call_weight = call.get_dispatch_info().weight; + let data = call.encode(); + assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, data.clone(), false, 0)); assert_eq!(Balances::free_balance(1), 2); assert_eq!(Balances::reserved_balance(1), 3); - assert_ok!(Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), call)); + assert_ok!(Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), data, false, call_weight)); assert_eq!(Balances::free_balance(1), 5); assert_eq!(Balances::reserved_balance(1), 0); }); } +#[test] +fn multisig_deposit_is_taken_and_returned_with_call_storage() { + new_test_ext().execute_with(|| { + let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + + let call = Call::Balances(BalancesCall::transfer(6, 15)); + let call_weight = call.get_dispatch_info().weight; + let data = call.encode(); + let hash = blake2_256(&data); + assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, data, true, 0)); + assert_eq!(Balances::free_balance(1), 0); + assert_eq!(Balances::reserved_balance(1), 5); + + assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), hash, call_weight)); + assert_eq!(Balances::free_balance(1), 5); + assert_eq!(Balances::reserved_balance(1), 0); + }); +} + +#[test] +fn multisig_deposit_is_taken_and_returned_with_alt_call_storage() { + new_test_ext().execute_with(|| { + let multi = Multisig::multi_account_id(&[1, 2, 3][..], 3); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + + let call = Call::Balances(BalancesCall::transfer(6, 15)); + let call_weight = call.get_dispatch_info().weight; + let data = call.encode(); + let hash = blake2_256(&data); + + assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone(), 0)); + assert_eq!(Balances::free_balance(1), 1); + assert_eq!(Balances::reserved_balance(1), 4); + + assert_ok!(Multisig::as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), data, true, 0)); + assert_eq!(Balances::free_balance(2), 3); + assert_eq!(Balances::reserved_balance(2), 2); + assert_eq!(Balances::free_balance(1), 1); + assert_eq!(Balances::reserved_balance(1), 4); + + assert_ok!(Multisig::approve_as_multi(Origin::signed(3), 3, vec![1, 2], Some(now()), hash, call_weight)); + assert_eq!(Balances::free_balance(1), 5); + assert_eq!(Balances::reserved_balance(1), 0); + assert_eq!(Balances::free_balance(2), 5); + assert_eq!(Balances::reserved_balance(2), 0); 
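// A dependency-free analogue of `get_result_weight` above, to make the point of the
// helper explicit: the post-dispatch weight is read from both the success and the
// error branch. The structs are stand-ins, not the frame_support types.
struct PostInfo { actual_weight: Option<u64> }
struct ErrWithPostInfo { post_info: PostInfo }

fn result_weight(result: Result<PostInfo, ErrWithPostInfo>) -> Option<u64> {
    match result {
        Ok(post_info) => post_info.actual_weight,
        Err(err) => err.post_info.actual_weight,
    }
}

fn main() {
    assert_eq!(result_weight(Ok(PostInfo { actual_weight: Some(7) })), Some(7));
    let err = ErrWithPostInfo { post_info: PostInfo { actual_weight: None } };
    assert_eq!(result_weight(Err(err)), None);
}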
+ }); +} + #[test] fn cancel_multisig_returns_deposit() { new_test_ext().execute_with(|| { - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - let hash = call.using_encoded(blake2_256); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone())); - assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone())); + let call = Call::Balances(BalancesCall::transfer(6, 15)).encode(); + let hash = blake2_256(&call); + assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone(), 0)); + assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone(), 0)); assert_eq!(Balances::free_balance(1), 6); assert_eq!(Balances::reserved_balance(1), 4); assert_ok!( @@ -192,28 +247,48 @@ fn timepoint_checking_works() { assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - let hash = call.using_encoded(blake2_256); + let call = Call::Balances(BalancesCall::transfer(6, 15)).encode(); + let hash = blake2_256(&call); assert_noop!( - Multisig::approve_as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), hash.clone()), + Multisig::approve_as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), hash.clone(), 0), Error::::UnexpectedTimepoint, ); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash)); + assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash, 0)); assert_noop!( - Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], None, call.clone()), + Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], None, call.clone(), false, 0), Error::::NoTimepoint, ); let later = Timepoint { index: 1, .. 
now() }; assert_noop!( - Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], Some(later), call.clone()), + Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], Some(later), call.clone(), false, 0), Error::::WrongTimepoint, ); }); } +#[test] +fn multisig_2_of_3_works_with_call_storing() { + new_test_ext().execute_with(|| { + let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + + let call = Call::Balances(BalancesCall::transfer(6, 15)); + let call_weight = call.get_dispatch_info().weight; + let data = call.encode(); + let hash = blake2_256(&data); + assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, data, true, 0)); + assert_eq!(Balances::free_balance(6), 0); + + assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), hash, call_weight)); + assert_eq!(Balances::free_balance(6), 15); + }); +} + #[test] fn multisig_2_of_3_works() { new_test_ext().execute_with(|| { @@ -222,12 +297,14 @@ fn multisig_2_of_3_works() { assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - let hash = call.using_encoded(blake2_256); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash)); + let call = Call::Balances(BalancesCall::transfer(6, 15)); + let call_weight = call.get_dispatch_info().weight; + let data = call.encode(); + let hash = blake2_256(&data); + assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash, 0)); assert_eq!(Balances::free_balance(6), 0); - assert_ok!(Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), call)); + assert_ok!(Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), data, false, call_weight)); assert_eq!(Balances::free_balance(6), 15); }); } @@ -240,13 +317,15 @@ fn multisig_3_of_3_works() { assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - let hash = call.using_encoded(blake2_256); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone())); - assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone())); + let call = Call::Balances(BalancesCall::transfer(6, 15)); + let call_weight = call.get_dispatch_info().weight; + let data = call.encode(); + let hash = blake2_256(&data); + assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone(), 0)); + assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone(), 0)); assert_eq!(Balances::free_balance(6), 0); - assert_ok!(Multisig::as_multi(Origin::signed(3), 3, vec![1, 2], Some(now()), call)); + assert_ok!(Multisig::as_multi(Origin::signed(3), 3, vec![1, 2], Some(now()), data, false, call_weight)); assert_eq!(Balances::free_balance(6), 15); }); } @@ -254,10 +333,10 @@ fn multisig_3_of_3_works() { #[test] fn cancel_multisig_works() { new_test_ext().execute_with(|| { - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - let hash = call.using_encoded(blake2_256); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone())); - 
assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone())); + let call = Call::Balances(BalancesCall::transfer(6, 15)).encode(); + let hash = blake2_256(&call); + assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone(), 0)); + assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone(), 0)); assert_noop!( Multisig::cancel_as_multi(Origin::signed(2), 3, vec![1, 3], now(), hash.clone()), Error::::NotOwner, @@ -268,6 +347,40 @@ fn cancel_multisig_works() { }); } +#[test] +fn cancel_multisig_with_call_storage_works() { + new_test_ext().execute_with(|| { + let call = Call::Balances(BalancesCall::transfer(6, 15)).encode(); + let hash = blake2_256(&call); + assert_ok!(Multisig::as_multi(Origin::signed(1), 3, vec![2, 3], None, call, true, 0)); + assert_eq!(Balances::free_balance(1), 4); + assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone(), 0)); + assert_noop!( + Multisig::cancel_as_multi(Origin::signed(2), 3, vec![1, 3], now(), hash.clone()), + Error::::NotOwner, + ); + assert_ok!( + Multisig::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash.clone()), + ); + assert_eq!(Balances::free_balance(1), 10); + }); +} + +#[test] +fn cancel_multisig_with_alt_call_storage_works() { + new_test_ext().execute_with(|| { + let call = Call::Balances(BalancesCall::transfer(6, 15)).encode(); + let hash = blake2_256(&call); + assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone(), 0)); + assert_eq!(Balances::free_balance(1), 6); + assert_ok!(Multisig::as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), call, true, 0)); + assert_eq!(Balances::free_balance(2), 8); + assert_ok!(Multisig::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash)); + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + }); +} + #[test] fn multisig_2_of_3_as_multi_works() { new_test_ext().execute_with(|| { @@ -276,11 +389,13 @@ fn multisig_2_of_3_as_multi_works() { assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, call.clone())); + let call = Call::Balances(BalancesCall::transfer(6, 15)); + let call_weight = call.get_dispatch_info().weight; + let data = call.encode(); + assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, data.clone(), false, 0)); assert_eq!(Balances::free_balance(6), 0); - assert_ok!(Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), call)); + assert_ok!(Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), data, false, call_weight)); assert_eq!(Balances::free_balance(6), 15); }); } @@ -293,13 +408,17 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - let call1 = Box::new(Call::Balances(BalancesCall::transfer(6, 10))); - let call2 = Box::new(Call::Balances(BalancesCall::transfer(7, 5))); + let call1 = Call::Balances(BalancesCall::transfer(6, 10)); + let call1_weight = call1.get_dispatch_info().weight; + let data1 = call1.encode(); + let call2 = Call::Balances(BalancesCall::transfer(7, 5)); + let call2_weight = call2.get_dispatch_info().weight; + let data2 = call2.encode(); - 
assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, call1.clone())); - assert_ok!(Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], None, call2.clone())); - assert_ok!(Multisig::as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), call2)); - assert_ok!(Multisig::as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), call1)); + assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, data1.clone(), false, 0)); + assert_ok!(Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], None, data2.clone(), false, 0)); + assert_ok!(Multisig::as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), data1, false, call1_weight)); + assert_ok!(Multisig::as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), data2, false, call2_weight)); assert_eq!(Balances::free_balance(6), 10); assert_eq!(Balances::free_balance(7), 5); @@ -314,26 +433,33 @@ fn multisig_2_of_3_cannot_reissue_same_call() { assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 10))); - assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, call.clone())); - assert_ok!(Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), call.clone())); + let call = Call::Balances(BalancesCall::transfer(6, 10)); + let call_weight = call.get_dispatch_info().weight; + let data = call.encode(); + let hash = blake2_256(&data); + assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, data.clone(), false, 0)); + assert_ok!(Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), data.clone(), false, call_weight)); assert_eq!(Balances::free_balance(multi), 5); - assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, call.clone())); - assert_ok!(Multisig::as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), call.clone())); + assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, data.clone(), false, 0)); + assert_ok!(Multisig::as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), data.clone(), false, call_weight)); let err = DispatchError::from(BalancesError::::InsufficientBalance).stripped(); - expect_event(RawEvent::MultisigExecuted(3, now(), multi, call.using_encoded(blake2_256), Err(err))); + expect_event(RawEvent::MultisigExecuted(3, now(), multi, hash, Err(err))); }); } #[test] -fn zero_threshold_fails() { +fn minimum_threshold_check_works() { new_test_ext().execute_with(|| { - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + let call = Call::Balances(BalancesCall::transfer(6, 15)).encode(); + assert_noop!( + Multisig::as_multi(Origin::signed(1), 0, vec![2], None, call.clone(), false, 0), + Error::::MinimumThreshold, + ); assert_noop!( - Multisig::as_multi(Origin::signed(1), 0, vec![2], None, call), - Error::::ZeroThreshold, + Multisig::as_multi(Origin::signed(1), 1, vec![2], None, call.clone(), false, 0), + Error::::MinimumThreshold, ); }); } @@ -341,9 +467,9 @@ fn zero_threshold_fails() { #[test] fn too_many_signatories_fails() { new_test_ext().execute_with(|| { - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + let call = Call::Balances(BalancesCall::transfer(6, 15)).encode(); assert_noop!( - Multisig::as_multi(Origin::signed(1), 2, vec![2, 3, 4], None, call.clone()), + Multisig::as_multi(Origin::signed(1), 2, vec![2, 3, 4], None, call.clone(), false, 0), Error::::TooManySignatories, ); }); @@ -352,17 +478,17 @@ fn too_many_signatories_fails() { #[test] fn 
duplicate_approvals_are_ignored() { new_test_ext().execute_with(|| { - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - let hash = call.using_encoded(blake2_256); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash.clone())); + let call = Call::Balances(BalancesCall::transfer(6, 15)).encode(); + let hash = blake2_256(&call); + assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash.clone(), 0)); assert_noop!( - Multisig::approve_as_multi(Origin::signed(1), 2, vec![2, 3], Some(now()), hash.clone()), + Multisig::approve_as_multi(Origin::signed(1), 2, vec![2, 3], Some(now()), hash.clone(), 0), Error::::AlreadyApproved, ); - assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), hash.clone())); + assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), hash.clone(), 0)); assert_noop!( - Multisig::approve_as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), hash.clone()), - Error::::NoApprovalsNeeded, + Multisig::approve_as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), hash.clone(), 0), + Error::::AlreadyApproved, ); }); } @@ -375,17 +501,18 @@ fn multisig_1_of_3_works() { assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - let hash = call.using_encoded(blake2_256); + let call = Call::Balances(BalancesCall::transfer(6, 15)).encode(); + let hash = blake2_256(&call); assert_noop!( - Multisig::approve_as_multi(Origin::signed(1), 1, vec![2, 3], None, hash.clone()), - Error::::NoApprovalsNeeded, + Multisig::approve_as_multi(Origin::signed(1), 1, vec![2, 3], None, hash.clone(), 0), + Error::::MinimumThreshold, ); assert_noop!( - Multisig::as_multi(Origin::signed(4), 1, vec![2, 3], None, call.clone()), - BalancesError::::InsufficientBalance, + Multisig::as_multi(Origin::signed(1), 1, vec![2, 3], None, call.clone(), false, 0), + Error::::MinimumThreshold, ); - assert_ok!(Multisig::as_multi(Origin::signed(1), 1, vec![2, 3], None, call)); + let boxed_call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + assert_ok!(Multisig::as_multi_threshold_1(Origin::signed(1), vec![2, 3], boxed_call)); assert_eq!(Balances::free_balance(6), 15); }); @@ -396,8 +523,52 @@ fn multisig_filters() { new_test_ext().execute_with(|| { let call = Box::new(Call::System(frame_system::Call::set_code(vec![]))); assert_noop!( - Multisig::as_multi(Origin::signed(1), 1, vec![2], None, call.clone()), + Multisig::as_multi_threshold_1(Origin::signed(1), vec![2], call.clone()), DispatchError::BadOrigin, ); }); } + +#[test] +fn weight_check_works() { + new_test_ext().execute_with(|| { + let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + + let call = Call::Balances(BalancesCall::transfer(6, 15)); + let data = call.encode(); + assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, data.clone(), false, 0)); + assert_eq!(Balances::free_balance(6), 0); + + assert_noop!( + Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), data, false, 0), + Error::::WeightTooLow, + ); + }); +} + +#[test] +fn multisig_handles_no_preimage_after_all_approve() { + // This test checks the situation where everyone approves a multi-sig, but no-one 
provides the call data. + // In the end, any of the multisig callers can approve again with the call data and the call will go through. + new_test_ext().execute_with(|| { + let multi = Multisig::multi_account_id(&[1, 2, 3][..], 3); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + + let call = Call::Balances(BalancesCall::transfer(6, 15)); + let call_weight = call.get_dispatch_info().weight; + let data = call.encode(); + let hash = blake2_256(&data); + assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone(), 0)); + assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone(), 0)); + assert_ok!(Multisig::approve_as_multi(Origin::signed(3), 3, vec![1, 2], Some(now()), hash.clone(), 0)); + assert_eq!(Balances::free_balance(6), 0); + + assert_ok!(Multisig::as_multi(Origin::signed(3), 3, vec![1, 2], Some(now()), data, false, call_weight)); + assert_eq!(Balances::free_balance(6), 15); + }); +} -- GitLab From 0c42cedaac0b1bf3a608031ee3e494b51bfaa0fe Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Wed, 17 Jun 2020 15:20:17 +0200 Subject: [PATCH 008/144] Fix the broken weight multiplier update function (#6334) * Initial draft, has some todos left * remove ununsed import * Apply suggestions from code review * Some refactors with migration * Fix more test and cleanup * Fix for companion * Apply suggestions from code review Co-authored-by: Alexander Popiak * Update bin/node/runtime/src/impls.rs * Fix weight * Add integrity test * length is not affected. Co-authored-by: Alexander Popiak --- Cargo.lock | 1 + bin/node/executor/tests/basic.rs | 55 ++--- bin/node/executor/tests/fees.rs | 12 +- bin/node/runtime/src/impls.rs | 294 +++++++++++----------- bin/node/runtime/src/lib.rs | 21 +- frame/balances/src/tests.rs | 7 +- frame/system/src/lib.rs | 2 +- frame/transaction-payment/Cargo.toml | 2 + frame/transaction-payment/src/lib.rs | 296 +++++++++++++++++++---- primitives/arithmetic/src/fixed_point.rs | 17 ++ 10 files changed, 456 insertions(+), 251 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4761c859f8..aeacd6e353 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4587,6 +4587,7 @@ dependencies = [ "pallet-balances", "pallet-transaction-payment-rpc-runtime-api", "parity-scale-codec", + "serde", "smallvec 1.4.0", "sp-core", "sp-io", diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index 2bb444b47c..e4de98d90e 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -19,14 +19,11 @@ use codec::{Encode, Decode, Joiner}; use frame_support::{ StorageValue, StorageMap, traits::Currency, - weights::{ - GetDispatchInfo, DispatchInfo, DispatchClass, constants::ExtrinsicBaseWeight, - WeightToFeePolynomial, - }, + weights::{GetDispatchInfo, DispatchInfo, DispatchClass}, }; use sp_core::{NeverNativeValue, traits::Externalities, storage::well_known_keys}; use sp_runtime::{ - ApplyExtrinsicResult, FixedI128, FixedPointNumber, + ApplyExtrinsicResult, traits::Hash as HashT, transaction_validity::InvalidTransaction, }; @@ -35,7 +32,7 @@ use frame_system::{self, EventRecord, Phase}; use node_runtime::{ Header, Block, UncheckedExtrinsic, CheckedExtrinsic, Call, Runtime, Balances, - System, TransactionPayment, Event, TransactionByteFee, + System, TransactionPayment, Event, constants::currency::*, }; use 
node_primitives::{Balance, Hash}; @@ -52,16 +49,17 @@ use self::common::{*, sign}; /// test code paths that differ between native and wasm versions. pub const BLOATY_CODE: &[u8] = node_runtime::WASM_BINARY_BLOATY; -/// Default transfer fee -fn transfer_fee(extrinsic: &E, fee_multiplier: FixedI128) -> Balance { - let length_fee = TransactionByteFee::get() * (extrinsic.encode().len() as Balance); - - let base_weight = ExtrinsicBaseWeight::get(); - let base_fee = ::WeightToFee::calc(&base_weight); - let weight = default_transfer_call().get_dispatch_info().weight; - let weight_fee = ::WeightToFee::calc(&weight); - - base_fee + fee_multiplier.saturating_mul_acc_int(length_fee + weight_fee) +/// Default transfer fee. This will use the same logic that is implemented in transaction-payment module. +/// +/// Note that reads the multiplier from storage directly, hence to get the fee of `extrinsic` +/// at block `n`, it must be called prior to executing block `n` to do the calculation with the +/// correct multiplier. +fn transfer_fee(extrinsic: &E) -> Balance { + TransactionPayment::compute_fee( + extrinsic.encode().len() as u32, + &default_transfer_call().get_dispatch_info(), + 0, + ) } fn xt() -> UncheckedExtrinsic { @@ -242,7 +240,7 @@ fn successful_execution_with_native_equivalent_code_gives_ok() { ).0; assert!(r.is_ok()); - let fm = t.execute_with(TransactionPayment::next_fee_multiplier); + let fees = t.execute_with(|| transfer_fee(&xt())); let r = executor_call:: _>( &mut t, @@ -254,7 +252,6 @@ fn successful_execution_with_native_equivalent_code_gives_ok() { assert!(r.is_ok()); t.execute_with(|| { - let fees = transfer_fee(&xt(), fm); assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); assert_eq!(Balances::total_balance(&bob()), 69 * DOLLARS); }); @@ -286,7 +283,7 @@ fn successful_execution_with_foreign_code_gives_ok() { ).0; assert!(r.is_ok()); - let fm = t.execute_with(TransactionPayment::next_fee_multiplier); + let fees = t.execute_with(|| transfer_fee(&xt())); let r = executor_call:: _>( &mut t, @@ -298,7 +295,6 @@ fn successful_execution_with_foreign_code_gives_ok() { assert!(r.is_ok()); t.execute_with(|| { - let fees = transfer_fee(&xt(), fm); assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); assert_eq!(Balances::total_balance(&bob()), 69 * DOLLARS); }); @@ -311,7 +307,7 @@ fn full_native_block_import_works() { let (block1, block2) = blocks(); let mut alice_last_known_balance: Balance = Default::default(); - let mut fm = t.execute_with(TransactionPayment::next_fee_multiplier); + let mut fees = t.execute_with(|| transfer_fee(&xt())); executor_call:: _>( &mut t, @@ -322,7 +318,6 @@ fn full_native_block_import_works() { ).0.unwrap(); t.execute_with(|| { - let fees = transfer_fee(&xt(), fm); assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); assert_eq!(Balances::total_balance(&bob()), 169 * DOLLARS); alice_last_known_balance = Balances::total_balance(&alice()); @@ -361,7 +356,7 @@ fn full_native_block_import_works() { assert_eq!(System::events(), events); }); - fm = t.execute_with(TransactionPayment::next_fee_multiplier); + fees = t.execute_with(|| transfer_fee(&xt())); executor_call:: _>( &mut t, @@ -372,7 +367,6 @@ fn full_native_block_import_works() { ).0.unwrap(); t.execute_with(|| { - let fees = transfer_fee(&xt(), fm); assert_eq!( Balances::total_balance(&alice()), alice_last_known_balance - 10 * DOLLARS - fees, @@ -450,7 +444,7 @@ fn full_wasm_block_import_works() { let (block1, block2) = blocks(); let mut alice_last_known_balance: 
Balance = Default::default(); - let mut fm = t.execute_with(TransactionPayment::next_fee_multiplier); + let mut fees = t.execute_with(|| transfer_fee(&xt())); executor_call:: _>( &mut t, @@ -461,12 +455,12 @@ fn full_wasm_block_import_works() { ).0.unwrap(); t.execute_with(|| { - assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - transfer_fee(&xt(), fm)); + assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); assert_eq!(Balances::total_balance(&bob()), 169 * DOLLARS); alice_last_known_balance = Balances::total_balance(&alice()); }); - fm = t.execute_with(TransactionPayment::next_fee_multiplier); + fees = t.execute_with(|| transfer_fee(&xt())); executor_call:: _>( &mut t, @@ -479,11 +473,11 @@ fn full_wasm_block_import_works() { t.execute_with(|| { assert_eq!( Balances::total_balance(&alice()), - alice_last_known_balance - 10 * DOLLARS - transfer_fee(&xt(), fm), + alice_last_known_balance - 10 * DOLLARS - fees, ); assert_eq!( Balances::total_balance(&bob()), - 179 * DOLLARS - 1 * transfer_fee(&xt(), fm), + 179 * DOLLARS - 1 * fees, ); }); } @@ -755,7 +749,7 @@ fn successful_execution_gives_ok() { assert_eq!(Balances::total_balance(&alice()), 111 * DOLLARS); }); - let fm = t.execute_with(TransactionPayment::next_fee_multiplier); + let fees = t.execute_with(|| transfer_fee(&xt())); let r = executor_call:: _>( &mut t, @@ -770,7 +764,6 @@ fn successful_execution_gives_ok() { .expect("Extrinsic failed"); t.execute_with(|| { - let fees = transfer_fee(&xt(), fm); assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); assert_eq!(Balances::total_balance(&bob()), 69 * DOLLARS); }); diff --git a/bin/node/executor/tests/fees.rs b/bin/node/executor/tests/fees.rs index 280408357e..8f828263c5 100644 --- a/bin/node/executor/tests/fees.rs +++ b/bin/node/executor/tests/fees.rs @@ -22,9 +22,9 @@ use frame_support::{ weights::{GetDispatchInfo, constants::ExtrinsicBaseWeight, IdentityFee, WeightToFeePolynomial}, }; use sp_core::NeverNativeValue; -use sp_runtime::{FixedPointNumber, FixedI128, Perbill}; +use sp_runtime::{Perbill, FixedPointNumber}; use node_runtime::{ - CheckedExtrinsic, Call, Runtime, Balances, TransactionPayment, + CheckedExtrinsic, Call, Runtime, Balances, TransactionPayment, Multiplier, TransactionByteFee, constants::currency::*, }; @@ -38,8 +38,8 @@ use self::common::{*, sign}; fn fee_multiplier_increases_and_decreases_on_big_weight() { let mut t = new_test_ext(COMPACT_CODE, false); - // initial fee multiplier must be zero - let mut prev_multiplier = FixedI128::from_inner(0); + // initial fee multiplier must be one. + let mut prev_multiplier = Multiplier::one(); t.execute_with(|| { assert_eq!(TransactionPayment::next_fee_multiplier(), prev_multiplier); @@ -59,7 +59,7 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { }, CheckedExtrinsic { signed: Some((charlie(), signed_extra(0, 0))), - function: Call::System(frame_system::Call::fill_block(Perbill::from_percent(90))), + function: Call::System(frame_system::Call::fill_block(Perbill::from_percent(60))), } ] ); @@ -122,7 +122,7 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { } #[test] -fn transaction_fee_is_correct_ultimate() { +fn transaction_fee_is_correct() { // This uses the exact values of substrate-node. // // weight of transfer call as of now: 1_000_000 diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index c8f42f3f26..039093ddee 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ -18,11 +18,9 @@ //! 
Some configurable implementations as associated type for the substrate runtime. use node_primitives::Balance; -use sp_runtime::traits::{Convert, Saturating}; -use sp_runtime::{FixedPointNumber, Perquintill}; -use frame_support::traits::{OnUnbalanced, Currency, Get}; -use pallet_transaction_payment::Multiplier; -use crate::{Balances, System, Authorship, MaximumBlockWeight, NegativeImbalance}; +use sp_runtime::traits::Convert; +use frame_support::traits::{OnUnbalanced, Currency}; +use crate::{Balances, Authorship, NegativeImbalance}; pub struct Author; impl OnUnbalanced for Author { @@ -47,89 +45,63 @@ impl Convert for CurrencyToVoteHandler { fn convert(x: u128) -> Balance { x * Self::factor() } } -/// Update the given multiplier based on the following formula -/// -/// diff = (previous_block_weight - target_weight)/max_weight -/// v = 0.00004 -/// next_weight = weight * (1 + (v * diff) + (v * diff)^2 / 2) -/// -/// Where `target_weight` must be given as the `Get` implementation of the `T` generic type. -/// https://research.web3.foundation/en/latest/polkadot/Token%20Economics/#relay-chain-transaction-fees -pub struct TargetedFeeAdjustment(sp_std::marker::PhantomData); - -impl> Convert for TargetedFeeAdjustment { - fn convert(multiplier: Multiplier) -> Multiplier { - let max_weight = MaximumBlockWeight::get(); - let block_weight = System::block_weight().total().min(max_weight); - let target_weight = (T::get() * max_weight) as u128; - let block_weight = block_weight as u128; - - // determines if the first_term is positive - let positive = block_weight >= target_weight; - let diff_abs = block_weight.max(target_weight) - block_weight.min(target_weight); - // safe, diff_abs cannot exceed u64. - let diff = Multiplier::saturating_from_rational(diff_abs, max_weight.max(1)); - let diff_squared = diff.saturating_mul(diff); - - // 0.00004 = 4/100_000 = 40_000/10^9 - let v = Multiplier::saturating_from_rational(4, 100_000); - // 0.00004^2 = 16/10^10 Taking the future /2 into account... 8/10^10 - let v_squared_2 = Multiplier::saturating_from_rational(8, 10_000_000_000u64); - - let first_term = v.saturating_mul(diff); - let second_term = v_squared_2.saturating_mul(diff_squared); - - if positive { - // Note: this is merely bounded by how big the multiplier and the inner value can go, - // not by any economical reasoning. - let excess = first_term.saturating_add(second_term); - multiplier.saturating_add(excess) - } else { - // Defensive-only: first_term > second_term. Safe subtraction. - let negative = first_term.saturating_sub(second_term); - multiplier.saturating_sub(negative) - // despite the fact that apply_to saturates weight (final fee cannot go below 0) - // it is crucially important to stop here and don't further reduce the weight fee - // multiplier. While at -1, it means that the network is so un-congested that all - // transactions have no weight fee. We stop here and only increase if the network - // became more busy. 
- .max(Multiplier::saturating_from_integer(-1)) - } - } -} - #[cfg(test)] -mod tests { +mod multiplier_tests { use super::*; - use sp_runtime::assert_eq_error_rate; - use crate::{MaximumBlockWeight, AvailableBlockRatio, Runtime}; - use crate::{constants::currency::*, TransactionPayment, TargetBlockFullness}; + use sp_runtime::{assert_eq_error_rate, FixedPointNumber}; + use pallet_transaction_payment::{Multiplier, TargetedFeeAdjustment}; + + use crate::{ + constants::{currency::*, time::*}, + TransactionPayment, MaximumBlockWeight, AvailableBlockRatio, Runtime, TargetBlockFullness, + AdjustmentVariable, System, MinimumMultiplier, + }; use frame_support::weights::{Weight, WeightToFeePolynomial}; fn max() -> Weight { - MaximumBlockWeight::get() + AvailableBlockRatio::get() * MaximumBlockWeight::get() + } + + fn min_multiplier() -> Multiplier { + MinimumMultiplier::get() } fn target() -> Weight { TargetBlockFullness::get() * max() } - // poc reference implementation. - fn fee_multiplier_update(block_weight: Weight, previous: Multiplier) -> Multiplier { + // update based on runtime impl. + fn runtime_multiplier_update(fm: Multiplier) -> Multiplier { + TargetedFeeAdjustment::< + Runtime, + TargetBlockFullness, + AdjustmentVariable, + MinimumMultiplier, + >::convert(fm) + } + + // update based on reference impl. + fn truth_value_update(block_weight: Weight, previous: Multiplier) -> Multiplier { + let accuracy = Multiplier::accuracy() as f64; + let previous_float = previous.into_inner() as f64 / accuracy; + // bump if it is zero. + let previous_float = previous_float.max(min_multiplier().into_inner() as f64 / accuracy); + // maximum tx weight let m = max() as f64; // block weight always truncated to max weight let block_weight = (block_weight as f64).min(m); - let v: f64 = 0.00004; + let v: f64 = AdjustmentVariable::get().to_fraction(); // Ideal saturation in terms of weight let ss = target() as f64; // Current saturation in terms of weight let s = block_weight; - let fm = v * (s/m - ss/m) + v.powi(2) * (s/m - ss/m).powi(2) / 2.0; - let addition_fm = Multiplier::from_inner((fm * Multiplier::accuracy() as f64).round() as i128); - previous.saturating_add(addition_fm) + let t1 = v * (s/m - ss/m); + let t2 = v.powi(2) * (s/m - ss/m).powi(2) / 2.0; + let next_float = previous_float * (1.0 + t1 + t2); + Multiplier::from_fraction(next_float) } fn run_with_system_weight(w: Weight, assertions: F) where F: Fn() -> () { @@ -142,11 +114,12 @@ mod tests { } #[test] - fn fee_multiplier_update_poc_works() { - let fm = Multiplier::saturating_from_rational(0, 1); + fn truth_value_update_poc_works() { + let fm = Multiplier::saturating_from_rational(1, 2); let test_set = vec![ (0, fm.clone()), (100, fm.clone()), + (1000, fm.clone()), (target(), fm.clone()), (max() / 2, fm.clone()), (max(), fm.clone()), @@ -154,37 +127,71 @@ mod tests { test_set.into_iter().for_each(|(w, fm)| { run_with_system_weight(w, || { assert_eq_error_rate!( - fee_multiplier_update(w, fm), - TargetedFeeAdjustment::::convert(fm), - // Error is only 1 in 10^18 - Multiplier::from_inner(1), + truth_value_update(w, fm), + runtime_multiplier_update(fm), + // Error is only 1 in 100^18 + Multiplier::from_inner(100), ); }) }) } #[test] - fn empty_chain_simulation() { - // just a few txs per_block. - let block_weight = 0; - run_with_system_weight(block_weight, || { - let mut fm = Multiplier::default(); + fn multiplier_can_grow_from_zero() { + // if the min is too small, then this will not change, and we are doomed forever. 
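// A plain-float walk-through of the reference update used by `truth_value_update`
// above. The numbers are illustrative assumptions (the real values come from the
// runtime's AdjustmentVariable, TargetBlockFullness and block weight limits).
fn main() {
    let v: f64 = 0.00001;                   // assumed adjustment variable
    let m: f64 = 2_000_000_000_000.0;       // assumed maximum (normal) block weight
    let ss: f64 = 0.25 * m;                 // assumed target fullness: 25%
    let s: f64 = 0.50 * m;                  // this block was half full
    let prev: f64 = 1.0;

    let t1 = v * (s / m - ss / m);
    let t2 = v.powi(2) * (s / m - ss / m).powi(2) / 2.0;
    let next = prev * (1.0 + t1 + t2);

    // A block above target nudges the multiplier up by roughly v * 0.25 per block.
    assert!(next > prev && next < 1.00001);
    println!("{:.12}", next); // ~1.0000025
}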
+ // the weight is 1/100th bigger than target. + run_with_system_weight(target() * 101 / 100, || { + let next = runtime_multiplier_update(min_multiplier()); + assert!(next > min_multiplier(), "{:?} !>= {:?}", next, min_multiplier()); + }) + } + + #[test] + fn multiplier_cannot_go_below_limit() { + // will not go any further below even if block is empty. + run_with_system_weight(0, || { + let next = runtime_multiplier_update(min_multiplier()); + assert_eq!(next, min_multiplier()); + }) + } + + #[test] + fn time_to_reach_zero() { + // blocks per 24h in substrate-node: 28,800 (k) + // s* = 0.1875 + // The bound from the research in an empty chain is: + // v <~ (p / k(0 - s*)) + // p > v * k * -0.1875 + // to get p == -1 we'd need + // -1 > 0.00001 * k * -0.1875 + // 1 < 0.00001 * k * 0.1875 + // 10^9 / 1875 < k + // k > 533_333 ~ 18,5 days. + run_with_system_weight(0, || { + // start from 1, the default. + let mut fm = Multiplier::one(); let mut iterations: u64 = 0; loop { - let next = TargetedFeeAdjustment::::convert(fm); + let next = runtime_multiplier_update(fm); fm = next; - if fm == Multiplier::saturating_from_integer(-1) { break; } + if fm == min_multiplier() { break; } iterations += 1; } - println!("iteration {}, new fm = {:?}. Weight fee is now zero", iterations, fm); - assert!(iterations > 50_000, "This assertion is just a warning; Don't panic. \ - Current substrate/polkadot node are configured with a _slow adjusting fee_ \ - mechanism. Hence, it is really unlikely that fees collapse to zero even on an \ - empty chain in less than at least of couple of thousands of empty blocks. But this \ - simulation indicates that fees collapsed to zero after {} almost-empty blocks. \ - Check it", - iterations, - ); + assert!(iterations > 533_333); + }) + } + + #[test] + fn min_change_per_day() { + run_with_system_weight(max(), || { + let mut fm = Multiplier::one(); + // See the example in the doc of `TargetedFeeAdjustment`. are at least 0.234, hence + // `fm > 1.234`. + for _ in 0..DAYS { + let next = runtime_multiplier_update(fm); + fm = next; + } + assert!(fm > Multiplier::saturating_from_rational(1234, 1000)); }) } @@ -196,17 +203,17 @@ mod tests { // almost full. The entire quota of normal transactions is taken. let block_weight = AvailableBlockRatio::get() * max() - 100; - // Default substrate minimum. - let tx_weight = 10_000; + // Default substrate weight. + let tx_weight = frame_support::weights::constants::ExtrinsicBaseWeight::get(); run_with_system_weight(block_weight, || { // initial value configured on module - let mut fm = Multiplier::default(); + let mut fm = Multiplier::one(); assert_eq!(fm, TransactionPayment::next_fee_multiplier()); let mut iterations: u64 = 0; loop { - let next = TargetedFeeAdjustment::::convert(fm); + let next = runtime_multiplier_update(fm); // if no change, panic. This should never happen in this case. if fm == next { panic!("The fee should ever increase"); } fm = next; @@ -230,95 +237,86 @@ mod tests { #[test] fn stateless_weight_mul() { - // This test will show that heavy blocks have a weight multiplier greater than 0 - // and light blocks will have a weight multiplier less than 0. + let fm = Multiplier::saturating_from_rational(1, 2); run_with_system_weight(target() / 4, || { - // `fee_multiplier_update` is enough as it is the absolute truth value. 
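// A quick arithmetic check of the bound quoted in `time_to_reach_zero` above: with
// v = 0.00001 and s* = 0.1875, collapsing from 1 to the floor needs more than
// 10^9 / 1875 ≈ 533_333 empty blocks, i.e. about 18.5 days at 28_800 blocks per day.
fn main() {
    let blocks_per_day = 28_800u64;
    let k = 1_000_000_000u64 / 1_875;
    assert_eq!(k, 533_333);
    let days = k as f64 / blocks_per_day as f64;
    assert!((days - 18.5).abs() < 0.1);
}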
- let next = TargetedFeeAdjustment::::convert(Multiplier::default()); - assert_eq!( + let next = runtime_multiplier_update(fm); + assert_eq_error_rate!( next, - fee_multiplier_update(target() / 4 ,Multiplier::default()) + truth_value_update(target() / 4 , fm), + Multiplier::from_inner(100), ); - // Light block. Fee is reduced a little. - assert!(next < Multiplier::zero()) + // Light block. Multiplier is reduced a little. + assert!(next < fm); }); + run_with_system_weight(target() / 2, || { - let next = TargetedFeeAdjustment::::convert(Multiplier::default()); - assert_eq!( + let next = runtime_multiplier_update(fm); + assert_eq_error_rate!( next, - fee_multiplier_update(target() / 2 ,Multiplier::default()) + truth_value_update(target() / 2 , fm), + Multiplier::from_inner(100), ); - - // Light block. Fee is reduced a little. - assert!(next < Multiplier::zero()) + // Light block. Multiplier is reduced a little. + assert!(next < fm); }); run_with_system_weight(target(), || { - // ideal. Original fee. No changes. - let next = TargetedFeeAdjustment::::convert(Multiplier::default()); - assert_eq!(next, Multiplier::zero()) + let next = runtime_multiplier_update(fm); + assert_eq_error_rate!( + next, + truth_value_update(target(), fm), + Multiplier::from_inner(100), + ); + // ideal. No changes. + assert_eq!(next, fm) }); run_with_system_weight(target() * 2, || { // More than ideal. Fee is increased. - let next = TargetedFeeAdjustment::::convert(Multiplier::default()); - assert_eq!( + let next = runtime_multiplier_update(fm); + assert_eq_error_rate!( next, - fee_multiplier_update(target() * 2 ,Multiplier::default()) + truth_value_update(target() * 2 , fm), + Multiplier::from_inner(100), ); // Heavy block. Fee is increased a little. - assert!(next > Multiplier::zero()) + assert!(next > fm); }); } #[test] - fn stateful_weight_mul_grow_to_infinity() { + fn weight_mul_grow_on_big_block() { run_with_system_weight(target() * 2, || { - let mut original = Multiplier::default(); + let mut original = Multiplier::zero(); let mut next = Multiplier::default(); (0..1_000).for_each(|_| { - next = TargetedFeeAdjustment::::convert(original); - assert_eq!( + next = runtime_multiplier_update(original); + assert_eq_error_rate!( next, - fee_multiplier_update(target() * 2, original), + truth_value_update(target() * 2, original), + Multiplier::from_inner(100), ); // must always increase - assert!(next > original); + assert!(next > original, "{:?} !>= {:?}", next, original); original = next; }); }); } #[test] - fn stateful_weight_mil_collapse_to_minus_one() { - run_with_system_weight(0, || { - let mut original = Multiplier::default(); // 0 + fn weight_mul_decrease_on_small_block() { + run_with_system_weight(target() / 2, || { + let mut original = Multiplier::saturating_from_rational(1, 2); let mut next; - // decreases - next = TargetedFeeAdjustment::::convert(original); - assert_eq!( - next, - fee_multiplier_update(0, original), - ); - assert!(next < original); - original = next; - - // keeps decreasing - next = TargetedFeeAdjustment::::convert(original); - assert_eq!( - next, - fee_multiplier_update(0, original), - ); - assert!(next < original); - - // ... 
stops going down at -1 - assert_eq!( - TargetedFeeAdjustment::::convert(Multiplier::saturating_from_integer(-1)), - Multiplier::saturating_from_integer(-1) - ); + for _ in 0..100 { + // decreases + next = runtime_multiplier_update(original); + assert!(next < original, "{:?} !<= {:?}", next, original); + original = next; + } }) } @@ -347,8 +345,8 @@ mod tests { Weight::max_value(), ].into_iter().for_each(|i| { run_with_system_weight(i, || { - let next = TargetedFeeAdjustment::::convert(Multiplier::default()); - let truth = fee_multiplier_update(i, Multiplier::default()); + let next = runtime_multiplier_update(Multiplier::one()); + let truth = truth_value_update(i, Multiplier::one()); assert_eq_error_rate!(truth, next, Multiplier::from_inner(50_000_000)); }); }); @@ -359,7 +357,7 @@ mod tests { .into_iter() .for_each(|i| { run_with_system_weight(i, || { - let fm = TargetedFeeAdjustment::::convert(max_fm); + let fm = runtime_multiplier_update(max_fm); // won't grow. The convert saturates everything. assert_eq!(fm, max_fm); }) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index d776d72e2b..feb1b05a8e 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -44,8 +44,8 @@ pub use node_primitives::{AccountId, Signature}; use node_primitives::{AccountIndex, Balance, BlockNumber, Hash, Index, Moment}; use sp_api::impl_runtime_apis; use sp_runtime::{ - Permill, Perbill, Perquintill, Percent, PerThing, ApplyExtrinsicResult, - impl_opaque_keys, generic, create_runtime_str, ModuleId, + Permill, Perbill, Perquintill, Percent, ApplyExtrinsicResult, + impl_opaque_keys, generic, create_runtime_str, ModuleId, FixedPointNumber, }; use sp_runtime::curve::PiecewiseLinear; use sp_runtime::transaction_validity::{TransactionValidity, TransactionSource, TransactionPriority}; @@ -61,6 +61,7 @@ use pallet_grandpa::fg_primitives; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; +pub use pallet_transaction_payment::{Multiplier, TargetedFeeAdjustment}; use pallet_contracts_rpc_runtime_api::ContractExecResult; use pallet_session::{historical as pallet_session_historical}; use sp_inherents::{InherentData, CheckInherentsResult}; @@ -77,7 +78,7 @@ pub use pallet_staking::StakerStatus; /// Implementations of some helper traits passed into runtime modules as associated types. pub mod impls; -use impls::{CurrencyToVoteHandler, Author, TargetedFeeAdjustment}; +use impls::{CurrencyToVoteHandler, Author}; /// Constant values used within the runtime. pub mod constants; @@ -295,23 +296,17 @@ impl pallet_balances::Trait for Runtime { parameter_types! { pub const TransactionByteFee: Balance = 10 * MILLICENTS; pub const TargetBlockFullness: Perquintill = Perquintill::from_percent(25); + pub AdjustmentVariable: Multiplier = Multiplier::saturating_from_rational(1, 100_000); + pub MinimumMultiplier: Multiplier = Multiplier::saturating_from_rational(1, 1_000_000_000u128); } -// for a sane configuration, this should always be less than `AvailableBlockRatio`. 
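The two new parameters above are easier to read in decimal form. A small sanity sketch in plain integer math (not the runtime types), assuming `Multiplier` is the 18-decimal `FixedU128` introduced by the transaction-payment changes later in this patch:

fn main() {
    const DIV: u128 = 1_000_000_000_000_000_000; // assumed FixedU128 accuracy, 10^18

    // AdjustmentVariable = 1/100_000, i.e. v = 0.00001 in the update formula.
    assert_eq!(DIV / 100_000, 10_000_000_000_000);

    // MinimumMultiplier = 1/1_000_000_000, i.e. the multiplier is floored at
    // 10^-9, so it never reaches zero and can still grow back under congestion.
    assert_eq!(DIV / 1_000_000_000, 1_000_000_000);

    println!("v inner = {}, minimum inner = {}", DIV / 100_000, DIV / 1_000_000_000);
}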
-const_assert!( - TargetBlockFullness::get().deconstruct() < - (AvailableBlockRatio::get().deconstruct() as ::Inner) - * (::ACCURACY / ::ACCURACY as ::Inner) -); - impl pallet_transaction_payment::Trait for Runtime { type Currency = Balances; type OnTransactionPayment = DealWithFees; type TransactionByteFee = TransactionByteFee; - // In the Substrate node, a weight of 10_000_000 (smallest non-zero weight) - // is mapped to 10_000_000 units of fees, hence: type WeightToFee = IdentityFee; - type FeeMultiplierUpdate = TargetedFeeAdjustment; + type FeeMultiplierUpdate = + TargetedFeeAdjustment; } parameter_types! { diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index 2724291f14..210c75631d 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -26,6 +26,7 @@ impl sp_runtime::traits::Dispatchable for CallWithDispatchInfo { type Trait = (); type Info = frame_support::weights::DispatchInfo; type PostInfo = frame_support::weights::PostDispatchInfo; + fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { panic!("Do not use dummy implementation for dispatch."); @@ -37,7 +38,7 @@ macro_rules! decl_tests { ($test:ty, $ext_builder:ty, $existential_deposit:expr) => { use crate::*; - use sp_runtime::{FixedPointNumber, FixedI128, traits::{SignedExtension, BadOrigin}}; + use sp_runtime::{FixedPointNumber, traits::{SignedExtension, BadOrigin}}; use frame_support::{ assert_noop, assert_ok, assert_err, traits::{ @@ -45,7 +46,7 @@ macro_rules! decl_tests { Currency, ReservableCurrency, ExistenceRequirement::AllowDeath, StoredMap } }; - use pallet_transaction_payment::ChargeTransactionPayment; + use pallet_transaction_payment::{ChargeTransactionPayment, Multiplier}; use frame_system::RawOrigin; const ID_1: LockIdentifier = *b"1 "; @@ -166,7 +167,7 @@ macro_rules! decl_tests { .monied(true) .build() .execute_with(|| { - pallet_transaction_payment::NextFeeMultiplier::put(FixedI128::saturating_from_integer(1)); + pallet_transaction_payment::NextFeeMultiplier::put(Multiplier::saturating_from_integer(1)); Balances::set_lock(ID_1, &1, 10, WithdrawReason::Reserve.into()); assert_noop!( >::transfer(&1, &2, 1, AllowDeath), diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index b38b8c8a4a..b64b5d58f7 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -570,7 +570,7 @@ decl_module! { /// A dispatch that will fill the block weight up to the given ratio. // TODO: This should only be available for testing, rather than in general usage, but // that's not possible at present (since it's within the decl_module macro). 
- #[weight = (*_ratio * T::MaximumBlockWeight::get(), DispatchClass::Operational)] + #[weight = *_ratio * T::MaximumBlockWeight::get()] fn fill_block(origin, _ratio: Perbill) { ensure_root(origin)?; } diff --git a/frame/transaction-payment/Cargo.toml b/frame/transaction-payment/Cargo.toml index e1abb00cbf..a8b23bfda0 100644 --- a/frame/transaction-payment/Cargo.toml +++ b/frame/transaction-payment/Cargo.toml @@ -13,6 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +serde = { version = "1.0.101", optional = true } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } @@ -29,6 +30,7 @@ sp-storage = { version = "2.0.0-rc3", path = "../../primitives/storage" } [features] default = ["std"] std = [ + "serde", "codec/std", "sp-std/std", "sp-runtime/std", diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 740fec099d..31d0cfb20d 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -44,7 +44,7 @@ use frame_support::{ dispatch::DispatchResult, }; use sp_runtime::{ - FixedI128, FixedPointNumber, FixedPointOperand, + FixedU128, FixedPointNumber, FixedPointOperand, Perquintill, RuntimeDebug, transaction_validity::{ TransactionPriority, ValidTransaction, InvalidTransaction, TransactionValidityError, TransactionValidity, @@ -57,13 +57,125 @@ use sp_runtime::{ use pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; /// Fee multiplier. -pub type Multiplier = FixedI128; +pub type Multiplier = FixedU128; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +/// A struct to update the weight multiplier per block. It implements `Convert`, meaning that it can convert the previous multiplier to the next one. This should +/// be called on `on_finalize` of a block, prior to potentially cleaning the weight data from the +/// system module. +/// +/// given: +/// s = previous block weight +/// s'= ideal block weight +/// m = maximum block weight +/// diff = (s - s')/m +/// v = 0.00001 +/// t1 = (v * diff) +/// t2 = (v * diff)^2 / 2 +/// then: +/// next_multiplier = prev_multiplier * (1 + t1 + t2) +/// +/// Where `(s', v)` must be given as the `Get` implementation of the `T` generic type. Moreover, `M` +/// must provide the minimum allowed value for the multiplier. Note that a runtime should ensure +/// with tests that the combination of this `M` and `V` is not such that the multiplier can drop to +/// zero and never recover. +/// +/// note that `s'` is interpreted as a portion in the _normal transaction_ capacity of the block. +/// For example, given `s' == 0.25` and `AvailableBlockRatio = 0.75`, then the target fullness is +/// _0.25 of the normal capacity_ and _0.1875 of the entire block_. +/// +/// This implementation implies the bound: +/// - `v ≤ p / k * (s − s')` +/// - or, solving for `p`: `p >= v * k * (s - s')` +/// +/// where `p` is the amount of change over `k` blocks. +/// +/// Hence: +/// - in a fully congested chain: `p >= v * k * (1 - s')`. +/// - in an empty chain: `p >= v * k * (-s')`. 
+/// +/// For example, when all blocks are full and there are 28800 blocks per day (default in `substrate-node`) +/// and v == 0.00001, s' == 0.1875, we'd have: +/// +/// p >= 0.00001 * 28800 * 0.8125 +/// p >= 0.234 +/// +/// Meaning that fees can change by around ~23% per day, given extreme congestion. +/// +/// More info can be found at: +/// https://w3f-research.readthedocs.io/en/latest/polkadot/Token%20Economics.html +pub struct TargetedFeeAdjustment(sp_std::marker::PhantomData<(T, S, V, M)>); + +impl Convert for TargetedFeeAdjustment + where T: frame_system::Trait, S: Get, V: Get, M: Get, +{ + fn convert(previous: Multiplier) -> Multiplier { + // Defensive only. The multiplier in storage should always be at most positive. Nonetheless + // we recover here in case of errors, because any value below this would be stale and can + // never change. + let min_multiplier = M::get(); + let previous = previous.max(min_multiplier); + + // the computed ratio is only among the normal class. + let normal_max_weight = + ::AvailableBlockRatio::get() * + ::MaximumBlockWeight::get(); + let normal_block_weight = + >::block_weight() + .get(frame_support::weights::DispatchClass::Normal) + .min(normal_max_weight); + + let s = S::get(); + let v = V::get(); + + let target_weight = (s * normal_max_weight) as u128; + let block_weight = normal_block_weight as u128; + + // determines if the first_term is positive + let positive = block_weight >= target_weight; + let diff_abs = block_weight.max(target_weight) - block_weight.min(target_weight); + + // defensive only, a test case assures that the maximum weight diff can fit in Multiplier + // without any saturation. + let diff = Multiplier::saturating_from_rational(diff_abs, normal_max_weight.max(1)); + let diff_squared = diff.saturating_mul(diff); + + let v_squared_2 = v.saturating_mul(v) / Multiplier::saturating_from_integer(2); + + let first_term = v.saturating_mul(diff); + let second_term = v_squared_2.saturating_mul(diff_squared); + + if positive { + let excess = first_term.saturating_add(second_term).saturating_mul(previous); + previous.saturating_add(excess).max(min_multiplier) + } else { + // Defensive-only: first_term > second_term. Safe subtraction. + let negative = first_term.saturating_sub(second_term).saturating_mul(previous); + previous.saturating_sub(negative).max(min_multiplier) + } + } +} + +/// Storage releases of the module. +#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug)] +enum Releases { + /// Original version of the module. + V1Ancient, + /// One that bumps the usage to FixedU128 from FixedI128. + V2, +} + +impl Default for Releases { + fn default() -> Self { + Releases::V1Ancient + } +} + pub trait Trait: frame_system::Trait { /// The currency type in which fees will be paid. type Currency: Currency + Send + Sync; @@ -85,7 +197,9 @@ pub trait Trait: frame_system::Trait { decl_storage! { trait Store for Module as TransactionPayment { - pub NextFeeMultiplier get(fn next_fee_multiplier): Multiplier = Multiplier::from_inner(0); + pub NextFeeMultiplier get(fn next_fee_multiplier): Multiplier = Multiplier::saturating_from_integer(1); + + StorageVersion build(|_: &GenesisConfig| Releases::V2): Releases; } } @@ -103,6 +217,51 @@ decl_module! { *fm = T::FeeMultiplierUpdate::convert(*fm); }); } + + fn integrity_test() { + // given weight == u64, we build multipliers from `diff` of two weight values, which can + // at most be MaximumBlockWeight. Make sure that this can fit in a multiplier without + // loss. 
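To make the update rule above concrete, here is a minimal sketch of the same formula in plain f64 arithmetic (illustrative only: the real `TargetedFeeAdjustment::convert` above uses saturating `FixedU128` operations; `v = 0.00001`, `s' = 0.25` of the normal capacity, the 10^-9 minimum and the weights in `main` are assumed example values mirroring the defaults discussed above):

fn next_multiplier(prev: f64, block_weight: f64, normal_max_weight: f64) -> f64 {
    let v = 0.00001_f64;
    let s_prime = 0.25_f64;            // target fullness of the normal class
    let min_multiplier: f64 = 1e-9;    // MinimumMultiplier

    let target = s_prime * normal_max_weight;
    let diff = (block_weight.min(normal_max_weight) - target) / normal_max_weight;

    // next = prev * (1 + t1 + t2), with t1 = v * diff and t2 = (v * diff)^2 / 2,
    // never falling below the configured minimum.
    let t1 = v * diff;
    let t2 = t1 * t1 / 2.0;
    (prev * (1.0 + t1 + t2)).max(min_multiplier)
}

fn main() {
    // A full normal class pushes the multiplier up a little ...
    println!("{:.7}", next_multiplier(1.0, 1_000.0, 1_000.0)); // ~1.0000075
    // ... while an empty block pulls it back down.
    println!("{:.7}", next_multiplier(1.0, 0.0, 1_000.0));     // ~0.9999975
}

Note that the real implementation also clamps `prev` at `MinimumMultiplier` before updating, which this sketch omits.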
+ use sp_std::convert::TryInto; + assert!( + ::max_value() >= + Multiplier::checked_from_integer( + ::MaximumBlockWeight::get().try_into().unwrap() + ).unwrap(), + ); + } + + fn on_runtime_upgrade() -> Weight { + use frame_support::migration::take_storage_value; + use sp_std::convert::TryInto; + use frame_support::debug::native::error; + + type OldMultiplier = sp_runtime::FixedI128; + type OldInner = ::Inner; + type Inner = ::Inner; + + if let Releases::V1Ancient = StorageVersion::get() { + StorageVersion::put(Releases::V2); + + if let Some(old) = take_storage_value::( + b"TransactionPayment", + b"NextFeeMultiplier", + &[], + ) { + let inner = old.into_inner(); + let new_inner = >::try_into(inner) + .unwrap_or_default(); + let new = Multiplier::from_inner(new_inner); + NextFeeMultiplier::put(new); + T::DbWeight::get().reads_writes(1, 1) + } else { + error!("transaction-payment migration failed."); + T::DbWeight::get().reads(1) + } + } else { + T::DbWeight::get().reads(1) + } + } } } @@ -157,7 +316,7 @@ impl Module where /// the minimum fee for a transaction to be included in a block. /// /// ```ignore - /// inclusion_fee = base_fee + targeted_fee_adjustment * (len_fee + weight_fee); + /// inclusion_fee = base_fee + len_fee + [targeted_fee_adjustment * weight_fee]; /// final_fee = inclusion_fee + tip; /// ``` pub fn compute_fee( @@ -194,16 +353,21 @@ impl Module where if pays_fee == Pays::Yes { let len = >::from(len); let per_byte = T::TransactionByteFee::get(); - let len_fee = per_byte.saturating_mul(len); - let unadjusted_weight_fee = Self::weight_to_fee(weight); - // the adjustable part of the fee - let adjustable_fee = len_fee.saturating_add(unadjusted_weight_fee); - let targeted_fee_adjustment = NextFeeMultiplier::get(); - let adjusted_fee = targeted_fee_adjustment.saturating_mul_acc_int(adjustable_fee); + // length fee. this is not adjusted. + let fixed_len_fee = per_byte.saturating_mul(len); + + // the adjustable part of the fee. + let unadjusted_weight_fee = Self::weight_to_fee(weight); + let multiplier = Self::next_fee_multiplier(); + // final adjusted weight fee. + let adjusted_weight_fee = multiplier.saturating_mul_int(unadjusted_weight_fee); let base_fee = Self::weight_to_fee(T::ExtrinsicBaseWeight::get()); - base_fee.saturating_add(adjusted_fee).saturating_add(tip) + base_fee + .saturating_add(fixed_len_fee) + .saturating_add(adjusted_weight_fee) + .saturating_add(tip) } else { tip } @@ -213,12 +377,12 @@ impl Module where impl Module { /// Compute the fee for the specified weight. /// - /// This fee is already adjusted by the per block fee adjustment factor and is therefore - /// the share that the weight contributes to the overall fee of a transaction. + /// This fee is already adjusted by the per block fee adjustment factor and is therefore the + /// share that the weight contributes to the overall fee of a transaction. /// - /// This function is generic in order to supply the contracts module with a way - /// to calculate the gas price. The contracts module is not able to put the necessary - /// `BalanceOf` contraints on its trait. This function is not to be used by this module. + /// This function is generic in order to supply the contracts module with a way to calculate the + /// gas price. The contracts module is not able to put the necessary `BalanceOf` constraints + /// on its trait. This function is not to be used by this module. 
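As a worked instance of the adjusted fee split above (only the weight fee is scaled by the multiplier; the base fee, length fee and tip are not), here is a small self-contained sketch in plain integer math. The concrete numbers mirror the `compute_fee` tests further down (base fee 100, byte fee 10, weight fee 123, length 456, tip 789); the multiplier is written as a rational so the sketch stays independent of the fixed-point types:

fn compute_fee_sketch(
    base_fee: u128,
    len: u128,
    per_byte: u128,
    weight_fee: u128,
    // multiplier as a rational, e.g. 3/2 for the "x1.5" tests.
    mul_num: u128,
    mul_den: u128,
    tip: u128,
) -> u128 {
    let fixed_len_fee = per_byte * len;                       // not adjusted
    let adjusted_weight_fee = weight_fee * mul_num / mul_den; // adjusted
    base_fee + fixed_len_fee + adjusted_weight_fee + tip
}

fn main() {
    // multiplier = 3/2: final fee = 100 + 4560 + 184 + 789 = 5633
    assert_eq!(compute_fee_sketch(100, 456, 10, 123, 3, 2, 789), 5_633);
    // multiplier = 1/2: final fee = 100 + 4560 + 61 + 789 = 5510
    assert_eq!(compute_fee_sketch(100, 456, 10, 123, 1, 2, 789), 5_510);
    println!("ok");
}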
pub fn weight_to_fee_with_adjustment(weight: Weight) -> Balance where Balance: UniqueSaturatedFrom { @@ -576,6 +740,37 @@ mod tests { PostDispatchInfo { actual_weight: None, } } + #[test] + fn migration_to_v2_works() { + use sp_runtime::FixedI128; + use frame_support::traits::OnRuntimeUpgrade; + + let with_old_multiplier = |mul: FixedI128, expected: FixedU128| { + ExtBuilder::default().build().execute_with(|| { + frame_support::migration::put_storage_value( + b"TransactionPayment", + b"NextFeeMultiplier", + &[], + mul, + ); + + assert_eq!(StorageVersion::get(), Releases::V1Ancient); + + TransactionPayment::on_runtime_upgrade(); + + assert_eq!(StorageVersion::get(), Releases::V2); + assert_eq!(NextFeeMultiplier::get(), expected); + }) + }; + + with_old_multiplier(FixedI128::saturating_from_integer(-1), FixedU128::zero()); + with_old_multiplier(FixedI128::saturating_from_rational(-1, 2), FixedU128::zero()); + with_old_multiplier( + FixedI128::saturating_from_rational(1, 2), + FixedU128::saturating_from_rational(1, 2), + ); + } + #[test] fn signed_extension_transaction_payment_work() { ExtBuilder::default() @@ -620,21 +815,21 @@ mod tests { .execute_with(|| { let len = 10; - NextFeeMultiplier::put(Multiplier::saturating_from_rational(1, 2)); + NextFeeMultiplier::put(Multiplier::saturating_from_rational(3, 2)); let pre = ChargeTransactionPayment::::from(5 /* tipped */) .pre_dispatch(&2, CALL, &info_from_weight(100), len) .unwrap(); - // 5 base fee, 3/2 * 10 byte fee, 3/2 * 100 weight fee, 5 tip - assert_eq!(Balances::free_balance(2), 200 - 5 - 15 - 150 - 5); + // 5 base fee, 10 byte fee, 3/2 * 100 weight fee, 5 tip + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 150 - 5); assert!( ChargeTransactionPayment:: ::post_dispatch(pre, &info_from_weight(100), &post_info_from_weight(50), len, &Ok(())) .is_ok() ); - // 75 (3/2 of the returned 50 units of weight ) is refunded - assert_eq!(Balances::free_balance(2), 200 - 5 - 15 - 75 - 5); + // 75 (3/2 of the returned 50 units of weight) is refunded + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 75 - 5); }); } @@ -708,7 +903,7 @@ mod tests { .execute_with(|| { // all fees should be x1.5 - NextFeeMultiplier::put(Multiplier::saturating_from_rational(1, 2)); + NextFeeMultiplier::put(Multiplier::saturating_from_rational(3, 2)); let len = 10; assert!( @@ -716,7 +911,14 @@ mod tests { .pre_dispatch(&1, CALL, &info_from_weight(3), len) .is_ok() ); - assert_eq!(Balances::free_balance(1), 100 - 10 - 5 - (10 + 3) * 3 / 2); + assert_eq!( + Balances::free_balance(1), + 100 // original + - 10 // tip + - 5 // base + - 10 // len + - (3 * 3 / 2) // adjusted weight + ); }) } @@ -736,7 +938,7 @@ mod tests { .execute_with(|| { // all fees should be x1.5 - NextFeeMultiplier::put(Multiplier::saturating_from_rational(1, 2)); + NextFeeMultiplier::put(Multiplier::saturating_from_rational(3, 2)); assert_eq!( TransactionPayment::query_info(xt, len), @@ -745,10 +947,8 @@ mod tests { class: info.class, partial_fee: 5 * 2 /* base * weight_fee */ - + ( - len as u64 /* len * 1 */ - + info.weight.min(MaximumBlockWeight::get()) as u64 * 2 /* weight * weight_to_fee */ - ) * 3 / 2 + + len as u64 /* len * 1 */ + + info.weight.min(MaximumBlockWeight::get()) as u64 * 2 * 3 / 2 /* weight */ }, ); @@ -765,7 +965,7 @@ mod tests { .execute_with(|| { // Next fee multiplier is zero - assert_eq!(NextFeeMultiplier::get(), Multiplier::saturating_from_integer(0)); + assert_eq!(NextFeeMultiplier::get(), Multiplier::one()); // Tip only, no fees works let dispatch_info = DispatchInfo { @@ 
-804,8 +1004,8 @@ mod tests { .build() .execute_with(|| { - // Add a next fee multiplier - NextFeeMultiplier::put(Multiplier::saturating_from_rational(1, 2)); // = 1/2 = .5 + // Add a next fee multiplier. Fees will be x3/2. + NextFeeMultiplier::put(Multiplier::saturating_from_rational(3, 2)); // Base fee is unaffected by multiplier let dispatch_info = DispatchInfo { weight: 0, @@ -821,10 +1021,10 @@ mod tests { pays_fee: Pays::Yes, }; // 123 weight, 456 length, 100 base - // adjustable fee = (123 * 1) + (456 * 10) = 4683 - // adjusted fee = (4683 * .5) + 4683 = 7024.5 -> 7024 - // final fee = 100 + 7024 + 789 tip = 7913 - assert_eq!(Module::::compute_fee(456, &dispatch_info, 789), 7913); + assert_eq!( + Module::::compute_fee(456, &dispatch_info, 789), + 100 + (3 * 123 / 2) + 4560 + 789, + ); }); } @@ -837,9 +1037,10 @@ mod tests { .build() .execute_with(|| { - // Add a next fee multiplier - NextFeeMultiplier::put(Multiplier::saturating_from_rational(-1, 2)); // = -1/2 = -.5 - // Base fee is unaffected by multiplier + // Add a next fee multiplier. All fees will be x1/2. + NextFeeMultiplier::put(Multiplier::saturating_from_rational(1, 2)); + + // Base fee is unaffected by multiplier. let dispatch_info = DispatchInfo { weight: 0, class: DispatchClass::Operational, @@ -847,17 +1048,17 @@ mod tests { }; assert_eq!(Module::::compute_fee(0, &dispatch_info, 0), 100); - // Everything works together :) + // Everything works together. let dispatch_info = DispatchInfo { weight: 123, class: DispatchClass::Operational, pays_fee: Pays::Yes, }; // 123 weight, 456 length, 100 base - // adjustable fee = (123 * 1) + (456 * 10) = 4683 - // adjusted fee = 4683 - (4683 * -.5) = 4683 - 2341.5 = 4683 - 2341 = 2342 - // final fee = 100 + 2342 + 789 tip = 3231 - assert_eq!(Module::::compute_fee(456, &dispatch_info, 789), 3231); + assert_eq!( + Module::::compute_fee(456, &dispatch_info, 789), + 100 + (123 / 2) + 4560 + 789, + ); }); } @@ -993,7 +1194,7 @@ mod tests { let len = 10; let tip = 5; - NextFeeMultiplier::put(Multiplier::saturating_from_rational(1, 4)); + NextFeeMultiplier::put(Multiplier::saturating_from_rational(5, 4)); let pre = ChargeTransactionPayment::::from(tip) .pre_dispatch(&2, CALL, &info, len) @@ -1007,11 +1208,8 @@ mod tests { let actual_fee = Module:: ::compute_actual_fee(len as u32, &info, &post_info, tip); - // 33 weight, 10 length, 7 base - // adjustable fee = (33 * 1) + (10 * 1) = 43 - // adjusted fee = 43 + (43 * .25) = 43 + 10.75 = 43 + 10 = 53 - // final fee = 7 + 53 + 5 tip = 65 - assert_eq!(actual_fee, 65); + // 33 weight, 10 length, 7 base, 5 tip + assert_eq!(actual_fee, 7 + 10 + (33 * 5 / 4) + 5); assert_eq!(refund_based_fee, actual_fee); }); } diff --git a/primitives/arithmetic/src/fixed_point.rs b/primitives/arithmetic/src/fixed_point.rs index 55581ff54c..2362b1e8af 100644 --- a/primitives/arithmetic/src/fixed_point.rs +++ b/primitives/arithmetic/src/fixed_point.rs @@ -372,6 +372,23 @@ macro_rules! implement_fixed { } } + impl $name { + /// const version of `FixedPointNumber::from_inner`. 
+ pub const fn from_inner(inner: $inner_type) -> Self { + Self(inner) + } + + #[cfg(any(feature = "std", test))] + pub fn from_fraction(x: f64) -> Self { + Self((x * (::DIV as f64)) as $inner_type) + } + + #[cfg(any(feature = "std", test))] + pub fn to_fraction(self) -> f64 { + self.0 as f64 / ::DIV as f64 + } + } + impl Saturating for $name { fn saturating_add(self, rhs: Self) -> Self { Self(self.0.saturating_add(rhs.0)) -- GitLab From 8e1f75316d3bc8c1f3d2a667e4e99ca7ea30372b Mon Sep 17 00:00:00 2001 From: Gavin Wood Date: Thu, 18 Jun 2020 09:33:51 +0200 Subject: [PATCH 009/144] Restrict remove_proxies (#6383) --- frame/proxy/src/lib.rs | 4 ++++ frame/proxy/src/tests.rs | 9 +++++++++ 2 files changed, 13 insertions(+) diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index 14c7ced151..66e3e76038 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -68,6 +68,8 @@ pub trait Trait: frame_system::Trait { /// A kind of proxy; specified with the proxy and passed in to the `IsProxyable` fitler. /// The instance filter determines whether a given call may be proxied under this type. + /// + /// IMPORTANT: `Default` must be provided and MUST BE the the *most permissive* value. type ProxyType: Parameter + Member + Ord + PartialOrd + InstanceFilter<::Call> + Default; @@ -174,6 +176,8 @@ decl_module! { match c.is_sub_type() { Some(Call::add_proxy(_, ref pt)) | Some(Call::remove_proxy(_, ref pt)) if !proxy_type.is_superset(&pt) => false, + Some(Call::remove_proxies(..)) | Some(Call::kill_anonymous(..)) + if proxy_type != T::ProxyType::default() => false, _ => proxy_type.filter(c) } }); diff --git a/frame/proxy/src/tests.rs b/frame/proxy/src/tests.rs index be99e9424a..72c9c0d577 100644 --- a/frame/proxy/src/tests.rs +++ b/frame/proxy/src/tests.rs @@ -154,6 +154,7 @@ type Proxy = Module; use frame_system::Call as SystemCall; use pallet_balances::Call as BalancesCall; use pallet_balances::Error as BalancesError; +use pallet_balances::Event as BalancesEvent; use pallet_utility::Call as UtilityCall; use pallet_utility::Event as UtilityEvent; use super::Call as ProxyCall; @@ -242,6 +243,14 @@ fn filtering_works() { UtilityEvent::BatchInterrupted(0, DispatchError::BadOrigin).into(), RawEvent::ProxyExecuted(Ok(())).into(), ]); + + let call = Box::new(Call::Proxy(ProxyCall::remove_proxies())); + assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); + expect_event(RawEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); + assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); + expect_event(RawEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); + assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); + expect_events(vec![BalancesEvent::::Unreserved(1, 5).into(), RawEvent::ProxyExecuted(Ok(())).into()]); }); } -- GitLab From a2653e87e05b7666a45b2a1a53fd25967807575f Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 18 Jun 2020 09:34:53 +0200 Subject: [PATCH 010/144] Remove penalty on duplicate Status message (#6377) --- client/network/src/protocol.rs | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 83f459d344..764c416495 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -104,8 +104,6 @@ mod rep { pub const CLOGGED_PEER: Rep = Rep::new(-(1 << 12), "Clogged message queue"); /// Reputation change when a peer doesn't respond in time to our messages. 
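The proxy filter rule added above (`remove_proxies` and `kill_anonymous` are only proxiable by the most permissive proxy type, which the trait requires to be `Default`) can be illustrated with a small self-contained sketch. `ToyProxyType` and `ToyCall` are stand-ins, not the pallet's real `ProxyType` and `Call`:

#[derive(PartialEq)]
enum ToyProxyType {
    Any,
    NonTransfer,
}

impl Default for ToyProxyType {
    // `Default` is required to be the most permissive value.
    fn default() -> Self {
        ToyProxyType::Any
    }
}

enum ToyCall {
    RemoveProxies,
    Transfer,
}

fn is_proxiable(proxy_type: &ToyProxyType, call: &ToyCall) -> bool {
    match call {
        // Mirrors: `Some(Call::remove_proxies(..)) | Some(Call::kill_anonymous(..))
        //           if proxy_type != T::ProxyType::default() => false`
        ToyCall::RemoveProxies if *proxy_type != ToyProxyType::default() => false,
        // Everything else falls through to the normal `InstanceFilter` check,
        // which this sketch approximates as "allowed".
        _ => true,
    }
}

fn main() {
    assert!(is_proxiable(&ToyProxyType::Any, &ToyCall::RemoveProxies));
    assert!(!is_proxiable(&ToyProxyType::NonTransfer, &ToyCall::RemoveProxies));
    assert!(is_proxiable(&ToyProxyType::NonTransfer, &ToyCall::Transfer));
    println!("ok");
}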
pub const TIMEOUT: Rep = Rep::new(-(1 << 10), "Request timeout"); - /// Reputation change when a peer sends us a status message while we already received one. - pub const UNEXPECTED_STATUS: Rep = Rep::new(-(1 << 20), "Unexpected status message"); /// Reputation change when we are a light client and a peer is behind us. pub const PEER_BEHIND_US_LIGHT: Rep = Rep::new(-(1 << 8), "Useless for a light peer"); /// Reputation change when a peer sends us any extrinsic. @@ -979,12 +977,7 @@ impl Protocol { trace!(target: "sync", "New peer {} {:?}", who, status); let _protocol_version = { if self.context_data.peers.contains_key(&who) { - log!( - target: "sync", - if self.important_peers.contains(&who) { Level::Warn } else { Level::Debug }, - "Unexpected status packet from {}", who - ); - self.peerset_handle.report_peer(who, rep::UNEXPECTED_STATUS); + debug!(target: "sync", "Ignoring duplicate status packet from {}", who); return CustomMessageOutcome::None; } if status.genesis_hash != self.genesis_hash { -- GitLab From d6d688c4952a99dbfd2155784c937b42ec899da7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 18 Jun 2020 09:35:18 +0200 Subject: [PATCH 011/144] `decl_module!` print better error on duplicate reserved keyword (#6384) * `decl_module!` print better error on duplicate reserved keyword This prints a better error message on duplicated reserved keywords, instead of complaining because of missing `origin`. * Review feedback --- frame/support/src/dispatch.rs | 140 +++++++++++++++++- frame/support/test/tests/decl_module_ui.rs | 26 ++++ ...served_keyword_two_times_integrity_test.rs | 7 + ...ed_keyword_two_times_integrity_test.stderr | 25 ++++ ...eserved_keyword_two_times_on_initialize.rs | 11 ++ ...ved_keyword_two_times_on_initialize.stderr | 25 ++++ 6 files changed, 233 insertions(+), 1 deletion(-) create mode 100644 frame/support/test/tests/decl_module_ui.rs create mode 100644 frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs create mode 100644 frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr create mode 100644 frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs create mode 100644 frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index 094cbce263..edb6e62639 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -399,6 +399,29 @@ macro_rules! decl_module { "`deposit_event` function is reserved and must follow the syntax: `$vis:vis fn deposit_event() = default;`" ); }; + // Compile error on `deposit_event` being added a second time. + (@normalize + $(#[$attr:meta])* + pub struct $mod_type:ident< + $trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)? + > + for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident + { $( $other_where_bounds:tt )* } + { $( $deposit_event:tt )+ } + { $( $on_initialize:tt )* } + { $( $on_runtime_upgrade:tt )* } + { $( $on_finalize:tt )* } + { $( $offchain:tt )* } + { $( $constants:tt )* } + { $( $error_type:tt )* } + { $( $integrity_test:tt )* } + [ $( $dispatchables:tt )* ] + $(#[doc = $doc_attr:tt])* + $vis:vis fn deposit_event() = default; + $($rest:tt)* + ) => { + compile_error!("`deposit_event` can only be passed once as input."); + }; // Add on_finalize (@normalize $(#[$attr:meta])* @@ -462,6 +485,30 @@ macro_rules! 
decl_module { `on_initialize` or `on_runtime_upgrade` instead" ); }; + // Compile error on `on_finalize` being added a second time. + (@normalize + $(#[$attr:meta])* + pub struct $mod_type:ident< + $trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)? + > + for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident + { $( $other_where_bounds:tt )* } + { $( $deposit_event:tt )* } + { $( $on_initialize:tt )* } + { $( $on_runtime_upgrade:tt )* } + { $( $on_finalize:tt )+ } + { $( $offchain:tt )* } + { $( $constants:tt )* } + { $( $error_type:tt )* } + { $( $integrity_test:tt )* } + [ $( $dispatchables:tt )* ] + $(#[doc = $doc_attr:tt])* + #[weight = $weight:expr] + fn on_finalize( $( $param_name:ident : $param:ty ),* $(,)? ) { $( $impl:tt )* } + $($rest:tt)* + ) => { + compile_error!("`on_finalize` can only be passed once as input."); + }; // compile_error on_runtime_upgrade, without a given weight removed syntax. (@normalize $(#[$attr:meta])* @@ -554,6 +601,29 @@ macro_rules! decl_module { $($rest)* ); }; + // Compile error on `on_runtime_upgrade` being added a second time. + (@normalize + $(#[$attr:meta])* + pub struct $mod_type:ident< + $trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)? + > + for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident + { $( $other_where_bounds:tt )* } + { $( $deposit_event:tt )* } + { $( $on_initialize:tt )* } + { $( $on_runtime_upgrade:tt )+ } + { $( $on_finalize:tt )* } + { $( $offchain:tt )* } + { $( $constants:tt )* } + { $( $error_type:tt )* } + { $( $integrity_test:tt )* } + [ $( $dispatchables:tt )* ] + $(#[doc = $doc_attr:tt])* + fn on_runtime_upgrade( $( $param_name:ident : $param:ty ),* $(,)? ) -> $return:ty { $( $impl:tt )* } + $($rest:tt)* + ) => { + compile_error!("`on_runtime_upgrade` can only be passed once as input."); + }; // Add integrity_test (@normalize $(#[$attr:meta])* @@ -595,6 +665,29 @@ macro_rules! decl_module { $($rest)* ); }; + // Compile error on `integrity_test` being added a second time. + (@normalize + $(#[$attr:meta])* + pub struct $mod_type:ident< + $trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)? + > + for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident + { $( $other_where_bounds:tt )* } + { $( $deposit_event:tt )* } + { $( $on_initialize:tt )* } + { $( $on_runtime_upgrade:tt )* } + { $( $on_finalize:tt )* } + { $( $offchain:tt )* } + { $( $constants:tt )* } + { $( $error_type:tt )* } + { $( $integrity_test:tt )+ } + [ $( $dispatchables:tt )* ] + $(#[doc = $doc_attr:tt])* + fn integrity_test() { $( $impl:tt )* } + $($rest:tt)* + ) => { + compile_error!("`integrity_test` can only be passed once as input."); + }; // compile_error on_initialize, without a given weight removed syntax. (@normalize $(#[$attr:meta])* @@ -687,6 +780,29 @@ macro_rules! decl_module { $($rest)* ); }; + // Compile error on trying to add a second `on_initialize`. + (@normalize + $(#[$attr:meta])* + pub struct $mod_type:ident< + $trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)? 
+ > + for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident + { $( $other_where_bounds:tt )* } + { $( $deposit_event:tt )* } + { $( $on_initialize:tt )+ } + { $( $on_runtime_upgrade:tt )* } + { $( $on_finalize:tt )* } + { $( $offchain:tt )* } + { $( $constants:tt )* } + { $( $error_type:tt )* } + { $( $integrity_test:tt )* } + [ $( $dispatchables:tt )* ] + $(#[doc = $doc_attr:tt])* + fn on_initialize( $( $param_name:ident : $param:ty ),* $(,)? ) -> $return:ty { $( $impl:tt )* } + $($rest:tt)* + ) => { + compile_error!("`on_initialize` can only be passed once as input."); + }; (@normalize $(#[$attr:meta])* pub struct $mod_type:ident< @@ -727,7 +843,29 @@ macro_rules! decl_module { $($rest)* ); }; - + // Compile error on trying to add a second `offchain_worker`. + (@normalize + $(#[$attr:meta])* + pub struct $mod_type:ident< + $trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)? + > + for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident + { $( $other_where_bounds:tt )* } + { $( $deposit_event:tt )* } + { $( $on_initialize:tt )* } + { $( $on_runtime_upgrade:tt )* } + { $( $on_finalize:tt )* } + { $( $offchain:tt )+ } + { $( $constants:tt )* } + { $( $error_type:tt )* } + { $( $integrity_test:tt )* } + [ $( $dispatchables:tt )* ] + $(#[doc = $doc_attr:tt])* + fn offchain_worker( $( $param_name:ident : $param:ty ),* $(,)? ) -> $return:ty { $( $impl:tt )* } + $($rest:tt)* + ) => { + compile_error!("`offchain_worker` can only be passed once as input."); + }; // This puts a constant in the parsed constants list. (@normalize $(#[$attr:meta])* diff --git a/frame/support/test/tests/decl_module_ui.rs b/frame/support/test/tests/decl_module_ui.rs new file mode 100644 index 0000000000..90d105e7cf --- /dev/null +++ b/frame/support/test/tests/decl_module_ui.rs @@ -0,0 +1,26 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//#[rustversion::attr(not(stable), ignore)] +#[test] +fn decl_module_ui() { + // As trybuild is using `cargo check`, we don't need the real WASM binaries. + std::env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + + let t = trybuild::TestCases::new(); + t.compile_fail("tests/decl_module_ui/*.rs"); +} diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs new file mode 100644 index 0000000000..4dbae05f07 --- /dev/null +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs @@ -0,0 +1,7 @@ +frame_support::decl_module! 
{ + pub struct Module for enum Call where origin: T::Origin { + fn integrity_test() {} + + fn integrity_test() {} + } +} diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr new file mode 100644 index 0000000000..d6498961d3 --- /dev/null +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr @@ -0,0 +1,25 @@ +error: `integrity_test` can only be passed once as input. + --> $DIR/reserved_keyword_two_times_integrity_test.rs:1:1 + | +1 | / frame_support::decl_module! { +2 | | pub struct Module for enum Call where origin: T::Origin { +3 | | fn integrity_test() {} +4 | | +5 | | fn integrity_test() {} +6 | | } +7 | | } + | |_^ + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0601]: `main` function not found in crate `$CRATE` + --> $DIR/reserved_keyword_two_times_integrity_test.rs:1:1 + | +1 | / frame_support::decl_module! { +2 | | pub struct Module for enum Call where origin: T::Origin { +3 | | fn integrity_test() {} +4 | | +5 | | fn integrity_test() {} +6 | | } +7 | | } + | |_^ consider adding a `main` function to `$DIR/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs` diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs new file mode 100644 index 0000000000..4f05134997 --- /dev/null +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs @@ -0,0 +1,11 @@ +frame_support::decl_module! { + pub struct Module for enum Call where origin: T::Origin { + fn on_initialize() -> Weight { + 0 + } + + fn on_initialize() -> Weight { + 0 + } + } +} diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr new file mode 100644 index 0000000000..8a9f025046 --- /dev/null +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr @@ -0,0 +1,25 @@ +error: `on_initialize` can only be passed once as input. + --> $DIR/reserved_keyword_two_times_on_initialize.rs:1:1 + | +1 | / frame_support::decl_module! { +2 | | pub struct Module for enum Call where origin: T::Origin { +3 | | fn on_initialize() -> Weight { +4 | | 0 +... | +10 | | } +11 | | } + | |_^ + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0601]: `main` function not found in crate `$CRATE` + --> $DIR/reserved_keyword_two_times_on_initialize.rs:1:1 + | +1 | / frame_support::decl_module! { +2 | | pub struct Module for enum Call where origin: T::Origin { +3 | | fn on_initialize() -> Weight { +4 | | 0 +... | +10 | | } +11 | | } + | |_^ consider adding a `main` function to `$DIR/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs` -- GitLab From f8afa5203f41c0f71e40cf8af0ef97d5e38a3e1e Mon Sep 17 00:00:00 2001 From: Shaopeng Wang Date: Thu, 18 Jun 2020 19:35:49 +1200 Subject: [PATCH 012/144] FixedPointNumber: zero is not positive. 
(#6385) --- primitives/arithmetic/src/fixed_point.rs | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/primitives/arithmetic/src/fixed_point.rs b/primitives/arithmetic/src/fixed_point.rs index 2362b1e8af..8653ee2c8f 100644 --- a/primitives/arithmetic/src/fixed_point.rs +++ b/primitives/arithmetic/src/fixed_point.rs @@ -214,12 +214,12 @@ pub trait FixedPointNumber: self.into_inner() == Self::Inner::one() } - /// Checks if the number is positive. + /// Returns `true` if `self` is positive and `false` if the number is zero or negative. fn is_positive(self) -> bool { - self.into_inner() >= Self::Inner::zero() + self.into_inner() > Self::Inner::zero() } - /// Checks if the number is negative. + /// Returns `true` if `self` is negative and `false` if the number is zero or positive. fn is_negative(self) -> bool { self.into_inner() < Self::Inner::zero() } @@ -1393,6 +1393,23 @@ macro_rules! implement_fixed { assert_eq!(d.checked_div(&$name::zero()), None); } + #[test] + fn is_positive_negative_works() { + let one = $name::one(); + assert!(one.is_positive()); + assert!(!one.is_negative()); + + let zero = $name::zero(); + assert!(!zero.is_positive()); + assert!(!zero.is_negative()); + + if $signed { + let minus_one = $name::saturating_from_integer(-1); + assert!(minus_one.is_negative()); + assert!(!minus_one.is_positive()); + } + } + #[test] fn trunc_works() { let n = $name::saturating_from_rational(5, 2).trunc(); -- GitLab From b02101e9f9424251cb9f965dbf6ff66268d4fa93 Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Thu, 18 Jun 2020 09:36:52 +0200 Subject: [PATCH 013/144] Allow empty values in the storage (#6364) * Allow empty values in the storage * Bump trie-bench * Bump trie-bench --- Cargo.lock | 12 ++++----- frame/executive/src/lib.rs | 2 +- primitives/state-machine/Cargo.toml | 2 +- primitives/state-machine/src/lib.rs | 36 +++++++++++++++++++++++++ primitives/state-machine/src/testing.rs | 6 ++--- primitives/trie/Cargo.toml | 6 ++--- primitives/trie/src/lib.rs | 1 + test-utils/runtime/Cargo.toml | 4 +-- 8 files changed, 53 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index aeacd6e353..f67d22aa6e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3057,9 +3057,9 @@ dependencies = [ [[package]] name = "memory-db" -version = "0.20.1" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be512cb2ccb4ecbdca937fdd4a62ea5f09f8e7195466a85e4632b3d5bcce82e6" +checksum = "fb2999ff7a65d5a1d72172f6d51fa2ea03024b51aee709ba5ff81c3c629a2410" dependencies = [ "ahash", "hash-db", @@ -8935,9 +8935,9 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" -version = "0.21.1" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c48b309cdda1abbdada28424bdc46f8b85362b3e66d6786d91223e83874429c7" +checksum = "ed8419971832eb3333dc26066e50943a20e0934efeb451b3df5ee94f7f7323ab" dependencies = [ "criterion 0.2.11", "hash-db", @@ -8951,9 +8951,9 @@ dependencies = [ [[package]] name = "trie-db" -version = "0.20.1" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcc309f34008563989045a4c4dbcc5770467f3a3785ee80a9b5cc0d83362475f" +checksum = "cb230c24c741993b04cfccbabb45acff6f6480c5f00d3ed8794ea43db3a9d727" dependencies = [ "hash-db", "hashbrown", diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index c6371d914a..9b0e4eab02 100644 --- 
a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -709,7 +709,7 @@ mod tests { header: Header { parent_hash: [69u8; 32].into(), number: 1, - state_root: hex!("05a38fa4a48ca80ffa8482304be7749a484dc8c9c31462a570d0fbadde6a3633").into(), + state_root: hex!("e8ff7b3dd4375f6f3a76e24a1999e2a7be2d15b353e49ac94ace1eae3e80eb87").into(), extrinsics_root: hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into(), digest: Digest { logs: vec![], }, }, diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index 22dc73fc7e..b94195db90 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] log = "0.4.8" parking_lot = "0.10.0" hash-db = "0.15.2" -trie-db = "0.20.1" +trie-db = "0.21.0" trie-root = "0.16.0" sp-trie = { version = "2.0.0-rc3", path = "../trie" } sp-core = { version = "2.0.0-rc3", path = "../core" } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 693a7bc12f..b863d155e7 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1306,4 +1306,40 @@ mod tests { } assert!(!duplicate); } + + #[test] + fn set_storage_empty_allowed() { + let initial: BTreeMap<_, _> = map![ + b"aaa".to_vec() => b"0".to_vec(), + b"bbb".to_vec() => b"".to_vec() + ]; + let mut state = InMemoryBackend::::from(initial); + let backend = state.as_trie_backend().unwrap(); + + let mut overlay = OverlayedChanges::default(); + overlay.set_storage(b"ccc".to_vec(), Some(b"".to_vec())); + assert_eq!(overlay.storage(b"ccc"), Some(Some(&[][..]))); + overlay.commit_prospective(); + assert_eq!(overlay.storage(b"ccc"), Some(Some(&[][..]))); + assert_eq!(overlay.storage(b"bbb"), None); + + { + let mut offchain_overlay = Default::default(); + let mut cache = StorageTransactionCache::default(); + let mut ext = Ext::new( + &mut overlay, + &mut offchain_overlay, + &mut cache, + backend, + changes_trie::disabled_state::<_, u64>(), + None, + ); + assert_eq!(ext.storage(b"bbb"), Some(vec![])); + assert_eq!(ext.storage(b"ccc"), Some(vec![])); + ext.clear_storage(b"ccc"); + assert_eq!(ext.storage(b"ccc"), None); + } + overlay.commit_prospective(); + assert_eq!(overlay.storage(b"ccc"), Some(None)); + } } diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 2ea2961830..90da547983 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -242,7 +242,7 @@ impl sp_externalities::ExtensionStore for TestExternalities where #[cfg(test)] mod tests { use super::*; - use sp_core::traits::Externalities; + use sp_core::{H256, traits::Externalities}; use sp_runtime::traits::BlakeTwo256; use hex_literal::hex; @@ -253,8 +253,8 @@ mod tests { ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); - const ROOT: [u8; 32] = hex!("555d4777b52e9196e3f6373c556cc661e79cd463f881ab9e921e70fc30144bf4"); - assert_eq!(&ext.storage_root()[..], &ROOT); + let root = H256::from(hex!("2a340d3dfd52f5992c6b117e9e45f479e6da5afffafeb26ab619cf137a95aeb8")); + assert_eq!(H256::from_slice(ext.storage_root().as_slice()), root); } #[test] diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index c436092c09..823d5bc5df 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -20,13 +20,13 @@ harness = false codec = 
{ package = "parity-scale-codec", version = "1.3.0", default-features = false } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } hash-db = { version = "0.15.2", default-features = false } -trie-db = { version = "0.20.1", default-features = false } +trie-db = { version = "0.21.0", default-features = false } trie-root = { version = "0.16.0", default-features = false } -memory-db = { version = "0.20.0", default-features = false } +memory-db = { version = "0.21.0", default-features = false } sp-core = { version = "2.0.0-rc3", default-features = false, path = "../core" } [dev-dependencies] -trie-bench = "0.21.0" +trie-bench = "0.22.0" trie-standardmap = "0.15.2" criterion = "0.2.11" hex-literal = "0.2.1" diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index db471fd713..7d1879a4f9 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -51,6 +51,7 @@ pub struct Layout(sp_std::marker::PhantomData); impl TrieLayout for Layout { const USE_EXTENSION: bool = false; + const ALLOW_EMPTY: bool = true; type Hash = H; type Codec = NodeCodec; } diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index a4e4bd1f16..9016ddbff5 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -21,7 +21,7 @@ codec = { package = "parity-scale-codec", version = "1.3.0", default-features = frame-executive = { version = "2.0.0-rc3", default-features = false, path = "../../frame/executive" } sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/inherents" } sp-keyring = { version = "2.0.0-rc3", optional = true, path = "../../primitives/keyring" } -memory-db = { version = "0.20.0", default-features = false } +memory-db = { version = "0.21.0", default-features = false } sp-offchain = { path = "../../primitives/offchain", default-features = false, version = "2.0.0-rc3"} sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } @@ -39,7 +39,7 @@ pallet-timestamp = { version = "2.0.0-rc3", default-features = false, path = ".. sp-finality-grandpa = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/finality-grandpa" } sp-trie = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/trie" } sp-transaction-pool = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/transaction-pool" } -trie-db = { version = "0.20.1", default-features = false } +trie-db = { version = "0.21.0", default-features = false } parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } sc-service = { version = "0.8.0-rc3", default-features = false, optional = true, features = ["test-helpers"], path = "../../client/service" } -- GitLab From bd72cb62a9c2d8272c577e37e8464b8ba227f8c3 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Thu, 18 Jun 2020 09:42:31 +0200 Subject: [PATCH 014/144] Pallet: Atomic Swap (#6349) * Init atomic swap pallet * Implement module swap operations * Add successful swap test * Bump node spec_version * Fix storage name * Add ProofLimit parameter to prevent proof size being too large * Add missing events * Basic weight support * Add basic docs * Mark swap on claim This handles the additional case if `repatriate_reserved` fails. 
* Add additional expire handler * Update frame/atomic-swap/src/lib.rs Co-authored-by: Shawn Tabrizi * Add docs on ProofLimit * Fix test * Return Ok(()) even when the transfer fails Because we need to mark the swap as claimed no matter what. * Remove retry logic It's overkill. Swap is about something being executed, not necessarily successful. Although there should be logic (reserve and unreserve) to make it so that both parties *believes* that the execution is successful. * succeed -> succeeded * Add docs on duration -- revealer should use duration shorter than counterparty * Missing trait type Co-authored-by: Shawn Tabrizi --- Cargo.lock | 15 ++ Cargo.toml | 1 + bin/node/runtime/src/lib.rs | 4 +- frame/atomic-swap/Cargo.toml | 39 ++++++ frame/atomic-swap/src/lib.rs | 248 +++++++++++++++++++++++++++++++++ frame/atomic-swap/src/tests.rs | 155 +++++++++++++++++++++ 6 files changed, 460 insertions(+), 2 deletions(-) create mode 100644 frame/atomic-swap/Cargo.toml create mode 100644 frame/atomic-swap/src/lib.rs create mode 100644 frame/atomic-swap/src/tests.rs diff --git a/Cargo.lock b/Cargo.lock index f67d22aa6e..764157e847 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3845,6 +3845,21 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-atomic-swap" +version = "2.0.0-rc3" +dependencies = [ + "frame-support", + "frame-system", + "pallet-balances", + "parity-scale-codec", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-aura" version = "2.0.0-rc3" diff --git a/Cargo.toml b/Cargo.toml index d3004fcadc..d1c7339b99 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -60,6 +60,7 @@ members = [ "utils/wasm-builder-runner", "frame/assets", "frame/aura", + "frame/atomic-swap", "frame/authority-discovery", "frame/authorship", "frame/babe", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index feb1b05a8e..cf3d262298 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -97,8 +97,8 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. 
- spec_version: 252, - impl_version: 1, + spec_version: 253, + impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, }; diff --git a/frame/atomic-swap/Cargo.toml b/frame/atomic-swap/Cargo.toml new file mode 100644 index 0000000000..be197096e7 --- /dev/null +++ b/frame/atomic-swap/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "pallet-atomic-swap" +version = "2.0.0-rc3" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME atomic swap pallet" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +serde = { version = "1.0.101", optional = true } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } +sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } + +[dev-dependencies] +pallet-balances = { version = "2.0.0-rc3", default-features = false, path = "../balances" } + +[features] +default = ["std"] +std = [ + "serde", + "codec/std", + "frame-support/std", + "frame-system/std", + "sp-runtime/std", + "sp-std/std", + "sp-io/std", + "sp-core/std", + "pallet-balances/std", +] diff --git a/frame/atomic-swap/src/lib.rs b/frame/atomic-swap/src/lib.rs new file mode 100644 index 0000000000..aa33c9a849 --- /dev/null +++ b/frame/atomic-swap/src/lib.rs @@ -0,0 +1,248 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Atomic swap support pallet + +// Ensure we're `no_std` when compiling for Wasm. +#![cfg_attr(not(feature = "std"), no_std)] + +mod tests; + +use sp_std::prelude::*; +use sp_io::hashing::blake2_256; +use frame_support::{ + decl_module, decl_storage, decl_event, decl_error, ensure, + traits::{Get, Currency, ReservableCurrency, BalanceStatus}, + weights::Weight, + dispatch::DispatchResult, +}; +use frame_system::{self as system, ensure_signed}; +use codec::{Encode, Decode}; +use sp_runtime::RuntimeDebug; + +/// Pending atomic swap operation. +#[derive(Clone, RuntimeDebug, Eq, PartialEq, Encode, Decode)] +pub struct PendingSwap { + /// Source of the swap. + pub source: AccountId, + /// Balance value of the swap. + pub balance: Balance, + /// End block of the lock. + pub end_block: BlockNumber, +} + +/// Balance type from the pallet's point of view. 
+pub type BalanceFor = <::Currency as Currency<::AccountId>>::Balance; + +/// AccountId type from the pallet's point of view. +pub type AccountIdFor = ::AccountId; + +/// BlockNumber type from the pallet's point of view. +pub type BlockNumberFor = ::BlockNumber; + +/// PendingSwap type from the pallet's point of view. +pub type PendingSwapFor = PendingSwap, BalanceFor, BlockNumberFor>; + +/// Hashed proof type. +pub type HashedProof = [u8; 32]; + +/// Atomic swap's pallet configuration trait. +pub trait Trait: frame_system::Trait { + /// The overarching event type. + type Event: From> + Into<::Event>; + /// The currency mechanism. + type Currency: ReservableCurrency; + /// Limit of proof size. + /// + /// Atomic swap is only atomic if once the proof is revealed, both parties can submit the proofs + /// on-chain. If A is the one that generates the proof, then it requires that either: + /// - A's blockchain has the same proof length limit as B's blockchain. + /// - Or A's blockchain has shorter proof length limit as B's blockchain. + /// + /// If B sees A is on a blockchain with larger proof length limit, then it should kindly refuse + /// to accept the atomic swap request if A generates the proof, and asks that B generates the + /// proof instead. + type ProofLimit: Get; +} + +decl_storage! { + trait Store for Module as AtomicSwap { + pub PendingSwaps: double_map + hasher(twox_64_concat) T::AccountId, hasher(blake2_128_concat) HashedProof + => Option>; + } +} + +decl_error! { + pub enum Error for Module { + /// Swap already exists. + AlreadyExist, + /// Swap proof is invalid. + InvalidProof, + /// Proof is too large. + ProofTooLarge, + /// Source does not match. + SourceMismatch, + /// Swap has already been claimed. + AlreadyClaimed, + /// Swap does not exist. + NotExist, + /// Duration has not yet passed for the swap to be cancelled. + DurationNotPassed, + } +} + +decl_event!( + /// Event of atomic swap pallet. + pub enum Event where + Balance = BalanceFor, + AccountId = AccountIdFor, + PendingSwap = PendingSwapFor, + { + /// Swap created. + NewSwap(AccountId, HashedProof, PendingSwap), + /// Swap claimed. The last parameter indicates whether the execution succeeds. + SwapClaimed(AccountId, HashedProof, Balance, bool), + /// Swap cancelled. + SwapCancelled(AccountId, HashedProof), + } +); + +decl_module! { + /// Module definition of atomic swap pallet. + pub struct Module for enum Call where origin: T::Origin { + type Error = Error; + + fn deposit_event() = default; + + /// Register a new atomic swap, declaring an intention to send funds from origin to target + /// on the current blockchain. The target can claim the fund using the revealed proof. If + /// the fund is not claimed after `duration` blocks, then the sender can cancel the swap. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// - `target`: Receiver of the atomic swap. + /// - `hashed_proof`: The blake2_256 hash of the secret proof. + /// - `balance`: Funds to be sent from origin. + /// - `duration`: Locked duration of the atomic swap. For safety reasons, it is recommended + /// that the revealer uses a shorter duration than the counterparty, to prevent the + /// situation where the revealer reveals the proof too late around the end block. 
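A small sketch of the proof/hashed-proof relationship the dispatchables of this pallet rely on: the pending swap is keyed by `blake2_256(secret)`, and `claim_swap` simply recomputes that hash from the revealed secret. It uses `sp_io::hashing::blake2_256` like the pallet itself; the secret bytes are an arbitrary example (the same ones the test further down happens to use):

use sp_io::hashing::blake2_256;

fn main() {
    // Known only to the party that generated it, until it is revealed on-chain.
    let secret: [u8; 2] = [4, 2];

    // Published when the swap is created (`create_swap` takes the hash, not the secret).
    let hashed_proof: [u8; 32] = blake2_256(&secret);

    // Claiming later with the revealed secret reproduces exactly the same key.
    assert_eq!(blake2_256(&secret), hashed_proof);
    println!("swap key = {:?}", hashed_proof);
}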
+ #[weight = T::DbWeight::get().reads_writes(1, 1).saturating_add(40_000_000)] + fn create_swap( + origin, + target: AccountIdFor, + hashed_proof: HashedProof, + balance: BalanceFor, + duration: BlockNumberFor, + ) { + let source = ensure_signed(origin)?; + ensure!( + !PendingSwaps::::contains_key(&target, hashed_proof), + Error::::AlreadyExist + ); + + T::Currency::reserve(&source, balance)?; + + let swap = PendingSwap { + source, + balance, + end_block: frame_system::Module::::block_number() + duration, + }; + PendingSwaps::::insert(target.clone(), hashed_proof.clone(), swap.clone()); + + Self::deposit_event( + RawEvent::NewSwap(target, hashed_proof, swap) + ); + } + + /// Claim an atomic swap. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// - `proof`: Revealed proof of the claim. + #[weight = T::DbWeight::get().reads_writes(2, 2) + .saturating_add(40_000_000) + .saturating_add((proof.len() as Weight).saturating_mul(100)) + ] + fn claim_swap( + origin, + proof: Vec, + ) -> DispatchResult { + ensure!( + proof.len() <= T::ProofLimit::get() as usize, + Error::::ProofTooLarge, + ); + + let target = ensure_signed(origin)?; + let hashed_proof = blake2_256(&proof); + + let swap = PendingSwaps::::get(&target, hashed_proof) + .ok_or(Error::::InvalidProof)?; + + let succeeded = T::Currency::repatriate_reserved( + &swap.source, + &target, + swap.balance, + BalanceStatus::Free, + ).is_ok(); + + PendingSwaps::::remove(target.clone(), hashed_proof.clone()); + + Self::deposit_event( + RawEvent::SwapClaimed(target, hashed_proof, swap.balance, succeeded) + ); + + Ok(()) + } + + /// Cancel an atomic swap. Only possible after the originally set duration has passed. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// - `target`: Target of the original atomic swap. + /// - `hashed_proof`: Hashed proof of the original atomic swap. + #[weight = T::DbWeight::get().reads_writes(1, 1).saturating_add(40_000_000)] + fn cancel_swap( + origin, + target: AccountIdFor, + hashed_proof: HashedProof, + ) { + let source = ensure_signed(origin)?; + + let swap = PendingSwaps::::get(&target, hashed_proof) + .ok_or(Error::::NotExist)?; + ensure!( + swap.source == source, + Error::::SourceMismatch, + ); + ensure!( + frame_system::Module::::block_number() >= swap.end_block, + Error::::DurationNotPassed, + ); + + T::Currency::unreserve( + &swap.source, + swap.balance, + ); + PendingSwaps::::remove(&target, hashed_proof.clone()); + + Self::deposit_event( + RawEvent::SwapCancelled(target, hashed_proof) + ); + } + } +} diff --git a/frame/atomic-swap/src/tests.rs b/frame/atomic-swap/src/tests.rs new file mode 100644 index 0000000000..72db841de1 --- /dev/null +++ b/frame/atomic-swap/src/tests.rs @@ -0,0 +1,155 @@ +#![cfg(test)] + +use super::*; + +use frame_support::{ + impl_outer_origin, parameter_types, weights::Weight, +}; +use sp_core::H256; +// The testing primitives are very useful for avoiding having to work with signatures +// or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. +use sp_runtime::{ + Perbill, + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; + +impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} +} + +// For testing the pallet, we construct most of a mock runtime. This means +// first constructing a configuration type (`Test`) which `impl`s each of the +// configuration traits of pallets we want to use. +#[derive(Clone, Eq, PartialEq)] +pub struct Test; +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); +} +impl frame_system::Trait for Test { + type BaseCallFilter = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Call = (); + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type BlockExecutionWeight = (); + type ExtrinsicBaseWeight = (); + type MaximumExtrinsicWeight = MaximumBlockWeight; + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); +} +parameter_types! { + pub const ExistentialDeposit: u64 = 1; +} +impl pallet_balances::Trait for Test { + type Balance = u64; + type DustRemoval = (); + type Event = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; +} +parameter_types! { + pub const ProofLimit: u32 = 1024; + pub const ExpireDuration: u64 = 100; +} +impl Trait for Test { + type Event = (); + type Currency = Balances; + type ProofLimit = ProofLimit; +} +type System = frame_system::Module; +type Balances = pallet_balances::Module; +type AtomicSwap = Module; + +const A: u64 = 1; +const B: u64 = 2; + +pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let genesis = pallet_balances::GenesisConfig:: { + balances: vec![ + (A, 100), + (B, 200), + ], + }; + genesis.assimilate_storage(&mut t).unwrap(); + t.into() +} + +#[test] +fn two_party_successful_swap() { + let mut chain1 = new_test_ext(); + let mut chain2 = new_test_ext(); + + // A generates a random proof. Keep it secret. + let proof: [u8; 2] = [4, 2]; + // The hashed proof is the blake2_256 hash of the proof. This is public. + let hashed_proof = blake2_256(&proof); + + // A creates the swap on chain1. + chain1.execute_with(|| { + AtomicSwap::create_swap( + Origin::signed(A), + B, + hashed_proof.clone(), + 50, + 1000, + ).unwrap(); + + assert_eq!(Balances::free_balance(A), 100 - 50); + assert_eq!(Balances::free_balance(B), 200); + }); + + // B creates the swap on chain2. + chain2.execute_with(|| { + AtomicSwap::create_swap( + Origin::signed(B), + A, + hashed_proof.clone(), + 75, + 1000, + ).unwrap(); + + assert_eq!(Balances::free_balance(A), 100); + assert_eq!(Balances::free_balance(B), 200 - 75); + }); + + // A reveals the proof and claims the swap on chain2. + chain2.execute_with(|| { + AtomicSwap::claim_swap( + Origin::signed(A), + proof.to_vec(), + ).unwrap(); + + assert_eq!(Balances::free_balance(A), 100 + 75); + assert_eq!(Balances::free_balance(B), 200 - 75); + }); + + // B use the revealed proof to claim the swap on chain1. 
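// (Once A submitted `claim_swap` on chain2 above, the raw proof became public
// as part of that extrinsic, which is what lets B reuse it on chain1 here.)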
+ chain1.execute_with(|| { + AtomicSwap::claim_swap( + Origin::signed(B), + proof.to_vec(), + ).unwrap(); + + assert_eq!(Balances::free_balance(A), 100 - 50); + assert_eq!(Balances::free_balance(B), 200 + 50); + }); +} -- GitLab From 81ba3e2809056899c050c29154a1bc6b25653b48 Mon Sep 17 00:00:00 2001 From: mattrutherford <44339188+mattrutherford@users.noreply.github.com> Date: Thu, 18 Jun 2020 08:44:03 +0100 Subject: [PATCH 015/144] Runtime interface to add support for tracing from wasm (#6381) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add span recording to tracing implementation * Add tracing proxy * switch to rustc_hash::FxHashMap * Replace lazy_static and hashmap with thread_local and vec. * fix marking valid span as invalid while removing invalid spans * refactor, add wasm_tracing module in `support` * update registered spans * tidy up * typos * refactor * update flag name to signal lost trace - `is_valid_trace` * update flag name to signal lost trace - `is_valid_trace` * update docs * update docs * Use tracing Field recording to store the actual `name` and `target` from wasm traces. * fix debug log in subscriber + small refactor * add tests * handle misuse in case trying to exit span not held * Implement filter for wasm traces, simplify field recording for primitive types * remove superfluous warning * update docs * Update primitives/tracing/src/proxy.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Apply suggestions from code review Co-authored-by: Bastian Köcher * update docs, apply suggestions * move Proxy from thread_local to `Extension`, rename macro * fix test * unify native & wasm span macro calls * implement wasm tracing control facility in primitives and frame * add cli flag `--wasm-tracing` * fix * switch to `Option` (possible performance degradation), switch to static mut bool * performance improvement using u64 vs Option * performance improvement moving concat to client * update docs * Update client/cli/src/params/import_params.rs Co-authored-by: Cecile Tonglet * performance improvement * Revert "performance improvement" This reverts commit cff0aa2670cd1d380f1893f0a6f4d498b384e7b7. 
* small refactor * formatting * bump impl_version * Update client/cli/src/config.rs Co-authored-by: Bastian Köcher * update docs * small fixes, remove pub static * nit * add integration tests and refactor Subscriber * tests * revert formatting * try fix test that works locally but not in CI * try fix test that works locally but not in CI * debug test that works locally but not in CI * fix test that works locally but not in CI * remove pub visibility from bool in runtime * make TracingSpanGuard #[cfg(not(feature = "std"))], update docs, comments * make TracingProxy drop implementation conditional on !empty state * add docs for TraceHandler * remove blank line * update expect message * update tests * rename cli option to tracing_enable_wasm * rename cli option to tracing_enable_wasm * fix * ensure wasm-tracing features are wasm only * bump impl_version * bump impl_version * add `"pallet-scheduler/std"` to `[features]` `std` in node/runtime * refactor service to remove sp_tracing dependency * refactor: line width, trait bounds * improve LogTraceHandler output * fix test * improve tracing log output * Apply suggestions from code review * Apply suggestions from code review Co-authored-by: Bastian Köcher * swap wasm indication from trace name to a separate value * Update client/tracing/src/lib.rs * add docs * remove runtime features remove wasm_tracing option from CLI remove wasm_tracing flag from ProfilingSubscriber Co-authored-by: Matt Rutherford Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Bastian Köcher Co-authored-by: Cecile Tonglet --- Cargo.lock | 38 ++- bin/node/runtime/Cargo.toml | 1 + client/executor/Cargo.toml | 3 + client/executor/runtime-test/src/lib.rs | 10 +- client/executor/src/integration_tests/mod.rs | 99 ++++++++ client/tracing/Cargo.toml | 2 + client/tracing/src/lib.rs | 237 +++++++++++++------ primitives/io/Cargo.toml | 1 + primitives/io/src/lib.rs | 52 +++- primitives/tracing/Cargo.toml | 4 +- primitives/tracing/src/lib.rs | 34 ++- primitives/tracing/src/proxy.rs | 165 +++++++++++++ 12 files changed, 561 insertions(+), 85 deletions(-) create mode 100644 primitives/tracing/src/proxy.rs diff --git a/Cargo.lock b/Cargo.lock index 764157e847..2c1d3e2c4b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5636,6 +5636,27 @@ dependencies = [ "winapi 0.3.8", ] +[[package]] +name = "rental" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8545debe98b2b139fb04cad8618b530e9b07c152d99a5de83c860b877d67847f" +dependencies = [ + "rental-impl", + "stable_deref_trait", +] + +[[package]] +name = "rental-impl" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "475e68978dc5b743f2f40d8e0a8fdc83f1c5e78cbf4b8fa5e74e73beebc340de" +dependencies = [ + "proc-macro2", + "quote 1.0.6", + "syn 1.0.17", +] + [[package]] name = "ring" version = "0.16.12" @@ -6246,6 +6267,7 @@ dependencies = [ "sc-executor-wasmi", "sc-executor-wasmtime", "sc-runtime-test", + "sc-tracing", "sp-api", "sp-core", "sp-externalities", @@ -6255,11 +6277,13 @@ dependencies = [ "sp-runtime-interface", "sp-serializer", "sp-state-machine", + "sp-tracing", "sp-trie", "sp-version", "sp-wasm-interface", "substrate-test-runtime", "test-case", + "tracing", "wabt", "wasmi", ] @@ -6820,10 +6844,12 @@ dependencies = [ "erased-serde", "log", "parking_lot 0.10.2", + "rustc-hash", "sc-telemetry", "serde", "serde_json", "slog", + "sp-tracing", "tracing", "tracing-core", ] @@ -7581,6 +7607,7 @@ dependencies = [ 
"sp-runtime-interface", "sp-state-machine", "sp-std", + "sp-tracing", "sp-trie", "sp-wasm-interface", ] @@ -7856,6 +7883,8 @@ dependencies = [ name = "sp-tracing" version = "2.0.0-rc3" dependencies = [ + "log", + "rental", "tracing", ] @@ -8914,9 +8943,9 @@ checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" [[package]] name = "tracing" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1721cc8cf7d770cc4257872507180f35a4797272f5962f24c806af9e7faf52ab" +checksum = "a7c6b59d116d218cb2d990eb06b77b64043e0268ef7323aae63d8b30ae462923" dependencies = [ "cfg-if", "tracing-attributes", @@ -8925,10 +8954,11 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fbad39da2f9af1cae3016339ad7f2c7a9e870f12e8fd04c4fd7ef35b30c0d2b" +checksum = "99bbad0de3fd923c9c3232ead88510b783e5a4d16a6154adffa3d53308de984c" dependencies = [ + "proc-macro2", "quote 1.0.6", "syn 1.0.17", ] diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index ebe3196dd7..7cc4018fb6 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -111,6 +111,7 @@ std = [ "pallet-membership/std", "pallet-multisig/std", "pallet-identity/std", + "pallet-scheduler/std", "node-primitives/std", "sp-offchain/std", "pallet-offences/std", diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index 9eee3de1e2..96d2d9eb94 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -44,6 +44,9 @@ substrate-test-runtime = { version = "2.0.0-rc3", path = "../../test-utils/runti sp-state-machine = { version = "0.8.0-rc3", path = "../../primitives/state-machine" } test-case = "0.3.3" sp-runtime = { version = "2.0.0-rc3", path = "../../primitives/runtime" } +sp-tracing = { version = "2.0.0-rc3", path = "../../primitives/tracing" } +sc-tracing = { version = "2.0.0-rc3", path = "../tracing" } +tracing = "0.1.14" [features] default = [ "std" ] diff --git a/client/executor/runtime-test/src/lib.rs b/client/executor/runtime-test/src/lib.rs index dc6bab759e..4962c558ea 100644 --- a/client/executor/runtime-test/src/lib.rs +++ b/client/executor/runtime-test/src/lib.rs @@ -10,7 +10,7 @@ use sp_std::{vec::Vec, vec}; #[cfg(not(feature = "std"))] use sp_io::{ storage, hashing::{blake2_128, blake2_256, sha2_256, twox_128, twox_256}, - crypto::{ed25519_verify, sr25519_verify}, + crypto::{ed25519_verify, sr25519_verify}, wasm_tracing, }; #[cfg(not(feature = "std"))] use sp_runtime::{print, traits::{BlakeTwo256, Hash}}; @@ -246,6 +246,14 @@ sp_core::wasm_export_functions! 
{ sp_allocator::FreeingBumpHeapAllocator::new(0); } + fn test_enter_span() -> u64 { + wasm_tracing::enter_span("integration_test_span_target", "integration_test_span_name") + } + + fn test_exit_span(span_id: u64) { + wasm_tracing::exit_span(span_id) + } + fn returns_mutable_static() -> u64 { unsafe { MUTABLE_STATIC += 1; diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index 80b123ed4b..f07e98178b 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -658,3 +658,102 @@ fn parallel_execution(wasm_method: WasmExecutionMethod) { t.join().unwrap(); } } + +#[test_case(WasmExecutionMethod::Interpreted)] +fn wasm_tracing_should_work(wasm_method: WasmExecutionMethod) { + + use std::sync::{Arc, Mutex}; + + use sc_tracing::SpanDatum; + + impl sc_tracing::TraceHandler for TestTraceHandler { + fn process_span(&self, sd: SpanDatum) { + self.0.lock().unwrap().push(sd); + } + } + + struct TestTraceHandler(Arc>>); + + let traces = Arc::new(Mutex::new(Vec::new())); + let handler = TestTraceHandler(traces.clone()); + + // Create subscriber with wasm_tracing disabled + let test_subscriber = sc_tracing::ProfilingSubscriber::new_with_handler( + Box::new(handler), "integration_test_span_target"); + + let _guard = tracing::subscriber::set_default(test_subscriber); + + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); + + // Test tracing disabled + assert!(!sp_tracing::wasm_tracing_enabled()); + + let span_id = call_in_wasm( + "test_enter_span", + &[], + wasm_method, + &mut ext, + ).unwrap(); + + assert_eq!( + 0u64.encode(), + span_id + ); + // Repeat to check span id always 0 when deactivated + let span_id = call_in_wasm( + "test_enter_span", + &[], + wasm_method, + &mut ext, + ).unwrap(); + + assert_eq!( + 0u64.encode(), + span_id + ); + + call_in_wasm( + "test_exit_span", + &span_id.encode(), + wasm_method, + &mut ext, + ).unwrap(); + // Check span has not been recorded + let len = traces.lock().unwrap().len(); + assert_eq!(len, 0); + + // Test tracing enabled + sp_tracing::set_wasm_tracing(true); + + let span_id = call_in_wasm( + "test_enter_span", + &[], + wasm_method, + &mut ext, + ).unwrap(); + + let span_id = u64::decode(&mut &span_id[..]).unwrap(); + + assert!( + span_id > 0 + ); + + call_in_wasm( + "test_exit_span", + &span_id.encode(), + wasm_method, + &mut ext, + ).unwrap(); + + // Check there is only the single trace + let len = traces.lock().unwrap().len(); + assert_eq!(len, 1); + + let span_datum = traces.lock().unwrap().pop().unwrap(); + let values = span_datum.values.into_inner(); + assert_eq!(span_datum.target, "integration_test_span_target"); + assert_eq!(span_datum.name, "integration_test_span_name"); + assert_eq!(values.get("wasm").unwrap(), "true"); + assert_eq!(values.get("is_valid_trace").unwrap(), "true"); +} diff --git a/client/tracing/Cargo.toml b/client/tracing/Cargo.toml index bc402442b9..c4345648ef 100644 --- a/client/tracing/Cargo.toml +++ b/client/tracing/Cargo.toml @@ -15,10 +15,12 @@ targets = ["x86_64-unknown-linux-gnu"] erased-serde = "0.3.9" log = { version = "0.4.8" } parking_lot = "0.10.0" +rustc-hash = "1.1.0" serde = "1.0.101" serde_json = "1.0.41" slog = { version = "2.5.2", features = ["nested-values"] } tracing-core = "0.1.7" +sp-tracing = { version = "2.0.0-rc2", path = "../../primitives/tracing" } sc-telemetry = { version = "2.0.0-rc3", path = "../telemetry" } diff --git a/client/tracing/src/lib.rs b/client/tracing/src/lib.rs index 
d450700ed3..c62b8d5b1e 100644 --- a/client/tracing/src/lib.rs +++ b/client/tracing/src/lib.rs @@ -24,7 +24,7 @@ //! //! Currently we provide `Log` (default), `Telemetry` variants for `Receiver` -use std::collections::HashMap; +use rustc_hash::FxHashMap; use std::fmt; use std::sync::atomic::{AtomicU64, Ordering}; use std::time::{Duration, Instant}; @@ -38,10 +38,14 @@ use tracing_core::{ Level, metadata::Metadata, span::{Attributes, Id, Record}, - subscriber::Subscriber + subscriber::Subscriber, }; use sc_telemetry::{telemetry, SUBSTRATE_INFO}; +use sp_tracing::proxy::{WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER}; + +const ZERO_DURATION: Duration = Duration::from_nanos(0); +const PROXY_TARGET: &'static str = "sp_tracing::proxy"; /// Used to configure how to receive the metrics #[derive(Debug, Clone)] @@ -58,36 +62,55 @@ impl Default for TracingReceiver { } } +/// A handler for tracing `SpanDatum` +pub trait TraceHandler: Send + Sync { + /// Process a `SpanDatum` + fn process_span(&self, span: SpanDatum); +} + +/// Represents a single instance of a tracing span #[derive(Debug)] -struct SpanDatum { - id: u64, - name: &'static str, - target: &'static str, - level: Level, - line: u32, - start_time: Instant, - overall_time: Duration, - values: Visitor, +pub struct SpanDatum { + pub id: u64, + pub name: String, + pub target: String, + pub level: Level, + pub line: u32, + pub start_time: Instant, + pub overall_time: Duration, + pub values: Visitor, } +/// Holds associated values for a tracing span #[derive(Clone, Debug)] -struct Visitor(Vec<(String, String)>); +pub struct Visitor(FxHashMap); + +impl Visitor { + /// Consume the Visitor, returning the inner FxHashMap + pub fn into_inner(self) -> FxHashMap { + self.0 + } +} impl Visit for Visitor { fn record_i64(&mut self, field: &Field, value: i64) { - self.record_debug(field, &value) + self.0.insert(field.name().to_string(), value.to_string()); } fn record_u64(&mut self, field: &Field, value: u64) { - self.record_debug(field, &value) + self.0.insert(field.name().to_string(), value.to_string()); } fn record_bool(&mut self, field: &Field, value: bool) { - self.record_debug(field, &value) + self.0.insert(field.name().to_string(), value.to_string()); + } + + fn record_str(&mut self, field: &Field, value: &str) { + self.0.insert(field.name().to_string(), value.to_owned()); } fn record_debug(&mut self, field: &Field, value: &dyn std::fmt::Debug) { - self.0.push((field.name().to_string(), format!("{:?}",value))); + self.0.insert(field.name().to_string(), format!("{:?}", value)); } } @@ -105,7 +128,7 @@ impl Serialize for Visitor { impl fmt::Display for Visitor { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let values = self.0.iter().map(|(k,v)| format!("{}={}",k,v)).collect::>().join(", "); + let values = self.0.iter().map(|(k, v)| format!("{}={}", k, v)).collect::>().join(", "); write!(f, "{}", values) } } @@ -135,23 +158,50 @@ impl Value for Visitor { pub struct ProfilingSubscriber { next_id: AtomicU64, targets: Vec<(String, Level)>, - receiver: TracingReceiver, - span_data: Mutex>, + trace_handler: Box, + span_data: Mutex>, } impl ProfilingSubscriber { - /// Takes a `Receiver` and a comma separated list of targets, - /// either with a level: "pallet=trace" - /// or without: "pallet". 
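// For example (target names here are illustrative),
// `ProfilingSubscriber::new(TracingReceiver::Log, "pallet=trace,frame=debug")`
// enables spans whose target starts with "pallet" at TRACE and with "frame" at
// DEBUG, while `"pallet,frame"` enables both at the default TRACE level.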
- pub fn new(receiver: TracingReceiver, targets: &str) -> Self { + /// Takes a `TracingReceiver` and a comma separated list of targets, + /// either with a level: "pallet=trace,frame=debug" + /// or without: "pallet,frame" in which case the level defaults to `trace`. + /// wasm_tracing indicates whether to enable wasm traces + pub fn new(receiver: TracingReceiver, targets: &str) -> ProfilingSubscriber { + match receiver { + TracingReceiver::Log => Self::new_with_handler(Box::new(LogTraceHandler), targets), + TracingReceiver::Telemetry => Self::new_with_handler( + Box::new(TelemetryTraceHandler), + targets, + ), + } + } + + /// Allows use of a custom TraceHandler to create a new instance of ProfilingSubscriber. + /// Takes a comma separated list of targets, + /// either with a level, eg: "pallet=trace" + /// or without: "pallet" in which case the level defaults to `trace`. + /// wasm_tracing indicates whether to enable wasm traces + pub fn new_with_handler(trace_handler: Box, targets: &str) + -> ProfilingSubscriber + { let targets: Vec<_> = targets.split(',').map(|s| parse_target(s)).collect(); ProfilingSubscriber { next_id: AtomicU64::new(1), targets, - receiver, - span_data: Mutex::new(HashMap::new()), + trace_handler, + span_data: Mutex::new(FxHashMap::default()), } } + + fn check_target(&self, target: &str, level: &Level) -> bool { + for t in &self.targets { + if target.starts_with(t.0.as_str()) && level <= &t.1 { + return true; + } + } + false + } } // Default to TRACE if no level given or unable to parse Level @@ -173,36 +223,45 @@ fn parse_target(s: &str) -> (String, Level) { impl Subscriber for ProfilingSubscriber { fn enabled(&self, metadata: &Metadata<'_>) -> bool { - for t in &self.targets { - if metadata.target().starts_with(t.0.as_str()) && metadata.level() <= &t.1 { - log::debug!("Enabled target: {}, level: {}", metadata.target(), metadata.level()); - return true; - } else { - log::debug!("Disabled target: {}, level: {}", metadata.target(), metadata.level()); - } + if metadata.target() == PROXY_TARGET || self.check_target(metadata.target(), metadata.level()) { + log::debug!(target: "tracing", "Enabled target: {}, level: {}", metadata.target(), metadata.level()); + true + } else { + log::debug!(target: "tracing", "Disabled target: {}, level: {}", metadata.target(), metadata.level()); + false } - false } fn new_span(&self, attrs: &Attributes<'_>) -> Id { let id = self.next_id.fetch_add(1, Ordering::Relaxed); - let mut values = Visitor(Vec::new()); + let mut values = Visitor(FxHashMap::default()); attrs.record(&mut values); + // If this is a wasm trace, check if target/level is enabled + if let Some(wasm_target) = values.0.get(WASM_TARGET_KEY) { + if !self.check_target(wasm_target, attrs.metadata().level()) { + return Id::from_u64(id); + } + } let span_datum = SpanDatum { id, - name: attrs.metadata().name(), - target: attrs.metadata().target(), + name: attrs.metadata().name().to_owned(), + target: attrs.metadata().target().to_owned(), level: attrs.metadata().level().clone(), line: attrs.metadata().line().unwrap_or(0), start_time: Instant::now(), - overall_time: Duration::from_nanos(0), + overall_time: ZERO_DURATION, values, }; self.span_data.lock().insert(id, span_datum); Id::from_u64(id) } - fn record(&self, _span: &Id, _values: &Record<'_>) {} + fn record(&self, span: &Id, values: &Record<'_>) { + let mut span_data = self.span_data.lock(); + if let Some(s) = span_data.get_mut(&span.into_u64()) { + values.record(&mut s.values); + } + } fn record_follows_from(&self, _span: &Id, 
_follows: &Id) {} @@ -213,65 +272,89 @@ impl Subscriber for ProfilingSubscriber { let start_time = Instant::now(); if let Some(mut s) = span_data.get_mut(&span.into_u64()) { s.start_time = start_time; - } else { - log::warn!("Tried to enter span {:?} that has already been closed!", span); } } fn exit(&self, span: &Id) { - let mut span_data = self.span_data.lock(); let end_time = Instant::now(); + let mut span_data = self.span_data.lock(); if let Some(mut s) = span_data.get_mut(&span.into_u64()) { s.overall_time = end_time - s.start_time + s.overall_time; } } fn try_close(&self, span: Id) -> bool { - let mut span_data = self.span_data.lock(); - if let Some(data) = span_data.remove(&span.into_u64()) { - self.send_span(data); + let span_datum = { + let mut span_data = self.span_data.lock(); + span_data.remove(&span.into_u64()) + }; + if let Some(mut span_datum) = span_datum { + if span_datum.name == WASM_TRACE_IDENTIFIER { + span_datum.values.0.insert("wasm".to_owned(), "true".to_owned()); + if let Some(n) = span_datum.values.0.remove(WASM_NAME_KEY) { + span_datum.name = n; + } + if let Some(t) = span_datum.values.0.remove(WASM_TARGET_KEY) { + span_datum.target = t; + } + } + if self.check_target(&span_datum.target, &span_datum.level) { + self.trace_handler.process_span(span_datum); + } }; true } } -impl ProfilingSubscriber { - fn send_span(&self, span_datum: SpanDatum) { - match self.receiver { - TracingReceiver::Log => print_log(span_datum), - TracingReceiver::Telemetry => send_telemetry(span_datum), - } +/// TraceHandler for sending span data to the logger +pub struct LogTraceHandler; + +fn log_level(level: Level) -> log::Level { + match level { + Level::TRACE => log::Level::Trace, + Level::DEBUG => log::Level::Debug, + Level::INFO => log::Level::Info, + Level::WARN => log::Level::Warn, + Level::ERROR => log::Level::Error, } } -fn print_log(span_datum: SpanDatum) { - if span_datum.values.0.is_empty() { - log::info!("TRACING: {} {}: {}, line: {}, time: {}", - span_datum.level, - span_datum.target, - span_datum.name, - span_datum.line, - span_datum.overall_time.as_nanos(), - ); - } else { - log::info!("TRACING: {} {}: {}, line: {}, time: {}, {}", - span_datum.level, - span_datum.target, - span_datum.name, - span_datum.line, - span_datum.overall_time.as_nanos(), - span_datum.values - ); +impl TraceHandler for LogTraceHandler { + fn process_span(&self, span_datum: SpanDatum) { + if span_datum.values.0.is_empty() { + log::log!( + log_level(span_datum.level), + "{}: {}, time: {}", + span_datum.target, + span_datum.name, + span_datum.overall_time.as_nanos(), + ); + } else { + log::log!( + log_level(span_datum.level), + "{}: {}, time: {}, {}", + span_datum.target, + span_datum.name, + span_datum.overall_time.as_nanos(), + span_datum.values, + ); + } } } -fn send_telemetry(span_datum: SpanDatum) { - telemetry!(SUBSTRATE_INFO; "tracing.profiling"; - "name" => span_datum.name, - "target" => span_datum.target, - "line" => span_datum.line, - "time" => span_datum.overall_time.as_nanos(), - "values" => span_datum.values - ); +/// TraceHandler for sending span data to telemetry, +/// Please see telemetry documentation for details on how to specify endpoints and +/// set the required telemetry level to activate tracing messages +pub struct TelemetryTraceHandler; + +impl TraceHandler for TelemetryTraceHandler { + fn process_span(&self, span_datum: SpanDatum) { + telemetry!(SUBSTRATE_INFO; "tracing.profiling"; + "name" => span_datum.name, + "target" => span_datum.target, + "line" => span_datum.line, + "time" 
=> span_datum.overall_time.as_nanos(), + "values" => span_datum.values + ); + } } - diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml index 353532b1b4..df66740d65 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -24,6 +24,7 @@ sp-wasm-interface = { version = "2.0.0-rc3", path = "../../primitives/wasm-inter sp-runtime-interface = { version = "2.0.0-rc3", default-features = false, path = "../runtime-interface" } sp-trie = { version = "2.0.0-rc3", optional = true, path = "../../primitives/trie" } sp-externalities = { version = "0.8.0-rc3", optional = true, path = "../externalities" } +sp-tracing = { version = "2.0.0-rc3", default-features = false, path = "../tracing" } log = { version = "0.4.8", optional = true } futures = { version = "0.3.1", features = ["thread-pool"], optional = true } parking_lot = { version = "0.10.0", optional = true } diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 8d81a84c4c..1d5e01bdff 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -216,7 +216,7 @@ pub trait DefaultChildStorage { /// Clear a child storage key. /// /// For the default child storage at `storage_key`, clear value at `key`. - fn clear ( + fn clear( &mut self, storage_key: &[u8], key: &[u8], @@ -965,6 +965,55 @@ pub trait Logging { } } +#[cfg(feature = "std")] +sp_externalities::decl_extension! { + /// Extension to allow running traces in wasm via Proxy + pub struct TracingProxyExt(sp_tracing::proxy::TracingProxy); +} + +/// Interface that provides functions for profiling the runtime. +#[runtime_interface] +pub trait WasmTracing { + /// To create and enter a `tracing` span, using `sp_tracing::proxy` + /// Returns 0 value to indicate that no further traces should be attempted + fn enter_span(&mut self, target: &str, name: &str) -> u64 { + if sp_tracing::wasm_tracing_enabled() { + match self.extension::() { + Some(proxy) => return proxy.enter_span(target, name), + None => { + if self.register_extension(TracingProxyExt(sp_tracing::proxy::TracingProxy::new())).is_ok() { + if let Some(proxy) = self.extension::() { + return proxy.enter_span(target, name); + } + } else { + log::warn!( + target: "tracing", + "Unable to register extension: TracingProxyExt" + ); + } + } + } + } + log::debug!( + target: "tracing", + "Notify to runtime that tracing is disabled." + ); + 0 + } + + /// Exit a `tracing` span, using `sp_tracing::proxy` + fn exit_span(&mut self, id: u64) { + if let Some(proxy) = self.extension::() { + proxy.exit_span(id) + } else { + log::warn!( + target: "tracing", + "Unable to load extension: TracingProxyExt" + ); + } + } +} + /// Wasm-only interface that provides functions for interacting with the sandbox. 
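// A minimal usage sketch of the `WasmTracing` interface above from runtime
// (`no_std`) code; the target and name strings are hypothetical:
//
//     let span_id = sp_io::wasm_tracing::enter_span("my-pallet", "heavy_step");
//     // ... work to be measured ...
//     sp_io::wasm_tracing::exit_span(span_id);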
#[runtime_interface(wasm_only)] pub trait Sandbox { @@ -1111,6 +1160,7 @@ pub type SubstrateHostFunctions = ( storage::HostFunctions, default_child_storage::HostFunctions, misc::HostFunctions, + wasm_tracing::HostFunctions, offchain::HostFunctions, crypto::HostFunctions, hashing::HostFunctions, diff --git a/primitives/tracing/Cargo.toml b/primitives/tracing/Cargo.toml index f0560adb06..e47d9859c9 100644 --- a/primitives/tracing/Cargo.toml +++ b/primitives/tracing/Cargo.toml @@ -13,7 +13,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] tracing = { version = "0.1.13", optional = true } +rental = { version = "0.5.5", optional = true } +log = { version = "0.4.8", optional = true } [features] default = [ "std" ] -std = [ "tracing" ] +std = [ "tracing", "rental", "log" ] diff --git a/primitives/tracing/src/lib.rs b/primitives/tracing/src/lib.rs index fa43f812d2..e82d8861cd 100644 --- a/primitives/tracing/src/lib.rs +++ b/primitives/tracing/src/lib.rs @@ -19,13 +19,35 @@ //! //! To trace functions or invidual code in Substrate, this crate provides [`tracing_span`] //! and [`enter_span`]. See the individual docs for how to use these macros. - +//! +//! Note that to allow traces from wasm execution environment there are +//! 2 reserved identifiers for tracing `Field` recording, stored in the consts: +//! `WASM_TARGET_KEY` and `WASM_NAME_KEY` - if you choose to record fields, you +//! must ensure that your identifiers do not clash with either of these. +//! +//! Additionally, we have a const: `WASM_TRACE_IDENTIFIER`, which holds a span name used +//! to signal that the 'actual' span name and target should be retrieved instead from +//! the associated Fields mentioned above. #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(feature = "std")] +#[macro_use] +extern crate rental; + #[cfg(feature = "std")] #[doc(hidden)] pub use tracing; +#[cfg(feature = "std")] +pub mod proxy; + +#[cfg(feature = "std")] +use std::sync::atomic::{AtomicBool, Ordering}; + +/// Flag to signal whether to run wasm tracing +#[cfg(feature = "std")] +static WASM_TRACING_ENABLED: AtomicBool = AtomicBool::new(false); + /// Runs given code within a tracing span, measuring it's execution time. /// /// If tracing is not enabled, the code is still executed. @@ -83,3 +105,13 @@ macro_rules! if_tracing { macro_rules! if_tracing { ( $if:expr ) => {{}} } + +#[cfg(feature = "std")] +pub fn wasm_tracing_enabled() -> bool { + WASM_TRACING_ENABLED.load(Ordering::Relaxed) +} + +#[cfg(feature = "std")] +pub fn set_wasm_tracing(b: bool) { + WASM_TRACING_ENABLED.store(b, Ordering::Relaxed) +} \ No newline at end of file diff --git a/primitives/tracing/src/proxy.rs b/primitives/tracing/src/proxy.rs new file mode 100644 index 0000000000..270f57aaa6 --- /dev/null +++ b/primitives/tracing/src/proxy.rs @@ -0,0 +1,165 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! 
Proxy to allow entering tracing spans from wasm. +//! +//! Use `enter_span` and `exit_span` to surround the code that you wish to trace +use rental; +use tracing::info_span; + +/// Used to identify a proxied WASM trace +pub const WASM_TRACE_IDENTIFIER: &'static str = "WASM_TRACE"; +/// Used to extract the real `target` from the associated values of the span +pub const WASM_TARGET_KEY: &'static str = "proxied_wasm_target"; +/// Used to extract the real `name` from the associated values of the span +pub const WASM_NAME_KEY: &'static str = "proxied_wasm_name"; + +const MAX_SPANS_LEN: usize = 1000; + +rental! { + pub mod rent_span { + #[rental] + pub struct SpanAndGuard { + span: Box, + guard: tracing::span::Entered<'span>, + } + } +} + +/// Requires a tracing::Subscriber to process span traces, +/// this is available when running with client (and relevant cli params). +pub struct TracingProxy { + next_id: u64, + spans: Vec<(u64, rent_span::SpanAndGuard)>, +} + +impl Drop for TracingProxy { + fn drop(&mut self) { + if !self.spans.is_empty() { + log::debug!( + target: "tracing", + "Dropping TracingProxy with {} un-exited spans, marking as not valid", self.spans.len() + ); + while let Some((_, mut sg)) = self.spans.pop() { + sg.rent_all_mut(|s| { s.span.record("is_valid_trace", &false); }); + } + } + } +} + +impl TracingProxy { + pub fn new() -> TracingProxy { + TracingProxy { + next_id: 0, + spans: Vec::new(), + } + } +} + +impl TracingProxy { + /// Create and enter a `tracing` Span, returning the span id, + /// which should be passed to `exit_span(id)` to signal that the span should exit. + pub fn enter_span(&mut self, proxied_wasm_target: &str, proxied_wasm_name: &str) -> u64 { + // The identifiers `proxied_wasm_target` and `proxied_wasm_name` must match their associated const, + // WASM_TARGET_KEY and WASM_NAME_KEY. + let span = info_span!(WASM_TRACE_IDENTIFIER, is_valid_trace = true, proxied_wasm_target, proxied_wasm_name); + self.next_id += 1; + let sg = rent_span::SpanAndGuard::new( + Box::new(span), + |span| span.enter(), + ); + self.spans.push((self.next_id, sg)); + if self.spans.len() > MAX_SPANS_LEN { + // This is to prevent unbounded growth of Vec and could mean one of the following: + // 1. Too many nested spans, or MAX_SPANS_LEN is too low. + // 2. Not correctly exiting spans due to misconfiguration / misuse + log::warn!( + target: "tracing", + "TracingProxy MAX_SPANS_LEN exceeded, removing oldest span." + ); + let mut sg = self.spans.remove(0).1; + sg.rent_all_mut(|s| { s.span.record("is_valid_trace", &false); }); + } + self.next_id + } + + /// Exit a span by dropping it along with it's associated guard. + pub fn exit_span(&mut self, id: u64) { + if self.spans.last().map(|l| id > l.0).unwrap_or(true) { + log::warn!(target: "tracing", "Span id not found in TracingProxy: {}", id); + return; + } + let mut last_span = self.spans.pop().expect("Just checked that there is an element to pop; qed"); + while id < last_span.0 { + log::warn!( + target: "tracing", + "TracingProxy Span ids not equal! 
id parameter given: {}, last span: {}", + id, + last_span.0, + ); + last_span.1.rent_all_mut(|s| { s.span.record("is_valid_trace", &false); }); + if let Some(s) = self.spans.pop() { + last_span = s; + } else { + log::warn!(target: "tracing", "Span id not found in TracingProxy {}", id); + return; + } + } + } +} + + +#[cfg(test)] +mod tests { + use super::*; + + fn create_spans(proxy: &mut TracingProxy, qty: usize) -> Vec { + let mut spans = Vec::new(); + for n in 0..qty { + spans.push(proxy.enter_span("target", &format!("{}", n))); + } + spans + } + + #[test] + fn max_spans_len_respected() { + let mut proxy = TracingProxy::new(); + let _spans = create_spans(&mut proxy, MAX_SPANS_LEN + 10); + assert_eq!(proxy.spans.len(), MAX_SPANS_LEN); + // ensure oldest spans removed + assert_eq!(proxy.spans[0].0, 11); + } + + #[test] + fn handles_span_exit_scenarios() { + let mut proxy = TracingProxy::new(); + let _spans = create_spans(&mut proxy, 10); + assert_eq!(proxy.spans.len(), 10); + // exit span normally + proxy.exit_span(10); + assert_eq!(proxy.spans.len(), 9); + // skip and exit outer span without exiting inner, id: 8 instead of 9 + proxy.exit_span(8); + // should have also removed the inner span that was lost + assert_eq!(proxy.spans.len(), 7); + // try to exit span not held + proxy.exit_span(9); + assert_eq!(proxy.spans.len(), 7); + // exit all spans + proxy.exit_span(1); + assert_eq!(proxy.spans.len(), 0); + } +} -- GitLab From 9a5892e187f7d9b3f058b549ad5859793d117d7b Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 18 Jun 2020 10:39:54 +0200 Subject: [PATCH 016/144] Block packet size limit --- client/network/src/protocol.rs | 8 +++++++- client/network/src/protocol/sync.rs | 3 ++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 764c416495..6e08215050 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -92,6 +92,10 @@ pub(crate) const MIN_VERSION: u32 = 3; // Maximum allowed entries in `BlockResponse` const MAX_BLOCK_DATA_RESPONSE: u32 = 128; +// Maximum total bytes allowed for block bodies in `BlockResponse` +// TODO: increase this to 4Mb once yamux limit is increased +const MAX_BODIES_BYTES: usize = 1 * 1024 * 1024; + /// When light node connects to the full node and the full node is behind light node /// for at least `LIGHT_MAXIMAL_BLOCKS_DIFFERENCE` blocks, we consider it not useful /// and disconnect to free connection slot. @@ -762,8 +766,9 @@ impl Protocol { let get_justification = request .fields .contains(message::BlockAttributes::JUSTIFICATION); + let mut total_size = 0; while let Some(header) = self.context_data.chain.header(id).unwrap_or(None) { - if blocks.len() >= max { + if blocks.len() >= max || total_size > MAX_BODIES_BYTES { break; } let number = *header.number(); @@ -794,6 +799,7 @@ impl Protocol { trace!(target: "sync", "Missing data for block request."); break; } + total_size += block_data.body.as_ref().map_or(0, |b| b.len()); blocks.push(block_data); match request.direction { message::Direction::Ascending => id = BlockId::Number(number + One::one()), diff --git a/client/network/src/protocol/sync.rs b/client/network/src/protocol/sync.rs index 781d410fff..453d3f6f04 100644 --- a/client/network/src/protocol/sync.rs +++ b/client/network/src/protocol/sync.rs @@ -54,7 +54,8 @@ mod blocks; mod extra_requests; /// Maximum blocks to request in a single packet. -const MAX_BLOCKS_TO_REQUEST: usize = 128; +/// TODO: set to 128 once yamux issue is resolved. 
+const MAX_BLOCKS_TO_REQUEST: usize = 64; /// Maximum blocks to store in the import queue. const MAX_IMPORTING_BLOCKS: usize = 2048; -- GitLab From 0bb3001a41ed63eb825a04b811cb13c2b2a2515d Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 18 Jun 2020 10:43:03 +0200 Subject: [PATCH 017/144] Revert "Block packet size limit" This reverts commit 9a5892e187f7d9b3f058b549ad5859793d117d7b. --- client/network/src/protocol.rs | 8 +------- client/network/src/protocol/sync.rs | 3 +-- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 6e08215050..764c416495 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -92,10 +92,6 @@ pub(crate) const MIN_VERSION: u32 = 3; // Maximum allowed entries in `BlockResponse` const MAX_BLOCK_DATA_RESPONSE: u32 = 128; -// Maximum total bytes allowed for block bodies in `BlockResponse` -// TODO: increase this to 4Mb once yamux limit is increased -const MAX_BODIES_BYTES: usize = 1 * 1024 * 1024; - /// When light node connects to the full node and the full node is behind light node /// for at least `LIGHT_MAXIMAL_BLOCKS_DIFFERENCE` blocks, we consider it not useful /// and disconnect to free connection slot. @@ -766,9 +762,8 @@ impl Protocol { let get_justification = request .fields .contains(message::BlockAttributes::JUSTIFICATION); - let mut total_size = 0; while let Some(header) = self.context_data.chain.header(id).unwrap_or(None) { - if blocks.len() >= max || total_size > MAX_BODIES_BYTES { + if blocks.len() >= max { break; } let number = *header.number(); @@ -799,7 +794,6 @@ impl Protocol { trace!(target: "sync", "Missing data for block request."); break; } - total_size += block_data.body.as_ref().map_or(0, |b| b.len()); blocks.push(block_data); match request.direction { message::Direction::Ascending => id = BlockId::Number(number + One::one()), diff --git a/client/network/src/protocol/sync.rs b/client/network/src/protocol/sync.rs index 453d3f6f04..781d410fff 100644 --- a/client/network/src/protocol/sync.rs +++ b/client/network/src/protocol/sync.rs @@ -54,8 +54,7 @@ mod blocks; mod extra_requests; /// Maximum blocks to request in a single packet. -/// TODO: set to 128 once yamux issue is resolved. -const MAX_BLOCKS_TO_REQUEST: usize = 64; +const MAX_BLOCKS_TO_REQUEST: usize = 128; /// Maximum blocks to store in the import queue. 
const MAX_IMPORTING_BLOCKS: usize = 2048; -- GitLab From 94023340a1ff4ccde5a46fe2395f73e5d6fbcb95 Mon Sep 17 00:00:00 2001 From: ddorgan Date: Thu, 18 Jun 2020 10:48:34 +0100 Subject: [PATCH 018/144] Update s3 artifact url (#6399) --- .maintain/flamingfir-deploy.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.maintain/flamingfir-deploy.sh b/.maintain/flamingfir-deploy.sh index 596bb04ece..8f0fb3a2bc 100755 --- a/.maintain/flamingfir-deploy.sh +++ b/.maintain/flamingfir-deploy.sh @@ -5,7 +5,7 @@ RETRY_ATTEMPT=0 SLEEP_TIME=15 TARGET_HOST="$1" COMMIT=$(cat artifacts/substrate/VERSION) -DOWNLOAD_URL="https://releases.parity.io/substrate/x86_64-debian:stretch/${COMMIT}/substrate" +DOWNLOAD_URL="https://releases.parity.io/substrate/x86_64-debian:stretch/${COMMIT}/substrate/substrate" POST_DATA='{"extra_vars":{"artifact_path":"'${DOWNLOAD_URL}'","target_host":"'${TARGET_HOST}'"}}' JOB_ID=$(wget -O - --header "Authorization: Bearer ${AWX_TOKEN}" --header "Content-type: application/json" --post-data "${POST_DATA}" https://ansible-awx.parity.io/api/v2/job_templates/32/launch/ | jq .job) -- GitLab From 44978b9b13cc0bd235519c5f1122f9b2ed8ff807 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 18 Jun 2020 13:55:45 +0200 Subject: [PATCH 019/144] Increase network buffer sizes even more (#6080) --- client/network/src/service.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 2297fe6a52..4b4a040e83 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -298,8 +298,8 @@ impl NetworkWorker { }; let mut builder = SwarmBuilder::new(transport, behaviour, local_peer_id.clone()) .peer_connection_limit(crate::MAX_CONNECTIONS_PER_PEER) - .notify_handler_buffer_size(NonZeroUsize::new(16).expect("16 != 0; qed")) - .connection_event_buffer_size(128); + .notify_handler_buffer_size(NonZeroUsize::new(32).expect("32 != 0; qed")) + .connection_event_buffer_size(1024); if let Some(spawner) = params.executor { struct SpawnImpl(F); impl + Send>>)> Executor for SpawnImpl { -- GitLab From cb833913c4308b6342474d3ec16e4e34ddf43670 Mon Sep 17 00:00:00 2001 From: Benjamin Kampmann Date: Thu, 18 Jun 2020 17:01:23 +0200 Subject: [PATCH 020/144] Remove pallet-balances from non-dev-deps (#6407) --- frame/atomic-swap/Cargo.toml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/frame/atomic-swap/Cargo.toml b/frame/atomic-swap/Cargo.toml index be197096e7..a3bf95b2e2 100644 --- a/frame/atomic-swap/Cargo.toml +++ b/frame/atomic-swap/Cargo.toml @@ -22,7 +22,7 @@ sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primiti sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } [dev-dependencies] -pallet-balances = { version = "2.0.0-rc3", default-features = false, path = "../balances" } +pallet-balances = { version = "2.0.0-rc3", path = "../balances" } [features] default = ["std"] @@ -35,5 +35,4 @@ std = [ "sp-std/std", "sp-io/std", "sp-core/std", - "pallet-balances/std", ] -- GitLab From 9b08492e1e7d9a939c0cd9de19059be0b42c4deb Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Thu, 18 Jun 2020 20:37:49 +0200 Subject: [PATCH 021/144] Babe VRF Signing in keystore (#6225) * Introduce trait * Implement VRFSigner in keystore * Use vrf_sign from keystore * Convert output to VRFInOut * Simplify conversion * vrf_sign secondary slot using keystore * Fix RPC call to claim_slot * Use Public instead of Pair * Check primary threshold 
in signer * Fix interface to return error * Move vrf_sign to BareCryptoStore * Fix authorship_works test * Fix BABE logic leaks * Acquire a read lock once * Also fix RPC acquiring the read lock once * Implement a generic way to construct VRF Transcript * Use make_transcript_data to call sr25519_vrf_sign * Make sure VRFTranscriptData is serializable * Cleanup * Move VRF to it's own module * Implement & test VRF signing in testing module * Remove leftover * Fix feature requirements * Revert removing vec macro * Drop keystore pointer to prevent deadlock * Nitpicks * Add test to make sure make_transcript works * Fix mismatch in VRF transcript * Add a test to verify transcripts match in babe * Return VRFOutput and VRFProof from keystore --- Cargo.lock | 4 + client/consensus/babe/Cargo.toml | 1 + client/consensus/babe/rpc/Cargo.toml | 2 +- client/consensus/babe/rpc/src/lib.rs | 22 ++-- client/consensus/babe/src/authorship.rs | 133 ++++++++++++++---------- client/consensus/babe/src/tests.rs | 48 ++++++++- client/keystore/Cargo.toml | 3 +- client/keystore/src/lib.rs | 21 +++- primitives/consensus/babe/Cargo.toml | 2 + primitives/consensus/babe/src/lib.rs | 19 ++++ primitives/core/Cargo.toml | 1 + primitives/core/src/lib.rs | 2 + primitives/core/src/testing.rs | 86 ++++++++++++--- primitives/core/src/traits.rs | 45 +++++--- primitives/core/src/vrf.rs | 99 ++++++++++++++++++ 15 files changed, 394 insertions(+), 94 deletions(-) create mode 100644 primitives/core/src/vrf.rs diff --git a/Cargo.lock b/Cargo.lock index 2c1d3e2c4b..1a1cee642e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6092,6 +6092,7 @@ dependencies = [ "parking_lot 0.10.2", "pdqselect", "rand 0.7.3", + "rand_chacha 0.2.2", "sc-block-builder", "sc-client-api", "sc-consensus-epochs", @@ -6425,6 +6426,7 @@ version = "2.0.0-rc3" dependencies = [ "derive_more", "hex", + "merlin", "parking_lot 0.10.2", "rand 0.7.3", "serde_json", @@ -7456,6 +7458,7 @@ dependencies = [ "sp-application-crypto", "sp-consensus", "sp-consensus-vrf", + "sp-core", "sp-inherents", "sp-runtime", "sp-std", @@ -7511,6 +7514,7 @@ dependencies = [ "pretty_assertions", "primitive-types", "rand 0.7.3", + "rand_chacha 0.2.2", "regex", "schnorrkel", "serde", diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index 86bc5b19f1..cf4e32a94c 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -58,6 +58,7 @@ sc-service = { version = "0.8.0-rc3", default-features = false, path = "../../se substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../../../test-utils/runtime/client" } sc-block-builder = { version = "0.8.0-rc3", path = "../../block-builder" } env_logger = "0.7.0" +rand_chacha = "0.2.2" tempfile = "3.1.0" [features] diff --git a/client/consensus/babe/rpc/Cargo.toml b/client/consensus/babe/rpc/Cargo.toml index 79cff3eb38..401434cadb 100644 --- a/client/consensus/babe/rpc/Cargo.toml +++ b/client/consensus/babe/rpc/Cargo.toml @@ -27,12 +27,12 @@ derive_more = "0.99.2" sp-api = { version = "2.0.0-rc3", path = "../../../../primitives/api" } sp-consensus = { version = "0.8.0-rc3", path = "../../../../primitives/consensus/common" } sp-core = { version = "2.0.0-rc3", path = "../../../../primitives/core" } +sp-application-crypto = { version = "2.0.0-rc3", path = "../../../../primitives/application-crypto" } sc-keystore = { version = "2.0.0-rc3", path = "../../../keystore" } [dev-dependencies] sc-consensus = { version = "0.8.0-rc3", path = "../../../consensus/common" } serde_json = "1.0.50" 
-sp-application-crypto = { version = "2.0.0-rc3", path = "../../../../primitives/application-crypto" } sp-keyring = { version = "2.0.0-rc3", path = "../../../../primitives/keyring" } substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../../../../test-utils/runtime/client" } tempfile = "3.1.0" diff --git a/client/consensus/babe/rpc/src/lib.rs b/client/consensus/babe/rpc/src/lib.rs index 35000770d4..652f4f00ba 100644 --- a/client/consensus/babe/rpc/src/lib.rs +++ b/client/consensus/babe/rpc/src/lib.rs @@ -32,6 +32,11 @@ use sp_consensus_babe::{ digests::PreDigest, }; use serde::{Deserialize, Serialize}; +use sp_core::{ + crypto::Public, + traits::BareCryptoStore, +}; +use sp_application_crypto::AppKey; use sc_keystore::KeyStorePtr; use sc_rpc_api::DenyUnsafe; use sp_api::{ProvideRuntimeApi, BlockId}; @@ -125,22 +130,23 @@ impl BabeApi for BabeRpcHandler let mut claims: HashMap = HashMap::new(); - let key_pairs = { - let keystore = keystore.read(); + let keys = { + let ks = keystore.read(); epoch.authorities.iter() .enumerate() - .flat_map(|(i, a)| { - keystore - .key_pair::(&a.0) - .ok() - .map(|kp| (kp, i)) + .filter_map(|(i, a)| { + if ks.has_keys(&[(a.0.to_raw_vec(), AuthorityId::ID)]) { + Some((a.0.clone(), i)) + } else { + None + } }) .collect::>() }; for slot_number in epoch_start..epoch_end { if let Some((claim, key)) = - authorship::claim_slot_using_key_pairs(slot_number, &epoch, &key_pairs) + authorship::claim_slot_using_keys(slot_number, &epoch, &keystore, &keys) { match claim { PreDigest::Primary { .. } => { diff --git a/client/consensus/babe/src/authorship.rs b/client/consensus/babe/src/authorship.rs index 1a6852c0c1..dfca491eaa 100644 --- a/client/consensus/babe/src/authorship.rs +++ b/client/consensus/babe/src/authorship.rs @@ -16,18 +16,24 @@ //! BABE authority selection and slot claiming. +use sp_application_crypto::AppKey; use sp_consensus_babe::{ - make_transcript, AuthorityId, BabeAuthorityWeight, BABE_VRF_PREFIX, - SlotNumber, AuthorityPair, + BABE_VRF_PREFIX, + AuthorityId, BabeAuthorityWeight, + SlotNumber, + make_transcript, + make_transcript_data, }; use sp_consensus_babe::digests::{ PreDigest, PrimaryPreDigest, SecondaryPlainPreDigest, SecondaryVRFPreDigest, }; use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; -use sp_core::{U256, blake2_256}; +use sp_core::{U256, blake2_256, crypto::Public, traits::BareCryptoStore}; use codec::Encode; -use schnorrkel::vrf::VRFInOut; -use sp_core::Pair; +use schnorrkel::{ + keys::PublicKey, + vrf::VRFInOut, +}; use sc_keystore::KeyStorePtr; use super::Epoch; @@ -124,7 +130,8 @@ pub(super) fn secondary_slot_author( fn claim_secondary_slot( slot_number: SlotNumber, epoch: &Epoch, - key_pairs: &[(AuthorityPair, usize)], + keys: &[(AuthorityId, usize)], + keystore: &KeyStorePtr, author_secondary_vrf: bool, ) -> Option<(PreDigest, AuthorityId)> { let Epoch { authorities, randomness, epoch_index, .. 
} = epoch; @@ -139,31 +146,39 @@ fn claim_secondary_slot( *randomness, )?; - for (pair, authority_index) in key_pairs { - if pair.public() == *expected_author { + for (authority_id, authority_index) in keys { + if authority_id == expected_author { let pre_digest = if author_secondary_vrf { - let transcript = super::authorship::make_transcript( + let transcript_data = super::authorship::make_transcript_data( randomness, slot_number, *epoch_index, ); - - let s = get_keypair(&pair).vrf_sign(transcript); - - PreDigest::SecondaryVRF(SecondaryVRFPreDigest { - slot_number, - vrf_output: VRFOutput(s.0.to_output()), - vrf_proof: VRFProof(s.1), - authority_index: *authority_index as u32, - }) + let result = keystore.read().sr25519_vrf_sign( + AuthorityId::ID, + authority_id.as_ref(), + transcript_data, + ); + if let Ok(signature) = result { + Some(PreDigest::SecondaryVRF(SecondaryVRFPreDigest { + slot_number, + vrf_output: VRFOutput(signature.output), + vrf_proof: VRFProof(signature.proof), + authority_index: *authority_index as u32, + })) + } else { + None + } } else { - PreDigest::SecondaryPlain(SecondaryPlainPreDigest { + Some(PreDigest::SecondaryPlain(SecondaryPlainPreDigest { slot_number, authority_index: *authority_index as u32, - }) + })) }; - return Some((pre_digest, pair.public())); + if let Some(pre_digest) = pre_digest { + return Some((pre_digest, authority_id.clone())); + } } } @@ -179,26 +194,22 @@ pub fn claim_slot( epoch: &Epoch, keystore: &KeyStorePtr, ) -> Option<(PreDigest, AuthorityId)> { - let key_pairs = { - let keystore = keystore.read(); - epoch.authorities.iter() - .enumerate() - .flat_map(|(i, a)| { - keystore.key_pair::(&a.0).ok().map(|kp| (kp, i)) - }) - .collect::>() - }; - claim_slot_using_key_pairs(slot_number, epoch, &key_pairs) + let authorities = epoch.authorities.iter() + .enumerate() + .map(|(index, a)| (a.0.clone(), index)) + .collect::>(); + claim_slot_using_keys(slot_number, epoch, keystore, &authorities) } /// Like `claim_slot`, but allows passing an explicit set of key pairs. Useful if we intend /// to make repeated calls for different slots using the same key pairs. -pub fn claim_slot_using_key_pairs( +pub fn claim_slot_using_keys( slot_number: SlotNumber, epoch: &Epoch, - key_pairs: &[(AuthorityPair, usize)], + keystore: &KeyStorePtr, + keys: &[(AuthorityId, usize)], ) -> Option<(PreDigest, AuthorityId)> { - claim_primary_slot(slot_number, epoch, epoch.config.c, &key_pairs) + claim_primary_slot(slot_number, epoch, epoch.config.c, keystore, &keys) .or_else(|| { if epoch.config.allowed_slots.is_secondary_plain_slots_allowed() || epoch.config.allowed_slots.is_secondary_vrf_slots_allowed() @@ -206,7 +217,8 @@ pub fn claim_slot_using_key_pairs( claim_secondary_slot( slot_number, &epoch, - &key_pairs, + keys, + keystore, epoch.config.allowed_slots.is_secondary_vrf_slots_allowed(), ) } else { @@ -215,11 +227,6 @@ pub fn claim_slot_using_key_pairs( }) } -fn get_keypair(q: &AuthorityPair) -> &schnorrkel::Keypair { - use sp_core::crypto::IsWrappedBy; - sp_core::sr25519::Pair::from_ref(q).as_ref() -} - /// Claim a primary slot if it is our turn. Returns `None` if it is not our turn. /// This hashes the slot number, epoch, genesis hash, and chain randomness into /// the VRF. 
If the VRF produces a value less than `threshold`, it is our turn, @@ -228,33 +235,49 @@ fn claim_primary_slot( slot_number: SlotNumber, epoch: &Epoch, c: (u64, u64), - key_pairs: &[(AuthorityPair, usize)], + keystore: &KeyStorePtr, + keys: &[(AuthorityId, usize)], ) -> Option<(PreDigest, AuthorityId)> { let Epoch { authorities, randomness, epoch_index, .. } = epoch; - for (pair, authority_index) in key_pairs { - let transcript = super::authorship::make_transcript(randomness, slot_number, *epoch_index); - + for (authority_id, authority_index) in keys { + let transcript = super::authorship::make_transcript( + randomness, + slot_number, + *epoch_index + ); + let transcript_data = super::authorship::make_transcript_data( + randomness, + slot_number, + *epoch_index + ); // Compute the threshold we will use. // // We already checked that authorities contains `key.public()`, so it can't // be empty. Therefore, this division in `calculate_threshold` is safe. let threshold = super::authorship::calculate_primary_threshold(c, authorities, *authority_index); - let pre_digest = get_keypair(pair) - .vrf_sign_after_check(transcript, |inout| super::authorship::check_primary_threshold(inout, threshold)) - .map(|s| { - PreDigest::Primary(PrimaryPreDigest { + let result = keystore.read().sr25519_vrf_sign( + AuthorityId::ID, + authority_id.as_ref(), + transcript_data, + ); + if let Ok(signature) = result { + let public = PublicKey::from_bytes(&authority_id.to_raw_vec()).ok()?; + let inout = match signature.output.attach_input_hash(&public, transcript) { + Ok(inout) => inout, + Err(_) => continue, + }; + if super::authorship::check_primary_threshold(&inout, threshold) { + let pre_digest = PreDigest::Primary(PrimaryPreDigest { slot_number, - vrf_output: VRFOutput(s.0.to_output()), - vrf_proof: VRFProof(s.1), + vrf_output: VRFOutput(signature.output), + vrf_proof: VRFProof(signature.proof), authority_index: *authority_index as u32, - }) - }); + }); - // early exit on first successful claim - if let Some(pre_digest) = pre_digest { - return Some((pre_digest, pair.public())); + return Some((pre_digest, authority_id.clone())); + } } } diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index ada1332295..1caed18c17 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -21,8 +21,14 @@ #![allow(deprecated)] use super::*; use authorship::claim_slot; -use sp_core::crypto::Pair; -use sp_consensus_babe::{AuthorityPair, SlotNumber, AllowedSlots}; +use sp_core::{crypto::Pair, vrf::make_transcript as transcript_from_data}; +use sp_consensus_babe::{ + AuthorityPair, + SlotNumber, + AllowedSlots, + make_transcript, + make_transcript_data, +}; use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sp_consensus::{ NoNetwork as DummyOracle, Proposal, RecordProof, @@ -35,6 +41,11 @@ use sp_runtime::{generic::DigestItem, traits::{Block as BlockT, DigestFor}}; use sc_client_api::{BlockchainEvents, backend::TransactionFor}; use log::debug; use std::{time::Duration, cell::RefCell, task::Poll}; +use rand::RngCore; +use rand_chacha::{ + rand_core::SeedableRng, + ChaChaRng, +}; type Item = DigestItem; @@ -796,3 +807,36 @@ fn verify_slots_are_strictly_increasing() { &mut block_import, ); } + +#[test] +fn babe_transcript_generation_match() { + let _ = env_logger::try_init(); + let keystore_path = tempfile::tempdir().expect("Creates keystore path"); + let keystore = sc_keystore::Store::open(keystore_path.path(), None).expect("Creates keystore"); + let pair = 
keystore.write().insert_ephemeral_from_seed::("//Alice") + .expect("Generates authority pair"); + + let epoch = Epoch { + start_slot: 0, + authorities: vec![(pair.public(), 1)], + randomness: [0; 32], + epoch_index: 1, + duration: 100, + config: BabeEpochConfiguration { + c: (3, 10), + allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, + }, + }; + + let orig_transcript = make_transcript(&epoch.randomness.clone(), 1, epoch.epoch_index); + let new_transcript = make_transcript_data(&epoch.randomness, 1, epoch.epoch_index); + + let test = |t: merlin::Transcript| -> [u8; 16] { + let mut b = [0u8; 16]; + t.build_rng() + .finalize(&mut ChaChaRng::from_seed([0u8;32])) + .fill_bytes(&mut b); + b + }; + debug_assert!(test(orig_transcript) == test(transcript_from_data(new_transcript))); +} diff --git a/client/keystore/Cargo.toml b/client/keystore/Cargo.toml index 7ceffc9061..47308dd692 100644 --- a/client/keystore/Cargo.toml +++ b/client/keystore/Cargo.toml @@ -18,10 +18,11 @@ derive_more = "0.99.2" sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } sp-application-crypto = { version = "2.0.0-rc3", path = "../../primitives/application-crypto" } hex = "0.4.0" +merlin = { version = "2.0", default-features = false } +parking_lot = "0.10.0" rand = "0.7.2" serde_json = "1.0.41" subtle = "2.1.1" -parking_lot = "0.10.0" [dev-dependencies] tempfile = "3.1.0" diff --git a/client/keystore/src/lib.rs b/client/keystore/src/lib.rs index 6510bb8232..5be4d6d12c 100644 --- a/client/keystore/src/lib.rs +++ b/client/keystore/src/lib.rs @@ -20,7 +20,9 @@ use std::{collections::{HashMap, HashSet}, path::PathBuf, fs::{self, File}, io::{self, Write}, sync::Arc}; use sp_core::{ crypto::{IsWrappedBy, CryptoTypePublicPair, KeyTypeId, Pair as PairT, Protected, Public}, - traits::{BareCryptoStore, BareCryptoStoreError as TraitError}, + traits::{BareCryptoStore, Error as TraitError}, + sr25519::{Public as Sr25519Public, Pair as Sr25519Pair}, + vrf::{VRFTranscriptData, VRFSignature, make_transcript}, Encode, }; use sp_application_crypto::{AppKey, AppPublic, AppPair, ed25519, sr25519, ecdsa}; @@ -438,6 +440,23 @@ impl BareCryptoStore for Store { fn has_keys(&self, public_keys: &[(Vec, KeyTypeId)]) -> bool { public_keys.iter().all(|(p, t)| self.key_phrase_by_type(&p, *t).is_ok()) } + + fn sr25519_vrf_sign( + &self, + key_type: KeyTypeId, + public: &Sr25519Public, + transcript_data: VRFTranscriptData, + ) -> std::result::Result { + let transcript = make_transcript(transcript_data); + let pair = self.key_pair_by_type::(public, key_type) + .map_err(|e| TraitError::PairNotFound(e.to_string()))?; + + let (inout, proof, _) = pair.as_ref().vrf_sign(transcript); + Ok(VRFSignature { + output: inout.to_output(), + proof, + }) + } } #[cfg(test)] diff --git a/primitives/consensus/babe/Cargo.toml b/primitives/consensus/babe/Cargo.toml index 4884e9a9f4..538b0a5b05 100644 --- a/primitives/consensus/babe/Cargo.toml +++ b/primitives/consensus/babe/Cargo.toml @@ -17,6 +17,7 @@ codec = { package = "parity-scale-codec", version = "1.3.0", default-features = merlin = { version = "2.0", default-features = false } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../std" } sp-api = { version = "2.0.0-rc3", default-features = false, path = "../../api" } +sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../core" } sp-consensus = { version = "0.8.0-rc3", optional = true, path = "../common" } sp-consensus-vrf = { version = "0.8.0-rc3", path = "../vrf", default-features = false } 
sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../../inherents" } @@ -26,6 +27,7 @@ sp-timestamp = { version = "2.0.0-rc3", default-features = false, path = "../../ [features] default = ["std"] std = [ + "sp-core/std", "sp-application-crypto/std", "codec/std", "merlin/std", diff --git a/primitives/consensus/babe/src/lib.rs b/primitives/consensus/babe/src/lib.rs index 9848715a47..10d4aa5ae5 100644 --- a/primitives/consensus/babe/src/lib.rs +++ b/primitives/consensus/babe/src/lib.rs @@ -31,6 +31,8 @@ pub use merlin::Transcript; use codec::{Encode, Decode}; use sp_std::vec::Vec; use sp_runtime::{ConsensusEngineId, RuntimeDebug}; +#[cfg(feature = "std")] +use sp_core::vrf::{VRFTranscriptData, VRFTranscriptValue}; use crate::digests::{NextEpochDescriptor, NextConfigDescriptor}; mod app { @@ -94,6 +96,23 @@ pub fn make_transcript( transcript } +/// Make a VRF transcript data container +#[cfg(feature = "std")] +pub fn make_transcript_data( + randomness: &Randomness, + slot_number: u64, + epoch: u64, +) -> VRFTranscriptData { + VRFTranscriptData { + label: &BABE_ENGINE_ID, + items: vec![ + ("slot number", VRFTranscriptValue::U64(slot_number)), + ("current epoch", VRFTranscriptValue::U64(epoch)), + ("chain randomness", VRFTranscriptValue::Bytes(&randomness[..])), + ] + } +} + /// An consensus log item for BABE. #[derive(Decode, Encode, Clone, PartialEq, Eq)] pub enum ConsensusLog { diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index e1a281da6b..69872349ff 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -59,6 +59,7 @@ hex-literal = "0.2.1" rand = "0.7.2" criterion = "0.2.11" serde_json = "1.0" +rand_chacha = "0.2.2" [[bench]] name = "bench" diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index 5fbbf3ca6d..1038c887e2 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -73,6 +73,8 @@ pub mod traits; pub mod testing; #[cfg(feature = "std")] pub mod tasks; +#[cfg(feature = "std")] +pub mod vrf; pub use self::hash::{H160, H256, H512, convert_hash}; pub use self::uint::{U256, U512}; diff --git a/primitives/core/src/testing.rs b/primitives/core/src/testing.rs index d31fabce5b..1d88e1fad5 100644 --- a/primitives/core/src/testing.rs +++ b/primitives/core/src/testing.rs @@ -22,10 +22,12 @@ use crate::crypto::KeyTypeId; use crate::{ crypto::{Pair, Public, CryptoTypePublicPair}, ed25519, sr25519, ecdsa, - traits::BareCryptoStoreError + traits::Error, + vrf::{VRFTranscriptData, VRFSignature, make_transcript}, }; #[cfg(feature = "std")] use std::collections::HashSet; + /// Key type for generic Ed25519 key. pub const ED25519: KeyTypeId = KeyTypeId(*b"ed25"); /// Key type for generic Sr 25519 key. 
@@ -76,7 +78,7 @@ impl KeyStore { #[cfg(feature = "std")] impl crate::traits::BareCryptoStore for KeyStore { - fn keys(&self, id: KeyTypeId) -> Result, BareCryptoStoreError> { + fn keys(&self, id: KeyTypeId) -> Result, Error> { self.keys .get(&id) .map(|map| { @@ -106,11 +108,11 @@ impl crate::traits::BareCryptoStore for KeyStore { &mut self, id: KeyTypeId, seed: Option<&str>, - ) -> Result { + ) -> Result { match seed { Some(seed) => { let pair = sr25519::Pair::from_string(seed, None) - .map_err(|_| BareCryptoStoreError::ValidationError("Generates an `sr25519` pair.".to_owned()))?; + .map_err(|_| Error::ValidationError("Generates an `sr25519` pair.".to_owned()))?; self.keys.entry(id).or_default().insert(pair.public().to_raw_vec(), seed.into()); Ok(pair.public()) }, @@ -137,11 +139,11 @@ impl crate::traits::BareCryptoStore for KeyStore { &mut self, id: KeyTypeId, seed: Option<&str>, - ) -> Result { + ) -> Result { match seed { Some(seed) => { let pair = ed25519::Pair::from_string(seed, None) - .map_err(|_| BareCryptoStoreError::ValidationError("Generates an `ed25519` pair.".to_owned()))?; + .map_err(|_| Error::ValidationError("Generates an `ed25519` pair.".to_owned()))?; self.keys.entry(id).or_default().insert(pair.public().to_raw_vec(), seed.into()); Ok(pair.public()) }, @@ -168,11 +170,11 @@ impl crate::traits::BareCryptoStore for KeyStore { &mut self, id: KeyTypeId, seed: Option<&str>, - ) -> Result { + ) -> Result { match seed { Some(seed) => { let pair = ecdsa::Pair::from_string(seed, None) - .map_err(|_| BareCryptoStoreError::ValidationError("Generates an `ecdsa` pair.".to_owned()))?; + .map_err(|_| Error::ValidationError("Generates an `ecdsa` pair.".to_owned()))?; self.keys.entry(id).or_default().insert(pair.public().to_raw_vec(), seed.into()); Ok(pair.public()) }, @@ -201,7 +203,7 @@ impl crate::traits::BareCryptoStore for KeyStore { &self, id: KeyTypeId, keys: Vec, - ) -> std::result::Result, BareCryptoStoreError> { + ) -> std::result::Result, Error> { let provided_keys = keys.into_iter().collect::>(); let all_keys = self.keys(id)?.into_iter().collect::>(); @@ -213,31 +215,48 @@ impl crate::traits::BareCryptoStore for KeyStore { id: KeyTypeId, key: &CryptoTypePublicPair, msg: &[u8], - ) -> Result, BareCryptoStoreError> { + ) -> Result, Error> { use codec::Encode; match key.0 { ed25519::CRYPTO_ID => { let key_pair: ed25519::Pair = self .ed25519_key_pair(id, &ed25519::Public::from_slice(key.1.as_slice())) - .ok_or(BareCryptoStoreError::PairNotFound("ed25519".to_owned()))?; + .ok_or(Error::PairNotFound("ed25519".to_owned()))?; return Ok(key_pair.sign(msg).encode()); } sr25519::CRYPTO_ID => { let key_pair: sr25519::Pair = self .sr25519_key_pair(id, &sr25519::Public::from_slice(key.1.as_slice())) - .ok_or(BareCryptoStoreError::PairNotFound("sr25519".to_owned()))?; + .ok_or(Error::PairNotFound("sr25519".to_owned()))?; return Ok(key_pair.sign(msg).encode()); } ecdsa::CRYPTO_ID => { let key_pair: ecdsa::Pair = self .ecdsa_key_pair(id, &ecdsa::Public::from_slice(key.1.as_slice())) - .ok_or(BareCryptoStoreError::PairNotFound("ecdsa".to_owned()))?; + .ok_or(Error::PairNotFound("ecdsa".to_owned()))?; return Ok(key_pair.sign(msg).encode()); } - _ => Err(BareCryptoStoreError::KeyNotSupported(id)) + _ => Err(Error::KeyNotSupported(id)) } } + + fn sr25519_vrf_sign( + &self, + key_type: KeyTypeId, + public: &sr25519::Public, + transcript_data: VRFTranscriptData, + ) -> Result { + let transcript = make_transcript(transcript_data); + let pair = self.sr25519_key_pair(key_type, public) + 
.ok_or(Error::PairNotFound("Not found".to_owned()))?; + + let (inout, proof, _) = pair.as_ref().vrf_sign(transcript); + Ok(VRFSignature { + output: inout.to_output(), + proof, + }) + } } /// Macro for exporting functions from wasm in with the expected signature for using it with the @@ -372,6 +391,7 @@ mod tests { use super::*; use crate::sr25519; use crate::testing::{ED25519, SR25519}; + use crate::vrf::VRFTranscriptValue; #[test] fn store_key_and_extract() { @@ -403,4 +423,42 @@ mod tests { assert!(public_keys.contains(&key_pair.public().into())); } + + #[test] + fn vrf_sign() { + let store = KeyStore::new(); + + let secret_uri = "//Alice"; + let key_pair = sr25519::Pair::from_string(secret_uri, None).expect("Generates key pair"); + + let transcript_data = VRFTranscriptData { + label: b"Test", + items: vec![ + ("one", VRFTranscriptValue::U64(1)), + ("two", VRFTranscriptValue::U64(2)), + ("three", VRFTranscriptValue::Bytes("test".as_bytes())), + ] + }; + + let result = store.read().sr25519_vrf_sign( + SR25519, + &key_pair.public(), + transcript_data.clone(), + ); + assert!(result.is_err()); + + store.write().insert_unknown( + SR25519, + secret_uri, + key_pair.public().as_ref(), + ).expect("Inserts unknown key"); + + let result = store.read().sr25519_vrf_sign( + SR25519, + &key_pair.public(), + transcript_data, + ); + + assert!(result.is_ok()); + } } diff --git a/primitives/core/src/traits.rs b/primitives/core/src/traits.rs index 0d5bc14fb4..4481145818 100644 --- a/primitives/core/src/traits.rs +++ b/primitives/core/src/traits.rs @@ -19,9 +19,9 @@ use crate::{ crypto::{KeyTypeId, CryptoTypePublicPair}, + vrf::{VRFTranscriptData, VRFSignature}, ed25519, sr25519, ecdsa, }; - use std::{ borrow::Cow, fmt::{Debug, Display}, @@ -33,7 +33,7 @@ pub use sp_externalities::{Externalities, ExternalitiesExt}; /// BareCryptoStore error #[derive(Debug, derive_more::Display)] -pub enum BareCryptoStoreError { +pub enum Error { /// Public key type is not supported #[display(fmt="Key not supported: {:?}", _0)] KeyNotSupported(KeyTypeId), @@ -64,7 +64,7 @@ pub trait BareCryptoStore: Send + Sync { &mut self, id: KeyTypeId, seed: Option<&str>, - ) -> Result; + ) -> Result; /// Returns all ed25519 public keys for the given key type. fn ed25519_public_keys(&self, id: KeyTypeId) -> Vec; /// Generate a new ed25519 key pair for the given key type and an optional seed. @@ -76,7 +76,7 @@ pub trait BareCryptoStore: Send + Sync { &mut self, id: KeyTypeId, seed: Option<&str>, - ) -> Result; + ) -> Result; /// Returns all ecdsa public keys for the given key type. fn ecdsa_public_keys(&self, id: KeyTypeId) -> Vec; /// Generate a new ecdsa key pair for the given key type and an optional seed. @@ -88,7 +88,7 @@ pub trait BareCryptoStore: Send + Sync { &mut self, id: KeyTypeId, seed: Option<&str>, - ) -> Result; + ) -> Result; /// Insert a new key. This doesn't require any known of the crypto; but a public key must be /// manually provided. @@ -108,11 +108,11 @@ pub trait BareCryptoStore: Send + Sync { &self, id: KeyTypeId, keys: Vec - ) -> Result, BareCryptoStoreError>; + ) -> Result, Error>; /// List all supported keys /// /// Returns a set of public keys the signer supports. - fn keys(&self, id: KeyTypeId) -> Result, BareCryptoStoreError>; + fn keys(&self, id: KeyTypeId) -> Result, Error>; /// Checks if the private keys for the given public key and key type combinations exist. 
/// @@ -131,7 +131,7 @@ pub trait BareCryptoStore: Send + Sync { id: KeyTypeId, key: &CryptoTypePublicPair, msg: &[u8], - ) -> Result, BareCryptoStoreError>; + ) -> Result, Error>; /// Sign with any key /// @@ -144,7 +144,7 @@ pub trait BareCryptoStore: Send + Sync { id: KeyTypeId, keys: Vec, msg: &[u8] - ) -> Result<(CryptoTypePublicPair, Vec), BareCryptoStoreError> { + ) -> Result<(CryptoTypePublicPair, Vec), Error> { if keys.len() == 1 { return self.sign_with(id, &keys[0], msg).map(|s| (keys[0].clone(), s)); } else { @@ -154,7 +154,7 @@ pub trait BareCryptoStore: Send + Sync { } } } - Err(BareCryptoStoreError::KeyNotSupported(id)) + Err(Error::KeyNotSupported(id)) } /// Sign with all keys @@ -163,15 +163,36 @@ pub trait BareCryptoStore: Send + Sync { /// each key given that the key is supported. /// /// Returns a list of `Result`s each representing the SCALE encoded - /// signature of each key or a BareCryptoStoreError for non-supported keys. + /// signature of each key or a Error for non-supported keys. fn sign_with_all( &self, id: KeyTypeId, keys: Vec, msg: &[u8], - ) -> Result, BareCryptoStoreError>>, ()>{ + ) -> Result, Error>>, ()>{ Ok(keys.iter().map(|k| self.sign_with(id, k, msg)).collect()) } + + /// Generate VRF signature for given transcript data. + /// + /// Receives KeyTypeId and Public key to be able to map + /// them to a private key that exists in the keystore which + /// is, in turn, used for signing the provided transcript. + /// + /// Returns a result containing the signature data. + /// Namely, VRFOutput and VRFProof which are returned + /// inside the `VRFSignature` container struct. + /// + /// This function will return an error in the cases where + /// the public key and key type provided do not match a private + /// key in the keystore. Or, in the context of remote signing + /// an error could be a network one. + fn sr25519_vrf_sign( + &self, + key_type: KeyTypeId, + public: &sr25519::Public, + transcript_data: VRFTranscriptData, + ) -> Result; } /// A pointer to the key store. diff --git a/primitives/core/src/vrf.rs b/primitives/core/src/vrf.rs new file mode 100644 index 0000000000..d392587cb7 --- /dev/null +++ b/primitives/core/src/vrf.rs @@ -0,0 +1,99 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
VRF-specifc data types and helpers + +use codec::Encode; +use merlin::Transcript; +use schnorrkel::vrf::{VRFOutput, VRFProof}; +/// An enum whose variants represent possible +/// accepted values to construct the VRF transcript +#[derive(Clone, Encode)] +pub enum VRFTranscriptValue<'a> { + /// Value is an array of bytes + Bytes(&'a [u8]), + /// Value is a u64 integer + U64(u64), +} +/// VRF Transcript data +#[derive(Clone, Encode)] +pub struct VRFTranscriptData<'a> { + /// The transcript's label + pub label: &'static [u8], + /// Additional data to be registered into the transcript + pub items: Vec<(&'static str, VRFTranscriptValue<'a>)>, +} +/// VRF signature data +pub struct VRFSignature { + /// The VRFOutput serialized + pub output: VRFOutput, + /// The calculated VRFProof + pub proof: VRFProof, +} + +/// Construct a `Transcript` object from data. +/// +/// Returns `merlin::Transcript` +pub fn make_transcript(data: VRFTranscriptData) -> Transcript { + let mut transcript = Transcript::new(data.label); + for (label, value) in data.items.into_iter() { + match value { + VRFTranscriptValue::Bytes(bytes) => { + transcript.append_message(label.as_bytes(), &bytes); + }, + VRFTranscriptValue::U64(val) => { + transcript.append_u64(label.as_bytes(), val); + } + } + } + transcript +} + + +#[cfg(test)] +mod tests { + use super::*; + use crate::vrf::VRFTranscriptValue; + use rand::RngCore; + use rand_chacha::{ + rand_core::SeedableRng, + ChaChaRng, + }; + + #[test] + fn transcript_creation_matches() { + let mut orig_transcript = Transcript::new(b"My label"); + orig_transcript.append_u64(b"one", 1); + orig_transcript.append_message(b"two", "test".as_bytes()); + + let new_transcript = make_transcript(VRFTranscriptData { + label: b"My label", + items: vec![ + ("one", VRFTranscriptValue::U64(1)), + ("two", VRFTranscriptValue::Bytes("test".as_bytes())), + ], + }); + let test = |t: Transcript| -> [u8; 16] { + let mut b = [0u8; 16]; + t.build_rng() + .finalize(&mut ChaChaRng::from_seed([0u8;32])) + .fill_bytes(&mut b); + b + }; + debug_assert!(test(orig_transcript) == test(new_transcript)); + } +} -- GitLab From caf9fbe005b6fd4505e824ad969e379b89e72d52 Mon Sep 17 00:00:00 2001 From: Toralf Wittner Date: Fri, 19 Jun 2020 00:22:48 +0200 Subject: [PATCH 022/144] Update `libp2p-ping`. (#6412) Bugfix release, see [CHANGELOG]. 
[CHANGELOG]: https://github.com/libp2p/rust-libp2p/blob/master/protocols/ping/CHANGELOG.md --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1a1cee642e..86744c2537 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2776,9 +2776,9 @@ dependencies = [ [[package]] name = "libp2p-ping" -version = "0.19.1" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c189cf1dfe4b3f01e2c0fe5e97a6f5df8aeb6f3569e26981015eb7c08015ce5f" +checksum = "ffb3c4f9273313357d4977799aec69f581cfe9568854919c5b8066018ccf59f5" dependencies = [ "futures 0.3.4", "libp2p-core", -- GitLab From 7a4bd762e0e8c2ddf959787981cd2e55d080b47d Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 19 Jun 2020 00:23:58 +0200 Subject: [PATCH 023/144] Remove --legacy-network-protocol CLI flag (#6411) --- client/cli/src/params/network_params.rs | 6 - client/network/src/config.rs | 4 - client/network/src/protocol.rs | 164 ++++++------------------ client/network/src/service.rs | 1 - 4 files changed, 36 insertions(+), 139 deletions(-) diff --git a/client/cli/src/params/network_params.rs b/client/cli/src/params/network_params.rs index 2e0a6f1973..253585544d 100644 --- a/client/cli/src/params/network_params.rs +++ b/client/cli/src/params/network_params.rs @@ -102,11 +102,6 @@ pub struct NetworkParams { /// By default this option is true for `--dev` and false otherwise. #[structopt(long)] pub discover_local: bool, - - /// Use the legacy "pre-mainnet-launch" networking protocol. Enable if things seem broken. - /// This option will be removed in the future. - #[structopt(long)] - pub legacy_network_protocol: bool, } impl NetworkParams { @@ -165,7 +160,6 @@ impl NetworkParams { }, max_parallel_downloads: self.max_parallel_downloads, allow_non_globals_in_dht: self.discover_local || is_dev, - use_new_block_requests_protocol: !self.legacy_network_protocol, } } } diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 6c9bd3adb9..94b2993b4e 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -425,9 +425,6 @@ pub struct NetworkConfiguration { pub max_parallel_downloads: u32, /// Should we insert non-global addresses into the DHT? pub allow_non_globals_in_dht: bool, - /// If true, uses the `//block-requests/` experimental protocol rather than - /// the legacy substream. This option is meant to be hard-wired to `true` in the future. - pub use_new_block_requests_protocol: bool, } impl NetworkConfiguration { @@ -459,7 +456,6 @@ impl NetworkConfiguration { }, max_parallel_downloads: 5, allow_non_globals_in_dht: false, - use_new_block_requests_protocol: true, } } } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 764c416495..06f117b3bb 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -250,9 +250,6 @@ pub struct Protocol { metrics: Option, /// The `PeerId`'s of all boot nodes. boot_node_ids: Arc>, - /// If true, we send back requests as `CustomMessageOutcome` events. If false, we directly - /// dispatch requests using the legacy substream. 
- use_new_block_requests_protocol: bool, } #[derive(Default)] @@ -374,7 +371,6 @@ impl Protocol { block_announce_validator: Box + Send>, metrics_registry: Option<&Registry>, boot_node_ids: Arc>, - use_new_block_requests_protocol: bool, queue_size_report: Option, ) -> error::Result<(Protocol, sc_peerset::PeersetHandle)> { let info = chain.info(); @@ -458,7 +454,6 @@ impl Protocol { None }, boot_node_ids, - use_new_block_requests_protocol, }; Ok((protocol, peerset_handle)) @@ -655,16 +650,6 @@ impl Protocol { CustomMessageOutcome::None } - fn send_request(&mut self, who: &PeerId, message: Message) { - send_request::( - &mut self.behaviour, - &mut self.context_data.stats, - &mut self.context_data.peers, - who, - message, - ); - } - fn send_message( &mut self, who: &PeerId, @@ -896,15 +881,10 @@ impl Protocol { Ok(sync::OnBlockData::Import(origin, blocks)) => CustomMessageOutcome::BlockImport(origin, blocks), Ok(sync::OnBlockData::Request(peer, mut req)) => { - if self.use_new_block_requests_protocol { - self.update_peer_request(&peer, &mut req); - CustomMessageOutcome::BlockRequest { - target: peer, - request: req, - } - } else { - self.send_request(&peer, GenericMessage::BlockRequest(req)); - CustomMessageOutcome::None + self.update_peer_request(&peer, &mut req); + CustomMessageOutcome::BlockRequest { + target: peer, + request: req, } } Err(sync::BadPeer(id, repu)) => { @@ -1077,15 +1057,11 @@ impl Protocol { match self.sync.new_peer(who.clone(), info.best_hash, info.best_number) { Ok(None) => (), Ok(Some(mut req)) => { - if self.use_new_block_requests_protocol { - self.update_peer_request(&who, &mut req); - self.pending_messages.push_back(CustomMessageOutcome::BlockRequest { - target: who.clone(), - request: req, - }); - } else { - self.send_request(&who, GenericMessage::BlockRequest(req)) - } + self.update_peer_request(&who, &mut req); + self.pending_messages.push_back(CustomMessageOutcome::BlockRequest { + target: who.clone(), + request: req, + }); }, Err(sync::BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id); @@ -1415,15 +1391,10 @@ impl Protocol { CustomMessageOutcome::BlockImport(origin, blocks) }, Ok(sync::OnBlockData::Request(peer, mut req)) => { - if self.use_new_block_requests_protocol { - self.update_peer_request(&peer, &mut req); - CustomMessageOutcome::BlockRequest { - target: peer, - request: req, - } - } else { - self.send_request(&peer, GenericMessage::BlockRequest(req)); - CustomMessageOutcome::None + self.update_peer_request(&peer, &mut req); + CustomMessageOutcome::BlockRequest { + target: peer, + request: req, } } Err(sync::BadPeer(id, repu)) => { @@ -1523,22 +1494,11 @@ impl Protocol { for result in results { match result { Ok((id, mut req)) => { - if self.use_new_block_requests_protocol { - update_peer_request(&mut self.context_data.peers, &id, &mut req); - self.pending_messages.push_back(CustomMessageOutcome::BlockRequest { - target: id, - request: req, - }); - } else { - let msg = GenericMessage::BlockRequest(req); - send_request( - &mut self.behaviour, - &mut self.context_data.stats, - &mut self.context_data.peers, - &id, - msg - ) - } + update_peer_request(&mut self.context_data.peers, &id, &mut req); + self.pending_messages.push_back(CustomMessageOutcome::BlockRequest { + target: id, + request: req, + }); } Err(sync::BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id); @@ -1917,27 +1877,6 @@ pub enum CustomMessageOutcome { None, } -fn send_request( - behaviour: &mut GenericProto, - stats: &mut HashMap<&'static str, PacketStats>, - peers: &mut 
HashMap>, - who: &PeerId, - mut message: Message, -) { - if let GenericMessage::BlockRequest(ref mut r) = message { - if let Some(ref mut peer) = peers.get_mut(who) { - r.id = peer.next_request_id; - peer.next_request_id += 1; - if let Some((timestamp, request)) = peer.block_request.take() { - trace!(target: "sync", "Request {} for {} is now obsolete.", request.id, who); - peer.obsolete_requests.insert(request.id, timestamp); - } - peer.block_request = Some((Instant::now(), r.clone())); - } - } - send_message::(behaviour, stats, who, None, message) -} - fn update_peer_request( peers: &mut HashMap>, who: &PeerId, @@ -2032,58 +1971,28 @@ impl NetworkBehaviour for Protocol { } for (id, mut r) in self.sync.block_requests() { - if self.use_new_block_requests_protocol { - update_peer_request(&mut self.context_data.peers, &id, &mut r); - let event = CustomMessageOutcome::BlockRequest { - target: id.clone(), - request: r, - }; - self.pending_messages.push_back(event); - } else { - send_request( - &mut self.behaviour, - &mut self.context_data.stats, - &mut self.context_data.peers, - &id, - GenericMessage::BlockRequest(r), - ) - } + update_peer_request(&mut self.context_data.peers, &id, &mut r); + let event = CustomMessageOutcome::BlockRequest { + target: id.clone(), + request: r, + }; + self.pending_messages.push_back(event); } for (id, mut r) in self.sync.justification_requests() { - if self.use_new_block_requests_protocol { - update_peer_request(&mut self.context_data.peers, &id, &mut r); - let event = CustomMessageOutcome::BlockRequest { - target: id, - request: r, - }; - self.pending_messages.push_back(event); - } else { - send_request( - &mut self.behaviour, - &mut self.context_data.stats, - &mut self.context_data.peers, - &id, - GenericMessage::BlockRequest(r), - ) - } + update_peer_request(&mut self.context_data.peers, &id, &mut r); + let event = CustomMessageOutcome::BlockRequest { + target: id, + request: r, + }; + self.pending_messages.push_back(event); } for (id, r) in self.sync.finality_proof_requests() { - if self.use_new_block_requests_protocol { - let event = CustomMessageOutcome::FinalityProofRequest { - target: id, - block_hash: r.block, - request: r.request, - }; - self.pending_messages.push_back(event); - } else { - send_request( - &mut self.behaviour, - &mut self.context_data.stats, - &mut self.context_data.peers, - &id, - GenericMessage::FinalityProofRequest(r), - ) - } + let event = CustomMessageOutcome::FinalityProofRequest { + target: id, + block_hash: r.block, + request: r.request, + }; + self.pending_messages.push_back(event); } if let Poll::Ready(Some((peer_id, result))) = self.pending_transactions.poll_next_unpin(cx) { self.on_handle_extrinsic_import(peer_id, result); @@ -2237,7 +2146,6 @@ mod tests { Box::new(DefaultBlockAnnounceValidator::new(client.clone())), None, Default::default(), - true, None, ).unwrap(); diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 4b4a040e83..0d5f037a37 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -224,7 +224,6 @@ impl NetworkWorker { params.block_announce_validator, params.metrics_registry.as_ref(), boot_node_ids.clone(), - params.network_config.use_new_block_requests_protocol, metrics.as_ref().map(|m| m.notifications_queues_size.clone()), )?; -- GitLab From 4f0b60164855339cc645ceeba3c554c5cc5cf59f Mon Sep 17 00:00:00 2001 From: Gavin Wood Date: Fri, 19 Jun 2020 08:25:09 +0200 Subject: [PATCH 024/144] Scale and increase validator count (#6417) --- frame/staking/src/lib.rs 
| 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index bd4fb21cb5..aca68bd706 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -303,7 +303,7 @@ use frame_support::{ }; use pallet_session::historical; use sp_runtime::{ - Perbill, PerU16, PerThing, RuntimeDebug, DispatchError, + Percent, Perbill, PerU16, PerThing, RuntimeDebug, DispatchError, curve::PiecewiseLinear, traits::{ Convert, Zero, StaticLookup, CheckedSub, Saturating, SaturatedConversion, AtLeast32Bit, @@ -1794,6 +1794,34 @@ decl_module! { ValidatorCount::put(new); } + /// Increments the ideal number of validators. + /// + /// The dispatch origin must be Root. + /// + /// # + /// Base Weight: 1.717 µs + /// Read/Write: Validator Count + /// # + #[weight = 2 * WEIGHT_PER_MICROS + T::DbWeight::get().reads_writes(1, 1)] + fn increase_validator_count(origin, #[compact] additional: u32) { + ensure_root(origin)?; + ValidatorCount::mutate(|n| *n += additional); + } + + /// Scale up the ideal number of validators by a factor. + /// + /// The dispatch origin must be Root. + /// + /// # + /// Base Weight: 1.717 µs + /// Read/Write: Validator Count + /// # + #[weight = 2 * WEIGHT_PER_MICROS + T::DbWeight::get().reads_writes(1, 1)] + fn scale_validator_count(origin, factor: Percent) { + ensure_root(origin)?; + ValidatorCount::mutate(|n| *n += factor * *n); + } + /// Force there to be no new eras indefinitely. /// /// The dispatch origin must be Root. -- GitLab From 369f9fc2f5493a94398e8ce132832b9dc4752af4 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Fri, 19 Jun 2020 08:26:06 +0200 Subject: [PATCH 025/144] Expose constants from Proxy Pallet (#6420) --- frame/proxy/src/lib.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index 66e3e76038..bd56ad3f0f 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -135,6 +135,15 @@ decl_module! { /// Deposit one of this module's events by using the default implementation. fn deposit_event() = default; + /// The base amount of currency needed to reserve for creating a proxy. + const ProxyDepositBase: BalanceOf = T::ProxyDepositBase::get(); + + /// The amount of currency needed per proxy added. + const ProxyDepositFactor: BalanceOf = T::ProxyDepositFactor::get(); + + /// The maximum amount of proxies allowed for a single account. + const MaxProxies: u16 = T::MaxProxies::get(); + /// Dispatch the given `call` from an account that the sender is authorised for through /// `add_proxy`. 
/// -- GitLab From 31c3e06ded197bdf28130ac0c5310283b2d1b5b3 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Fri, 19 Jun 2020 08:31:42 +0200 Subject: [PATCH 026/144] .maintain/monitoring: Add alerting rule tests (#6343) * .maintain/monitoring: Add alerting rule tests * .maintain/monitoring/alerting-rules/alerting-rules.yaml: Break lines * .gitlab-ci.yml: Add promtool rule testing step --- .gitlab-ci.yml | 1 + .../alerting-rules/alerting-rule-tests.yaml | 239 ++++++++++++++++++ .../alerting-rules/alerting-rules.yaml | 46 ++-- 3 files changed, 271 insertions(+), 15 deletions(-) create mode 100644 .maintain/monitoring/alerting-rules/alerting-rule-tests.yaml diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index e146d40ee6..76ae934900 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -367,6 +367,7 @@ test-prometheus-alerting-rules: - curl -L https://github.com/prometheus/prometheus/releases/download/v2.19.0/prometheus-2.19.0.linux-amd64.tar.gz --output prometheus.tar.gz - tar -xzf prometheus.tar.gz - ./prometheus-*/promtool check rules .maintain/monitoring/alerting-rules/alerting-rules.yaml + - cat .maintain/monitoring/alerting-rules/alerting-rules.yaml | ./prometheus-*/promtool test rules .maintain/monitoring/alerting-rules/alerting-rule-tests.yaml #### stage: build diff --git a/.maintain/monitoring/alerting-rules/alerting-rule-tests.yaml b/.maintain/monitoring/alerting-rules/alerting-rule-tests.yaml new file mode 100644 index 0000000000..069cfaf977 --- /dev/null +++ b/.maintain/monitoring/alerting-rules/alerting-rule-tests.yaml @@ -0,0 +1,239 @@ +rule_files: + - /dev/stdin + +evaluation_interval: 1m + +tests: + - interval: 1m + input_series: + - series: 'polkadot_sub_libp2p_peers_count{ + job="polkadot", + pod="polkadot-abcdef01234-abcdef", + instance="polkadot-abcdef01234-abcdef", + }' + values: '3 2+0x4 1+0x9' # 3 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 + + - series: 'polkadot_sub_txpool_validations_scheduled{ + job="polkadot", + pod="polkadot-abcdef01234-abcdef", + instance="polkadot-abcdef01234-abcdef", + }' + values: '10+1x30' # 10 11 12 13 .. 40 + + - series: 'polkadot_sub_txpool_validations_finished{ + job="polkadot", + pod="polkadot-abcdef01234-abcdef", + instance="polkadot-abcdef01234-abcdef", + }' + values: '0x30' # 0 0 0 0 .. 0 + + - series: 'polkadot_block_height{ + status="best", job="polkadot", + pod="polkadot-abcdef01234-abcdef", + instance="polkadot-abcdef01234-abcdef", + }' + values: '1+1x3 4+0x13' # 1 2 3 4 4 4 4 4 4 4 4 4 ... + + - series: 'polkadot_block_height{ + status="finalized", + job="polkadot", + pod="polkadot-abcdef01234-abcdef", + instance="polkadot-abcdef01234-abcdef", + }' + values: '1+1x3 4+0x13' # 1 2 3 4 4 4 4 4 4 4 4 4 ... 
+ + - series: 'polkadot_cpu_usage_percentage{ + job="polkadot", + pod="polkadot-abcdef01234-abcdef", + instance="polkadot-abcdef01234-abcdef", + }' + values: '0+20x5 100+0x5' # 0 20 40 60 80 100 100 100 100 100 100 + + alert_rule_test: + + ###################################################################### + # Resource usage + ###################################################################### + + - eval_time: 9m + alertname: HighCPUUsage + exp_alerts: + - eval_time: 10m + alertname: HighCPUUsage + exp_alerts: + - exp_labels: + severity: warning + pod: polkadot-abcdef01234-abcdef + instance: polkadot-abcdef01234-abcdef + job: polkadot + exp_annotations: + message: "The node polkadot-abcdef01234-abcdef has a CPU + usage higher than 100% for more than 5 minutes" + + ###################################################################### + # Block production + ###################################################################### + + - eval_time: 6m + alertname: LowNumberOfNewBlocks + exp_alerts: + - eval_time: 7m + alertname: LowNumberOfNewBlocks + exp_alerts: + - exp_labels: + severity: warning + pod: polkadot-abcdef01234-abcdef + instance: polkadot-abcdef01234-abcdef + job: polkadot + status: best + exp_annotations: + message: "Less than one new block per minute on instance + polkadot-abcdef01234-abcdef." + + - eval_time: 14m + alertname: LowNumberOfNewBlocks + exp_alerts: + - exp_labels: + severity: warning + pod: polkadot-abcdef01234-abcdef + instance: polkadot-abcdef01234-abcdef + job: polkadot + status: best + exp_annotations: + message: "Less than one new block per minute on instance + polkadot-abcdef01234-abcdef." + - exp_labels: + severity: critical + pod: polkadot-abcdef01234-abcdef + instance: polkadot-abcdef01234-abcdef + job: polkadot + status: best + exp_annotations: + message: "Less than one new block per minute on instance + polkadot-abcdef01234-abcdef." + + ###################################################################### + # Block finalization + ###################################################################### + + - eval_time: 6m + alertname: BlockFinalizationSlow + exp_alerts: + - eval_time: 7m + alertname: BlockFinalizationSlow + exp_alerts: + - exp_labels: + severity: warning + pod: polkadot-abcdef01234-abcdef + instance: polkadot-abcdef01234-abcdef + job: polkadot + status: finalized + exp_annotations: + message: "Finalized block on instance + polkadot-abcdef01234-abcdef increases by less than 1 per + minute." + + - eval_time: 14m + alertname: BlockFinalizationSlow + exp_alerts: + - exp_labels: + severity: warning + pod: polkadot-abcdef01234-abcdef + instance: polkadot-abcdef01234-abcdef + job: polkadot + status: finalized + exp_annotations: + message: "Finalized block on instance + polkadot-abcdef01234-abcdef increases by less than 1 per + minute." + - exp_labels: + severity: critical + pod: polkadot-abcdef01234-abcdef + instance: polkadot-abcdef01234-abcdef + job: polkadot + status: finalized + exp_annotations: + message: "Finalized block on instance + polkadot-abcdef01234-abcdef increases by less than 1 per + minute." 
+ + ###################################################################### + # Transaction queue + ###################################################################### + + - eval_time: 10m + alertname: TransactionQueueSize + exp_alerts: + - eval_time: 11m + alertname: TransactionQueueSize + exp_alerts: + - exp_labels: + severity: warning + pod: polkadot-abcdef01234-abcdef + instance: polkadot-abcdef01234-abcdef + job: polkadot + exp_annotations: + message: "The node polkadot-abcdef01234-abcdef has more + than 10 transactions in the queue for more than 10 + minutes" + + - eval_time: 31m + alertname: TransactionQueueSize + exp_alerts: + - exp_labels: + severity: warning + pod: polkadot-abcdef01234-abcdef + instance: polkadot-abcdef01234-abcdef + job: polkadot + exp_annotations: + message: "The node polkadot-abcdef01234-abcdef has more + than 10 transactions in the queue for more than 10 + minutes" + - exp_labels: + severity: critical + pod: polkadot-abcdef01234-abcdef + instance: polkadot-abcdef01234-abcdef + job: polkadot + exp_annotations: + message: "The node polkadot-abcdef01234-abcdef has more + than 10 transactions in the queue for more than 30 + minutes" + + ###################################################################### + # Networking + ###################################################################### + + - eval_time: 3m # Values: 3 2 2 + alertname: LowNumberOfPeers + exp_alerts: + - eval_time: 4m # Values: 2 2 2 + alertname: LowNumberOfPeers + exp_alerts: + - exp_labels: + severity: warning + pod: polkadot-abcdef01234-abcdef + instance: polkadot-abcdef01234-abcdef + job: polkadot + exp_annotations: + message: "The node polkadot-abcdef01234-abcdef has less + than 3 peers for more than 3 minutes" + + - eval_time: 16m # Values: 3 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 1 + alertname: LowNumberOfPeers + exp_alerts: + - exp_labels: + severity: warning + pod: polkadot-abcdef01234-abcdef + instance: polkadot-abcdef01234-abcdef + job: polkadot + exp_annotations: + message: "The node polkadot-abcdef01234-abcdef has less + than 3 peers for more than 3 minutes" + - exp_labels: + severity: critical + pod: polkadot-abcdef01234-abcdef + instance: polkadot-abcdef01234-abcdef + job: polkadot + exp_annotations: + message: "The node polkadot-abcdef01234-abcdef has less + than 3 peers for more than 15 minutes" diff --git a/.maintain/monitoring/alerting-rules/alerting-rules.yaml b/.maintain/monitoring/alerting-rules/alerting-rules.yaml index cb5b3c271d..06d204f7af 100644 --- a/.maintain/monitoring/alerting-rules/alerting-rules.yaml +++ b/.maintain/monitoring/alerting-rules/alerting-rules.yaml @@ -12,7 +12,8 @@ groups: labels: severity: warning annotations: - message: 'The node {{ $labels.instance }} has a CPU usage higher than 100% for more than 5 minutes' + message: 'The node {{ $labels.instance }} has a CPU usage higher than 100% + for more than 5 minutes' ############################################################################## # Block production @@ -20,14 +21,16 @@ groups: - alert: LowNumberOfNewBlocks annotations: - message: 'Less than one new block per minute on instance {{ $labels.instance }}.' + message: 'Less than one new block per minute on instance {{ + $labels.instance }}.' expr: increase(polkadot_block_height{status="best"}[1m]) < 1 for: 3m labels: severity: warning - alert: LowNumberOfNewBlocks annotations: - message: 'Less than one new block per minute on instance {{ $labels.instance }}.' + message: 'Less than one new block per minute on instance {{ + $labels.instance }}.' 
expr: increase(polkadot_block_height{status="best"}[1m]) < 1 for: 10m labels: @@ -43,43 +46,51 @@ groups: labels: severity: warning annotations: - message: 'Finalized block on instance {{ $labels.instance }} increases by less than 1 per minute.' + message: 'Finalized block on instance {{ $labels.instance }} increases by + less than 1 per minute.' - alert: BlockFinalizationSlow expr: increase(polkadot_block_height{status="finalized"}[1m]) < 1 for: 10m labels: severity: critical annotations: - message: 'Finalized block on instance {{ $labels.instance }} increases by less than 1 per minute.' + message: 'Finalized block on instance {{ $labels.instance }} increases by + less than 1 per minute.' - alert: BlockFinalizationLaggingBehind # Under the assumption of an average block production of 6 seconds, # "best" and "finalized" being more than 10 blocks apart would imply # more than a 1 minute delay between block production and finalization. - expr: (polkadot_block_height_number{status="best"} - ignoring(status) polkadot_block_height_number{status="finalized"}) > 10 + expr: '(polkadot_block_height_number{status="best"} - ignoring(status) + polkadot_block_height_number{status="finalized"}) > 10' for: 8m labels: severity: critical annotations: - message: "Block finalization on instance {{ $labels.instance }} is behind block production by {{ $value }} for more than 8m" + message: "Block finalization on instance {{ $labels.instance }} is behind + block production by {{ $value }} for more than 8m" ############################################################################## # Transaction queue ############################################################################## - alert: TransactionQueueSize - expr: polkadot_sub_txpool_validations_scheduled - polkadot_sub_txpool_validations_finished > 10 + expr: 'polkadot_sub_txpool_validations_scheduled - + polkadot_sub_txpool_validations_finished > 10' for: 10m labels: severity: warning annotations: - message: 'The node {{ $labels.instance }} has more than 10 transactions in the queue for more than 10 minutes' + message: 'The node {{ $labels.instance }} has more than 10 transactions in + the queue for more than 10 minutes' - alert: TransactionQueueSize - expr: polkadot_sub_txpool_validations_scheduled - polkadot_sub_txpool_validations_finished > 10 + expr: 'polkadot_sub_txpool_validations_scheduled - + polkadot_sub_txpool_validations_finished > 10' for: 30m labels: severity: critical annotations: - message: 'The node {{ $labels.instance }} has more than 10 transactions in the queue for more than 30 minutes' + message: 'The node {{ $labels.instance }} has more than 10 transactions in + the queue for more than 30 minutes' ############################################################################## # Networking @@ -91,23 +102,28 @@ groups: labels: severity: warning annotations: - message: 'The node {{ $labels.instance }} has less than 3 peers for more than 3 minutes' + message: 'The node {{ $labels.instance }} has less than 3 peers for more + than 3 minutes' - alert: LowNumberOfPeers expr: polkadot_sub_libp2p_peers_count < 3 for: 15m labels: severity: critical annotations: - message: 'The node {{ $labels.instance }} has less than 3 peers for more than 15 minutes' + message: 'The node {{ $labels.instance }} has less than 3 peers for more + than 15 minutes' ############################################################################## # Others ############################################################################## - alert: 
AuthorityDiscoveryHighDiscoveryFailure - expr: polkadot_authority_discovery_handle_value_found_event_failure / ignoring(name) polkadot_authority_discovery_dht_event_received{name="value_found"} > 0.5 + expr: 'polkadot_authority_discovery_handle_value_found_event_failure / + ignoring(name) + polkadot_authority_discovery_dht_event_received{name="value_found"} > 0.5' for: 2h labels: severity: warning annotations: - message: "Authority discovery on node {{ $labels.instance }} fails to process more than 50 % of the values found on the DHT." + message: "Authority discovery on node {{ $labels.instance }} fails to + process more than 50 % of the values found on the DHT." -- GitLab From 31af20346a0f0c47bf776f029e6579acd473b2d3 Mon Sep 17 00:00:00 2001 From: s3krit Date: Fri, 19 Jun 2020 08:59:59 +0200 Subject: [PATCH 027/144] [CI] Label PRs if polkadot companion build fails (#6410) * add polkadot-companion-labels.yml * fix polkadot companion job name * add opened event to polkadot-companion-labels.yml * Dont label on timeouts * increase timeouts * increase timeouts again... to be sure * Switch to s3krit/await-status-action Turns out Sibz/await-status-action looks at /ref/statuses, which lists ALL statuses (i.e., if you send a pending and a failure for the same context, it will see both and assume the job is still pending.). I forked and point at /ref/status, which shows a combined summary of each status (i.e., only ever shows the most recent status of a single context). --- .../workflows/polkadot-companion-labels.yml | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 .github/workflows/polkadot-companion-labels.yml diff --git a/.github/workflows/polkadot-companion-labels.yml b/.github/workflows/polkadot-companion-labels.yml new file mode 100644 index 0000000000..dd00e72d6c --- /dev/null +++ b/.github/workflows/polkadot-companion-labels.yml @@ -0,0 +1,29 @@ +name: Check Polkadot Companion and Label + +on: + pull_request: + types: [opened, synchronize] + +jobs: + check_status: + runs-on: ubuntu-latest + steps: + - name: Monitor the status of the gitlab-check-companion-build job + uses: s3krit/await-status-action@4528ebbdf6e29bbec77c41caad1b2dec148ba894 + id: 'check-companion-status' + with: + authToken: ${{ secrets.GITHUB_TOKEN }} + ref: ${{ github.event.pull_request.head.sha }} + contexts: 'continuous-integration/gitlab-check-polkadot-companion-build' + timeout: 1800 + notPresentTimeout: 3600 # It can take quite a while before the job starts... + - name: Label success + uses: andymckay/labeler@master + if: steps.check-companion-status.outputs.result == 'success' + with: + remove-labels: 'A7-needspolkadotpr' + - name: Label failure + uses: andymckay/labeler@master + if: steps.check-companion-status.outputs.result == 'failure' + with: + add-labels: 'A7-needspolkadotpr' -- GitLab From 3ca1d91f0f53f03d95e5335ad3d7d125e379c9d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 19 Jun 2020 13:15:21 +0200 Subject: [PATCH 028/144] Print bad mandatory error (#6416) * Print bad mandatory error This prints the error that leads to bad mandatory. 
* Update frame/system/src/lib.rs Co-authored-by: Shawn Tabrizi * Adds missing trait import Co-authored-by: Shawn Tabrizi --- frame/system/src/lib.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index b64b5d58f7..71e1f38770 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -112,7 +112,7 @@ use sp_runtime::{ self, CheckEqual, AtLeast32Bit, Zero, SignedExtension, Lookup, LookupError, SimpleBitOps, Hash, Member, MaybeDisplay, BadOrigin, SaturatedConversion, MaybeSerialize, MaybeSerializeDeserialize, MaybeMallocSizeOf, StaticLookup, One, Bounded, - Dispatchable, DispatchInfoOf, PostDispatchInfoOf, + Dispatchable, DispatchInfoOf, PostDispatchInfoOf, Printable, }, offchain::storage_lock::BlockNumberProvider, }; @@ -1591,7 +1591,10 @@ impl SignedExtension for CheckWeight where // Since mandatory dispatched do not get validated for being overweight, we are sensitive // to them actually being useful. Block producers are thus not allowed to include mandatory // extrinsics that result in error. - if info.class == DispatchClass::Mandatory && result.is_err() { + if let (DispatchClass::Mandatory, Err(e)) = (info.class, result) { + "Bad mandantory".print(); + e.print(); + Err(InvalidTransaction::BadMandatory)? } -- GitLab From 97583766efde5eb628ebda73add493aa3f87d8e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 19 Jun 2020 15:48:09 +0200 Subject: [PATCH 029/144] Track last blocks in informant display (#6429) This implements tracking of the last seen blocks in informant display to prevent printing the import message twice. In Cumulus we first import blocks as part of the block building with `new_best == false` and set the best block after we know which one was included by the relay chain. This leads to printing the import messages two times. This pr solves the problem by track the latest seen blocks to not print the message twice. --- client/informant/src/lib.rs | 59 ++++++++++++++++++++++++++----------- 1 file changed, 41 insertions(+), 18 deletions(-) diff --git a/client/informant/src/lib.rs b/client/informant/src/lib.rs index 720f5d6a1b..6a8acbadc3 100644 --- a/client/informant/src/lib.rs +++ b/client/informant/src/lib.rs @@ -28,9 +28,7 @@ use sp_blockchain::HeaderMetadata; use sp_runtime::traits::{Block as BlockT, Header}; use sp_transaction_pool::TransactionPool; use sp_utils::{status_sinks, mpsc::tracing_unbounded}; -use std::fmt::Display; -use std::sync::Arc; -use std::time::Duration; +use std::{fmt::Display, sync::Arc, time::Duration, collections::VecDeque}; use parking_lot::Mutex; mod display; @@ -96,12 +94,30 @@ where future::ready(()) }); + future::join( + display_notifications, + display_block_import(client, format.prefix), + ).map(|_| ()) +} + +fn display_block_import( + client: Arc, + prefix: String, +) -> impl Future +where + C: UsageProvider + HeaderMetadata + BlockchainEvents, + >::Error: Display, +{ let mut last_best = { let info = client.usage_info(); Some((info.chain.best_number, info.chain.best_hash)) }; - let display_block_import = client.import_notification_stream().for_each(move |n| { + // Hashes of the last blocks we have seen at import. + let mut last_blocks = VecDeque::new(); + let max_blocks_to_track = 100; + + client.import_notification_stream().for_each(move |n| { // detect and log reorganizations. 
if let Some((ref last_num, ref last_hash)) = last_best { if n.header.parent_hash() != last_hash && n.is_new_best { @@ -114,7 +130,7 @@ where match maybe_ancestor { Ok(ref ancestor) if ancestor.hash != *last_hash => info!( "♻️ {}Reorg on #{},{} to #{},{}, common ancestor #{},{}", - format.prefix, + prefix, Colour::Red.bold().paint(format!("{}", last_num)), last_hash, Colour::Green.bold().paint(format!("{}", n.header.number())), n.hash, Colour::White.bold().paint(format!("{}", ancestor.number)), ancestor.hash, @@ -129,18 +145,25 @@ where last_best = Some((n.header.number().clone(), n.hash.clone())); } - info!( - target: "substrate", - "✨ {}Imported #{} ({})", - format.prefix, - Colour::White.bold().paint(format!("{}", n.header.number())), - n.hash, - ); - future::ready(()) - }); - future::join( - display_notifications, - display_block_import - ).map(|_| ()) + // If we already printed a message for a given block recently, + // we should not print it again. + if !last_blocks.contains(&n.hash) { + last_blocks.push_back(n.hash.clone()); + + if last_blocks.len() > max_blocks_to_track { + last_blocks.pop_front(); + } + + info!( + target: "substrate", + "✨ {}Imported #{} ({})", + prefix, + Colour::White.bold().paint(format!("{}", n.header.number())), + n.hash, + ); + } + + future::ready(()) + }) } -- GitLab From 2bb79cb6500d0bc338ca4c480c66ced0d284b0a4 Mon Sep 17 00:00:00 2001 From: Dan Forbes Date: Fri, 19 Jun 2020 06:55:15 -0700 Subject: [PATCH 030/144] Simple Docs for Atomic Swap Pallet (#6434) * Simple Docs for Atomic Swap Pallet * Fix copy-and-paste error --- frame/atomic-swap/src/lib.rs | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/frame/atomic-swap/src/lib.rs b/frame/atomic-swap/src/lib.rs index aa33c9a849..8686138c2b 100644 --- a/frame/atomic-swap/src/lib.rs +++ b/frame/atomic-swap/src/lib.rs @@ -15,7 +15,27 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Atomic swap support pallet +//! # Atomic Swap +//! +//! A module for atomically sending funds. +//! +//! - [`atomic_swap::Trait`](./trait.Trait.html) +//! - [`Call`](./enum.Call.html) +//! - [`Module`](./struct.Module.html) +//! +//! ## Overview +//! +//! A module for atomically sending funds from an origin to a target. A proof +//! is used to allow the target to approve (claim) the swap. If the swap is not +//! claimed within a specified duration of time, the sender may cancel it. +//! +//! ## Interface +//! +//! ### Dispatchable Functions +//! +//! * `create_swap` - called by a sender to register a new atomic swap +//! * `claim_swap` - called by the target to approve a swap +//! * `cancel_swap` - may be called by a sender after a specified duration // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] -- GitLab From a2c493d4de8e5f0a3c8bf9b0518b45aa35b2cb71 Mon Sep 17 00:00:00 2001 From: Cecile Tonglet Date: Fri, 19 Jun 2020 15:56:09 +0200 Subject: [PATCH 031/144] More descriptive error message when invalid slot duration is used (#6430) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Initial commit Forked at: d735e4d0b5378c227f81a5127a1d4544de112fd8 No parent branch. * Errors if slot_duration is zero * Errors if slot_duration is zero * Revert "Errors if slot_duration is zero" This reverts commit a9e9820e124571f73d3e498e969a74d01fd3fe96. 
* Update client/consensus/slots/src/lib.rs Co-authored-by: Bastian Köcher --- client/consensus/slots/src/lib.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index fe1c6bab7b..950f83fbce 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -455,7 +455,7 @@ impl SlotDuration { CB: FnOnce(ApiRef, &BlockId) -> sp_blockchain::Result, T: SlotData + Encode + Decode + Debug, { - match client.get_aux(T::SLOT_KEY)? { + let slot_duration = match client.get_aux(T::SLOT_KEY)? { Some(v) => ::decode(&mut &v[..]) .map(SlotDuration) .map_err(|_| { @@ -479,7 +479,15 @@ impl SlotDuration { Ok(SlotDuration(genesis_slot_duration)) } + }?; + + if slot_duration.slot_duration() == 0 { + return Err(sp_blockchain::Error::Msg( + "Invalid value for slot_duration: the value must be greater than 0.".into(), + )) } + + Ok(slot_duration) } /// Returns slot data value. -- GitLab From d343bfc87acf1eb5cbff49827eea2d59b729724b Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Fri, 19 Jun 2020 15:59:29 +0200 Subject: [PATCH 032/144] Root origin uses no filter by default. Scheduler and Democracy dispatch without asserting BaseCallFilter (#6408) * make system root origin build runtime origin with no filter * additional doc --- frame/democracy/src/tests.rs | 21 +++++++++++++++++++-- frame/scheduler/src/lib.rs | 17 ++++++++++++++--- frame/support/src/origin.rs | 24 ++++++++++++++++++++---- frame/system/src/lib.rs | 3 ++- 4 files changed, 55 insertions(+), 10 deletions(-) diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index 85bb1ffcfb..c1bab3c021 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -22,7 +22,8 @@ use std::cell::RefCell; use codec::Encode; use frame_support::{ impl_outer_origin, impl_outer_dispatch, assert_noop, assert_ok, parameter_types, - impl_outer_event, ord_parameter_types, traits::{Contains, OnInitialize}, weights::Weight, + impl_outer_event, ord_parameter_types, traits::{Contains, OnInitialize, Filter}, + weights::Weight, }; use sp_core::H256; use sp_runtime::{ @@ -74,6 +75,14 @@ impl_outer_event! { } } +// Test that a filtered call can be dispatched. +pub struct BaseFilter; +impl Filter for BaseFilter { + fn filter(call: &Call) -> bool { + !matches!(call, &Call::Balances(pallet_balances::Call::set_balance(..))) + } +} + // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. #[derive(Clone, Eq, PartialEq, Debug)] pub struct Test; parameter_types!
{ pub const AvailableBlockRatio: Perbill = Perbill::one(); } impl frame_system::Trait for Test { - type BaseCallFilter = (); + type BaseCallFilter = BaseFilter; type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -225,6 +234,14 @@ fn set_balance_proposal(value: u64) -> Vec { Call::Balances(pallet_balances::Call::set_balance(42, value, 0)).encode() } +#[test] +fn set_balance_proposal_is_correctly_filtered_out() { + for i in 0..10 { + let call = Call::decode(&mut &set_balance_proposal(i)[..]).unwrap(); + assert!(!::BaseCallFilter::filter(&call)); + } +} + fn set_balance_proposal_hash(value: u64) -> H256 { BlakeTwo256::hash(&set_balance_proposal(value)[..]) } diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index 00189c6b5d..18b4eef0a8 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -399,7 +399,7 @@ mod tests { use frame_support::{ impl_outer_event, impl_outer_origin, impl_outer_dispatch, parameter_types, assert_ok, - traits::{OnInitialize, OnFinalize}, + traits::{OnInitialize, OnFinalize, Filter}, weights::constants::RocksDbWeight, }; use sp_core::H256; @@ -469,6 +469,15 @@ mod tests { scheduler, } } + + // Scheduler must dispatch with root and no filter, this tests base filter is indeed not used. + pub struct BaseFilter; + impl Filter for BaseFilter { + fn filter(call: &Call) -> bool { + !matches!(call, Call::Logger(_)) + } + } + // For testing the pallet, we construct most of a mock runtime. This means // first constructing a configuration type (`Test`) which `impl`s each of the // configuration traits of pallets we want to use. @@ -481,7 +490,7 @@ mod tests { pub const AvailableBlockRatio: Perbill = Perbill::one(); } impl system::Trait for Test { - type BaseCallFilter = (); + type BaseCallFilter = BaseFilter; type Origin = Origin; type Call = Call; type Index = u64; @@ -540,7 +549,9 @@ mod tests { #[test] fn basic_scheduling_works() { new_test_ext().execute_with(|| { - Scheduler::do_schedule(4, None, 127, Call::Logger(logger::Call::log(42, 1000))); + let call = Call::Logger(logger::Call::log(42, 1000)); + assert!(!::BaseCallFilter::filter(&call)); + Scheduler::do_schedule(4, None, 127, call); run_to_block(3); assert!(logger::log().is_empty()); run_to_block(4); diff --git a/frame/support/src/origin.rs b/frame/support/src/origin.rs index 038c8540f6..77fe86cc55 100644 --- a/frame/support/src/origin.rs +++ b/frame/support/src/origin.rs @@ -163,8 +163,8 @@ macro_rules! impl_outer_origin { Modules { }; $( $module:ident $( < $generic:ident > )? $( { $generic_instance:ident } )? ,)* ) => { - // WARNING: All instance must hold the filter `frame_system::Trait::BaseCallFilter`. - // One can use `OriginTrait::reset_filter` to do so. + // WARNING: All instance must hold the filter `frame_system::Trait::BaseCallFilter`, except + // when caller is system Root. One can use `OriginTrait::reset_filter` to do so. #[derive(Clone)] pub struct $name { caller: $caller_name, @@ -241,28 +241,40 @@ macro_rules! impl_outer_origin { #[allow(dead_code)] impl $name { + /// Create with system none origin and `frame-system::Trait::BaseCallFilter`. pub fn none() -> Self { $system::RawOrigin::None.into() } + /// Create with system root origin and no filter. pub fn root() -> Self { $system::RawOrigin::Root.into() } + /// Create with system signed origin and `frame-system::Trait::BaseCallFilter`. 
pub fn signed(by: <$runtime as $system::Trait>::AccountId) -> Self { $system::RawOrigin::Signed(by).into() } } impl From<$system::Origin<$runtime>> for $name { + /// Convert to runtime origin: + /// * root origin is built with no filter + /// * others use `frame-system::Trait::BaseCallFilter` fn from(x: $system::Origin<$runtime>) -> Self { let mut o = $name { caller: $caller_name::system(x), filter: $crate::sp_std::rc::Rc::new(Box::new(|_| true)), }; - $crate::traits::OriginTrait::reset_filter(&mut o); + + // Root has no filter + if !matches!(o.caller, $caller_name::system($system::Origin::<$runtime>::Root)) { + $crate::traits::OriginTrait::reset_filter(&mut o); + } + o } } impl Into<$crate::sp_std::result::Result<$system::Origin<$runtime>, $name>> for $name { + /// NOTE: converting to pallet origin loses the origin filter information. fn into(self) -> $crate::sp_std::result::Result<$system::Origin<$runtime>, Self> { if let $caller_name::system(l) = self.caller { Ok(l) @@ -272,6 +284,8 @@ macro_rules! impl_outer_origin { } } impl From::AccountId>> for $name { + /// Convert to runtime origin with caller being system signed or none and use filter + /// `frame-system::Trait::BaseCallFilter`. fn from(x: Option<<$runtime as $system::Trait>::AccountId>) -> Self { <$system::Origin<$runtime>>::from(x).into() } @@ -279,6 +293,7 @@ macro_rules! impl_outer_origin { $( $crate::paste::item! { impl From<$module::Origin < $( $generic )? $(, $module::$generic_instance )? > > for $name { + /// Convert to runtime origin using `frame-system::Trait::BaseCallFilter`. fn from(x: $module::Origin < $( $generic )? $(, $module::$generic_instance )? >) -> Self { let mut o = $name { caller: $caller_name::[< $module $( _ $generic_instance )? >](x), @@ -294,6 +309,7 @@ macro_rules! impl_outer_origin { $name, >> for $name { + /// NOTE: converting to pallet origin loses the origin filter information. fn into(self) -> $crate::sp_std::result::Result< $module::Origin < $( $generic )? $(, $module::$generic_instance )? >, Self, @@ -402,7 +418,7 @@ mod tests { #[test] fn test_default_filter() { assert_eq!(OriginWithSystem::root().filter_call(&0), true); - assert_eq!(OriginWithSystem::root().filter_call(&1), false); + assert_eq!(OriginWithSystem::root().filter_call(&1), true); assert_eq!(OriginWithSystem::none().filter_call(&0), true); assert_eq!(OriginWithSystem::none().filter_call(&1), false); assert_eq!(OriginWithSystem::signed(0).filter_call(&0), true); diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 71e1f38770..db6b528bcf 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -149,7 +149,8 @@ pub fn extrinsics_data_root(xts: Vec>) -> H::Output { } pub trait Trait: 'static + Eq + Clone { - /// The basic call filter to use in Origin. + /// The basic call filter to use in Origin. All origins are built with this filter as base, + /// except Root. type BaseCallFilter: Filter; /// The `Origin` type used by dispatchable calls. 
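To make the new filtering rule concrete, here is a small self-contained Rust sketch (not part of the patch; `Caller`, `Origin`, `base_call_filter` and the `u32` stand-in for the runtime `Call` are illustrative placeholders, not the real `frame_support` types). It only mirrors, in simplified form, the behaviour `impl_outer_origin!` implements after this change: every origin except Root gets the base call filter installed, so only Root-dispatched calls bypass it.

#[derive(PartialEq)]
enum Caller { Root, Signed(u64), None }

struct Origin {
    caller: Caller,
    // `u32` stands in for the runtime `Call` type.
    filter: Box<dyn Fn(&u32) -> bool>,
}

// Stand-in for `frame_system::Trait::BaseCallFilter`: call `1` is filtered out.
fn base_call_filter(call: &u32) -> bool {
    *call != 1
}

impl Origin {
    fn from_caller(caller: Caller) -> Self {
        // Mirrors (in simplified form) the generated conversion from the system
        // origin: start with an allow-all filter and only install the base
        // filter for non-Root callers.
        let mut origin = Origin { caller, filter: Box::new(|_| true) };
        if origin.caller != Caller::Root {
            origin.filter = Box::new(base_call_filter);
        }
        origin
    }

    fn filter_call(&self, call: &u32) -> bool {
        (self.filter)(call)
    }
}

fn main() {
    assert!(Origin::from_caller(Caller::Root).filter_call(&1));       // Root bypasses the filter.
    assert!(!Origin::from_caller(Caller::Signed(0)).filter_call(&1)); // Signed origins keep it.
    assert!(!Origin::from_caller(Caller::None).filter_call(&1));      // None origin keeps it.
}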
-- GitLab From 111b628ccd2895dce26f2af12ee5f28206fa9e2b Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Fri, 19 Jun 2020 16:00:06 +0200 Subject: [PATCH 033/144] Allow decl-module to have a where clause with trailing comma (#6431) --- frame/support/src/dispatch.rs | 3 ++- frame/support/test/tests/system.rs | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index edb6e62639..d9a3561802 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -283,7 +283,7 @@ macro_rules! decl_module { $trait_instance:ident: $trait_name:ident $( , I: $instantiable:path $( = $module_default_instance:path )? )? > - for enum $call_type:ident where origin: $origin_type:ty $(, $where_ty:ty: $where_bound:path )* { + for enum $call_type:ident where origin: $origin_type:ty $(, $where_ty:ty: $where_bound:path )* $(,)? { $( $t:tt )* } ) => { @@ -317,6 +317,7 @@ macro_rules! decl_module { origin: $origin_type:ty, system = $system:ident $(, $where_ty:ty: $where_bound:path )* + $(,)? { $($t:tt)* } diff --git a/frame/support/test/tests/system.rs b/frame/support/test/tests/system.rs index c3c47d2065..0d6a22fd1a 100644 --- a/frame/support/test/tests/system.rs +++ b/frame/support/test/tests/system.rs @@ -31,7 +31,7 @@ pub trait Trait: 'static + Eq + Clone { } frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin, {} } impl Module { -- GitLab From 18707b3314bc2c48991ca68a53539fc98b9a092d Mon Sep 17 00:00:00 2001 From: Max Inden Date: Fri, 19 Jun 2020 16:00:33 +0200 Subject: [PATCH 034/144] .gitlab-ci.yml: Use promtool from paritytech/tools:latest image (#6425) --- .gitlab-ci.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 76ae934900..a21affdeb9 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -364,10 +364,8 @@ test-prometheus-alerting-rules: image: paritytech/tools:latest <<: *kubernetes-build script: - - curl -L https://github.com/prometheus/prometheus/releases/download/v2.19.0/prometheus-2.19.0.linux-amd64.tar.gz --output prometheus.tar.gz - - tar -xzf prometheus.tar.gz - - ./prometheus-*/promtool check rules .maintain/monitoring/alerting-rules/alerting-rules.yaml - - cat .maintain/monitoring/alerting-rules/alerting-rules.yaml | ./prometheus-*/promtool test rules .maintain/monitoring/alerting-rules/alerting-rule-tests.yaml + - promtool check rules .maintain/monitoring/alerting-rules/alerting-rules.yaml + - cat .maintain/monitoring/alerting-rules/alerting-rules.yaml | promtool test rules .maintain/monitoring/alerting-rules/alerting-rule-tests.yaml #### stage: build -- GitLab From 26aec420371a41f7202bb50a98dac4b0fcce591e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 19 Jun 2020 16:00:58 +0200 Subject: [PATCH 035/144] Update sync chain info on own block import (#6424) Before, we only updated the chain info of sync when we imported something using the import queue. However, if you import your own blocks, this is not done using the import queue and so sync is not updated. If we don't do this, it can lead to sync switching to "major sync" mode because sync is not informed about new blocks. This especially happens on Cumulus, where a collator is selected multiple times to include its block into the relay chain and thus sync switches to major sync mode while the node is still building blocks.
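The sketch below illustrates the rule this patch adds; it is a simplified, self-contained stand-in, not the real `sc-network`/`sc-service` API (`BlockOrigin`, `ImportNotification` and `Sync` here are toy types). The actual wiring lives in `build_network_future` in the diff that follows: when an import notification originates from the node itself (`BlockOrigin::Own`), sync's chain info is updated explicitly, because such blocks never pass through the import queue.

#[derive(Clone, Copy, PartialEq)]
enum BlockOrigin { Own, NetworkBroadcast }

struct ImportNotification {
    hash: [u8; 32],
    number: u64,
    origin: BlockOrigin,
}

#[derive(Default)]
struct Sync {
    best_number: u64,
}

impl Sync {
    // Toy counterpart of the `update_chain_info` / `own_block_imported` calls
    // added by this patch.
    fn update_chain_info(&mut self, _hash: &[u8; 32], number: u64) {
        self.best_number = self.best_number.max(number);
    }
}

fn on_import_notification(sync: &mut Sync, notification: &ImportNotification) {
    if notification.origin == BlockOrigin::Own {
        // Own blocks bypass the import queue, so sync has to be told about
        // them explicitly; otherwise it may wrongly enter "major sync" mode.
        sync.update_chain_info(&notification.hash, notification.number);
    }
}

fn main() {
    let mut sync = Sync::default();
    let own_block = ImportNotification { hash: [0u8; 32], number: 42, origin: BlockOrigin::Own };
    on_import_notification(&mut sync, &own_block);
    assert_eq!(sync.best_number, 42);
}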
--- client/network/src/protocol.rs | 5 +++++ client/network/src/service.rs | 9 +++++++++ client/service/src/lib.rs | 9 ++++++++- 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 06f117b3bb..ccd4463901 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -544,6 +544,11 @@ impl Protocol { self.sync.update_chain_info(&info.best_hash, info.best_number); } + /// Inform sync about an own imported block. + pub fn own_block_imported(&mut self, hash: B::Hash, number: NumberFor) { + self.sync.update_chain_info(&hash, number); + } + fn update_peer_info(&mut self, who: &PeerId) { if let Some(info) = self.sync.peer_info(who) { if let Some(ref mut peer) = self.context_data.peers.get_mut(who) { diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 0d5f037a37..90fffc8a37 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -746,6 +746,12 @@ impl NetworkService { .unbounded_send(ServiceToWorkerMsg::UpdateChain); } + /// Inform the network service about an own imported block. + pub fn own_block_imported(&self, hash: B::Hash, number: NumberFor) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::OwnBlockImported(hash, number)); + } } impl sp_consensus::SyncOracle @@ -812,6 +818,7 @@ enum ServiceToWorkerMsg { }, DisconnectPeer(PeerId), UpdateChain, + OwnBlockImported(B::Hash, NumberFor), } /// Main network worker. Must be polled in order for the network to advance. @@ -1142,6 +1149,8 @@ impl Future for NetworkWorker { this.network_service.user_protocol_mut().disconnect_peer(&who), ServiceToWorkerMsg::UpdateChain => this.network_service.user_protocol_mut().update_chain(), + ServiceToWorkerMsg::OwnBlockImported(hash, number) => + this.network_service.user_protocol_mut().own_block_imported(hash, number), } } diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 37bac171c9..5184886efd 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -55,7 +55,7 @@ use sc_network::{NetworkService, NetworkStatus, network_state::NetworkState, Pee use log::{log, warn, debug, error, Level}; use codec::{Encode, Decode}; use sp_runtime::generic::BlockId; -use sp_runtime::traits::Block as BlockT; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use parity_util_mem::MallocSizeOf; use sp_utils::{status_sinks, mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}}; @@ -382,6 +382,13 @@ fn build_network_future< if announce_imported_blocks { network.service().announce_block(notification.hash, Vec::new()); } + + if let sp_consensus::BlockOrigin::Own = notification.origin { + network.service().own_block_imported( + notification.hash, + notification.header.number().clone(), + ); + } } // We poll `finality_notification_stream`, but we only take the last event. -- GitLab From 4c67aeec54bfa255eed88d391e2f401434d1e51f Mon Sep 17 00:00:00 2001 From: Max Inden Date: Fri, 19 Jun 2020 16:01:16 +0200 Subject: [PATCH 036/144] client/authority-discovery: Compare PeerIds and not Multihashes (#6414) In order to tell whether an address is the local node's address, the authority discovery module previously compared the Multihash within the `p2p` Multiaddr protocol. rust-libp2p recently switched to a new PeerId representation (see [1]). Multihashes of the same PeerId in the new and the old format are not equal.
Instead of comparing the Multihashes, this patch ensures the module compares the PeerIds [1] https://github.com/libp2p/rust-libp2p/issues/555 --- client/authority-discovery/src/lib.rs | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/client/authority-discovery/src/lib.rs b/client/authority-discovery/src/lib.rs index e816600b7c..ba1c9f0fa8 100644 --- a/client/authority-discovery/src/lib.rs +++ b/client/authority-discovery/src/lib.rs @@ -72,6 +72,7 @@ use sc_network::{ ExHashT, Multiaddr, NetworkStateInfo, + PeerId, }; use sp_authority_discovery::{AuthorityDiscoveryApi, AuthorityId, AuthoritySignature, AuthorityPair}; use sp_core::crypto::{key_types, Pair}; @@ -430,7 +431,7 @@ where .get(&remote_key) .ok_or(Error::MatchingHashedAuthorityIdWithAuthorityId)?; - let local_peer_id = multiaddr::Protocol::P2p(self.network.local_peer_id().into()); + let local_peer_id = self.network.local_peer_id(); let remote_addresses: Vec = values.into_iter() .map(|(_k, v)| { @@ -459,9 +460,23 @@ where .into_iter() .flatten() // Ignore own addresses. - .filter(|addr| !addr.iter().any(|protocol| - protocol == local_peer_id - )) + .filter(|addr| !addr.iter().any(|protocol| { + // Parse to PeerId first as Multihashes of old and new PeerId + // representation don't equal. + // + // See https://github.com/libp2p/rust-libp2p/issues/555 for + // details. + if let multiaddr::Protocol::P2p(hash) = protocol { + let peer_id = match PeerId::from_multihash(hash) { + Ok(peer_id) => peer_id, + Err(_) => return true, // Discard address. + }; + + return peer_id == local_peer_id; + } + + false // Multiaddr does not contain a PeerId. + })) .collect(); if !remote_addresses.is_empty() { -- GitLab From c8c16d1803c5d84565ffb788ab35e2cedf289530 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Fri, 19 Jun 2020 17:40:39 +0300 Subject: [PATCH 037/144] add network propagated metrics (#6438) --- client/network/src/protocol.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index ccd4463901..9bec906787 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -48,7 +48,7 @@ use sp_runtime::traits::{ use sp_arithmetic::traits::SaturatedConversion; use message::{BlockAnnounce, Message}; use message::generic::{Message as GenericMessage, ConsensusMessage, Roles}; -use prometheus_endpoint::{Registry, Gauge, GaugeVec, HistogramVec, PrometheusError, Opts, register, U64}; +use prometheus_endpoint::{Registry, Gauge, Counter, GaugeVec, HistogramVec, PrometheusError, Opts, register, U64}; use sync::{ChainSync, SyncState}; use std::borrow::Cow; use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; @@ -145,6 +145,7 @@ struct Metrics { fork_targets: Gauge, finality_proofs: GaugeVec, justifications: GaugeVec, + propagated_extrinsics: Counter, } impl Metrics { @@ -190,6 +191,10 @@ impl Metrics { )?; register(g, r)? 
}, + propagated_extrinsics: register(Counter::new( + "sync_propagated_extrinsics", + "Number of transactions propagated to at least one peer", + )?, r)?, }) } } @@ -1237,6 +1242,12 @@ impl Protocol { } } + if propagated_to.len() > 0 { + if let Some(ref metrics) = self.metrics { + metrics.propagated_extrinsics.inc(); + } + } + propagated_to } -- GitLab From f9d4d302e797e82aecf9782c4af8dc6abc4074a0 Mon Sep 17 00:00:00 2001 From: Denis Pisarev Date: Fri, 19 Jun 2020 18:53:43 +0200 Subject: [PATCH 038/144] change (ci): add interruptible to kubernetes jobs (#6441) --- .gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a21affdeb9..594c9d1dde 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -56,6 +56,7 @@ default: - kubernetes-parity-build environment: name: parity-build + interruptible: true .docker-env: &docker-env image: paritytech/ci-linux:production -- GitLab From 9cbda1eab93a1384086b4b88b6995669ed6435d2 Mon Sep 17 00:00:00 2001 From: Gavin Wood Date: Fri, 19 Jun 2020 20:12:42 +0200 Subject: [PATCH 039/144] Avoid multisig reentrancy (#6445) --- frame/multisig/src/lib.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs index 50bd96aca3..fc7a6c25b3 100644 --- a/frame/multisig/src/lib.rs +++ b/frame/multisig/src/lib.rs @@ -553,10 +553,13 @@ impl Module { // verify weight ensure!(call.get_dispatch_info().weight <= max_weight, Error::::WeightTooLow); - let result = call.dispatch(RawOrigin::Signed(id.clone()).into()); - T::Currency::unreserve(&m.depositor, m.deposit); + // Clean up storage before executing call to avoid an possibility of reentrancy + // attack. >::remove(&id, call_hash); Self::clear_call(&call_hash); + T::Currency::unreserve(&m.depositor, m.deposit); + + let result = call.dispatch(RawOrigin::Signed(id.clone()).into()); Self::deposit_event(RawEvent::MultisigExecuted( who, timepoint, id, call_hash, result.map(|_| ()).map_err(|e| e.error) )); -- GitLab From d0ab405f548225ef2d7fb4a4fbcba136c1e7db8b Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Fri, 19 Jun 2020 21:27:16 +0200 Subject: [PATCH 040/144] Validate encoding of extrinsics passed to runtime (#6442) * Validate encoding of extrinsics passed to runtime * Bump codec version explicitly --- Cargo.lock | 4 ++-- bin/node-template/pallets/template/Cargo.toml | 2 +- bin/node-template/runtime/Cargo.toml | 2 +- bin/node/cli/Cargo.toml | 2 +- bin/node/executor/Cargo.toml | 2 +- bin/node/inspect/Cargo.toml | 2 +- bin/node/primitives/Cargo.toml | 2 +- bin/node/runtime/Cargo.toml | 2 +- bin/node/testing/Cargo.toml | 2 +- bin/utils/subkey/Cargo.toml | 2 +- client/api/Cargo.toml | 2 +- client/authority-discovery/Cargo.toml | 2 +- client/basic-authorship/Cargo.toml | 2 +- client/block-builder/Cargo.toml | 2 +- client/consensus/aura/Cargo.toml | 2 +- client/consensus/babe/Cargo.toml | 2 +- client/consensus/epochs/Cargo.toml | 2 +- client/consensus/pow/Cargo.toml | 2 +- client/consensus/slots/Cargo.toml | 2 +- client/db/Cargo.toml | 2 +- client/executor/Cargo.toml | 2 +- client/executor/common/Cargo.toml | 2 +- client/executor/wasmi/Cargo.toml | 2 +- client/executor/wasmtime/Cargo.toml | 2 +- client/finality-grandpa/Cargo.toml | 2 +- client/light/Cargo.toml | 2 +- client/network/Cargo.toml | 2 +- client/offchain/Cargo.toml | 2 +- client/rpc-api/Cargo.toml | 2 +- client/rpc/Cargo.toml | 2 +- client/service/Cargo.toml | 2 +- client/service/test/Cargo.toml | 2 +- client/state-db/Cargo.toml | 2 +- 
client/transaction-pool/Cargo.toml | 2 +- client/transaction-pool/graph/Cargo.toml | 2 +- frame/assets/Cargo.toml | 2 +- frame/atomic-swap/Cargo.toml | 2 +- frame/aura/Cargo.toml | 2 +- frame/authority-discovery/Cargo.toml | 2 +- frame/authorship/Cargo.toml | 2 +- frame/babe/Cargo.toml | 2 +- frame/balances/Cargo.toml | 2 +- frame/benchmarking/Cargo.toml | 2 +- frame/collective/Cargo.toml | 2 +- frame/contracts/Cargo.toml | 2 +- frame/contracts/common/Cargo.toml | 2 +- frame/contracts/rpc/Cargo.toml | 2 +- frame/contracts/rpc/runtime-api/Cargo.toml | 2 +- frame/democracy/Cargo.toml | 2 +- frame/elections-phragmen/Cargo.toml | 2 +- frame/elections/Cargo.toml | 2 +- frame/evm/Cargo.toml | 2 +- frame/example-offchain-worker/Cargo.toml | 2 +- frame/example/Cargo.toml | 2 +- frame/executive/Cargo.toml | 2 +- frame/finality-tracker/Cargo.toml | 2 +- frame/generic-asset/Cargo.toml | 2 +- frame/grandpa/Cargo.toml | 2 +- frame/identity/Cargo.toml | 2 +- frame/im-online/Cargo.toml | 2 +- frame/indices/Cargo.toml | 2 +- frame/membership/Cargo.toml | 2 +- frame/metadata/Cargo.toml | 2 +- frame/multisig/Cargo.toml | 2 +- frame/nicks/Cargo.toml | 2 +- frame/offences/Cargo.toml | 2 +- frame/offences/benchmarking/Cargo.toml | 4 ++-- frame/proxy/Cargo.toml | 2 +- frame/randomness-collective-flip/Cargo.toml | 2 +- frame/recovery/Cargo.toml | 2 +- frame/scored-pool/Cargo.toml | 2 +- frame/session/Cargo.toml | 2 +- frame/session/benchmarking/Cargo.toml | 2 +- frame/society/Cargo.toml | 2 +- frame/staking/Cargo.toml | 2 +- frame/staking/fuzzer/Cargo.toml | 2 +- frame/sudo/Cargo.toml | 2 +- frame/support/Cargo.toml | 2 +- frame/support/test/Cargo.toml | 2 +- frame/system/Cargo.toml | 2 +- frame/system/benchmarking/Cargo.toml | 2 +- frame/system/rpc/runtime-api/Cargo.toml | 2 +- frame/timestamp/Cargo.toml | 2 +- frame/transaction-payment/Cargo.toml | 2 +- frame/transaction-payment/rpc/Cargo.toml | 2 +- frame/transaction-payment/rpc/runtime-api/Cargo.toml | 2 +- frame/treasury/Cargo.toml | 2 +- frame/utility/Cargo.toml | 2 +- frame/vesting/Cargo.toml | 2 +- primitives/api/Cargo.toml | 2 +- primitives/api/proc-macro/src/decl_runtime_apis.rs | 3 ++- primitives/api/proc-macro/src/impl_runtime_apis.rs | 5 ++++- primitives/api/src/lib.rs | 5 ++++- primitives/api/test/Cargo.toml | 2 +- primitives/api/test/tests/ui/mock_only_one_error_type.stderr | 4 ++-- primitives/application-crypto/Cargo.toml | 2 +- primitives/arithmetic/Cargo.toml | 2 +- primitives/authority-discovery/Cargo.toml | 2 +- primitives/authorship/Cargo.toml | 2 +- primitives/block-builder/Cargo.toml | 2 +- primitives/blockchain/Cargo.toml | 2 +- primitives/consensus/aura/Cargo.toml | 2 +- primitives/consensus/babe/Cargo.toml | 2 +- primitives/consensus/common/Cargo.toml | 2 +- primitives/consensus/pow/Cargo.toml | 2 +- primitives/core/Cargo.toml | 2 +- primitives/externalities/Cargo.toml | 2 +- primitives/finality-grandpa/Cargo.toml | 2 +- primitives/finality-tracker/Cargo.toml | 2 +- primitives/inherents/Cargo.toml | 2 +- primitives/io/Cargo.toml | 2 +- primitives/runtime-interface/Cargo.toml | 2 +- primitives/runtime/Cargo.toml | 2 +- primitives/sandbox/Cargo.toml | 2 +- primitives/session/Cargo.toml | 2 +- primitives/staking/Cargo.toml | 2 +- primitives/state-machine/Cargo.toml | 2 +- primitives/test-primitives/Cargo.toml | 2 +- primitives/timestamp/Cargo.toml | 2 +- primitives/transaction-pool/Cargo.toml | 2 +- primitives/trie/Cargo.toml | 2 +- primitives/version/Cargo.toml | 2 +- primitives/wasm-interface/Cargo.toml | 2 +- 
test-utils/client/Cargo.toml | 2 +- test-utils/runtime/Cargo.toml | 2 +- test-utils/runtime/client/Cargo.toml | 2 +- test-utils/runtime/transaction-pool/Cargo.toml | 2 +- utils/fork-tree/Cargo.toml | 2 +- utils/frame/benchmarking-cli/Cargo.toml | 2 +- utils/frame/rpc/support/Cargo.toml | 2 +- utils/frame/rpc/system/Cargo.toml | 2 +- 131 files changed, 141 insertions(+), 134 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 86744c2537..8a620ab5c2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4759,9 +4759,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "329c8f7f4244ddb5c37c103641027a76c530e65e8e4b8240b29f81ea40508b17" +checksum = "a74f02beb35d47e0706155c9eac554b50c671e0d868fe8296bcdf44a9a4847bf" dependencies = [ "arrayvec 0.5.1", "bitvec", diff --git a/bin/node-template/pallets/template/Cargo.toml b/bin/node-template/pallets/template/Cargo.toml index 6b99b6f807..714c9d93a9 100644 --- a/bin/node-template/pallets/template/Cargo.toml +++ b/bin/node-template/pallets/template/Cargo.toml @@ -12,7 +12,7 @@ description = "FRAME pallet template" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } [dependencies.frame-support] default-features = false diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index 9042edc8fa..16bb0fe0cb 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -11,7 +11,7 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } aura = { version = "2.0.0-rc3", default-features = false, package = "pallet-aura", path = "../../../frame/aura" } balances = { version = "2.0.0-rc3", default-features = false, package = "pallet-balances", path = "../../../frame/balances" } diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 74edf2f257..4e2c0151b7 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -34,7 +34,7 @@ crate-type = ["cdylib", "rlib"] [dependencies] # third-party dependencies -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } serde = { version = "1.0.102", features = ["derive"] } futures = { version = "0.3.1", features = ["compat"] } hex-literal = "0.2.1" diff --git a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml index 64799129fc..2c5a5db281 100644 --- a/bin/node/executor/Cargo.toml +++ b/bin/node/executor/Cargo.toml @@ -12,7 +12,7 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } node-primitives = { version = "2.0.0-rc3", path = "../primitives" } node-runtime = { version = "2.0.0-rc3", path = "../runtime" } sc-executor = { version = "0.8.0-rc3", path = "../../../client/executor" } diff --git a/bin/node/inspect/Cargo.toml b/bin/node/inspect/Cargo.toml index 
1c2f316b40..91202191f1 100644 --- a/bin/node/inspect/Cargo.toml +++ b/bin/node/inspect/Cargo.toml @@ -11,7 +11,7 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } derive_more = "0.99" log = "0.4.8" sc-cli = { version = "0.8.0-rc3", path = "../../../client/cli" } diff --git a/bin/node/primitives/Cargo.toml b/bin/node/primitives/Cargo.toml index ec8d58fe27..75a8cbb332 100644 --- a/bin/node/primitives/Cargo.toml +++ b/bin/node/primitives/Cargo.toml @@ -11,7 +11,7 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } frame-system = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/system" } sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/application-crypto" } sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/core" } diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 7cc4018fb6..b26b53cd6c 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # third-party dependencies -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } integer-sqrt = { version = "0.1.2" } serde = { version = "1.0.102", optional = true } static_assertions = "1.1.0" diff --git a/bin/node/testing/Cargo.toml b/bin/node/testing/Cargo.toml index f423c58fe6..6bf4abc03d 100644 --- a/bin/node/testing/Cargo.toml +++ b/bin/node/testing/Cargo.toml @@ -17,7 +17,7 @@ pallet-balances = { version = "2.0.0-rc3", path = "../../../frame/balances" } sc-service = { version = "0.8.0-rc3", features = ["test-helpers", "db"], path = "../../../client/service" } sc-client-db = { version = "0.8.0-rc3", path = "../../../client/db/", features = ["kvdb-rocksdb", "parity-db"] } sc-client-api = { version = "2.0.0-rc3", path = "../../../client/api/" } -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } pallet-contracts = { version = "2.0.0-rc3", path = "../../../frame/contracts" } pallet-grandpa = { version = "2.0.0-rc3", path = "../../../frame/grandpa" } pallet-indices = { version = "2.0.0-rc3", path = "../../../frame/indices" } diff --git a/bin/utils/subkey/Cargo.toml b/bin/utils/subkey/Cargo.toml index 064470ea7c..fa570f5759 100644 --- a/bin/utils/subkey/Cargo.toml +++ b/bin/utils/subkey/Cargo.toml @@ -22,7 +22,7 @@ tiny-bip39 = "0.7" substrate-bip39 = "0.4.1" hex = "0.4.0" hex-literal = "0.2.1" -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } frame-system = { version = "2.0.0-rc3", path = "../../../frame/system" } pallet-balances = { version = "2.0.0-rc3", path = "../../../frame/balances" } pallet-transaction-payment = { version = "2.0.0-rc3", path = "../../../frame/transaction-payment" } diff --git a/client/api/Cargo.toml 
b/client/api/Cargo.toml index 7730168136..606c1c4813 100644 --- a/client/api/Cargo.toml +++ b/client/api/Cargo.toml @@ -13,7 +13,7 @@ documentation = "https://docs.rs/sc-client-api" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-consensus = { version = "0.8.0-rc3", path = "../../primitives/consensus/common" } derive_more = { version = "0.99.2" } sc-executor = { version = "0.8.0-rc3", path = "../executor" } diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index 8833306f06..114092ab31 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -17,7 +17,7 @@ prost-build = "0.6.1" [dependencies] bytes = "0.5.0" -codec = { package = "parity-scale-codec", default-features = false, version = "1.3.0" } +codec = { package = "parity-scale-codec", default-features = false, version = "1.3.1" } derive_more = "0.99.2" futures = "0.3.4" futures-timer = "3.0.1" diff --git a/client/basic-authorship/Cargo.toml b/client/basic-authorship/Cargo.toml index 964d1c3798..6e3ec49ea7 100644 --- a/client/basic-authorship/Cargo.toml +++ b/client/basic-authorship/Cargo.toml @@ -12,7 +12,7 @@ description = "Basic implementation of block-authoring logic." targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } futures = "0.3.4" futures-timer = "3.0.1" log = "0.4.8" diff --git a/client/block-builder/Cargo.toml b/client/block-builder/Cargo.toml index ff142887ff..ce94526e0c 100644 --- a/client/block-builder/Cargo.toml +++ b/client/block-builder/Cargo.toml @@ -21,7 +21,7 @@ sp-blockchain = { version = "2.0.0-rc3", path = "../../primitives/blockchain" } sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } sp-block-builder = { version = "2.0.0-rc3", path = "../../primitives/block-builder" } sc-client-api = { version = "2.0.0-rc3", path = "../api" } -codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } [dev-dependencies] substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index 1cb1c4657b..04bdc19fe4 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -17,7 +17,7 @@ sp-consensus-aura = { version = "0.8.0-rc3", path = "../../../primitives/consens sp-block-builder = { version = "2.0.0-rc3", path = "../../../primitives/block-builder" } sc-block-builder = { version = "0.8.0-rc3", path = "../../../client/block-builder" } sc-client-api = { version = "2.0.0-rc3", path = "../../api" } -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } sp-consensus = { version = "0.8.0-rc3", path = "../../../primitives/consensus/common" } derive_more = "0.99.2" futures = "0.3.4" diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index cf4e32a94c..4f8f4db264 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -13,7 +13,7 @@ documentation = "https://docs.rs/sc-consensus-babe" targets = ["x86_64-unknown-linux-gnu"] 
[dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } sp-consensus-babe = { version = "0.8.0-rc3", path = "../../../primitives/consensus/babe" } sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } sp-application-crypto = { version = "2.0.0-rc3", path = "../../../primitives/application-crypto" } diff --git a/client/consensus/epochs/Cargo.toml b/client/consensus/epochs/Cargo.toml index c1c47a1a68..3911a59b72 100644 --- a/client/consensus/epochs/Cargo.toml +++ b/client/consensus/epochs/Cargo.toml @@ -12,7 +12,7 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } parking_lot = "0.10.0" fork-tree = { version = "2.0.0-rc3", path = "../../../utils/fork-tree" } sp-runtime = { path = "../../../primitives/runtime" , version = "2.0.0-rc3"} diff --git a/client/consensus/pow/Cargo.toml b/client/consensus/pow/Cargo.toml index b48f54a325..cd8d4cab42 100644 --- a/client/consensus/pow/Cargo.toml +++ b/client/consensus/pow/Cargo.toml @@ -12,7 +12,7 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } sp-blockchain = { version = "2.0.0-rc3", path = "../../../primitives/blockchain" } sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index dae0a924b7..25a137d214 100644 --- a/client/consensus/slots/Cargo.toml +++ b/client/consensus/slots/Cargo.toml @@ -13,7 +13,7 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } sc-client-api = { version = "2.0.0-rc3", path = "../../api" } sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } sp-application-crypto = { version = "2.0.0-rc3", path = "../../../primitives/application-crypto" } diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index d9fcdf7f6a..22ca6e64aa 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -20,7 +20,7 @@ kvdb-memorydb = "0.6.0" linked-hash-map = "0.5.2" hash-db = "0.15.2" parity-util-mem = { version = "0.6.1", default-features = false, features = ["std"] } -codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } blake2-rfc = "0.2.18" sc-client-api = { version = "2.0.0-rc3", path = "../api" } diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index 96d2d9eb94..f1499693f3 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } sp-io = { version = "2.0.0-rc3", path = 
"../../primitives/io" } sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } sp-trie = { version = "2.0.0-rc3", path = "../../primitives/trie" } diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml index e9d2586e36..a6ff79a067 100644 --- a/client/executor/common/Cargo.toml +++ b/client/executor/common/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] log = "0.4.8" derive_more = "0.99.2" parity-wasm = "0.41.0" -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } wasmi = "0.6.2" sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } sp-allocator = { version = "2.0.0-rc3", path = "../../../primitives/allocator" } diff --git a/client/executor/wasmi/Cargo.toml b/client/executor/wasmi/Cargo.toml index 94f28f744b..f3c2ee2c67 100644 --- a/client/executor/wasmi/Cargo.toml +++ b/client/executor/wasmi/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.8" wasmi = "0.6.2" -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } sc-executor-common = { version = "0.8.0-rc3", path = "../common" } sp-wasm-interface = { version = "2.0.0-rc3", path = "../../../primitives/wasm-interface" } sp-runtime-interface = { version = "2.0.0-rc3", path = "../../../primitives/runtime-interface" } diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index 730bc74932..6d008bcee6 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] log = "0.4.8" scoped-tls = "1.0" parity-wasm = "0.41.0" -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } sc-executor-common = { version = "0.8.0-rc3", path = "../common" } sp-wasm-interface = { version = "2.0.0-rc3", path = "../../../primitives/wasm-interface" } sp-runtime-interface = { version = "2.0.0-rc3", path = "../../../primitives/runtime-interface" } diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index d101fe66d0..29b9cdaeba 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -22,7 +22,7 @@ log = "0.4.8" parking_lot = "0.10.0" rand = "0.7.2" assert_matches = "1.3.0" -parity-scale-codec = { version = "1.3.0", features = ["derive"] } +parity-scale-codec = { version = "1.3.1", features = ["derive"] } sp-application-crypto = { version = "2.0.0-rc3", path = "../../primitives/application-crypto" } sp-arithmetic = { version = "2.0.0-rc3", path = "../../primitives/arithmetic" } sp-runtime = { version = "2.0.0-rc3", path = "../../primitives/runtime" } diff --git a/client/light/Cargo.toml b/client/light/Cargo.toml index 1ef35f72ac..490da15364 100644 --- a/client/light/Cargo.toml +++ b/client/light/Cargo.toml @@ -20,7 +20,7 @@ sp-core = { version = "2.0.0-rc2", path = "../../primitives/core" } sp-state-machine = { version = "0.8.0-rc2", path = "../../primitives/state-machine" } sc-client-api = { version = "2.0.0-rc2", path = "../api" } sp-api = { version = "2.0.0-rc2", path = "../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } sc-executor = { version = "0.8.0-rc2", path = "../executor" } [features] diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml 
index 94a7b2b57d..8467aa1154 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -21,7 +21,7 @@ prost-build = "0.6.1" bitflags = "1.2.0" bs58 = "0.3.1" bytes = "0.5.0" -codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } derive_more = "0.99.2" either = "1.5.3" erased-serde = "0.3.9" diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index 7f1d23f558..a9cd8c4dea 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -22,7 +22,7 @@ log = "0.4.8" threadpool = "1.7" num_cpus = "1.10" sp-offchain = { version = "2.0.0-rc3", path = "../../primitives/offchain" } -codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } parking_lot = "0.10.0" sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } rand = "0.7.2" diff --git a/client/rpc-api/Cargo.toml b/client/rpc-api/Cargo.toml index c7aad9a1b3..3e3195b914 100644 --- a/client/rpc-api/Cargo.toml +++ b/client/rpc-api/Cargo.toml @@ -12,7 +12,7 @@ description = "Substrate RPC interfaces." targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } derive_more = "0.99.2" futures = { version = "0.3.1", features = ["compat"] } jsonrpc-core = "14.2.0" diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index f3557ca6b2..9cda4451c1 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] sc-rpc-api = { version = "0.8.0-rc3", path = "../rpc-api" } sc-client-api = { version = "2.0.0-rc3", path = "../api" } sp-api = { version = "2.0.0-rc3", path = "../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } futures = { version = "0.3.1", features = ["compat"] } jsonrpc-pubsub = "14.2.0" log = "0.4.8" diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 71e8e74c4c..1740e6fad4 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -59,7 +59,7 @@ sc-light = { version = "2.0.0-rc3", path = "../light" } sc-client-api = { version = "2.0.0-rc3", path = "../api" } sp-api = { version = "2.0.0-rc3", path = "../../primitives/api" } sc-client-db = { version = "0.8.0-rc3", default-features = false, path = "../db" } -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } sc-executor = { version = "0.8.0-rc3", path = "../executor" } sc-transaction-pool = { version = "2.0.0-rc3", path = "../transaction-pool" } sp-transaction-pool = { version = "2.0.0-rc3", path = "../../primitives/transaction-pool" } diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index 5835dc14c9..7d61e86708 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -41,4 +41,4 @@ sc-client-api = { version = "2.0.0-rc3", path = "../../api" } sc-block-builder = { version = "0.8.0-rc3", path = "../../block-builder" } sc-executor = { version = "0.8.0-rc3", path = "../../executor" } sp-panic-handler = { version = "2.0.0-rc3", path = "../../../primitives/panic-handler" } -parity-scale-codec = "1.3.0" +parity-scale-codec = "1.3.1" diff --git a/client/state-db/Cargo.toml 
b/client/state-db/Cargo.toml index 5b30a2230a..ee9bbf7273 100644 --- a/client/state-db/Cargo.toml +++ b/client/state-db/Cargo.toml @@ -16,7 +16,7 @@ parking_lot = "0.10.0" log = "0.4.8" sc-client-api = { version = "2.0.0-rc3", path = "../api" } sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } parity-util-mem-derive = "0.1.0" diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml index dce8ce48d2..e837f40a34 100644 --- a/client/transaction-pool/Cargo.toml +++ b/client/transaction-pool/Cargo.toml @@ -12,7 +12,7 @@ description = "Substrate transaction pool implementation." targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } derive_more = "0.99.2" futures = { version = "0.3.1", features = ["compat"] } futures-diagnose = "1.0" diff --git a/client/transaction-pool/graph/Cargo.toml b/client/transaction-pool/graph/Cargo.toml index e174b31988..cb16af0f53 100644 --- a/client/transaction-pool/graph/Cargo.toml +++ b/client/transaction-pool/graph/Cargo.toml @@ -28,7 +28,7 @@ linked-hash-map = "0.5.2" [dev-dependencies] assert_matches = "1.3.0" -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } substrate-test-runtime = { version = "2.0.0-rc3", path = "../../../test-utils/runtime" } criterion = "0.3" diff --git a/frame/assets/Cargo.toml b/frame/assets/Cargo.toml index 0039e8898c..33882671a4 100644 --- a/frame/assets/Cargo.toml +++ b/frame/assets/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } # Needed for various traits. In our case, `OnFinalize`. sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } # Needed for type-safe access to storage DB. 
diff --git a/frame/atomic-swap/Cargo.toml b/frame/atomic-swap/Cargo.toml index a3bf95b2e2..ce32d8b783 100644 --- a/frame/atomic-swap/Cargo.toml +++ b/frame/atomic-swap/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } diff --git a/frame/aura/Cargo.toml b/frame/aura/Cargo.toml index f28171ae91..5a60d23270 100644 --- a/frame/aura/Cargo.toml +++ b/frame/aura/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/application-crypto" } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/inherents" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } diff --git a/frame/authority-discovery/Cargo.toml b/frame/authority-discovery/Cargo.toml index 5cd93d3c31..e3c7a256a9 100644 --- a/frame/authority-discovery/Cargo.toml +++ b/frame/authority-discovery/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-authority-discovery = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/authority-discovery" } sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/application-crypto" } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } pallet-session = { version = "2.0.0-rc3", features = ["historical" ], path = "../session", default-features = false } diff --git a/frame/authorship/Cargo.toml b/frame/authorship/Cargo.toml index 6f7ae89762..9cc25b075d 100644 --- a/frame/authorship/Cargo.toml +++ b/frame/authorship/Cargo.toml @@ -12,7 +12,7 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/inherents" } sp-authorship = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/authorship" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } diff --git a/frame/babe/Cargo.toml b/frame/babe/Cargo.toml index dcc6b29376..5e9dcf7fb5 100644 --- a/frame/babe/Cargo.toml +++ 
b/frame/babe/Cargo.toml @@ -12,7 +12,7 @@ description = "Consensus extension module for BABE consensus. Collects on-chain targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true } sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/inherents" } sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/application-crypto" } diff --git a/frame/balances/Cargo.toml b/frame/balances/Cargo.toml index d65090b540..02b5732e00 100644 --- a/frame/balances/Cargo.toml +++ b/frame/balances/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } frame-benchmarking = { version = "2.0.0-rc3", default-features = false, path = "../benchmarking", optional = true } diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml index 3b383d2a96..5c6306ebbb 100644 --- a/frame/benchmarking/Cargo.toml +++ b/frame/benchmarking/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] linregress = "0.1" paste = "0.1" -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } sp-api = { version = "2.0.0-rc3", path = "../../primitives/api", default-features = false } sp-runtime-interface = { version = "2.0.0-rc3", path = "../../primitives/runtime-interface", default-features = false } sp-runtime = { version = "2.0.0-rc3", path = "../../primitives/runtime", default-features = false } diff --git a/frame/collective/Cargo.toml b/frame/collective/Cargo.toml index 071fc83b72..5517f3b03f 100644 --- a/frame/collective/Cargo.toml +++ b/frame/collective/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index 6c41875c63..b2ba8d014a 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } pwasm-utils = { version = "0.12.0", default-features = false } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = 
["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } parity-wasm = { version = "0.41.0", default-features = false } wasmi-validation = { version = "0.3.0", default-features = false } sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } diff --git a/frame/contracts/common/Cargo.toml b/frame/contracts/common/Cargo.toml index 4a0581524e..520b723933 100644 --- a/frame/contracts/common/Cargo.toml +++ b/frame/contracts/common/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # This crate should not rely on any of the frame primitives. -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/std" } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/runtime" } diff --git a/frame/contracts/rpc/Cargo.toml b/frame/contracts/rpc/Cargo.toml index ccfb2ad511..75dc1bf3fb 100644 --- a/frame/contracts/rpc/Cargo.toml +++ b/frame/contracts/rpc/Cargo.toml @@ -12,7 +12,7 @@ description = "Node-specific RPC methods for interaction with contracts." targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } jsonrpc-core = "14.2.0" jsonrpc-core-client = "14.2.0" jsonrpc-derive = "14.2.1" diff --git a/frame/contracts/rpc/runtime-api/Cargo.toml b/frame/contracts/rpc/runtime-api/Cargo.toml index 60d7965342..3596677316 100644 --- a/frame/contracts/rpc/runtime-api/Cargo.toml +++ b/frame/contracts/rpc/runtime-api/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-api = { version = "2.0.0-rc3", default-features = false, path = "../../../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../../../primitives/std" } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../../../primitives/runtime" } pallet-contracts-primitives = { version = "2.0.0-rc3", default-features = false, path = "../../common" } diff --git a/frame/democracy/Cargo.toml b/frame/democracy/Cargo.toml index 9af038aab3..fea378caca 100644 --- a/frame/democracy/Cargo.toml +++ b/frame/democracy/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } diff --git a/frame/elections-phragmen/Cargo.toml b/frame/elections-phragmen/Cargo.toml index a33ce0ed29..08cdc5a98e 100644 --- 
a/frame/elections-phragmen/Cargo.toml +++ b/frame/elections-phragmen/Cargo.toml @@ -12,7 +12,7 @@ description = "FRAME pallet based on seq-Phragmén election method." targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } sp-npos-elections = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/npos-elections" } diff --git a/frame/elections/Cargo.toml b/frame/elections/Cargo.toml index 60bb2dcb62..d03ad4f056 100644 --- a/frame/elections/Cargo.toml +++ b/frame/elections/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } diff --git a/frame/evm/Cargo.toml b/frame/evm/Cargo.toml index c465090743..1a6d691cde 100644 --- a/frame/evm/Cargo.toml +++ b/frame/evm/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } pallet-timestamp = { version = "2.0.0-rc3", default-features = false, path = "../timestamp" } diff --git a/frame/example-offchain-worker/Cargo.toml b/frame/example-offchain-worker/Cargo.toml index d32f206de8..f93ffcf9e4 100644 --- a/frame/example-offchain-worker/Cargo.toml +++ b/frame/example-offchain-worker/Cargo.toml @@ -12,7 +12,7 @@ description = "FRAME example pallet for offchain worker" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } serde = { version = "1.0.101", optional = true } diff --git a/frame/example/Cargo.toml b/frame/example/Cargo.toml index 89be881437..597f2266c3 100644 --- a/frame/example/Cargo.toml +++ b/frame/example/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } 
frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } pallet-balances = { version = "2.0.0-rc3", default-features = false, path = "../balances" } diff --git a/frame/executive/Cargo.toml b/frame/executive/Cargo.toml index 303cf1c1f7..a922333eb9 100644 --- a/frame/executive/Cargo.toml +++ b/frame/executive/Cargo.toml @@ -12,7 +12,7 @@ description = "FRAME executives engine" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } serde = { version = "1.0.101", optional = true } diff --git a/frame/finality-tracker/Cargo.toml b/frame/finality-tracker/Cargo.toml index a8e11e83f9..497f4fdec7 100644 --- a/frame/finality-tracker/Cargo.toml +++ b/frame/finality-tracker/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", default-features = false, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/inherents" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } diff --git a/frame/generic-asset/Cargo.toml b/frame/generic-asset/Cargo.toml index eb62b2986f..cdac7a6d6d 100644 --- a/frame/generic-asset/Cargo.toml +++ b/frame/generic-asset/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } diff --git a/frame/grandpa/Cargo.toml b/frame/grandpa/Cargo.toml index 99a5dad149..1ec939c9bd 100644 --- a/frame/grandpa/Cargo.toml +++ b/frame/grandpa/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/application-crypto" } sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } sp-finality-grandpa = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/finality-grandpa" } diff --git a/frame/identity/Cargo.toml b/frame/identity/Cargo.toml index 39eae1b689..0435d8c086 100644 --- a/frame/identity/Cargo.toml +++ 
b/frame/identity/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } diff --git a/frame/im-online/Cargo.toml b/frame/im-online/Cargo.toml index 99979a47c0..2f89ff2cb2 100644 --- a/frame/im-online/Cargo.toml +++ b/frame/im-online/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/application-crypto" } pallet-authorship = { version = "2.0.0-rc3", default-features = false, path = "../authorship" } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } diff --git a/frame/indices/Cargo.toml b/frame/indices/Cargo.toml index a52ad8e331..2c856064e7 100644 --- a/frame/indices/Cargo.toml +++ b/frame/indices/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-keyring = { version = "2.0.0-rc3", optional = true, path = "../../primitives/keyring" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } diff --git a/frame/membership/Cargo.toml b/frame/membership/Cargo.toml index 6ea035c3f7..e0c94da308 100644 --- a/frame/membership/Cargo.toml +++ b/frame/membership/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } diff --git a/frame/metadata/Cargo.toml b/frame/metadata/Cargo.toml index 459f76b5e8..a8fb9eae5f 100644 --- a/frame/metadata/Cargo.toml +++ b/frame/metadata/Cargo.toml @@ -12,7 +12,7 @@ description = "Decodable variant of the RuntimeMetadata." 
targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true, features = ["derive"] } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } diff --git a/frame/multisig/Cargo.toml b/frame/multisig/Cargo.toml index 00f3e51f38..44ea4dc3e9 100644 --- a/frame/multisig/Cargo.toml +++ b/frame/multisig/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } diff --git a/frame/nicks/Cargo.toml b/frame/nicks/Cargo.toml index 229c548ead..544a0dc734 100644 --- a/frame/nicks/Cargo.toml +++ b/frame/nicks/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } diff --git a/frame/offences/Cargo.toml b/frame/offences/Cargo.toml index fa36f42e4a..0b8b74c4a9 100644 --- a/frame/offences/Cargo.toml +++ b/frame/offences/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] pallet-balances = { version = "2.0.0-rc3", default-features = false, path = "../balances" } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } diff --git a/frame/offences/benchmarking/Cargo.toml b/frame/offences/benchmarking/Cargo.toml index 366736ac4c..ad8520484e 100644 --- a/frame/offences/benchmarking/Cargo.toml +++ b/frame/offences/benchmarking/Cargo.toml @@ -12,7 +12,7 @@ description = "FRAME offences pallet benchmarking" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } frame-benchmarking = { version = "2.0.0-rc3", default-features = false, path = "../../benchmarking" } frame-support = { version = "2.0.0-rc3", default-features = false, path = 
"../../support" } frame-system = { version = "2.0.0-rc3", default-features = false, path = "../../system" } @@ -28,7 +28,7 @@ sp-staking = { version = "2.0.0-rc3", default-features = false, path = "../../.. sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/std" } [dev-dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } pallet-staking-reward-curve = { version = "2.0.0-rc3", path = "../../staking/reward-curve" } pallet-timestamp = { version = "2.0.0-rc3", path = "../../timestamp" } serde = { version = "1.0.101" } diff --git a/frame/proxy/Cargo.toml b/frame/proxy/Cargo.toml index beb924ab27..215f362cc8 100644 --- a/frame/proxy/Cargo.toml +++ b/frame/proxy/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } diff --git a/frame/randomness-collective-flip/Cargo.toml b/frame/randomness-collective-flip/Cargo.toml index fb3775a625..7e64539491 100644 --- a/frame/randomness-collective-flip/Cargo.toml +++ b/frame/randomness-collective-flip/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] safe-mix = { version = "1.0", default-features = false } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } diff --git a/frame/recovery/Cargo.toml b/frame/recovery/Cargo.toml index 88a738b058..33f7b5e521 100644 --- a/frame/recovery/Cargo.toml +++ b/frame/recovery/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } diff --git a/frame/scored-pool/Cargo.toml b/frame/scored-pool/Cargo.toml index b5009079d2..d1e0a5d62e 100644 --- a/frame/scored-pool/Cargo.toml +++ b/frame/scored-pool/Cargo.toml @@ -12,7 +12,7 @@ description = "FRAME pallet for scored pools" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } serde = { version = 
"1.0.101", optional = true } sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } diff --git a/frame/session/Cargo.toml b/frame/session/Cargo.toml index 6b74e3ef5f..38eef24bc6 100644 --- a/frame/session/Cargo.toml +++ b/frame/session/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } diff --git a/frame/session/benchmarking/Cargo.toml b/frame/session/benchmarking/Cargo.toml index da969932b1..b2c70c28d1 100644 --- a/frame/session/benchmarking/Cargo.toml +++ b/frame/session/benchmarking/Cargo.toml @@ -22,7 +22,7 @@ pallet-session = { version = "2.0.0-rc3", default-features = false, path = "../. [dev-dependencies] serde = { version = "1.0.101" } -codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } pallet-staking-reward-curve = { version = "2.0.0-rc3", path = "../../staking/reward-curve" } sp-io ={ version = "2.0.0-rc3", path = "../../../primitives/io" } diff --git a/frame/society/Cargo.toml b/frame/society/Cargo.toml index c9e4f9cb40..eb28046d3f 100644 --- a/frame/society/Cargo.toml +++ b/frame/society/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml index 829f39b70b..45b2b42d97 100644 --- a/frame/staking/Cargo.toml +++ b/frame/staking/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] static_assertions = "1.1.0" serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } sp-npos-elections = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/npos-elections" } sp-io ={ version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } diff --git a/frame/staking/fuzzer/Cargo.toml b/frame/staking/fuzzer/Cargo.toml index 6362ebf414..97d79ecad5 100644 --- a/frame/staking/fuzzer/Cargo.toml +++ 
b/frame/staking/fuzzer/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] honggfuzz = "0.5" -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } pallet-staking = { version = "2.0.0-rc3", path = "..", features = ["runtime-benchmarks"] } pallet-staking-reward-curve = { version = "2.0.0-rc3", path = "../reward-curve" } pallet-session = { version = "2.0.0-rc3", path = "../../session" } diff --git a/frame/sudo/Cargo.toml b/frame/sudo/Cargo.toml index 5aef45f8c2..1bdd2aab69 100644 --- a/frame/sudo/Cargo.toml +++ b/frame/sudo/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index dd9354d019..e648eaf32d 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4" serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } frame-metadata = { version = "11.0.0-rc3", default-features = false, path = "../metadata" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } diff --git a/frame/support/test/Cargo.toml b/frame/support/test/Cargo.toml index a68edc6238..d6e7d7d633 100644 --- a/frame/support/test/Cargo.toml +++ b/frame/support/test/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", default-features = false, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-io ={ version = "2.0.0-rc3", path = "../../../primitives/io", default-features = false } sp-state-machine = { version = "0.8.0-rc3", optional = true, path = "../../../primitives/state-machine" } frame-support = { version = "2.0.0-rc3", default-features = false, path = "../" } diff --git a/frame/system/Cargo.toml b/frame/system/Cargo.toml index ca1b5d6a12..af3288a907 100644 --- a/frame/system/Cargo.toml +++ b/frame/system/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } 
sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } sp-io = { version = "2.0.0-rc3", path = "../../primitives/io", default-features = false } diff --git a/frame/system/benchmarking/Cargo.toml b/frame/system/benchmarking/Cargo.toml index 71896f8a39..b1636c21e5 100644 --- a/frame/system/benchmarking/Cargo.toml +++ b/frame/system/benchmarking/Cargo.toml @@ -12,7 +12,7 @@ description = "FRAME System benchmarking" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/std" } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/runtime" } frame-benchmarking = { version = "2.0.0-rc3", default-features = false, path = "../../benchmarking" } diff --git a/frame/system/rpc/runtime-api/Cargo.toml b/frame/system/rpc/runtime-api/Cargo.toml index 4f599d6d47..d919fd1b58 100644 --- a/frame/system/rpc/runtime-api/Cargo.toml +++ b/frame/system/rpc/runtime-api/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-api = { version = "2.0.0-rc3", default-features = false, path = "../../../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } [features] default = ["std"] diff --git a/frame/timestamp/Cargo.toml b/frame/timestamp/Cargo.toml index 804f17a23a..7d08164bdd 100644 --- a/frame/timestamp/Cargo.toml +++ b/frame/timestamp/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io", optional = true } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } diff --git a/frame/transaction-payment/Cargo.toml b/frame/transaction-payment/Cargo.toml index a8b23bfda0..f7a15d962b 100644 --- a/frame/transaction-payment/Cargo.toml +++ b/frame/transaction-payment/Cargo.toml @@ -12,7 +12,7 @@ description = "FRAME pallet to manage transaction payments" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } diff --git a/frame/transaction-payment/rpc/Cargo.toml b/frame/transaction-payment/rpc/Cargo.toml index 2f1e0f06d7..22be6e700b 100644 --- a/frame/transaction-payment/rpc/Cargo.toml +++ b/frame/transaction-payment/rpc/Cargo.toml @@ -12,7 +12,7 @@ 
description = "RPC interface for the transaction payment module." targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } jsonrpc-core = "14.2.0" jsonrpc-core-client = "14.2.0" jsonrpc-derive = "14.2.1" diff --git a/frame/transaction-payment/rpc/runtime-api/Cargo.toml b/frame/transaction-payment/rpc/runtime-api/Cargo.toml index 8ffa6fb6ee..e63b94cb4b 100644 --- a/frame/transaction-payment/rpc/runtime-api/Cargo.toml +++ b/frame/transaction-payment/rpc/runtime-api/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } sp-api = { version = "2.0.0-rc3", default-features = false, path = "../../../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../../../primitives/std" } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../../../primitives/runtime" } frame-support = { version = "2.0.0-rc3", default-features = false, path = "../../../support" } diff --git a/frame/treasury/Cargo.toml b/frame/treasury/Cargo.toml index 4c0aae3713..338a6f1dec 100644 --- a/frame/treasury/Cargo.toml +++ b/frame/treasury/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } diff --git a/frame/utility/Cargo.toml b/frame/utility/Cargo.toml index 65eae9d4cc..f14274d709 100644 --- a/frame/utility/Cargo.toml +++ b/frame/utility/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } diff --git a/frame/vesting/Cargo.toml b/frame/vesting/Cargo.toml index 885768e365..a98a59acef 100644 --- a/frame/vesting/Cargo.toml +++ b/frame/vesting/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } 
sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } diff --git a/primitives/api/Cargo.toml b/primitives/api/Cargo.toml index 6e46586975..46bd9164ac 100644 --- a/primitives/api/Cargo.toml +++ b/primitives/api/Cargo.toml @@ -12,7 +12,7 @@ description = "Substrate runtime api primitives" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } sp-api-proc-macro = { version = "2.0.0-rc3", path = "proc-macro" } sp-core = { version = "2.0.0-rc3", default-features = false, path = "../core" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } diff --git a/primitives/api/proc-macro/src/decl_runtime_apis.rs b/primitives/api/proc-macro/src/decl_runtime_apis.rs index 7e1391b7b5..93ec09d0e6 100644 --- a/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -191,7 +191,8 @@ fn generate_native_call_generators(decl: &ItemTrait) -> Result<TokenStream> { input: &I, error_desc: &'static str, ) -> std::result::Result<R, String> { - <R as #crate_::Decode>::decode( + <R as #crate_::DecodeLimit>::decode_with_depth_limit( + #crate_::MAX_EXTRINSIC_DEPTH, &mut &#crate_::Encode::encode(input)[..], ).map_err(|e| format!("{} {}", error_desc, e.what())) } diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 2878bd2c13..b999b9eefd 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -83,7 +83,10 @@ fn generate_impl_call( Ok( quote!( #( - let #pnames : #ptypes = match #c_iter::Decode::decode(&mut #input) { + let #pnames : #ptypes = match #c_iter::DecodeLimit::decode_all_with_depth_limit( + #c_iter::MAX_EXTRINSIC_DEPTH, + &mut #input ) { Ok(input) => input, Err(e) => panic!("Bad input data provided to {}: {}", #fn_name_str, e.what()), }; diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index ec15c1eae7..0aaf72e2d2 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -69,11 +69,14 @@ pub use sp_std::{slice, mem}; #[cfg(feature = "std")] use sp_std::result; #[doc(hidden)] -pub use codec::{Encode, Decode}; +pub use codec::{Encode, Decode, DecodeLimit}; use sp_core::OpaqueMetadata; #[cfg(feature = "std")] use std::{panic::UnwindSafe, cell::RefCell}; +/// Maximum nesting level for extrinsics. +pub const MAX_EXTRINSIC_DEPTH: u32 = 256; + /// Declares given traits as runtime apis. 
/// /// The macro will create two declarations, one for using on the client side and one for using diff --git a/primitives/api/test/Cargo.toml b/primitives/api/test/Cargo.toml index 79bd37c826..04181d93f0 100644 --- a/primitives/api/test/Cargo.toml +++ b/primitives/api/test/Cargo.toml @@ -19,7 +19,7 @@ sp-runtime = { version = "2.0.0-rc3", path = "../../runtime" } sp-blockchain = { version = "2.0.0-rc3", path = "../../blockchain" } sp-consensus = { version = "0.8.0-rc3", path = "../../../primitives/consensus/common" } sc-block-builder = { version = "0.8.0-rc3", path = "../../../client/block-builder" } -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } sp-state-machine = { version = "0.8.0-rc3", path = "../../../primitives/state-machine" } trybuild = "1.0.17" rustversion = "1.0.0" diff --git a/primitives/api/test/tests/ui/mock_only_one_error_type.stderr b/primitives/api/test/tests/ui/mock_only_one_error_type.stderr index 281f0024ee..65d05e83a7 100644 --- a/primitives/api/test/tests/ui/mock_only_one_error_type.stderr +++ b/primitives/api/test/tests/ui/mock_only_one_error_type.stderr @@ -16,9 +16,9 @@ error[E0277]: the trait bound `u32: std::convert::From` is 27 | | } | |_^ the trait `std::convert::From` is not implemented for `u32` | - ::: $WORKSPACE/primitives/api/src/lib.rs:347:35 + ::: $WORKSPACE/primitives/api/src/lib.rs:350:35 | -347 | type Error: std::fmt::Debug + From; +350 | type Error: std::fmt::Debug + From; | ------------ required by this bound in `sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ApiErrorExt` | = help: the following implementations were found: diff --git a/primitives/application-crypto/Cargo.toml b/primitives/application-crypto/Cargo.toml index ebc716cd72..29f385a54a 100644 --- a/primitives/application-crypto/Cargo.toml +++ b/primitives/application-crypto/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-core = { version = "2.0.0-rc3", default-features = false, path = "../core" } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true, features = ["derive"] } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } diff --git a/primitives/arithmetic/Cargo.toml b/primitives/arithmetic/Cargo.toml index 0912d6a69e..b4c655c968 100644 --- a/primitives/arithmetic/Cargo.toml +++ b/primitives/arithmetic/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } integer-sqrt = "0.1.2" num-traits = { version = "0.2.8", default-features = false } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } diff --git a/primitives/authority-discovery/Cargo.toml b/primitives/authority-discovery/Cargo.toml index 4201cd342b..584aef986a 100644 --- a/primitives/authority-discovery/Cargo.toml +++ b/primitives/authority-discovery/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = 
"../application-crypto" } -codec = { package = "parity-scale-codec", default-features = false, version = "1.3.0" } +codec = { package = "parity-scale-codec", default-features = false, version = "1.3.1" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } sp-api = { version = "2.0.0-rc3", default-features = false, path = "../api" } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../runtime" } diff --git a/primitives/authorship/Cargo.toml b/primitives/authorship/Cargo.toml index 4ca6f06207..eb52ca3e0c 100644 --- a/primitives/authorship/Cargo.toml +++ b/primitives/authorship/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../inherents" } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../runtime" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } [features] default = [ "std" ] diff --git a/primitives/block-builder/Cargo.toml b/primitives/block-builder/Cargo.toml index 968107e69a..8f8976949d 100644 --- a/primitives/block-builder/Cargo.toml +++ b/primitives/block-builder/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../runtime" } sp-api = { version = "2.0.0-rc3", default-features = false, path = "../api" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../inherents" } [features] diff --git a/primitives/blockchain/Cargo.toml b/primitives/blockchain/Cargo.toml index bf1e5d8354..b4c22a524a 100644 --- a/primitives/blockchain/Cargo.toml +++ b/primitives/blockchain/Cargo.toml @@ -17,7 +17,7 @@ log = "0.4.8" lru = "0.4.0" parking_lot = "0.10.0" derive_more = "0.99.2" -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-consensus = { version = "0.8.0-rc3", path = "../consensus/common" } sp-runtime = { version = "2.0.0-rc3", path = "../runtime" } sp-block-builder = { version = "2.0.0-rc3", path = "../block-builder" } diff --git a/primitives/consensus/aura/Cargo.toml b/primitives/consensus/aura/Cargo.toml index 9dddc47fe2..24b82f4642 100644 --- a/primitives/consensus/aura/Cargo.toml +++ b/primitives/consensus/aura/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = "../../application-crypto" } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../std" } sp-api = { version = "2.0.0-rc3", default-features = false, path = "../../api" } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../runtime" } diff --git 
a/primitives/consensus/babe/Cargo.toml b/primitives/consensus/babe/Cargo.toml index 538b0a5b05..978b415dc5 100644 --- a/primitives/consensus/babe/Cargo.toml +++ b/primitives/consensus/babe/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = "../../application-crypto" } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } merlin = { version = "2.0", default-features = false } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../std" } sp-api = { version = "2.0.0-rc3", default-features = false, path = "../../api" } diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index 3f256d3f73..d63abab883 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -26,7 +26,7 @@ sp-std = { version = "2.0.0-rc3", path = "../../std" } sp-version = { version = "2.0.0-rc3", path = "../../version" } sp-runtime = { version = "2.0.0-rc3", path = "../../runtime" } sp-utils = { version = "2.0.0-rc3", path = "../../utils" } -codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } parking_lot = "0.10.0" serde = { version = "1.0", features = ["derive"] } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc3"} diff --git a/primitives/consensus/pow/Cargo.toml b/primitives/consensus/pow/Cargo.toml index f8b254ff6e..9f9fedb76c 100644 --- a/primitives/consensus/pow/Cargo.toml +++ b/primitives/consensus/pow/Cargo.toml @@ -16,7 +16,7 @@ sp-api = { version = "2.0.0-rc3", default-features = false, path = "../../api" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../std" } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../runtime" } sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../core" } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } [features] default = ["std"] diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 69872349ff..3c37f57e70 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } log = { version = "0.4.8", default-features = false } serde = { version = "1.0.101", optional = true, features = ["derive"] } byteorder = { version = "1.3.2", default-features = false } diff --git a/primitives/externalities/Cargo.toml b/primitives/externalities/Cargo.toml index faa95fd9a1..3af61bbeb0 100644 --- a/primitives/externalities/Cargo.toml +++ b/primitives/externalities/Cargo.toml @@ -16,4 +16,4 @@ targets = ["x86_64-unknown-linux-gnu"] sp-storage = { version = "2.0.0-rc3", path = "../storage" } sp-std = { version = "2.0.0-rc3", 
path = "../std" } environmental = { version = "1.1.1" } -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } diff --git a/primitives/finality-grandpa/Cargo.toml b/primitives/finality-grandpa/Cargo.toml index 254c27e8dd..27315b0ff9 100644 --- a/primitives/finality-grandpa/Cargo.toml +++ b/primitives/finality-grandpa/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = "../application-crypto" } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } grandpa = { package = "finality-grandpa", version = "0.12.3", default-features = false, features = ["derive-codec"] } log = { version = "0.4.8", optional = true } serde = { version = "1.0.101", optional = true, features = ["derive"] } diff --git a/primitives/finality-tracker/Cargo.toml b/primitives/finality-tracker/Cargo.toml index 779507ea81..60ed88c110 100644 --- a/primitives/finality-tracker/Cargo.toml +++ b/primitives/finality-tracker/Cargo.toml @@ -12,7 +12,7 @@ description = "FRAME module that tracks the last finalized block, as perceived b targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/inherents" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } diff --git a/primitives/inherents/Cargo.toml b/primitives/inherents/Cargo.toml index 2e3820d392..503aa29d29 100644 --- a/primitives/inherents/Cargo.toml +++ b/primitives/inherents/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] parking_lot = { version = "0.10.0", optional = true } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } sp-core = { version = "2.0.0-rc3", default-features = false, path = "../core" } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } derive_more = { version = "0.99.2", optional = true } [features] diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml index df66740d65..8bb113b1f1 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } hash-db = { version = "0.15.2", default-features = false } sp-core = { version = "2.0.0-rc3", default-features = false, path = "../core" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } diff --git a/primitives/runtime-interface/Cargo.toml b/primitives/runtime-interface/Cargo.toml index 3a3d625b5f..12d070b47c 100644 --- a/primitives/runtime-interface/Cargo.toml +++ b/primitives/runtime-interface/Cargo.toml @@ -18,7 +18,7 @@ sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } sp-tracing = { version = "2.0.0-rc3", default-features = false, path = 
"../tracing" } sp-runtime-interface-proc-macro = { version = "2.0.0-rc3", path = "proc-macro" } sp-externalities = { version = "0.8.0-rc3", optional = true, path = "../externalities" } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } static_assertions = "1.0.0" primitive-types = { version = "0.7.0", default-features = false } diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index c38faa15a8..d3508c0e8b 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-core = { version = "2.0.0-rc3", default-features = false, path = "../core" } sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = "../application-crypto" } sp-arithmetic = { version = "2.0.0-rc3", default-features = false, path = "../arithmetic" } diff --git a/primitives/sandbox/Cargo.toml b/primitives/sandbox/Cargo.toml index 1a2175aebd..dfd3a44053 100755 --- a/primitives/sandbox/Cargo.toml +++ b/primitives/sandbox/Cargo.toml @@ -17,7 +17,7 @@ sp-core = { version = "2.0.0-rc3", default-features = false, path = "../core" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } sp-io = { version = "2.0.0-rc3", default-features = false, path = "../io" } sp-wasm-interface = { version = "2.0.0-rc3", default-features = false, path = "../wasm-interface" } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } [dev-dependencies] wabt = "0.9.2" diff --git a/primitives/session/Cargo.toml b/primitives/session/Cargo.toml index b3dd297ceb..4abcb80d24 100644 --- a/primitives/session/Cargo.toml +++ b/primitives/session/Cargo.toml @@ -12,7 +12,7 @@ description = "Primitives for sessions" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-api = { version = "2.0.0-rc3", default-features = false, path = "../api" } sp-core = { version = "2.0.0-rc3", default-features = false, path = "../core" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } diff --git a/primitives/staking/Cargo.toml b/primitives/staking/Cargo.toml index ce44d8a0f7..7ec400d74a 100644 --- a/primitives/staking/Cargo.toml +++ b/primitives/staking/Cargo.toml @@ -12,7 +12,7 @@ description = "A crate which contains primitives that are useful for implementat targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../runtime" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } diff --git 
a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index b94195db90..77b9e304d4 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -21,7 +21,7 @@ trie-root = "0.16.0" sp-trie = { version = "2.0.0-rc3", path = "../trie" } sp-core = { version = "2.0.0-rc3", path = "../core" } sp-panic-handler = { version = "2.0.0-rc3", path = "../panic-handler" } -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } num-traits = "0.2.8" rand = "0.7.2" sp-externalities = { version = "0.8.0-rc3", path = "../externalities" } diff --git a/primitives/test-primitives/Cargo.toml b/primitives/test-primitives/Cargo.toml index 87cb398579..abc47f6f9a 100644 --- a/primitives/test-primitives/Cargo.toml +++ b/primitives/test-primitives/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = "../application-crypto" } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-core = { version = "2.0.0-rc3", default-features = false, path = "../core" } serde = { version = "1.0.101", optional = true, features = ["derive"] } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../runtime" } diff --git a/primitives/timestamp/Cargo.toml b/primitives/timestamp/Cargo.toml index 59c090eb46..5b2217f0f3 100644 --- a/primitives/timestamp/Cargo.toml +++ b/primitives/timestamp/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] sp-api = { version = "2.0.0-rc3", default-features = false, path = "../api" } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../runtime" } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../inherents" } impl-trait-for-tuples = "0.1.3" wasm-timer = { version = "0.2", optional = true } diff --git a/primitives/transaction-pool/Cargo.toml b/primitives/transaction-pool/Cargo.toml index 94daf519db..6417ae8c29 100644 --- a/primitives/transaction-pool/Cargo.toml +++ b/primitives/transaction-pool/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", optional = true } +codec = { package = "parity-scale-codec", version = "1.3.1", optional = true } derive_more = { version = "0.99.2", optional = true } futures = { version = "0.3.1", optional = true } log = { version = "0.4.8", optional = true } diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 823d5bc5df..d99a3d1ae7 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -17,7 +17,7 @@ name = "bench" harness = false [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } hash-db = { version = "0.15.2", default-features = false } trie-db = { 
version = "0.21.0", default-features = false } diff --git a/primitives/version/Cargo.toml b/primitives/version/Cargo.toml index eb8c845075..18357953d7 100644 --- a/primitives/version/Cargo.toml +++ b/primitives/version/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] impl-serde = { version = "0.2.3", optional = true } serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../runtime" } diff --git a/primitives/wasm-interface/Cargo.toml b/primitives/wasm-interface/Cargo.toml index 39684a0381..c2e70ce1e4 100644 --- a/primitives/wasm-interface/Cargo.toml +++ b/primitives/wasm-interface/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] wasmi = { version = "0.6.2", optional = true } impl-trait-for-tuples = "0.1.2" sp-std = { version = "2.0.0-rc3", path = "../std", default-features = false } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } [features] default = [ "std" ] diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index 331c7e2801..f5604ceb23 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -22,7 +22,7 @@ sc-service = { version = "0.8.0-rc3", default-features = false, features = ["tes futures = "0.3.4" hash-db = "0.15.2" sp-keyring = { version = "2.0.0-rc3", path = "../../primitives/keyring" } -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } sp-runtime = { version = "2.0.0-rc3", path = "../../primitives/runtime" } sp-blockchain = { version = "2.0.0-rc3", path = "../../primitives/blockchain" } diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 9016ddbff5..e307522ead 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -17,7 +17,7 @@ sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path sp-consensus-aura = { version = "0.8.0-rc3", default-features = false, path = "../../primitives/consensus/aura" } sp-consensus-babe = { version = "0.8.0-rc3", default-features = false, path = "../../primitives/consensus/babe" } sp-block-builder = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/block-builder" } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } frame-executive = { version = "2.0.0-rc3", default-features = false, path = "../../frame/executive" } sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/inherents" } sp-keyring = { version = "2.0.0-rc3", optional = true, path = "../../primitives/keyring" } diff --git a/test-utils/runtime/client/Cargo.toml b/test-utils/runtime/client/Cargo.toml index 1b41b63b99..7a69f5ed22 100644 --- a/test-utils/runtime/client/Cargo.toml +++ 
b/test-utils/runtime/client/Cargo.toml @@ -21,7 +21,7 @@ substrate-test-runtime = { version = "2.0.0-rc3", path = "../../runtime" } sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } sp-api = { version = "2.0.0-rc3", path = "../../../primitives/api" } sp-blockchain = { version = "2.0.0-rc3", path = "../../../primitives/blockchain" } -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } sc-client-api = { version = "2.0.0-rc3", path = "../../../client/api" } sc-consensus = { version = "0.8.0-rc3", path = "../../../client/consensus/common" } sc-service = { version = "0.8.0-rc3", default-features = false, path = "../../../client/service" } diff --git a/test-utils/runtime/transaction-pool/Cargo.toml b/test-utils/runtime/transaction-pool/Cargo.toml index 0dc14f4edf..e5c93ef8ad 100644 --- a/test-utils/runtime/transaction-pool/Cargo.toml +++ b/test-utils/runtime/transaction-pool/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../client" } parking_lot = "0.10.0" -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } sp-blockchain = { version = "2.0.0-rc3", path = "../../../primitives/blockchain" } sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } sp-transaction-pool = { version = "2.0.0-rc3", path = "../../../primitives/transaction-pool" } diff --git a/utils/fork-tree/Cargo.toml b/utils/fork-tree/Cargo.toml index de3f19f0a1..6c8410ab76 100644 --- a/utils/fork-tree/Cargo.toml +++ b/utils/fork-tree/Cargo.toml @@ -13,4 +13,4 @@ documentation = "https://docs.rs/fork-tree" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index d53c4c2fe0..364dc472cb 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -22,7 +22,7 @@ sp-externalities = { version = "0.8.0-rc3", path = "../../../primitives/external sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } sp-state-machine = { version = "0.8.0-rc3", path = "../../../primitives/state-machine" } structopt = "0.3.8" -codec = { version = "1.3.0", package = "parity-scale-codec" } +codec = { version = "1.3.1", package = "parity-scale-codec" } [features] default = ["db"] diff --git a/utils/frame/rpc/support/Cargo.toml b/utils/frame/rpc/support/Cargo.toml index 092c2c402d..d7e4259635 100644 --- a/utils/frame/rpc/support/Cargo.toml +++ b/utils/frame/rpc/support/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] futures = { version = "0.3.0", features = ["compat"] } jsonrpc-client-transports = { version = "14.2.0", default-features = false, features = ["http"] } jsonrpc-core = "14.2.0" -codec = { package = "parity-scale-codec", version = "1" } +codec = { package = "parity-scale-codec", version = "1.3.1" } serde = "1" frame-support = { version = "2.0.0-rc3", path = "../../../../frame/support" } sp-storage = { version = "2.0.0-rc3", path = "../../../../primitives/storage" } diff --git a/utils/frame/rpc/system/Cargo.toml b/utils/frame/rpc/system/Cargo.toml index 21cd00ebd4..a03a08b3ff 100644 --- 
a/utils/frame/rpc/system/Cargo.toml +++ b/utils/frame/rpc/system/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sc-client-api = { version = "2.0.0-rc3", path = "../../../../client/api" } -codec = { package = "parity-scale-codec", version = "1.3.0" } +codec = { package = "parity-scale-codec", version = "1.3.1" } futures = { version = "0.3.4", features = ["compat"] } jsonrpc-core = "14.2.0" jsonrpc-core-client = "14.2.0" -- GitLab From 80323210d378bc39c4154946055f52b005d6a873 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 19 Jun 2020 23:14:14 +0200 Subject: [PATCH 041/144] Fix Babe secondary plain slots claiming (#6451) We need to check that the public key of an authority exists in our keystore before we can successfully claim a plain secondary slot. --- client/consensus/babe/src/authorship.rs | 42 ++++++++++++++++++++++++- client/keystore/src/lib.rs | 4 +-- 2 files changed, 43 insertions(+), 3 deletions(-) diff --git a/client/consensus/babe/src/authorship.rs b/client/consensus/babe/src/authorship.rs index dfca491eaa..682e04e380 100644 --- a/client/consensus/babe/src/authorship.rs +++ b/client/consensus/babe/src/authorship.rs @@ -169,11 +169,13 @@ fn claim_secondary_slot( } else { None } - } else { + } else if keystore.read().has_keys(&[(authority_id.to_raw_vec(), AuthorityId::ID)]) { Some(PreDigest::SecondaryPlain(SecondaryPlainPreDigest { slot_number, authority_index: *authority_index as u32, })) + } else { + None }; if let Some(pre_digest) = pre_digest { @@ -283,3 +285,41 @@ fn claim_primary_slot( None } + +#[cfg(test)] +mod tests { + use super::*; + use sp_core::{sr25519::Pair, crypto::Pair as _}; + use sp_consensus_babe::{AuthorityId, BabeEpochConfiguration, AllowedSlots}; + + #[test] + fn claim_secondary_plain_slot_works() { + let keystore = sc_keystore::Store::new_in_memory(); + let valid_public_key = dbg!(keystore.write().sr25519_generate_new( + AuthorityId::ID, + Some(sp_core::crypto::DEV_PHRASE), + ).unwrap()); + + let authorities = vec![ + (AuthorityId::from(Pair::generate().0.public()), 5), + (AuthorityId::from(Pair::generate().0.public()), 7), + ]; + + let mut epoch = Epoch { + epoch_index: 10, + start_slot: 0, + duration: 20, + authorities: authorities.clone(), + randomness: Default::default(), + config: BabeEpochConfiguration { + c: (3, 10), + allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, + }, + }; + + assert!(claim_slot(10, &epoch, &keystore).is_none()); + + epoch.authorities.push((valid_public_key.clone().into(), 10)); + assert_eq!(claim_slot(10, &epoch, &keystore).unwrap().1, valid_public_key.into()); + } +} diff --git a/client/keystore/src/lib.rs b/client/keystore/src/lib.rs index 5be4d6d12c..aed60ab0cf 100644 --- a/client/keystore/src/lib.rs +++ b/client/keystore/src/lib.rs @@ -272,7 +272,7 @@ impl Store { fn raw_public_keys(&self, id: KeyTypeId) -> Result>> { let mut public_keys: Vec> = self.additional.keys() .into_iter() - .filter_map(|k| if k.0 == id { Some(k.1.clone()) } else { None }) + .filter_map(|k| if k.0 == id { Some(k.1.clone()) } else { None }) .collect(); if let Some(path) = &self.path { @@ -365,7 +365,7 @@ impl BareCryptoStore for Store { .map(|k| sr25519::Public::from_slice(k.as_slice())) .collect() }) - .unwrap_or_default() + .unwrap_or_default() } fn sr25519_generate_new( -- GitLab From 1fae45f63aaaac4d8bd74605387bcdf14d150ebb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sat, 20 Jun 2020 11:49:18 +0200 Subject: [PATCH 042/144] sp-npos-elections should 
not depend on itself (#6444) This removes the `dev-dependency` onto `sp-npos-elections` from itself. A crate should not depend on itself directly, especially not to make any macros work. --- Cargo.lock | 1 - primitives/npos-elections/Cargo.toml | 1 - .../npos-elections/compact/src/assignment.rs | 4 ++-- primitives/npos-elections/compact/src/lib.rs | 22 ++++++++++--------- primitives/npos-elections/src/tests.rs | 10 ++++----- 5 files changed, 19 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8a620ab5c2..7597682395 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7634,7 +7634,6 @@ dependencies = [ "rand 0.7.3", "serde", "sp-arithmetic", - "sp-npos-elections", "sp-npos-elections-compact", "sp-runtime", "sp-std", diff --git a/primitives/npos-elections/Cargo.toml b/primitives/npos-elections/Cargo.toml index 3e425f2adc..7982c8ce4d 100644 --- a/primitives/npos-elections/Cargo.toml +++ b/primitives/npos-elections/Cargo.toml @@ -21,7 +21,6 @@ sp-arithmetic = { version = "2.0.0-rc3", default-features = false, path = "../ar [dev-dependencies] substrate-test-utils = { version = "2.0.0-rc3", path = "../../test-utils" } rand = "0.7.3" -sp-npos-elections = { version = "2.0.0-rc3", path = "." } sp-runtime = { version = "2.0.0-rc3", path = "../../primitives/runtime" } [features] diff --git a/primitives/npos-elections/compact/src/assignment.rs b/primitives/npos-elections/compact/src/assignment.rs index fb3d4330b0..96c68ece92 100644 --- a/primitives/npos-elections/compact/src/assignment.rs +++ b/primitives/npos-elections/compact/src/assignment.rs @@ -18,8 +18,8 @@ //! Code generation for the ratio assignment type. use crate::field_name_for; -use proc_macro2::{TokenStream as TokenStream2}; -use syn::{GenericArgument}; +use proc_macro2::TokenStream as TokenStream2; +use syn::GenericArgument; use quote::quote; fn from_impl(count: usize) -> TokenStream2 { diff --git a/primitives/npos-elections/compact/src/lib.rs b/primitives/npos-elections/compact/src/lib.rs index 022782a7dd..1b88ff6531 100644 --- a/primitives/npos-elections/compact/src/lib.rs +++ b/primitives/npos-elections/compact/src/lib.rs @@ -224,17 +224,19 @@ fn struct_def( } fn imports() -> Result { - let sp_phragmen_imports = match crate_name("sp-npos-elections") { - Ok(sp_npos_elections) => { - let ident = syn::Ident::new(&sp_npos_elections, Span::call_site()); - quote!( extern crate #ident as _phragmen; ) + if std::env::var("CARGO_PKG_NAME").unwrap() == "sp-npos-elections" { + Ok(quote! { + use crate as _phragmen; + }) + } else { + match crate_name("sp-npos-elections") { + Ok(sp_npos_elections) => { + let ident = syn::Ident::new(&sp_npos_elections, Span::call_site()); + Ok(quote!( extern crate #ident as _phragmen; )) + }, + Err(e) => Err(syn::Error::new(Span::call_site(), &e)), } - Err(e) => return Err(syn::Error::new(Span::call_site(), &e)), - }; - - Ok(quote!( - #sp_phragmen_imports - )) + } } struct CompactSolutionDef { diff --git a/primitives/npos-elections/src/tests.rs b/primitives/npos-elections/src/tests.rs index 47d619339b..08923c6949 100644 --- a/primitives/npos-elections/src/tests.rs +++ b/primitives/npos-elections/src/tests.rs @@ -17,8 +17,6 @@ //! Tests for npos-elections. 
-#![cfg(test)] - use crate::mock::*; use crate::{ seq_phragmen, balance_solution, build_support_map, is_score_better, helpers::*, @@ -772,10 +770,12 @@ fn score_comparison_large_value() { mod compact { use codec::{Decode, Encode}; - use crate::{generate_compact_solution_type, VoteWeight}; - use super::{AccountId}; + use super::AccountId; // these need to come from the same dev-dependency `sp-npos-elections`, not from the crate. - use sp_npos_elections::{Assignment, StakedAssignment, Error as PhragmenError, ExtendedBalance}; + use crate::{ + generate_compact_solution_type, VoteWeight, Assignment, StakedAssignment, + Error as PhragmenError, ExtendedBalance, + }; use sp_std::{convert::{TryInto, TryFrom}, fmt::Debug}; use sp_arithmetic::Percent; -- GitLab From 6c16d15302d30c6d368882568a2e9b536da4a5b0 Mon Sep 17 00:00:00 2001 From: s3krit Date: Sat, 20 Jun 2020 13:31:12 +0200 Subject: [PATCH 043/144] Don't autolabel insubstantial PRs 'pleasereview' (#6447) --- .github/workflows/auto-label-prs.yml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/auto-label-prs.yml b/.github/workflows/auto-label-prs.yml index cfa4f302fe..f0b8e9b343 100644 --- a/.github/workflows/auto-label-prs.yml +++ b/.github/workflows/auto-label-prs.yml @@ -1,4 +1,4 @@ -name: Label new PRs +name: Label PRs on: pull_request: types: [opened,ready_for_review] @@ -7,14 +7,15 @@ jobs: label-new-prs: runs-on: ubuntu-latest steps: - - name: Label new drafts + - name: Label drafts uses: andymckay/labeler@master if: github.event.pull_request.draft == true with: add-labels: 'A3-inprogress' - - name: Label new PRs + remove-labels: 'A0-pleasereview' + - name: Label PRs uses: andymckay/labeler@master - if: github.event.pull_request.draft == false + if: github.event.pull_request.draft == false && ! contains(github.event.pull_request.labels.*.name, 'A2-insubstantial') with: add-labels: 'A0-pleasereview' remove-labels: 'A3-inprogress' -- GitLab From ef2a6c1c047f7afc1587006ddadcf16edf29d402 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Sun, 21 Jun 2020 13:34:19 +0300 Subject: [PATCH 044/144] change everything to transaction (#6440) --- client/network/src/protocol.rs | 121 +++++++++++++++++---------------- client/network/src/service.rs | 18 ++--- client/service/src/builder.rs | 8 +-- 3 files changed, 74 insertions(+), 73 deletions(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 9bec906787..90076552a7 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -72,15 +72,15 @@ pub use generic_proto::LegacyConnectionKillError; const REQUEST_TIMEOUT_SEC: u64 = 40; /// Interval at which we perform time based maintenance const TICK_TIMEOUT: time::Duration = time::Duration::from_millis(1100); -/// Interval at which we propagate extrinsics; +/// Interval at which we propagate transactions; const PROPAGATE_TIMEOUT: time::Duration = time::Duration::from_millis(2900); /// Maximim number of known block hashes to keep for a peer. const MAX_KNOWN_BLOCKS: usize = 1024; // ~32kb per peer + LruHashSet overhead -/// Maximim number of known extrinsic hashes to keep for a peer. +/// Maximim number of known transaction hashes to keep for a peer. /// /// This should be approx. 2 blocks full of transactions for the network to function properly. -const MAX_KNOWN_EXTRINSICS: usize = 10240; // ~300kb per peer + overhead. +const MAX_KNOWN_TRANSACTIONS: usize = 10240; // ~300kb per peer + overhead. 
/// Maximim number of transaction validation request we keep at any moment. const MAX_PENDING_TRANSACTIONS: usize = 8192; @@ -106,25 +106,25 @@ mod rep { pub const TIMEOUT: Rep = Rep::new(-(1 << 10), "Request timeout"); /// Reputation change when we are a light client and a peer is behind us. pub const PEER_BEHIND_US_LIGHT: Rep = Rep::new(-(1 << 8), "Useless for a light peer"); - /// Reputation change when a peer sends us any extrinsic. + /// Reputation change when a peer sends us any transaction. /// - /// This forces node to verify it, thus the negative value here. Once extrinsic is verified, - /// reputation change should be refunded with `ANY_EXTRINSIC_REFUND` - pub const ANY_EXTRINSIC: Rep = Rep::new(-(1 << 4), "Any extrinsic"); - /// Reputation change when a peer sends us any extrinsic that is not invalid. - pub const ANY_EXTRINSIC_REFUND: Rep = Rep::new(1 << 4, "Any extrinsic (refund)"); - /// Reputation change when a peer sends us an extrinsic that we didn't know about. - pub const GOOD_EXTRINSIC: Rep = Rep::new(1 << 7, "Good extrinsic"); - /// Reputation change when a peer sends us a bad extrinsic. - pub const BAD_EXTRINSIC: Rep = Rep::new(-(1 << 12), "Bad extrinsic"); + /// This forces node to verify it, thus the negative value here. Once transaction is verified, + /// reputation change should be refunded with `ANY_TRANSACTION_REFUND` + pub const ANY_TRANSACTION: Rep = Rep::new(-(1 << 4), "Any transaction"); + /// Reputation change when a peer sends us any transaction that is not invalid. + pub const ANY_TRANSACTION_REFUND: Rep = Rep::new(1 << 4, "Any transaction (refund)"); + /// Reputation change when a peer sends us an transaction that we didn't know about. + pub const GOOD_TRANSACTION: Rep = Rep::new(1 << 7, "Good transaction"); + /// Reputation change when a peer sends us a bad transaction. + pub const BAD_TRANSACTION: Rep = Rep::new(-(1 << 12), "Bad transaction"); /// We sent an RPC query to the given node, but it failed. pub const RPC_FAILED: Rep = Rep::new(-(1 << 12), "Remote call failed"); /// We received a message that failed to decode. pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); /// We received an unexpected response. pub const UNEXPECTED_RESPONSE: Rep = Rep::new_fatal("Unexpected response packet"); - /// We received an unexpected extrinsic packet. - pub const UNEXPECTED_EXTRINSICS: Rep = Rep::new_fatal("Unexpected extrinsics packet"); + /// We received an unexpected transaction packet. + pub const UNEXPECTED_TRANSACTIONS: Rep = Rep::new_fatal("Unexpected transactions packet"); /// We received an unexpected light node request. pub const UNEXPECTED_REQUEST: Rep = Rep::new_fatal("Unexpected block request packet"); /// Peer has different genesis. @@ -145,7 +145,7 @@ struct Metrics { fork_targets: Gauge, finality_proofs: GaugeVec, justifications: GaugeVec, - propagated_extrinsics: Counter, + propagated_transactions: Counter, } impl Metrics { @@ -191,8 +191,8 @@ impl Metrics { )?; register(g, r)? }, - propagated_extrinsics: register(Counter::new( - "sync_propagated_extrinsics", + propagated_transactions: register(Counter::new( + "sync_propagated_transactions", "Number of transactions propagated to at least one peer", )?, r)?, }) @@ -221,11 +221,11 @@ impl Future for PendingTransaction { pub struct Protocol { /// Interval at which we call `tick`. tick_timeout: Pin + Send>>, - /// Interval at which we call `propagate_extrinsics`. + /// Interval at which we call `propagate_transactions`. 
propagate_timeout: Pin + Send>>, /// Pending list of messages to return from `poll` as a priority. pending_messages: VecDeque>, - /// Pending extrinsic verification tasks. + /// Pending transactions verification tasks. pending_transactions: FuturesUnordered, config: ProtocolConfig, genesis_hash: B::Hash, @@ -280,7 +280,7 @@ struct Peer { /// Requests we are no longer interested in. obsolete_requests: HashMap, /// Holds a set of transactions known to this peer. - known_extrinsics: LruHashSet, + known_transactions: LruHashSet, /// Holds a set of blocks known to this peer. known_blocks: LruHashSet, /// Request counter, @@ -601,7 +601,7 @@ impl Protocol { return outcome; }, GenericMessage::Transactions(m) => - self.on_extrinsics(who, m), + self.on_transactions(who, m), GenericMessage::RemoteCallRequest(request) => self.on_remote_call_request(who, request), GenericMessage::RemoteCallResponse(_) => warn!(target: "sub-libp2p", "Received unexpected RemoteCallResponse"), @@ -720,8 +720,8 @@ impl Protocol { // Print some diagnostics. if let Some(peer) = self.context_data.peers.get(&who) { debug!(target: "sync", "Clogged peer {} (protocol_version: {:?}; roles: {:?}; \ - known_extrinsics: {:?}; known_blocks: {:?}; best_hash: {:?}; best_number: {:?})", - who, peer.info.protocol_version, peer.info.roles, peer.known_extrinsics, peer.known_blocks, + known_transactions: {:?}; known_blocks: {:?}; best_hash: {:?}; best_number: {:?})", + who, peer.info.protocol_version, peer.info.roles, peer.known_transactions, peer.known_blocks, peer.info.best_hash, peer.info.best_number); } else { debug!(target: "sync", "Peer clogged before being properly connected"); @@ -1048,7 +1048,7 @@ impl Protocol { let peer = Peer { info, block_request: None, - known_extrinsics: LruHashSet::new(NonZeroUsize::new(MAX_KNOWN_EXTRINSICS) + known_transactions: LruHashSet::new(NonZeroUsize::new(MAX_KNOWN_TRANSACTIONS) .expect("Constant is nonzero")), known_blocks: LruHashSet::new(NonZeroUsize::new(MAX_KNOWN_BLOCKS) .expect("Constant is nonzero")), @@ -1137,28 +1137,29 @@ impl Protocol { .map(|(peer_id, peer)| (peer_id, peer.info.roles)) } - /// Called when peer sends us new extrinsics - fn on_extrinsics( + /// Called when peer sends us new transactions + fn on_transactions( &mut self, who: PeerId, - extrinsics: message::Transactions + transactions: message::Transactions ) { - // sending extrinsic to light node is considered a bad behavior + // sending transaction to light node is considered a bad behavior if !self.config.roles.is_full() { - trace!(target: "sync", "Peer {} is trying to send extrinsic to the light node", who); + trace!(target: "sync", "Peer {} is trying to send transactions to the light node", who); self.behaviour.disconnect_peer(&who); - self.peerset_handle.report_peer(who, rep::UNEXPECTED_EXTRINSICS); + self.peerset_handle.report_peer(who, rep::UNEXPECTED_TRANSACTIONS); return; } - // Accept extrinsics only when fully synced + // Accept transactions only when fully synced if self.sync.status().state != SyncState::Idle { - trace!(target: "sync", "{} Ignoring extrinsics while syncing", who); + trace!(target: "sync", "{} Ignoring transactions while syncing", who); return; } - trace!(target: "sync", "Received {} extrinsics from {}", extrinsics.len(), who); + + trace!(target: "sync", "Received {} transactions from {}", transactions.len(), who); if let Some(ref mut peer) = self.context_data.peers.get_mut(&who) { - for t in extrinsics { + for t in transactions { if self.pending_transactions.len() > MAX_PENDING_TRANSACTIONS { 
debug!( target: "sync", @@ -1169,9 +1170,9 @@ impl Protocol { } let hash = self.transaction_pool.hash_of(&t); - peer.known_extrinsics.insert(hash); + peer.known_transactions.insert(hash); - self.peerset_handle.report_peer(who.clone(), rep::ANY_EXTRINSIC); + self.peerset_handle.report_peer(who.clone(), rep::ANY_TRANSACTION); self.pending_transactions.push(PendingTransaction { peer_id: who.clone(), @@ -1181,45 +1182,45 @@ impl Protocol { } } - fn on_handle_extrinsic_import(&mut self, who: PeerId, import: TransactionImport) { + fn on_handle_transaction_import(&mut self, who: PeerId, import: TransactionImport) { match import { - TransactionImport::KnownGood => self.peerset_handle.report_peer(who, rep::ANY_EXTRINSIC_REFUND), - TransactionImport::NewGood => self.peerset_handle.report_peer(who, rep::GOOD_EXTRINSIC), - TransactionImport::Bad => self.peerset_handle.report_peer(who, rep::BAD_EXTRINSIC), + TransactionImport::KnownGood => self.peerset_handle.report_peer(who, rep::ANY_TRANSACTION_REFUND), + TransactionImport::NewGood => self.peerset_handle.report_peer(who, rep::GOOD_TRANSACTION), + TransactionImport::Bad => self.peerset_handle.report_peer(who, rep::BAD_TRANSACTION), TransactionImport::None => {}, } } - /// Propagate one extrinsic. - pub fn propagate_extrinsic( + /// Propagate one transaction. + pub fn propagate_transaction( &mut self, hash: &H, ) { - debug!(target: "sync", "Propagating extrinsic [{:?}]", hash); + debug!(target: "sync", "Propagating transaction [{:?}]", hash); // Accept transactions only when fully synced if self.sync.status().state != SyncState::Idle { return; } - if let Some(extrinsic) = self.transaction_pool.transaction(hash) { - let propagated_to = self.do_propagate_extrinsics(&[(hash.clone(), extrinsic)]); + if let Some(transaction) = self.transaction_pool.transaction(hash) { + let propagated_to = self.do_propagate_transactions(&[(hash.clone(), transaction)]); self.transaction_pool.on_broadcasted(propagated_to); } } - fn do_propagate_extrinsics( + fn do_propagate_transactions( &mut self, - extrinsics: &[(H, B::Extrinsic)], + transactions: &[(H, B::Extrinsic)], ) -> HashMap> { let mut propagated_to = HashMap::new(); for (who, peer) in self.context_data.peers.iter_mut() { - // never send extrinsics to the light node + // never send transactions to the light node if !peer.info.roles.is_full() { continue; } - let (hashes, to_send): (Vec<_>, Vec<_>) = extrinsics + let (hashes, to_send): (Vec<_>, Vec<_>) = transactions .iter() - .filter(|&(ref hash, _)| peer.known_extrinsics.insert(hash.clone())) + .filter(|&(ref hash, _)| peer.known_transactions.insert(hash.clone())) .cloned() .unzip(); @@ -1244,22 +1245,22 @@ impl Protocol { if propagated_to.len() > 0 { if let Some(ref metrics) = self.metrics { - metrics.propagated_extrinsics.inc(); + metrics.propagated_transactions.inc(); } } propagated_to } - /// Call when we must propagate ready extrinsics to peers. - pub fn propagate_extrinsics(&mut self) { - debug!(target: "sync", "Propagating extrinsics"); + /// Call when we must propagate ready transactions to peers. 
+ pub fn propagate_transactions(&mut self) { + debug!(target: "sync", "Propagating transactions"); // Accept transactions only when fully synced if self.sync.status().state != SyncState::Idle { return; } - let extrinsics = self.transaction_pool.transactions(); - let propagated_to = self.do_propagate_extrinsics(&extrinsics); + let transactions = self.transaction_pool.transactions(); + let propagated_to = self.do_propagate_transactions(&transactions); self.transaction_pool.on_broadcasted(propagated_to); } @@ -1983,7 +1984,7 @@ impl NetworkBehaviour for Protocol { } while let Poll::Ready(Some(())) = self.propagate_timeout.poll_next_unpin(cx) { - self.propagate_extrinsics(); + self.propagate_transactions(); } for (id, mut r) in self.sync.block_requests() { @@ -2011,7 +2012,7 @@ impl NetworkBehaviour for Protocol { self.pending_messages.push_back(event); } if let Poll::Ready(Some((peer_id, result))) = self.pending_transactions.poll_next_unpin(cx) { - self.on_handle_extrinsic_import(peer_id, result); + self.on_handle_transaction_import(peer_id, result); } if let Some(message) = self.pending_messages.pop_front() { return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)); @@ -2050,7 +2051,7 @@ impl NetworkBehaviour for Protocol { } Some(Fallback::Transactions) => { if let Ok(m) = message::Transactions::decode(&mut message.as_ref()) { - self.on_extrinsics(peer_id, m); + self.on_transactions(peer_id, m); } else { warn!(target: "sub-libp2p", "Failed to decode transactions list"); } diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 90fffc8a37..93560a6c0b 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -587,15 +587,15 @@ impl NetworkService { /// All transactions will be fetched from the `TransactionPool` that was passed at /// initialization as part of the configuration and propagated to peers. pub fn trigger_repropagate(&self) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateExtrinsics); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateTransactions); } /// You must call when new transaction is imported by the transaction pool. /// /// This transaction will be fetched from the `TransactionPool` that was passed at /// initialization as part of the configuration and propagated to peers. - pub fn propagate_extrinsic(&self, hash: H) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateExtrinsic(hash)); + pub fn propagate_transaction(&self, hash: H) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateTransaction(hash)); } /// Make sure an important block is propagated to peers. @@ -798,8 +798,8 @@ impl NetworkStateInfo for NetworkService /// /// Each entry corresponds to a method of `NetworkService`. 
enum ServiceToWorkerMsg { - PropagateExtrinsic(H), - PropagateExtrinsics, + PropagateTransaction(H), + PropagateTransactions, RequestJustification(B::Hash, NumberFor), AnnounceBlock(B::Hash, Vec), GetValue(record::Key), @@ -1119,10 +1119,10 @@ impl Future for NetworkWorker { this.network_service.user_protocol_mut().announce_block(hash, data), ServiceToWorkerMsg::RequestJustification(hash, number) => this.network_service.user_protocol_mut().request_justification(&hash, number), - ServiceToWorkerMsg::PropagateExtrinsic(hash) => - this.network_service.user_protocol_mut().propagate_extrinsic(&hash), - ServiceToWorkerMsg::PropagateExtrinsics => - this.network_service.user_protocol_mut().propagate_extrinsics(), + ServiceToWorkerMsg::PropagateTransaction(hash) => + this.network_service.user_protocol_mut().propagate_transaction(&hash), + ServiceToWorkerMsg::PropagateTransactions => + this.network_service.user_protocol_mut().propagate_transactions(), ServiceToWorkerMsg::GetValue(key) => this.network_service.get_value(&key), ServiceToWorkerMsg::PutValue(key, value) => diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 23d736d98b..f492c5d494 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -1067,7 +1067,7 @@ ServiceBuilder< spawn_handle.spawn( "on-transaction-imported", - extrinsic_notifications(transaction_pool.clone(), network.clone()), + transaction_notifications(transaction_pool.clone(), network.clone()), ); // Prometheus metrics. @@ -1245,7 +1245,7 @@ ServiceBuilder< } } -async fn extrinsic_notifications( +async fn transaction_notifications( transaction_pool: Arc, network: Arc::Hash>> ) @@ -1253,10 +1253,10 @@ async fn extrinsic_notifications( TBl: BlockT, TExPool: MaintainedTransactionPool::Hash>, { - // extrinsic notifications + // transaction notifications transaction_pool.import_notification_stream() .for_each(move |hash| { - network.propagate_extrinsic(hash); + network.propagate_transaction(hash); let status = transaction_pool.status(); telemetry!(SUBSTRATE_INFO; "txpool.import"; "ready" => status.ready, -- GitLab From 41970e7ea56d59d45861d65490a329fe216df74e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Sun, 21 Jun 2020 11:34:38 +0100 Subject: [PATCH 045/144] node: spawn block authoring and grandpa voter as blocking tasks (#6446) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * service: add spawner for essential tasks * node: spawn block authoring and grandpa voter as blocking tasks * Apply suggestions from code review Co-authored-by: Bastian Köcher --- bin/node-template/node/src/service.rs | 4 +- bin/node/cli/src/service.rs | 6 +-- client/service/src/lib.rs | 17 +++++++- client/service/src/task_manager.rs | 59 +++++++++++++++++++++++++++ 4 files changed, 79 insertions(+), 7 deletions(-) diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index e8578ab5b5..e330c17b24 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -142,7 +142,7 @@ pub fn new_full(config: Configuration) -> Result Result> + Send + Unpin + S /// The task name is a `&'static str` as opposed to a `String`. The reason for that is that /// in order to avoid memory consumption issues with the Prometheus metrics, the set of /// possible task names has to be bounded. 
+ #[deprecated(note = "Use `spawn_task_handle().spawn() instead.")] fn spawn_task(&self, name: &'static str, task: impl Future + Send + 'static); /// Spawns a task in the background that runs the future passed as /// parameter. The given task is considered essential, i.e. if it errors we /// trigger a service exit. + #[deprecated(note = "Use `spawn_essential_task_handle().spawn() instead.")] fn spawn_essential_task(&self, name: &'static str, task: impl Future + Send + 'static); + /// Returns a handle for spawning essential tasks. Any task spawned through this handle is + /// considered essential, i.e. if it errors we trigger a service exit. + fn spawn_essential_task_handle(&self) -> SpawnEssentialTaskHandle; + /// Returns a handle for spawning tasks. fn spawn_task_handle(&self) -> SpawnTaskHandle; @@ -269,13 +275,20 @@ where let _ = essential_failed.send(()); }); - let _ = self.spawn_task(name, essential_task); + let _ = self.spawn_task_handle().spawn(name, essential_task); } fn spawn_task_handle(&self) -> SpawnTaskHandle { self.task_manager.spawn_handle() } + fn spawn_essential_task_handle(&self) -> SpawnEssentialTaskHandle { + SpawnEssentialTaskHandle::new( + self.essential_failed_tx.clone(), + self.task_manager.spawn_handle(), + ) + } + fn rpc_query(&self, mem: &RpcSession, request: &str) -> Pin> + Send>> { Box::pin( self.rpc_handlers.handle_request(request, mem.metadata.clone()) diff --git a/client/service/src/task_manager.rs b/client/service/src/task_manager.rs index 9cd92538e3..5a400f70df 100644 --- a/client/service/src/task_manager.rs +++ b/client/service/src/task_manager.rs @@ -28,6 +28,7 @@ use prometheus_endpoint::{ CounterVec, HistogramOpts, HistogramVec, Opts, Registry, U64 }; use sc_client_api::CloneableSpawn; +use sp_utils::mpsc::TracingUnboundedSender; use crate::config::TaskType; mod prometheus_future; @@ -149,6 +150,64 @@ impl futures01::future::Executor for SpawnTaskHandle { } } +/// A wrapper over `SpawnTaskHandle` that will notify a receiver whenever any +/// task spawned through it fails. The service should be on the receiver side +/// and will shut itself down whenever it receives any message, i.e. an +/// essential task has failed. +pub struct SpawnEssentialTaskHandle { + essential_failed_tx: TracingUnboundedSender<()>, + inner: SpawnTaskHandle, +} + +impl SpawnEssentialTaskHandle { + /// Creates a new `SpawnEssentialTaskHandle`. + pub fn new( + essential_failed_tx: TracingUnboundedSender<()>, + spawn_task_handle: SpawnTaskHandle, + ) -> SpawnEssentialTaskHandle { + SpawnEssentialTaskHandle { + essential_failed_tx, + inner: spawn_task_handle, + } + } + + /// Spawns the given task with the given name. + /// + /// See also [`SpawnTaskHandle::spawn`]. + pub fn spawn(&self, name: &'static str, task: impl Future + Send + 'static) { + self.spawn_inner(name, task, TaskType::Async) + } + + /// Spawns the blocking task with the given name. + /// + /// See also [`SpawnTaskHandle::spawn_blocking`]. + pub fn spawn_blocking( + &self, + name: &'static str, + task: impl Future + Send + 'static, + ) { + self.spawn_inner(name, task, TaskType::Blocking) + } + + fn spawn_inner( + &self, + name: &'static str, + task: impl Future + Send + 'static, + task_type: TaskType, + ) { + use futures::sink::SinkExt; + let mut essential_failed = self.essential_failed_tx.clone(); + let essential_task = std::panic::AssertUnwindSafe(task) + .catch_unwind() + .map(move |_| { + log::error!("Essential task `{}` failed. 
Shutting down service.", name); + let _ = essential_failed.send(()); + }); + + let _ = self.inner.spawn_inner(name, essential_task, task_type); + } +} + /// Helper struct to manage background/async tasks in Service. pub struct TaskManager { /// A future that resolves when the service has exited, this is useful to -- GitLab From a5bcfedc9e6764ad9d186027ff0c5b3d639032f8 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sun, 21 Jun 2020 12:34:53 +0200 Subject: [PATCH 046/144] pallet-atomic-swap: generialized swap action (#6421) * pallet-atomic-swap: generialized swap action * Bump spec_version * Fix weight calculation * Remove unnecessary type aliases --- bin/node/runtime/src/lib.rs | 2 +- frame/atomic-swap/src/lib.rs | 145 +++++++++++++++++++++++---------- frame/atomic-swap/src/tests.rs | 10 ++- 3 files changed, 109 insertions(+), 48 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index cf3d262298..cf1b0de8f7 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -97,7 +97,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 253, + spec_version: 254, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/frame/atomic-swap/src/lib.rs b/frame/atomic-swap/src/lib.rs index 8686138c2b..56aa67310f 100644 --- a/frame/atomic-swap/src/lib.rs +++ b/frame/atomic-swap/src/lib.rs @@ -42,10 +42,10 @@ mod tests; -use sp_std::prelude::*; +use sp_std::{prelude::*, marker::PhantomData, ops::{Deref, DerefMut}}; use sp_io::hashing::blake2_256; use frame_support::{ - decl_module, decl_storage, decl_event, decl_error, ensure, + Parameter, decl_module, decl_storage, decl_event, decl_error, ensure, traits::{Get, Currency, ReservableCurrency, BalanceStatus}, weights::Weight, dispatch::DispatchResult, @@ -55,37 +55,98 @@ use codec::{Encode, Decode}; use sp_runtime::RuntimeDebug; /// Pending atomic swap operation. -#[derive(Clone, RuntimeDebug, Eq, PartialEq, Encode, Decode)] -pub struct PendingSwap { +#[derive(Clone, Eq, PartialEq, RuntimeDebug, Encode, Decode)] +pub struct PendingSwap { /// Source of the swap. - pub source: AccountId, - /// Balance value of the swap. - pub balance: Balance, + pub source: T::AccountId, + /// Action of this swap. + pub action: T::SwapAction, /// End block of the lock. - pub end_block: BlockNumber, + pub end_block: T::BlockNumber, } -/// Balance type from the pallet's point of view. -pub type BalanceFor = <::Currency as Currency<::AccountId>>::Balance; +/// Hashed proof type. +pub type HashedProof = [u8; 32]; -/// AccountId type from the pallet's point of view. -pub type AccountIdFor = ::AccountId; +/// Definition of a pending atomic swap action. It contains the following three phrases: +/// +/// - **Reserve**: reserve the resources needed for a swap. This is to make sure that **Claim** +/// succeeds with best efforts. +/// - **Claim**: claim any resources reserved in the first phrase. +/// - **Cancel**: cancel any resources reserved in the first phrase. +pub trait SwapAction { + /// Reserve the resources needed for the swap, from the given `source`. The reservation is + /// allowed to fail. If that is the case, the the full swap creation operation is cancelled. + fn reserve(&self, source: &T::AccountId) -> DispatchResult; + /// Claim the reserved resources, with `source` and `target`. Returns whether the claim + /// succeeds. 
+ fn claim(&self, source: &T::AccountId, target: &T::AccountId) -> bool; + /// Weight for executing the operation. + fn weight(&self) -> Weight; + /// Cancel the resources reserved in `source`. + fn cancel(&self, source: &T::AccountId); +} -/// BlockNumber type from the pallet's point of view. -pub type BlockNumberFor = ::BlockNumber; +/// A swap action that only allows transferring balances. +#[derive(Clone, RuntimeDebug, Eq, PartialEq, Encode, Decode)] +pub struct BalanceSwapAction> { + value: ::AccountId>>::Balance, + _marker: PhantomData, +} -/// PendingSwap type from the pallet's point of view. -pub type PendingSwapFor = PendingSwap, BalanceFor, BlockNumberFor>; +impl BalanceSwapAction where + C: ReservableCurrency, +{ + /// Create a new swap action value of balance. + pub fn new(value: ::AccountId>>::Balance) -> Self { + Self { value, _marker: PhantomData } + } +} -/// Hashed proof type. -pub type HashedProof = [u8; 32]; +impl Deref for BalanceSwapAction where + C: ReservableCurrency, +{ + type Target = ::AccountId>>::Balance; + + fn deref(&self) -> &Self::Target { + &self.value + } +} + +impl DerefMut for BalanceSwapAction where + C: ReservableCurrency, +{ + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.value + } +} + +impl SwapAction for BalanceSwapAction where + C: ReservableCurrency, +{ + fn reserve(&self, source: &T::AccountId) -> DispatchResult { + C::reserve(&source, self.value) + } + + fn claim(&self, source: &T::AccountId, target: &T::AccountId) -> bool { + C::repatriate_reserved(source, target, self.value, BalanceStatus::Free).is_ok() + } + + fn weight(&self) -> Weight { + T::DbWeight::get().reads_writes(1, 1) + } + + fn cancel(&self, source: &T::AccountId) { + C::unreserve(source, self.value); + } +} /// Atomic swap's pallet configuration trait. pub trait Trait: frame_system::Trait { /// The overarching event type. type Event: From> + Into<::Event>; - /// The currency mechanism. - type Currency: ReservableCurrency; + /// Swap action. + type SwapAction: SwapAction + Parameter; /// Limit of proof size. /// /// Atomic swap is only atomic if once the proof is revealed, both parties can submit the proofs @@ -103,7 +164,7 @@ decl_storage! { trait Store for Module as AtomicSwap { pub PendingSwaps: double_map hasher(twox_64_concat) T::AccountId, hasher(blake2_128_concat) HashedProof - => Option>; + => Option>; } } @@ -121,6 +182,8 @@ decl_error! { AlreadyClaimed, /// Swap does not exist. NotExist, + /// Claim action mismatch. + ClaimActionMismatch, /// Duration has not yet passed for the swap to be cancelled. DurationNotPassed, } @@ -129,14 +192,13 @@ decl_error! { decl_event!( /// Event of atomic swap pallet. pub enum Event where - Balance = BalanceFor, - AccountId = AccountIdFor, - PendingSwap = PendingSwapFor, + AccountId = ::AccountId, + PendingSwap = PendingSwap, { /// Swap created. NewSwap(AccountId, HashedProof, PendingSwap), /// Swap claimed. The last parameter indicates whether the execution succeeds. - SwapClaimed(AccountId, HashedProof, Balance, bool), + SwapClaimed(AccountId, HashedProof, bool), /// Swap cancelled. SwapCancelled(AccountId, HashedProof), } @@ -164,10 +226,10 @@ decl_module! { #[weight = T::DbWeight::get().reads_writes(1, 1).saturating_add(40_000_000)] fn create_swap( origin, - target: AccountIdFor, + target: T::AccountId, hashed_proof: HashedProof, - balance: BalanceFor, - duration: BlockNumberFor, + action: T::SwapAction, + duration: T::BlockNumber, ) { let source = ensure_signed(origin)?; ensure!( @@ -175,11 +237,11 @@ decl_module! 
{ Error::::AlreadyExist ); - T::Currency::reserve(&source, balance)?; + action.reserve(&source)?; let swap = PendingSwap { source, - balance, + action, end_block: frame_system::Module::::block_number() + duration, }; PendingSwaps::::insert(target.clone(), hashed_proof.clone(), swap.clone()); @@ -194,13 +256,17 @@ decl_module! { /// The dispatch origin for this call must be _Signed_. /// /// - `proof`: Revealed proof of the claim. - #[weight = T::DbWeight::get().reads_writes(2, 2) + /// - `action`: Action defined in the swap, it must match the entry in blockchain. Otherwise + /// the operation fails. This is used for weight calculation. + #[weight = T::DbWeight::get().reads_writes(1, 1) .saturating_add(40_000_000) .saturating_add((proof.len() as Weight).saturating_mul(100)) + .saturating_add(action.weight()) ] fn claim_swap( origin, proof: Vec, + action: T::SwapAction, ) -> DispatchResult { ensure!( proof.len() <= T::ProofLimit::get() as usize, @@ -212,18 +278,14 @@ decl_module! { let swap = PendingSwaps::::get(&target, hashed_proof) .ok_or(Error::::InvalidProof)?; + ensure!(swap.action == action, Error::::ClaimActionMismatch); - let succeeded = T::Currency::repatriate_reserved( - &swap.source, - &target, - swap.balance, - BalanceStatus::Free, - ).is_ok(); + let succeeded = swap.action.claim(&swap.source, &target); PendingSwaps::::remove(target.clone(), hashed_proof.clone()); Self::deposit_event( - RawEvent::SwapClaimed(target, hashed_proof, swap.balance, succeeded) + RawEvent::SwapClaimed(target, hashed_proof, succeeded) ); Ok(()) @@ -238,7 +300,7 @@ decl_module! { #[weight = T::DbWeight::get().reads_writes(1, 1).saturating_add(40_000_000)] fn cancel_swap( origin, - target: AccountIdFor, + target: T::AccountId, hashed_proof: HashedProof, ) { let source = ensure_signed(origin)?; @@ -254,10 +316,7 @@ decl_module! { Error::::DurationNotPassed, ); - T::Currency::unreserve( - &swap.source, - swap.balance, - ); + swap.action.cancel(&swap.source); PendingSwaps::::remove(&target, hashed_proof.clone()); Self::deposit_event( diff --git a/frame/atomic-swap/src/tests.rs b/frame/atomic-swap/src/tests.rs index 72db841de1..d04ffab205 100644 --- a/frame/atomic-swap/src/tests.rs +++ b/frame/atomic-swap/src/tests.rs @@ -21,7 +21,7 @@ impl_outer_origin! { // For testing the pallet, we construct most of a mock runtime. This means // first constructing a configuration type (`Test`) which `impl`s each of the // configuration traits of pallets we want to use. -#[derive(Clone, Eq, PartialEq)] +#[derive(Clone, Eq, Debug, PartialEq)] pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; @@ -71,7 +71,7 @@ parameter_types! 
{ } impl Trait for Test { type Event = (); - type Currency = Balances; + type SwapAction = BalanceSwapAction; type ProofLimit = ProofLimit; } type System = frame_system::Module; @@ -109,7 +109,7 @@ fn two_party_successful_swap() { Origin::signed(A), B, hashed_proof.clone(), - 50, + BalanceSwapAction::new(50), 1000, ).unwrap(); @@ -123,7 +123,7 @@ fn two_party_successful_swap() { Origin::signed(B), A, hashed_proof.clone(), - 75, + BalanceSwapAction::new(75), 1000, ).unwrap(); @@ -136,6 +136,7 @@ fn two_party_successful_swap() { AtomicSwap::claim_swap( Origin::signed(A), proof.to_vec(), + BalanceSwapAction::new(75), ).unwrap(); assert_eq!(Balances::free_balance(A), 100 + 75); @@ -147,6 +148,7 @@ fn two_party_successful_swap() { AtomicSwap::claim_swap( Origin::signed(B), proof.to_vec(), + BalanceSwapAction::new(50), ).unwrap(); assert_eq!(Balances::free_balance(A), 100 - 50); -- GitLab From 6f86bdd897ce06c0169dcbd2454dc7587cb8461f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sun, 21 Jun 2020 12:39:15 +0200 Subject: [PATCH 047/144] Fix issues with `Operational` transactions validity and prioritization. (#6435) * Fix weight limit for operational transactions. * Include BlockExecutionWeight. --- frame/system/src/lib.rs | 71 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 65 insertions(+), 6 deletions(-) diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index db6b528bcf..8eec6a2c37 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -1395,8 +1395,10 @@ impl CheckWeight where info: &DispatchInfoOf, ) -> Result<(), TransactionValidityError> { match info.class { - // Mandatory and Operational transactions does not - DispatchClass::Mandatory | DispatchClass::Operational => Ok(()), + // Mandatory transactions are included in a block unconditionally, so + // we don't verify weight. + DispatchClass::Mandatory => Ok(()), + // Normal transactions must not exceed `MaximumExtrinsicWeight`. DispatchClass::Normal => { let maximum_weight = T::MaximumExtrinsicWeight::get(); let extrinsic_weight = info.weight.saturating_add(T::ExtrinsicBaseWeight::get()); @@ -1405,7 +1407,22 @@ impl CheckWeight where } else { Ok(()) } - } + }, + // For operational transactions we make sure it doesn't exceed + // the space alloted for `Operational` class. + DispatchClass::Operational => { + let maximum_weight = T::MaximumBlockWeight::get(); + let operational_limit = + Self::get_dispatch_limit_ratio(DispatchClass::Operational) * maximum_weight; + let operational_limit = + operational_limit.saturating_sub(T::BlockExecutionWeight::get()); + let extrinsic_weight = info.weight.saturating_add(T::ExtrinsicBaseWeight::get()); + if extrinsic_weight > operational_limit { + Err(InvalidTransaction::ExhaustsResources.into()) + } else { + Ok(()) + } + }, } } @@ -1484,9 +1501,11 @@ impl CheckWeight where fn get_priority(info: &DispatchInfoOf) -> TransactionPriority { match info.class { DispatchClass::Normal => info.weight.into(), - DispatchClass::Operational => Bounded::max_value(), + // Don't use up the whole priority space, to allow things like `tip` + // to be taken into account as well. + DispatchClass::Operational => TransactionPriority::max_value() / 2, // Mandatory extrinsics are only for inherents; never transactions. 
- DispatchClass::Mandatory => Bounded::min_value(), + DispatchClass::Mandatory => TransactionPriority::min_value(), } } @@ -2452,6 +2471,42 @@ pub(crate) mod tests { }); } + #[test] + fn operational_extrinsic_limited_by_operational_space_limit() { + new_test_ext().execute_with(|| { + let operational_limit = CheckWeight::::get_dispatch_limit_ratio( + DispatchClass::Operational + ) * ::MaximumBlockWeight::get(); + let base_weight = ::ExtrinsicBaseWeight::get(); + let block_base = ::BlockExecutionWeight::get(); + + let weight = operational_limit - base_weight - block_base; + let okay = DispatchInfo { + weight, + class: DispatchClass::Operational, + ..Default::default() + }; + let max = DispatchInfo { + weight: weight + 1, + class: DispatchClass::Operational, + ..Default::default() + }; + let len = 0_usize; + + assert_eq!( + CheckWeight::::do_validate(&okay, len), + Ok(ValidTransaction { + priority: CheckWeight::::get_priority(&okay), + ..Default::default() + }) + ); + assert_noop!( + CheckWeight::::do_validate(&max, len), + InvalidTransaction::ExhaustsResources + ); + }); + } + #[test] fn register_extra_weight_unchecked_doesnt_care_about_limits() { new_test_ext().execute_with(|| { @@ -2479,6 +2534,8 @@ pub(crate) mod tests { assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); assert_eq!(::MaximumBlockWeight::get(), 1024); assert_eq!(System::block_weight().total(), ::MaximumBlockWeight::get()); + // Checking single extrinsic should not take current block weight into account. + assert_eq!(CheckWeight::::check_extrinsic_weight(&rest_operational), Ok(())); }); } @@ -2514,6 +2571,8 @@ pub(crate) mod tests { assert_ok!(CheckWeight::::do_pre_dispatch(&dispatch_operational, len)); // Not too much though assert_noop!(CheckWeight::::do_pre_dispatch(&dispatch_operational, len), InvalidTransaction::ExhaustsResources); + // Even with full block, validity of single transaction should be correct. + assert_eq!(CheckWeight::::check_extrinsic_weight(&dispatch_operational), Ok(())); }); } @@ -2559,7 +2618,7 @@ pub(crate) mod tests { .validate(&1, CALL, &op, len) .unwrap() .priority; - assert_eq!(priority, u64::max_value()); + assert_eq!(priority, u64::max_value() / 2); }) } -- GitLab From 3bf25c2c25afe523c5481d0792713aa816dd649a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 22 Jun 2020 13:29:35 +0200 Subject: [PATCH 048/144] `pallet-staking`: Expose missing consts (#6456) * `pallet-staking`: Expose missing consts * Apply suggestions from code review Co-authored-by: Nikolay Volf Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> * Update the source docs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Nikolay Volf Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- frame/staking/src/lib.rs | 45 ++++++++++++++++++++++++++++++++++------ 1 file changed, 39 insertions(+), 6 deletions(-) diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index aca68bd706..63b427a5ab 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -865,9 +865,10 @@ pub trait Trait: frame_system::Trait + SendTransactionTypes> { /// Number of eras that staked funds must remain bonded for. type BondingDuration: Get; - /// Number of eras that slashes are deferred by, after computation. This should be less than the - /// bonding duration. Set to 0 if slashes should be applied immediately, without opportunity for - /// intervention. 
+ /// Number of eras that slashes are deferred by, after computation. + /// + /// This should be less than the bonding duration. Set to 0 if slashes + /// should be applied immediately, without opportunity for intervention. type SlashDeferDuration: Get; /// The origin which can cancel a deferred slash. Root can always do this. @@ -884,6 +885,7 @@ pub trait Trait: frame_system::Trait + SendTransactionTypes> { type NextNewSession: EstimateNextNewSession; /// The number of blocks before the end of the era from which election submissions are allowed. + /// /// Setting this to zero will disable the offchain compute and only on-chain seq-phragmen will /// be used. /// @@ -894,14 +896,15 @@ pub trait Trait: frame_system::Trait + SendTransactionTypes> { /// The overarching call type. type Call: Dispatchable + From> + IsSubType, Self> + Clone; - /// Maximum number of balancing iterations to run in the offchain submission. If set to 0, - /// balance_solution will not be executed at all. + /// Maximum number of balancing iterations to run in the offchain submission. + /// + /// If set to 0, balance_solution will not be executed at all. type MaxIterations: Get; /// The threshold of improvement that should be provided for a new solution to be accepted. type MinSolutionScoreBump: Get; - /// The maximum number of nominator rewarded for each validator. + /// The maximum number of nominators rewarded for each validator. /// /// For each validator only the `$MaxNominatorRewardedPerValidator` biggest stakers can claim /// their reward. This used to limit the i/o cost for the nominator payout. @@ -1275,6 +1278,36 @@ decl_module! { /// Number of eras that staked funds must remain bonded for. const BondingDuration: EraIndex = T::BondingDuration::get(); + /// Number of eras that slashes are deferred by, after computation. + /// + /// This should be less than the bonding duration. + /// Set to 0 if slashes should be applied immediately, without opportunity for + /// intervention. + const SlashDeferDuration: EraIndex = T::SlashDeferDuration::get(); + + /// The number of blocks before the end of the era from which election submissions are allowed. + /// + /// Setting this to zero will disable the offchain compute and only on-chain seq-phragmen will + /// be used. + /// + /// This is bounded by being within the last session. Hence, setting it to a value more than the + /// length of a session will be pointless. + const ElectionLookahead: T::BlockNumber = T::ElectionLookahead::get(); + + /// Maximum number of balancing iterations to run in the offchain submission. + /// + /// If set to 0, balance_solution will not be executed at all. + const MaxIterations: u32 = T::MaxIterations::get(); + + /// The threshold of improvement that should be provided for a new solution to be accepted. + const MinSolutionScoreBump: Perbill = T::MinSolutionScoreBump::get(); + + /// The maximum number of nominators rewarded for each validator. + /// + /// For each validator only the `$MaxNominatorRewardedPerValidator` biggest stakers can claim + /// their reward. This used to limit the i/o cost for the nominator payout. 
+ const MaxNominatorRewardedPerValidator: u32 = T::MaxNominatorRewardedPerValidator::get(); + type Error = Error; fn deposit_event() = default; -- GitLab From 8329dbd4e50997ae714a5da2877b82530b5bb3c5 Mon Sep 17 00:00:00 2001 From: Alexander Popiak Date: Mon, 22 Jun 2020 18:15:47 +0200 Subject: [PATCH 049/144] update collective events docs to be consistent with changes (#6463) --- frame/collective/src/lib.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index 4551f4917a..2be0241243 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -165,11 +165,11 @@ decl_event! { Approved(Hash), /// A motion was not approved by the required threshold. Disapproved(Hash), - /// A motion was executed; `bool` is true if returned without error. + /// A motion was executed; result will be `Ok` if it returned without error. Executed(Hash, DispatchResult), - /// A single member did some action; `bool` is true if returned without error. + /// A single member did some action; result will be `Ok` if it returned without error. MemberExecuted(Hash, DispatchResult), - /// A proposal was closed after its duration was up. + /// A proposal was closed because its threshold was reached or after its duration was up. Closed(Hash, MemberCount, MemberCount), } } @@ -188,7 +188,7 @@ decl_error! { DuplicateVote, /// Members are already initialized! AlreadyInitialized, - /// The close call is made too early, before the end of the voting. + /// The close call was made too early, before the end of the voting. TooEarly, /// There can only be a maximum of `MaxProposals` active proposals. TooManyProposals, -- GitLab From 94b3812fb90e50fe30d38c461affb0deb3ad8169 Mon Sep 17 00:00:00 2001 From: s3krit Date: Mon, 22 Jun 2020 18:47:31 +0200 Subject: [PATCH 050/144] [CI] Don't tag PRs on companion job cancels (#6470) --- .github/workflows/polkadot-companion-labels.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/polkadot-companion-labels.yml b/.github/workflows/polkadot-companion-labels.yml index dd00e72d6c..20aaa98a23 100644 --- a/.github/workflows/polkadot-companion-labels.yml +++ b/.github/workflows/polkadot-companion-labels.yml @@ -9,7 +9,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Monitor the status of the gitlab-check-companion-build job - uses: s3krit/await-status-action@4528ebbdf6e29bbec77c41caad1b2dec148ba894 + uses: s3krit/await-status-action@v1.0.1 id: 'check-companion-status' with: authToken: ${{ secrets.GITHUB_TOKEN }} @@ -17,6 +17,8 @@ jobs: contexts: 'continuous-integration/gitlab-check-polkadot-companion-build' timeout: 1800 notPresentTimeout: 3600 # It can take quite a while before the job starts... + failedStates: failure + interruptedStates: error # Error = job was probably cancelled. 
We don't want to label the PR in that case - name: Label success uses: andymckay/labeler@master if: steps.check-companion-status.outputs.result == 'success' -- GitLab From 50eb257608ff141e32954b087a1ef64458f021b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Mon, 22 Jun 2020 19:41:37 +0100 Subject: [PATCH 051/144] network: remove unused variable (#6460) --- client/network/src/protocol/sync.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/client/network/src/protocol/sync.rs b/client/network/src/protocol/sync.rs index 781d410fff..c3e87ca19a 100644 --- a/client/network/src/protocol/sync.rs +++ b/client/network/src/protocol/sync.rs @@ -651,7 +651,6 @@ impl ChainSync { let blocks = &mut self.blocks; let attrs = &self.required_block_attributes; let fork_targets = &mut self.fork_targets; - let mut have_requests = false; let last_finalized = self.client.info().finalized_number; let best_queued = self.best_queued_number; let client = &self.client; @@ -681,7 +680,6 @@ impl ChainSync { peer.common_number, req, ); - have_requests = true; Some((id, req)) } else if let Some((hash, req)) = fork_sync_request( id, @@ -697,7 +695,6 @@ impl ChainSync { ) { trace!(target: "sync", "Downloading fork {:?} from {}", hash, id); peer.state = PeerSyncState::DownloadingStale(hash); - have_requests = true; Some((id, req)) } else { None -- GitLab From 19826b979b1874883837a7b3e30470f655a2a8e6 Mon Sep 17 00:00:00 2001 From: Roman Borschel Date: Tue, 23 Jun 2020 10:51:35 +0200 Subject: [PATCH 052/144] Avoid panic on dropping a `sc_network::service::out_events::Receiver`. (#6458) * Avoid panic on dropping a `Receiver`. * CI --- client/network/src/service/out_events.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/client/network/src/service/out_events.rs b/client/network/src/service/out_events.rs index 4a631601a6..1b86a5fa43 100644 --- a/client/network/src/service/out_events.rs +++ b/client/network/src/service/out_events.rs @@ -35,7 +35,7 @@ use crate::Event; use super::maybe_utf8_bytes_to_string; -use futures::{prelude::*, channel::mpsc, ready}; +use futures::{prelude::*, channel::mpsc, ready, stream::FusedStream}; use parking_lot::Mutex; use prometheus_endpoint::{register, CounterVec, GaugeVec, Opts, PrometheusError, Registry, U64}; use std::{ @@ -119,8 +119,10 @@ impl fmt::Debug for Receiver { impl Drop for Receiver { fn drop(&mut self) { - // Empty the list to properly decrease the metrics. - while let Some(Some(_)) = self.next().now_or_never() {} + if !self.inner.is_terminated() { + // Empty the list to properly decrease the metrics. + while let Some(Some(_)) = self.next().now_or_never() {} + } } } -- GitLab From 5a102f7c984a7e7c169cf2b74df24e35a20710a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Tue, 23 Jun 2020 11:17:53 +0200 Subject: [PATCH 053/144] Implement nested storage transactions (#6269) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add transactional storage functionality to OverlayChanges A collection already has a natural None state. No need to wrap it with an option. 
* Add storage transactions runtime interface * Add frame support for transactions * Fix committed typo * Rename 'changes' variable to 'overlay' * Fix renaming change * Fixed strange line break * Rename clear to clear_where * Add comment regarding delete value on mutation * Add comment which changes are covered by a transaction * Do force the arg to with_transaction return a Result * Use rust doc comments on every documentable place * Fix wording of insert_diry doc * Improve doc on start_transaction * Rename value to overlayed in close_transaction * Inline negation * Improve wording of close_transaction comments * Get rid of an expect by using get_or_insert_with * Remove trailing whitespace * Rename should to expected in tests * Rolling back a transaction must mark the overlay as dirty * Protect client initiated storage tx from being droped by runtime * Review nits * Return Err when entering or exiting runtime fails * Documentation fixup * Remove close type * Move enter/exit runtime to excute_aux in the state-machine * Rename Discard -> Rollback * Move child changeset creation to constructor * Move child spawning into the closure * Apply suggestions from code review Co-authored-by: Bastian Köcher * Fixup for code suggestion * Unify re-exports * Rename overlay_changes to mod.rs and move into subdir * Change proof wording * Adapt a new test from master to storage-tx * Suggestions from the latest round of review * Fix warning message Co-authored-by: Bastian Köcher --- Cargo.lock | 28 +- frame/support/src/storage/mod.rs | 27 + .../support/test/tests/storage_transaction.rs | 159 ++++ .../api/proc-macro/src/impl_runtime_apis.rs | 13 +- primitives/externalities/src/lib.rs | 23 + primitives/io/src/lib.rs | 40 + primitives/runtime-interface/test/src/lib.rs | 1 - primitives/state-machine/Cargo.toml | 3 + primitives/state-machine/src/basic.rs | 12 + .../state-machine/src/changes_trie/build.rs | 41 +- primitives/state-machine/src/ext.rs | 48 +- primitives/state-machine/src/lib.rs | 41 +- .../src/overlayed_changes/changeset.rs | 752 ++++++++++++++++++ .../mod.rs} | 719 ++++++----------- primitives/state-machine/src/read_only.rs | 12 + primitives/state-machine/src/testing.rs | 6 +- 16 files changed, 1388 insertions(+), 537 deletions(-) create mode 100644 frame/support/test/tests/storage_transaction.rs create mode 100644 primitives/state-machine/src/overlayed_changes/changeset.rs rename primitives/state-machine/src/{overlayed_changes.rs => overlayed_changes/mod.rs} (50%) diff --git a/Cargo.lock b/Cargo.lock index 7597682395..930cb554c7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -819,7 +819,7 @@ dependencies = [ "clap", "criterion-plot 0.3.1", "csv", - "itertools", + "itertools 0.8.2", "lazy_static", "libc", "num-traits 0.2.11", @@ -846,7 +846,7 @@ dependencies = [ "clap", "criterion-plot 0.4.1", "csv", - "itertools", + "itertools 0.8.2", "lazy_static", "num-traits 0.2.11", "oorandom", @@ -868,7 +868,7 @@ checksum = "76f9212ddf2f4a9eb2d401635190600656a1f88a932ef53d06e7fa4c7e02fb8e" dependencies = [ "byteorder", "cast", - "itertools", + "itertools 0.8.2", ] [[package]] @@ -878,7 +878,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a01e15e0ea58e8234f96146b1f91fa9d0e4dd7a38da93ff7a75d42c0b9d3a545" dependencies = [ "cast", - "itertools", + "itertools 0.8.2", ] [[package]] @@ -2294,6 +2294,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "0.4.5" @@ -5186,7 +5195,7 @@ checksum = "02b10678c913ecbd69350e8535c3aef91a8676c0773fc1d7b95cdd196d7f2f26" dependencies = [ "bytes 0.5.4", "heck", - "itertools", + "itertools 0.8.2", "log", "multimap", "petgraph", @@ -5203,7 +5212,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "537aa19b95acde10a12fec4301466386f757403de4cd4e5b4fa78fb5ecb18f72" dependencies = [ "anyhow", - "itertools", + "itertools 0.8.2", "proc-macro2", "quote 1.0.6", "syn 1.0.17", @@ -7828,11 +7837,14 @@ version = "0.8.0-rc3" dependencies = [ "hash-db", "hex-literal", + "itertools 0.9.0", "log", "num-traits 0.2.11", "parity-scale-codec", "parking_lot 0.10.2", + "pretty_assertions", "rand 0.7.3", + "smallvec 1.4.0", "sp-core", "sp-externalities", "sp-panic-handler", @@ -8072,7 +8084,7 @@ dependencies = [ "hex", "hex-literal", "hyper 0.12.35", - "itertools", + "itertools 0.8.2", "jsonrpc-core-client", "libp2p", "node-primitives", @@ -8301,7 +8313,7 @@ dependencies = [ "build-helper", "cargo_metadata", "fs2", - "itertools", + "itertools 0.8.2", "tempfile", "toml", "walkdir", diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs index 6d0ef91ce1..c2d7ceef0f 100644 --- a/frame/support/src/storage/mod.rs +++ b/frame/support/src/storage/mod.rs @@ -29,6 +29,33 @@ pub mod child; pub mod generator; pub mod migration; +/// Describes whether a storage transaction should be committed or rolled back. +pub enum TransactionOutcome { + /// Transaction should be committed. + Commit(T), + /// Transaction should be rolled back. + Rollback(T), +} + +/// Execute the supplied function in a new storage transaction. +/// +/// All changes to storage performed by the supplied function are discarded if the returned +/// outcome is `TransactionOutcome::Rollback`. +/// +/// Transactions can be nested to any depth. Commits happen to the parent transaction. +pub fn with_transaction(f: impl FnOnce() -> TransactionOutcome) -> R { + use sp_io::storage::{ + start_transaction, commit_transaction, rollback_transaction, + }; + use TransactionOutcome::*; + + start_transaction(); + match f() { + Commit(res) => { commit_transaction(); res }, + Rollback(res) => { rollback_transaction(); res }, + } +} + /// A trait for working with macro-generated storage values under the substrate storage API. /// /// Details on implementation can be found at diff --git a/frame/support/test/tests/storage_transaction.rs b/frame/support/test/tests/storage_transaction.rs new file mode 100644 index 0000000000..bf6c70966b --- /dev/null +++ b/frame/support/test/tests/storage_transaction.rs @@ -0,0 +1,159 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use codec::{Encode, Decode, EncodeLike}; +use frame_support::{ + StorageMap, StorageValue, storage::{with_transaction, TransactionOutcome::*}, +}; +use sp_io::TestExternalities; + +pub trait Trait { + type Origin; + type BlockNumber: Encode + Decode + EncodeLike + Default + Clone; +} + +frame_support::decl_module! { + pub struct Module for enum Call where origin: T::Origin {} +} + +frame_support::decl_storage!{ + trait Store for Module as StorageTransactions { + pub Value: u32; + pub Map: map hasher(twox_64_concat) String => u32; + } +} + + +#[test] +fn storage_transaction_basic_commit() { + TestExternalities::default().execute_with(|| { + + assert_eq!(Value::get(), 0); + assert!(!Map::contains_key("val0")); + + with_transaction(|| { + Value::set(99); + Map::insert("val0", 99); + assert_eq!(Value::get(), 99); + assert_eq!(Map::get("val0"), 99); + Commit(()) + }); + + assert_eq!(Value::get(), 99); + assert_eq!(Map::get("val0"), 99); + }); +} + +#[test] +fn storage_transaction_basic_rollback() { + TestExternalities::default().execute_with(|| { + + assert_eq!(Value::get(), 0); + assert_eq!(Map::get("val0"), 0); + + with_transaction(|| { + Value::set(99); + Map::insert("val0", 99); + assert_eq!(Value::get(), 99); + assert_eq!(Map::get("val0"), 99); + Rollback(()) + }); + + assert_eq!(Value::get(), 0); + assert_eq!(Map::get("val0"), 0); + }); +} + +#[test] +fn storage_transaction_rollback_then_commit() { + TestExternalities::default().execute_with(|| { + Value::set(1); + Map::insert("val1", 1); + + with_transaction(|| { + Value::set(2); + Map::insert("val1", 2); + Map::insert("val2", 2); + + with_transaction(|| { + Value::set(3); + Map::insert("val1", 3); + Map::insert("val2", 3); + Map::insert("val3", 3); + + assert_eq!(Value::get(), 3); + assert_eq!(Map::get("val1"), 3); + assert_eq!(Map::get("val2"), 3); + assert_eq!(Map::get("val3"), 3); + + Rollback(()) + }); + + assert_eq!(Value::get(), 2); + assert_eq!(Map::get("val1"), 2); + assert_eq!(Map::get("val2"), 2); + assert_eq!(Map::get("val3"), 0); + + Commit(()) + }); + + assert_eq!(Value::get(), 2); + assert_eq!(Map::get("val1"), 2); + assert_eq!(Map::get("val2"), 2); + assert_eq!(Map::get("val3"), 0); + }); +} + +#[test] +fn storage_transaction_commit_then_rollback() { + TestExternalities::default().execute_with(|| { + Value::set(1); + Map::insert("val1", 1); + + with_transaction(|| { + Value::set(2); + Map::insert("val1", 2); + Map::insert("val2", 2); + + with_transaction(|| { + Value::set(3); + Map::insert("val1", 3); + Map::insert("val2", 3); + Map::insert("val3", 3); + + assert_eq!(Value::get(), 3); + assert_eq!(Map::get("val1"), 3); + assert_eq!(Map::get("val2"), 3); + assert_eq!(Map::get("val3"), 3); + + Commit(()) + }); + + assert_eq!(Value::get(), 3); + assert_eq!(Map::get("val1"), 3); + assert_eq!(Map::get("val2"), 3); + assert_eq!(Map::get("val3"), 3); + + Rollback(()) + }); + + assert_eq!(Value::get(), 1); + assert_eq!(Map::get("val1"), 1); + assert_eq!(Map::get("val2"), 0); + assert_eq!(Map::get("val3"), 0); + }); +} diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index b999b9eefd..8f9927cadc 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -260,6 +260,7 @@ fn generate_runtime_api_base_structures() -> Result { &self, map_call: F, ) -> std::result::Result where Self: Sized { + self.changes.borrow_mut().start_transaction(); *self.commit_on_success.borrow_mut() = false; let res = 
map_call(self); *self.commit_on_success.borrow_mut() = true; @@ -369,6 +370,9 @@ fn generate_runtime_api_base_structures() -> Result { &self, call_api_at: F, ) -> std::result::Result<#crate_::NativeOrEncoded, E> { + if *self.commit_on_success.borrow() { + self.changes.borrow_mut().start_transaction(); + } let res = call_api_at( &self.call, self, @@ -384,11 +388,16 @@ fn generate_runtime_api_base_structures() -> Result { } fn commit_on_ok(&self, res: &std::result::Result) { + let proof = "\ + We only close a transaction when we opened one ourself. + Other parts of the runtime that make use of transactions (state-machine) + also balance their transactions. The runtime cannot close client initiated + transactions. qed"; if *self.commit_on_success.borrow() { if res.is_err() { - self.changes.borrow_mut().discard_prospective(); + self.changes.borrow_mut().rollback_transaction().expect(proof); } else { - self.changes.borrow_mut().commit_prospective(); + self.changes.borrow_mut().commit_transaction().expect(proof); } } } diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index cfb1d0878a..210fe5b4ef 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -195,6 +195,29 @@ pub trait Externalities: ExtensionStore { /// The returned hash is defined by the `Block` and is SCALE encoded. fn storage_changes_root(&mut self, parent: &[u8]) -> Result>, ()>; + /// Start a new nested transaction. + /// + /// This allows to either commit or roll back all changes made after this call to the + /// top changes or the default child changes. For every transaction there cam be a + /// matching call to either `storage_rollback_transaction` or `storage_commit_transaction`. + /// Any transactions that are still open after returning from runtime are committed + /// automatically. + /// + /// Changes made without any open transaction are committed immediately. + fn storage_start_transaction(&mut self); + + /// Rollback the last transaction started by `storage_start_transaction`. + /// + /// Any changes made during that storage transaction are discarded. Returns an error when + /// no transaction is open that can be closed. + fn storage_rollback_transaction(&mut self) -> Result<(), ()>; + + /// Commit the last transaction started by `storage_start_transaction`. + /// + /// Any changes made during that storage transaction are committed. Returns an error when + /// no transaction is open that can be closed. + fn storage_commit_transaction(&mut self) -> Result<(), ()>; + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! /// Benchmarking related functionality and shouldn't be used anywhere else! /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 1d5e01bdff..c75c8e67cc 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -155,6 +155,46 @@ pub trait Storage { fn next_key(&mut self, key: &[u8]) -> Option> { self.next_storage_key(&key) } + + /// Start a new nested transaction. + /// + /// This allows to either commit or roll back all changes that are made after this call. + /// For every transaction there must be a matching call to either `rollback_transaction` + /// or `commit_transaction`. This is also effective for all values manipulated using the + /// `DefaultChildStorage` API. 
+ /// + /// # Warning + /// + /// This is a low level API that is potentially dangerous as it can easily result + /// in unbalanced transactions. For example, FRAME users should use high level storage + /// abstractions. + fn start_transaction(&mut self) { + self.storage_start_transaction(); + } + + /// Rollback the last transaction started by `start_transaction`. + /// + /// Any changes made during that transaction are discarded. + /// + /// # Panics + /// + /// Will panic if there is no open transaction. + fn rollback_transaction(&mut self) { + self.storage_rollback_transaction() + .expect("No open transaction that can be rolled back."); + } + + /// Commit the last transaction started by `start_transaction`. + /// + /// Any changes made during that transaction are committed. + /// + /// # Panics + /// + /// Will panic if there is no open transaction. + fn commit_transaction(&mut self) { + self.storage_commit_transaction() + .expect("No open transaction that can be committed."); + } } /// Interface for accessing the child storage for default child trie, diff --git a/primitives/runtime-interface/test/src/lib.rs b/primitives/runtime-interface/test/src/lib.rs index 06bc4e8ed8..109caab606 100644 --- a/primitives/runtime-interface/test/src/lib.rs +++ b/primitives/runtime-interface/test/src/lib.rs @@ -55,7 +55,6 @@ fn call_wasm_method_with_result( &mut ext_ext, sp_core::traits::MissingHostFunctions::Disallow, ).map_err(|e| format!("Failed to execute `{}`: {}", method, e))?; - Ok(ext) } diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index 77b9e304d4..29c8676f7e 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -25,10 +25,13 @@ codec = { package = "parity-scale-codec", version = "1.3.1" } num-traits = "0.2.8" rand = "0.7.2" sp-externalities = { version = "0.8.0-rc3", path = "../externalities" } +itertools = "0.9" +smallvec = "1.4" [dev-dependencies] hex-literal = "0.2.1" sp-runtime = { version = "2.0.0-rc3", path = "../runtime" } +pretty_assertions = "0.6.1" [features] default = [] diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 917e41f33d..dbb4c6c2b8 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -307,6 +307,18 @@ impl Externalities for BasicExternalities { Ok(None) } + fn storage_start_transaction(&mut self) { + unimplemented!("Transactions are not supported by BasicExternalities"); + } + + fn storage_rollback_transaction(&mut self) -> Result<(), ()> { + unimplemented!("Transactions are not supported by BasicExternalities"); + } + + fn storage_commit_transaction(&mut self) -> Result<(), ()> { + unimplemented!("Transactions are not supported by BasicExternalities"); + } + fn wipe(&mut self) {} fn commit(&mut self) {} diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index f9698f1a31..bf910e2c4f 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -25,7 +25,7 @@ use num_traits::One; use crate::{ StorageKey, backend::Backend, - overlayed_changes::OverlayedChanges, + overlayed_changes::{OverlayedChanges, OverlayedValue}, trie_backend_essence::TrieBackendEssence, changes_trie::{ AnchorBlockId, ConfigurationRange, Storage, BlockNumber, @@ -43,7 +43,7 @@ pub(crate) fn prepare_input<'a, B, H, Number>( backend: &'a B, storage: &'a dyn Storage, config: ConfigurationRange<'a, Number>, - 
changes: &'a OverlayedChanges, + overlay: &'a OverlayedChanges, parent: &'a AnchorBlockId, ) -> Result<( impl Iterator> + 'a, @@ -60,7 +60,7 @@ pub(crate) fn prepare_input<'a, B, H, Number>( let (extrinsics_input, children_extrinsics_input) = prepare_extrinsics_input( backend, &number, - changes, + overlay, )?; let (digest_input, mut children_digest_input, digest_input_blocks) = prepare_digest_input::( parent, @@ -96,7 +96,7 @@ pub(crate) fn prepare_input<'a, B, H, Number>( fn prepare_extrinsics_input<'a, B, H, Number>( backend: &'a B, block: &Number, - changes: &'a OverlayedChanges, + overlay: &'a OverlayedChanges, ) -> Result<( impl Iterator> + 'a, BTreeMap, impl Iterator> + 'a>, @@ -108,20 +108,21 @@ fn prepare_extrinsics_input<'a, B, H, Number>( { let mut children_result = BTreeMap::new(); - for child_info in changes.child_infos() { + for (child_changes, child_info) in overlay.children() { let child_index = ChildIndex:: { block: block.clone(), storage_key: child_info.prefixed_storage_key(), }; let iter = prepare_extrinsics_input_inner( - backend, block, changes, - Some(child_info.clone()) + backend, block, overlay, + Some(child_info.clone()), + child_changes, )?; children_result.insert(child_index, iter); } - let top = prepare_extrinsics_input_inner(backend, block, changes, None)?; + let top = prepare_extrinsics_input_inner(backend, block, overlay, None, overlay.changes())?; Ok((top, children_result)) } @@ -129,40 +130,38 @@ fn prepare_extrinsics_input<'a, B, H, Number>( fn prepare_extrinsics_input_inner<'a, B, H, Number>( backend: &'a B, block: &Number, - changes: &'a OverlayedChanges, + overlay: &'a OverlayedChanges, child_info: Option, + changes: impl Iterator ) -> Result> + 'a, String> where B: Backend, H: Hasher, Number: BlockNumber, { - changes.changes(child_info.as_ref()) - .filter(|( _, v)| v.extrinsics().is_some()) + changes + .filter(|( _, v)| v.extrinsics().next().is_some()) .try_fold(BTreeMap::new(), |mut map: BTreeMap<&[u8], (ExtrinsicIndex, Vec)>, (k, v)| { match map.entry(k) { Entry::Vacant(entry) => { // ignore temporary values (values that have null value at the end of operation // AND are not in storage at the beginning of operation if let Some(child_info) = child_info.as_ref() { - if !changes.child_storage(child_info, k).map(|v| v.is_some()).unwrap_or_default() { + if !overlay.child_storage(child_info, k).map(|v| v.is_some()).unwrap_or_default() { if !backend.exists_child_storage(&child_info, k) .map_err(|e| format!("{}", e))? { return Ok(map); } } } else { - if !changes.storage(k).map(|v| v.is_some()).unwrap_or_default() { + if !overlay.storage(k).map(|v| v.is_some()).unwrap_or_default() { if !backend.exists_storage(k).map_err(|e| format!("{}", e))? 
{ return Ok(map); } } }; - let extrinsics = v.extrinsics() - .expect("filtered by filter() call above; qed") - .cloned() - .collect(); + let extrinsics = v.extrinsics().cloned().collect(); entry.insert((ExtrinsicIndex { block: block.clone(), key: k.to_vec(), @@ -173,9 +172,7 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( // AND we are checking it before insertion let extrinsics = &mut entry.get_mut().1; extrinsics.extend( - v.extrinsics() - .expect("filtered by filter() call above; qed") - .cloned() + v.extrinsics().cloned() ); extrinsics.sort_unstable(); }, @@ -404,6 +401,8 @@ mod test { let mut changes = OverlayedChanges::default(); changes.set_collect_extrinsics(true); + changes.start_transaction(); + changes.set_extrinsic_index(1); changes.set_storage(vec![101], Some(vec![203])); @@ -411,7 +410,7 @@ mod test { changes.set_storage(vec![100], Some(vec![202])); changes.set_child_storage(&child_info_1, vec![100], Some(vec![202])); - changes.commit_prospective(); + changes.commit_transaction().unwrap(); changes.set_extrinsic_index(0); changes.set_storage(vec![100], Some(vec![0])); diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 7e805250e7..2cd63cde97 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -37,6 +37,10 @@ use std::{error, fmt, any::{Any, TypeId}}; use log::{warn, trace}; const EXT_NOT_ALLOWED_TO_FAIL: &str = "Externalities not allowed to fail within runtime"; +const BENCHMARKING_FN: &str = "\ + This is a special fn only for benchmarking where a database commit happens from the runtime. + For that reason client started transactions before calling into runtime are not allowed. + Without client transactions the loop condition garantuees the success of the tx close."; /// Errors that can occur when interacting with the externalities. #[derive(Debug, Copy, Clone)] @@ -147,7 +151,7 @@ where self.backend.pairs().iter() .map(|&(ref k, ref v)| (k.to_vec(), Some(v.to_vec()))) - .chain(self.overlay.changes(None).map(|(k, v)| (k.clone(), v.value().cloned()))) + .chain(self.overlay.changes().map(|(k, v)| (k.clone(), v.value().cloned()))) .collect::>() .into_iter() .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) @@ -477,15 +481,14 @@ where ); root.encode() } else { + let root = if let Some((changes, info)) = self.overlay.child_changes(storage_key) { + let delta = changes.map(|(k, v)| (k.as_ref(), v.value().map(AsRef::as_ref))); + Some(self.backend.child_storage_root(info, delta)) + } else { + None + }; - if let Some(child_info) = self.overlay.default_child_info(storage_key) { - let (root, is_empty, _) = { - let delta = self.overlay.changes(Some(child_info)) - .map(|(k, v)| (k.as_ref(), v.value().map(AsRef::as_ref))); - - self.backend.child_storage_root(child_info, delta) - }; - + if let Some((root, is_empty, _)) = root { let root = root.encode(); // We store update in the overlay in order to be able to use 'self.storage_transaction' // cache. 
This is brittle as it rely on Ext only querying the trie backend for @@ -547,20 +550,37 @@ where root.map(|r| r.map(|o| o.encode())) } + fn storage_start_transaction(&mut self) { + self.overlay.start_transaction() + } + + fn storage_rollback_transaction(&mut self) -> Result<(), ()> { + self.mark_dirty(); + self.overlay.rollback_transaction().map_err(|_| ()) + } + + fn storage_commit_transaction(&mut self) -> Result<(), ()> { + self.overlay.commit_transaction().map_err(|_| ()) + } + fn wipe(&mut self) { - self.overlay.discard_prospective(); + for _ in 0..self.overlay.transaction_depth() { + self.overlay.rollback_transaction().expect(BENCHMARKING_FN); + } self.overlay.drain_storage_changes( &self.backend, None, Default::default(), self.storage_transaction_cache, ).expect(EXT_NOT_ALLOWED_TO_FAIL); - self.storage_transaction_cache.reset(); - self.backend.wipe().expect(EXT_NOT_ALLOWED_TO_FAIL) + self.backend.wipe().expect(EXT_NOT_ALLOWED_TO_FAIL); + self.mark_dirty(); } fn commit(&mut self) { - self.overlay.commit_prospective(); + for _ in 0..self.overlay.transaction_depth() { + self.overlay.commit_transaction().expect(BENCHMARKING_FN); + } let changes = self.overlay.drain_storage_changes( &self.backend, None, @@ -571,7 +591,7 @@ where changes.transaction_storage_root, changes.transaction, ).expect(EXT_NOT_ALLOWED_TO_FAIL); - self.storage_transaction_cache.reset(); + self.mark_dirty(); } } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index b863d155e7..e5e48bc47c 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -79,6 +79,10 @@ pub use in_memory_backend::new_in_mem; pub use stats::{UsageInfo, UsageUnit, StateMachineStats}; pub use sp_core::traits::CloneableSpawn; +const PROOF_CLOSE_TRANSACTION: &str = "\ + Closing a transaction that was started in this function. Client initiated transactions + are protected from being closed by the runtime. qed"; + type CallResult = Result, E>; /// Default handler of the execution manager. @@ -297,6 +301,8 @@ impl<'a, B, H, N, Exec> StateMachine<'a, B, H, N, Exec> where None => &mut cache, }; + self.overlay.enter_runtime().expect("StateMachine is never called from the runtime; qed"); + let mut ext = Ext::new( self.overlay, self.offchain_overlay, @@ -324,6 +330,9 @@ impl<'a, B, H, N, Exec> StateMachine<'a, B, H, N, Exec> where native_call, ); + self.overlay.exit_runtime() + .expect("Runtime is not able to call this function in the overlay; qed"); + trace!( target: "state", "{:04x}: Return. 
Native={:?}, Result={:?}", id, @@ -347,11 +356,11 @@ impl<'a, B, H, N, Exec> StateMachine<'a, B, H, N, Exec> where CallResult, ) -> CallResult { - let pending_changes = self.overlay.clone_pending(); + self.overlay.start_transaction(); let (result, was_native) = self.execute_aux(true, native_call.take()); if was_native { - self.overlay.replace_pending(pending_changes); + self.overlay.rollback_transaction().expect(PROOF_CLOSE_TRANSACTION); let (wasm_result, _) = self.execute_aux( false, native_call, @@ -366,6 +375,7 @@ impl<'a, B, H, N, Exec> StateMachine<'a, B, H, N, Exec> where on_consensus_failure(wasm_result, result) } } else { + self.overlay.commit_transaction().expect(PROOF_CLOSE_TRANSACTION); result } } @@ -378,16 +388,17 @@ impl<'a, B, H, N, Exec> StateMachine<'a, B, H, N, Exec> where R: Decode + Encode + PartialEq, NC: FnOnce() -> result::Result + UnwindSafe, { - let pending_changes = self.overlay.clone_pending(); + self.overlay.start_transaction(); let (result, was_native) = self.execute_aux( true, native_call.take(), ); if !was_native || result.is_ok() { + self.overlay.commit_transaction().expect(PROOF_CLOSE_TRANSACTION); result } else { - self.overlay.replace_pending(pending_changes); + self.overlay.rollback_transaction().expect(PROOF_CLOSE_TRANSACTION); let (wasm_result, _) = self.execute_aux( false, native_call, @@ -977,7 +988,7 @@ mod tests { let mut overlay = OverlayedChanges::default(); overlay.set_storage(b"aba".to_vec(), Some(b"1312".to_vec())); overlay.set_storage(b"bab".to_vec(), Some(b"228".to_vec())); - overlay.commit_prospective(); + overlay.start_transaction(); overlay.set_storage(b"abd".to_vec(), Some(b"69".to_vec())); overlay.set_storage(b"bbd".to_vec(), Some(b"42".to_vec())); @@ -994,10 +1005,10 @@ mod tests { ); ext.clear_prefix(b"ab"); } - overlay.commit_prospective(); + overlay.commit_transaction().unwrap(); assert_eq!( - overlay.changes(None).map(|(k, v)| (k.clone(), v.value().cloned())) + overlay.changes().map(|(k, v)| (k.clone(), v.value().cloned())) .collect::>(), map![ b"abc".to_vec() => None.into(), @@ -1083,7 +1094,7 @@ mod tests { Some(vec![reference_data[0].clone()].encode()), ); } - overlay.commit_prospective(); + overlay.start_transaction(); { let mut ext = Ext::new( &mut overlay, @@ -1102,7 +1113,7 @@ mod tests { Some(reference_data.encode()), ); } - overlay.discard_prospective(); + overlay.rollback_transaction().unwrap(); { let ext = Ext::new( &mut overlay, @@ -1145,7 +1156,7 @@ mod tests { ext.clear_storage(key.as_slice()); ext.storage_append(key.clone(), Item::InitializationItem.encode()); } - overlay.commit_prospective(); + overlay.start_transaction(); // For example, first transaction resulted in panic during block building { @@ -1170,7 +1181,7 @@ mod tests { Some(vec![Item::InitializationItem, Item::DiscardedItem].encode()), ); } - overlay.discard_prospective(); + overlay.rollback_transaction().unwrap(); // Then we apply next transaction which is valid this time. { @@ -1196,7 +1207,7 @@ mod tests { ); } - overlay.commit_prospective(); + overlay.start_transaction(); // Then only initlaization item and second (commited) item should persist. 
{ @@ -1317,9 +1328,11 @@ mod tests { let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); + overlay.start_transaction(); overlay.set_storage(b"ccc".to_vec(), Some(b"".to_vec())); assert_eq!(overlay.storage(b"ccc"), Some(Some(&[][..]))); - overlay.commit_prospective(); + overlay.commit_transaction().unwrap(); + overlay.start_transaction(); assert_eq!(overlay.storage(b"ccc"), Some(Some(&[][..]))); assert_eq!(overlay.storage(b"bbb"), None); @@ -1339,7 +1352,7 @@ mod tests { ext.clear_storage(b"ccc"); assert_eq!(ext.storage(b"ccc"), None); } - overlay.commit_prospective(); + overlay.commit_transaction().unwrap(); assert_eq!(overlay.storage(b"ccc"), Some(None)); } } diff --git a/primitives/state-machine/src/overlayed_changes/changeset.rs b/primitives/state-machine/src/overlayed_changes/changeset.rs new file mode 100644 index 0000000000..fe43c0ea99 --- /dev/null +++ b/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -0,0 +1,752 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +//! Houses the code that implements the transactional overlay storage. + +use super::{StorageKey, StorageValue}; + +use itertools::Itertools; +use std::collections::{HashSet, BTreeMap, BTreeSet}; +use smallvec::SmallVec; +use log::warn; + +const PROOF_OVERLAY_NON_EMPTY: &str = "\ + An OverlayValue is always created with at least one transaction and dropped as soon + as the last transaction is removed; qed"; + +type DirtyKeysSets = SmallVec<[HashSet; 5]>; +type Transactions = SmallVec<[InnerValue; 5]>; + +/// Error returned when trying to commit or rollback while no transaction is open or +/// when the runtime is trying to close a transaction started by the client. +#[derive(Debug)] +#[cfg_attr(test, derive(PartialEq))] +pub struct NoOpenTransaction; + +/// Error when calling `enter_runtime` when already being in runtime execution mode. +#[derive(Debug)] +#[cfg_attr(test, derive(PartialEq))] +pub struct AlreadyInRuntime; + +/// Error when calling `exit_runtime` when not being in runtime exection mdde. +#[derive(Debug)] +#[cfg_attr(test, derive(PartialEq))] +pub struct NotInRuntime; + +/// Describes in which mode the node is currently executing. +#[derive(Debug, Clone, Copy)] +pub enum ExecutionMode { + /// Exeuting in client mode: Removal of all transactions possible. + Client, + /// Executing in runtime mode: Transactions started by the client are protected. + Runtime, +} + +#[derive(Debug, Default, Clone)] +#[cfg_attr(test, derive(PartialEq))] +struct InnerValue { + /// Current value. None if value has been deleted. + value: Option, + /// The set of extrinsic indices where the values has been changed. + /// Is filled only if runtime has announced changes trie support. + extrinsics: BTreeSet, +} + +/// An overlay that contains all versions of a value for a specific key. 
+#[derive(Debug, Default, Clone)] +#[cfg_attr(test, derive(PartialEq))] +pub struct OverlayedValue { + /// The individual versions of that value. + /// One entry per transactions during that the value was actually written. + transactions: Transactions, +} + +/// Holds a set of changes with the ability modify them using nested transactions. +#[derive(Debug, Default, Clone)] +pub struct OverlayedChangeSet { + /// Stores the changes that this overlay constitutes. + changes: BTreeMap, + /// Stores which keys are dirty per transaction. Needed in order to determine which + /// values to merge into the parent transaction on commit. The length of this vector + /// therefore determines how many nested transactions are currently open (depth). + dirty_keys: DirtyKeysSets, + /// The number of how many transactions beginning from the first transactions are started + /// by the client. Those transactions are protected against close (commit, rollback) + /// when in runtime mode. + num_client_transactions: usize, + /// Determines whether the node is using the overlay from the client or the runtime. + execution_mode: ExecutionMode, +} + +impl Default for ExecutionMode { + fn default() -> Self { + Self::Client + } +} + +impl OverlayedValue { + /// The value as seen by the current transaction. + pub fn value(&self) -> Option<&StorageValue> { + self.transactions.last().expect(PROOF_OVERLAY_NON_EMPTY).value.as_ref() + } + + /// Unique list of extrinsic indices which modified the value. + pub fn extrinsics(&self) -> impl Iterator { + self.transactions.iter().flat_map(|t| t.extrinsics.iter()).unique() + } + + /// Mutable reference to the most recent version. + fn value_mut(&mut self) -> &mut Option { + &mut self.transactions.last_mut().expect(PROOF_OVERLAY_NON_EMPTY).value + } + + /// Remove the last version and return it. + fn pop_transaction(&mut self) -> InnerValue { + self.transactions.pop().expect(PROOF_OVERLAY_NON_EMPTY) + } + + /// Mutable reference to the set which holds the indices for the **current transaction only**. + fn transaction_extrinsics_mut(&mut self) -> &mut BTreeSet { + &mut self.transactions.last_mut().expect(PROOF_OVERLAY_NON_EMPTY).extrinsics + } + + /// Writes a new version of a value. + /// + /// This makes sure that the old version is not overwritten and can be properly + /// rolled back when required. + fn set( + &mut self, + value: Option, + first_write_in_tx: bool, + at_extrinsic: Option, + ) { + if first_write_in_tx || self.transactions.is_empty() { + self.transactions.push(InnerValue { + value, + .. Default::default() + }); + } else { + *self.value_mut() = value; + } + + if let Some(extrinsic) = at_extrinsic { + self.transaction_extrinsics_mut().insert(extrinsic); + } + } +} + +/// Inserts a key into the dirty set. +/// +/// Returns true iff we are currently have at least one open transaction and if this +/// is the first write to the given key that transaction. +fn insert_dirty(set: &mut DirtyKeysSets, key: StorageKey) -> bool { + set.last_mut().map(|dk| dk.insert(key)).unwrap_or_default() +} + +impl OverlayedChangeSet { + /// Create a new changeset at the same transaction state but without any contents. + /// + /// This changeset might be created when there are already open transactions. + /// We need to catch up here so that the child is at the same transaction depth. 
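// The behaviour implemented above can be pictured as a small stack of value
// versions per key: the first write inside an open transaction pushes a new
// version, rollback pops it, and commit folds it into the version underneath.
// The stand-alone sketch below models just that idea; it assumes nothing
// beyond std, and `VersionStack` and its methods are illustrative names, not
// part of this crate's API.

struct VersionStack {
	// One entry per transaction that wrote this key; the last entry is the
	// value currently visible.
	versions: Vec<Option<Vec<u8>>>,
}

impl VersionStack {
	// First write inside a transaction pushes a new version; later writes in
	// the same transaction overwrite the top entry.
	fn set(&mut self, value: Option<Vec<u8>>, first_write_in_tx: bool) {
		if first_write_in_tx || self.versions.is_empty() {
			self.versions.push(value);
		} else {
			*self.versions.last_mut().expect("non-empty; checked above") = value;
		}
	}

	// Rollback drops the version written by the transaction being closed.
	fn rollback(&mut self) {
		self.versions.pop();
	}

	// Commit merges the closed transaction's version into its predecessor,
	// if there is one.
	fn commit(&mut self) {
		if self.versions.len() > 1 {
			let top = self.versions.pop().expect("len > 1; checked above");
			*self.versions.last_mut().expect("len > 1; checked above") = top;
		}
	}

	fn current(&self) -> Option<&Vec<u8>> {
		self.versions.last().and_then(|v| v.as_ref())
	}
}

fn main() {
	let mut stack = VersionStack { versions: vec![Some(b"committed".to_vec())] };
	stack.set(Some(b"tx1".to_vec()), true); // first write of transaction 1
	stack.commit();                         // merged into the committed value
	stack.set(Some(b"tx2".to_vec()), true); // first write of transaction 2
	stack.rollback();                       // discarded again
	assert_eq!(stack.current(), Some(&b"tx1".to_vec()));
}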
+ pub fn spawn_child(&self) -> Self { + use std::iter::repeat; + Self { + dirty_keys: repeat(HashSet::new()).take(self.transaction_depth()).collect(), + num_client_transactions: self.num_client_transactions, + execution_mode: self.execution_mode, + .. Default::default() + } + } + + /// True if no changes at all are contained in the change set. + pub fn is_empty(&self) -> bool { + self.changes.is_empty() + } + + /// Get an optional reference to the value stored for the specified key. + pub fn get(&self, key: &[u8]) -> Option<&OverlayedValue> { + self.changes.get(key) + } + + /// Set a new value for the specified key. + /// + /// Can be rolled back or committed when called inside a transaction. + pub fn set( + &mut self, + key: StorageKey, + value: Option, + at_extrinsic: Option, + ) { + let overlayed = self.changes.entry(key.clone()).or_default(); + overlayed.set(value, insert_dirty(&mut self.dirty_keys, key), at_extrinsic); + } + + /// Get a mutable reference for a value. + /// + /// Can be rolled back or committed when called inside a transaction. + #[must_use = "A change was registered, so this value MUST be modified."] + pub fn modify( + &mut self, + key: StorageKey, + init: impl Fn() -> StorageValue, + at_extrinsic: Option, + ) -> &mut Option { + let overlayed = self.changes.entry(key.clone()).or_default(); + let first_write_in_tx = insert_dirty(&mut self.dirty_keys, key); + let clone_into_new_tx = if let Some(tx) = overlayed.transactions.last() { + if first_write_in_tx { + Some(tx.value.clone()) + } else { + None + } + } else { + Some(Some(init())) + }; + + if let Some(cloned) = clone_into_new_tx { + overlayed.set(cloned, first_write_in_tx, at_extrinsic); + } + overlayed.value_mut() + } + + /// Set all values to deleted which are matched by the predicate. + /// + /// Can be rolled back or committed when called inside a transaction. + pub fn clear_where( + &mut self, + predicate: impl Fn(&[u8], &OverlayedValue) -> bool, + at_extrinsic: Option, + ) { + for (key, val) in self.changes.iter_mut().filter(|(k, v)| predicate(k, v)) { + val.set(None, insert_dirty(&mut self.dirty_keys, key.to_owned()), at_extrinsic); + } + } + + /// Get a list of all changes as seen by current transaction. + pub fn changes(&self) -> impl Iterator { + self.changes.iter() + } + + /// Get the change that is next to the supplied key. + pub fn next_change(&self, key: &[u8]) -> Option<(&[u8], &OverlayedValue)> { + use std::ops::Bound; + let range = (Bound::Excluded(key), Bound::Unbounded); + self.changes.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v)) + } + + /// Consume this changeset and return all committed changes. + /// + /// Panics: + /// Panics if there are open transactions: `transaction_depth() > 0` + pub fn drain_commited(self) -> impl Iterator)> { + assert!(self.transaction_depth() == 0, "Drain is not allowed with open transactions."); + self.changes.into_iter().map(|(k, mut v)| (k, v.pop_transaction().value)) + } + + /// Returns the current nesting depth of the transaction stack. + /// + /// A value of zero means that no transaction is open and changes are committed on write. + pub fn transaction_depth(&self) -> usize { + self.dirty_keys.len() + } + + /// Call this before transfering control to the runtime. + /// + /// This protects all existing transactions from being removed by the runtime. + /// Calling this while already inside the runtime will return an error. 
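// The client/runtime protection described above, shown as a short sequence
// over the methods defined in this file; the function name and the call
// order are illustrative only.
fn client_transactions_survive_the_runtime() {
	let mut set = OverlayedChangeSet::default();
	set.start_transaction();                     // opened by the client
	set.enter_runtime().unwrap();
	set.start_transaction();                     // opened by the runtime
	set.commit_transaction().unwrap();           // the runtime may close its own tx
	assert!(set.commit_transaction().is_err());  // but not the client's
	set.exit_runtime().unwrap();
	set.commit_transaction().unwrap();           // the client closes it after exit
}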
+ pub fn enter_runtime(&mut self) -> Result<(), AlreadyInRuntime> { + if let ExecutionMode::Runtime = self.execution_mode { + return Err(AlreadyInRuntime); + } + self.execution_mode = ExecutionMode::Runtime; + self.num_client_transactions = self.transaction_depth(); + Ok(()) + } + + /// Call this when control returns from the runtime. + /// + /// This commits all dangling transaction left open by the runtime. + /// Calling this while already outside the runtime will return an error. + pub fn exit_runtime(&mut self) -> Result<(), NotInRuntime> { + if let ExecutionMode::Client = self.execution_mode { + return Err(NotInRuntime); + } + self.execution_mode = ExecutionMode::Client; + if self.has_open_runtime_transactions() { + warn!( + "{} storage transactions are left open by the runtime. Those will be rolled back.", + self.transaction_depth() - self.num_client_transactions, + ); + } + while self.has_open_runtime_transactions() { + self.rollback_transaction() + .expect("The loop condition checks that the transaction depth is > 0; qed"); + } + Ok(()) + } + + /// Start a new nested transaction. + /// + /// This allows to either commit or roll back all changes that were made while this + /// transaction was open. Any transaction must be closed by either `commit_transaction` + /// or `rollback_transaction` before this overlay can be converted into storage changes. + /// + /// Changes made without any open transaction are committed immediately. + pub fn start_transaction(&mut self) { + self.dirty_keys.push(Default::default()); + } + + /// Rollback the last transaction started by `start_transaction`. + /// + /// Any changes made during that transaction are discarded. Returns an error if + /// there is no open transaction that can be rolled back. + pub fn rollback_transaction(&mut self) -> Result<(), NoOpenTransaction> { + self.close_transaction(true) + } + + /// Commit the last transaction started by `start_transaction`. + /// + /// Any changes made during that transaction are committed. Returns an error if + /// there is no open transaction that can be committed. + pub fn commit_transaction(&mut self) -> Result<(), NoOpenTransaction> { + self.close_transaction(false) + } + + fn close_transaction(&mut self, rollback: bool) -> Result<(), NoOpenTransaction> { + // runtime is not allowed to close transactions started by the client + if let ExecutionMode::Runtime = self.execution_mode { + if !self.has_open_runtime_transactions() { + return Err(NoOpenTransaction) + } + } + + for key in self.dirty_keys.pop().ok_or(NoOpenTransaction)? { + let overlayed = self.changes.get_mut(&key).expect("\ + A write to an OverlayedValue is recorded in the dirty key set. Before an + OverlayedValue is removed, its containing dirty set is removed. This + function is only called for keys that are in the dirty set. qed\ + "); + + if rollback { + overlayed.pop_transaction(); + + // We need to remove the key as an `OverlayValue` with no transactions + // violates its invariant of always having at least one transaction. + if overlayed.transactions.is_empty() { + self.changes.remove(&key); + } + } else { + let has_predecessor = if let Some(dirty_keys) = self.dirty_keys.last_mut() { + // Not the last tx: Did the previous tx write to this key? + !dirty_keys.insert(key) + } else { + // Last tx: Is there already a value in the committed set? + // Check against one rather than empty because the current tx is still + // in the list as it is popped later in this function. 
+ overlayed.transactions.len() > 1 + }; + + // We only need to merge if there is an pre-existing value. It may be a value from + // the previous transaction or a value committed without any open transaction. + if has_predecessor { + let dropped_tx = overlayed.pop_transaction(); + *overlayed.value_mut() = dropped_tx.value; + overlayed.transaction_extrinsics_mut().extend(dropped_tx.extrinsics); + } + } + } + + Ok(()) + } + + fn has_open_runtime_transactions(&self) -> bool { + self.transaction_depth() > self.num_client_transactions + } +} + +#[cfg(test)] +mod test { + use super::*; + use pretty_assertions::assert_eq; + + type Changes<'a> = Vec<(&'a [u8], (Option<&'a [u8]>, Vec))>; + type Drained<'a> = Vec<(&'a [u8], Option<&'a [u8]>)>; + + fn assert_changes(is: &OverlayedChangeSet, expected: &Changes) { + let is: Changes = is.changes().map(|(k, v)| { + (k.as_ref(), (v.value().map(AsRef::as_ref), v.extrinsics().cloned().collect())) + }).collect(); + assert_eq!(&is, expected); + } + + fn assert_drained_changes(is: OverlayedChangeSet, expected: Changes) { + let is = is.drain_commited().collect::>(); + let expected = expected + .iter() + .map(|(k, v)| (k.to_vec(), v.0.map(From::from))).collect::>(); + assert_eq!(is, expected); + } + + fn assert_drained(is: OverlayedChangeSet, expected: Drained) { + let is = is.drain_commited().collect::>(); + let expected = expected + .iter() + .map(|(k, v)| (k.to_vec(), v.map(From::from))).collect::>(); + assert_eq!(is, expected); + } + + #[test] + fn no_transaction_works() { + let mut changeset = OverlayedChangeSet::default(); + assert_eq!(changeset.transaction_depth(), 0); + + changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(1)); + changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(2)); + changeset.set(b"key0".to_vec(), Some(b"val0-1".to_vec()), Some(9)); + + assert_drained(changeset, vec![ + (b"key0", Some(b"val0-1")), + (b"key1", Some(b"val1")), + ]); + } + + #[test] + fn transaction_works() { + let mut changeset = OverlayedChangeSet::default(); + assert_eq!(changeset.transaction_depth(), 0); + + // no transaction: committed on set + changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(1)); + changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(1)); + changeset.set(b"key0".to_vec(), Some(b"val0-1".to_vec()), Some(10)); + + changeset.start_transaction(); + assert_eq!(changeset.transaction_depth(), 1); + + // we will commit that later + changeset.set(b"key42".to_vec(), Some(b"val42".to_vec()), Some(42)); + changeset.set(b"key99".to_vec(), Some(b"val99".to_vec()), Some(99)); + + changeset.start_transaction(); + assert_eq!(changeset.transaction_depth(), 2); + + // we will roll that back + changeset.set(b"key42".to_vec(), Some(b"val42-rolled".to_vec()), Some(421)); + changeset.set(b"key7".to_vec(), Some(b"val7-rolled".to_vec()), Some(77)); + changeset.set(b"key0".to_vec(), Some(b"val0-rolled".to_vec()), Some(1000)); + changeset.set(b"key5".to_vec(), Some(b"val5-rolled".to_vec()), None); + + // changes contain all changes not only the commmited ones. 
+ let all_changes: Changes = vec![ + (b"key0", (Some(b"val0-rolled"), vec![1, 10, 1000])), + (b"key1", (Some(b"val1"), vec![1])), + (b"key42", (Some(b"val42-rolled"), vec![42, 421])), + (b"key5", (Some(b"val5-rolled"), vec![])), + (b"key7", (Some(b"val7-rolled"), vec![77])), + (b"key99", (Some(b"val99"), vec![99])), + ]; + assert_changes(&changeset, &all_changes); + + // this should be no-op + changeset.start_transaction(); + assert_eq!(changeset.transaction_depth(), 3); + changeset.start_transaction(); + assert_eq!(changeset.transaction_depth(), 4); + changeset.rollback_transaction().unwrap(); + assert_eq!(changeset.transaction_depth(), 3); + changeset.commit_transaction().unwrap(); + assert_eq!(changeset.transaction_depth(), 2); + assert_changes(&changeset, &all_changes); + + // roll back our first transactions that actually contains something + changeset.rollback_transaction().unwrap(); + assert_eq!(changeset.transaction_depth(), 1); + + let rolled_back: Changes = vec![ + (b"key0", (Some(b"val0-1"), vec![1, 10])), + (b"key1", (Some(b"val1"), vec![1])), + (b"key42", (Some(b"val42"), vec![42])), + (b"key99", (Some(b"val99"), vec![99])), + ]; + assert_changes(&changeset, &rolled_back); + + changeset.commit_transaction().unwrap(); + assert_eq!(changeset.transaction_depth(), 0); + assert_changes(&changeset, &rolled_back); + + assert_drained_changes(changeset, rolled_back); + } + + #[test] + fn transaction_commit_then_rollback_works() { + let mut changeset = OverlayedChangeSet::default(); + assert_eq!(changeset.transaction_depth(), 0); + + changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(1)); + changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(1)); + changeset.set(b"key0".to_vec(), Some(b"val0-1".to_vec()), Some(10)); + + changeset.start_transaction(); + assert_eq!(changeset.transaction_depth(), 1); + + changeset.set(b"key42".to_vec(), Some(b"val42".to_vec()), Some(42)); + changeset.set(b"key99".to_vec(), Some(b"val99".to_vec()), Some(99)); + + changeset.start_transaction(); + assert_eq!(changeset.transaction_depth(), 2); + + changeset.set(b"key42".to_vec(), Some(b"val42-rolled".to_vec()), Some(421)); + changeset.set(b"key7".to_vec(), Some(b"val7-rolled".to_vec()), Some(77)); + changeset.set(b"key0".to_vec(), Some(b"val0-rolled".to_vec()), Some(1000)); + changeset.set(b"key5".to_vec(), Some(b"val5-rolled".to_vec()), None); + + let all_changes: Changes = vec![ + (b"key0", (Some(b"val0-rolled"), vec![1, 10, 1000])), + (b"key1", (Some(b"val1"), vec![1])), + (b"key42", (Some(b"val42-rolled"), vec![42, 421])), + (b"key5", (Some(b"val5-rolled"), vec![])), + (b"key7", (Some(b"val7-rolled"), vec![77])), + (b"key99", (Some(b"val99"), vec![99])), + ]; + assert_changes(&changeset, &all_changes); + + // this should be no-op + changeset.start_transaction(); + assert_eq!(changeset.transaction_depth(), 3); + changeset.start_transaction(); + assert_eq!(changeset.transaction_depth(), 4); + changeset.rollback_transaction().unwrap(); + assert_eq!(changeset.transaction_depth(), 3); + changeset.commit_transaction().unwrap(); + assert_eq!(changeset.transaction_depth(), 2); + assert_changes(&changeset, &all_changes); + + changeset.commit_transaction().unwrap(); + assert_eq!(changeset.transaction_depth(), 1); + + assert_changes(&changeset, &all_changes); + + changeset.rollback_transaction().unwrap(); + assert_eq!(changeset.transaction_depth(), 0); + + let rolled_back: Changes = vec![ + (b"key0", (Some(b"val0-1"), vec![1, 10])), + (b"key1", (Some(b"val1"), vec![1])), + ]; + 
assert_changes(&changeset, &rolled_back); + + assert_drained_changes(changeset, rolled_back); + } + + #[test] + fn modify_works() { + let mut changeset = OverlayedChangeSet::default(); + assert_eq!(changeset.transaction_depth(), 0); + let init = || b"valinit".to_vec(); + + // committed set + changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(0)); + changeset.set(b"key1".to_vec(), None, Some(1)); + let val = changeset.modify(b"key3".to_vec(), init, Some(3)); + assert_eq!(val, &Some(b"valinit".to_vec())); + val.as_mut().unwrap().extend_from_slice(b"-modified"); + + changeset.start_transaction(); + assert_eq!(changeset.transaction_depth(), 1); + changeset.start_transaction(); + assert_eq!(changeset.transaction_depth(), 2); + + // non existing value -> init value should be returned + let val = changeset.modify(b"key2".to_vec(), init, Some(2)); + assert_eq!(val, &Some(b"valinit".to_vec())); + val.as_mut().unwrap().extend_from_slice(b"-modified"); + + // existing value should be returned by modify + let val = changeset.modify(b"key0".to_vec(), init, Some(10)); + assert_eq!(val, &Some(b"val0".to_vec())); + val.as_mut().unwrap().extend_from_slice(b"-modified"); + + // should work for deleted keys + let val = changeset.modify(b"key1".to_vec(), init, Some(20)); + assert_eq!(val, &None); + *val = Some(b"deleted-modified".to_vec()); + + let all_changes: Changes = vec![ + (b"key0", (Some(b"val0-modified"), vec![0, 10])), + (b"key1", (Some(b"deleted-modified"), vec![1, 20])), + (b"key2", (Some(b"valinit-modified"), vec![2])), + (b"key3", (Some(b"valinit-modified"), vec![3])), + ]; + assert_changes(&changeset, &all_changes); + changeset.commit_transaction().unwrap(); + assert_eq!(changeset.transaction_depth(), 1); + assert_changes(&changeset, &all_changes); + + changeset.rollback_transaction().unwrap(); + assert_eq!(changeset.transaction_depth(), 0); + let rolled_back: Changes = vec![ + (b"key0", (Some(b"val0"), vec![0])), + (b"key1", (None, vec![1])), + (b"key3", (Some(b"valinit-modified"), vec![3])), + ]; + assert_changes(&changeset, &rolled_back); + assert_drained_changes(changeset, rolled_back); + } + + #[test] + fn clear_works() { + let mut changeset = OverlayedChangeSet::default(); + + changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(1)); + changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(2)); + changeset.set(b"del1".to_vec(), Some(b"delval1".to_vec()), Some(3)); + changeset.set(b"del2".to_vec(), Some(b"delval2".to_vec()), Some(4)); + + changeset.start_transaction(); + + changeset.clear_where(|k, _| k.starts_with(b"del"), Some(5)); + + assert_changes(&changeset, &vec![ + (b"del1", (None, vec![3, 5])), + (b"del2", (None, vec![4, 5])), + (b"key0", (Some(b"val0"), vec![1])), + (b"key1", (Some(b"val1"), vec![2])), + ]); + + changeset.rollback_transaction().unwrap(); + + assert_changes(&changeset, &vec![ + (b"del1", (Some(b"delval1"), vec![3])), + (b"del2", (Some(b"delval2"), vec![4])), + (b"key0", (Some(b"val0"), vec![1])), + (b"key1", (Some(b"val1"), vec![2])), + ]); + } + + #[test] + fn next_change_works() { + let mut changeset = OverlayedChangeSet::default(); + + changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(0)); + changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(1)); + changeset.set(b"key2".to_vec(), Some(b"val2".to_vec()), Some(2)); + + changeset.start_transaction(); + + changeset.set(b"key3".to_vec(), Some(b"val3".to_vec()), Some(3)); + changeset.set(b"key4".to_vec(), Some(b"val4".to_vec()), Some(4)); + changeset.set(b"key11".to_vec(), 
Some(b"val11".to_vec()), Some(11)); + + assert_eq!(changeset.next_change(b"key0").unwrap().0, b"key1"); + assert_eq!(changeset.next_change(b"key0").unwrap().1.value(), Some(&b"val1".to_vec())); + assert_eq!(changeset.next_change(b"key1").unwrap().0, b"key11"); + assert_eq!(changeset.next_change(b"key1").unwrap().1.value(), Some(&b"val11".to_vec())); + assert_eq!(changeset.next_change(b"key11").unwrap().0, b"key2"); + assert_eq!(changeset.next_change(b"key11").unwrap().1.value(), Some(&b"val2".to_vec())); + assert_eq!(changeset.next_change(b"key2").unwrap().0, b"key3"); + assert_eq!(changeset.next_change(b"key2").unwrap().1.value(), Some(&b"val3".to_vec())); + assert_eq!(changeset.next_change(b"key3").unwrap().0, b"key4"); + assert_eq!(changeset.next_change(b"key3").unwrap().1.value(), Some(&b"val4".to_vec())); + assert_eq!(changeset.next_change(b"key4"), None); + + changeset.rollback_transaction().unwrap(); + + assert_eq!(changeset.next_change(b"key0").unwrap().0, b"key1"); + assert_eq!(changeset.next_change(b"key0").unwrap().1.value(), Some(&b"val1".to_vec())); + assert_eq!(changeset.next_change(b"key1").unwrap().0, b"key2"); + assert_eq!(changeset.next_change(b"key1").unwrap().1.value(), Some(&b"val2".to_vec())); + assert_eq!(changeset.next_change(b"key11").unwrap().0, b"key2"); + assert_eq!(changeset.next_change(b"key11").unwrap().1.value(), Some(&b"val2".to_vec())); + assert_eq!(changeset.next_change(b"key2"), None); + assert_eq!(changeset.next_change(b"key3"), None); + assert_eq!(changeset.next_change(b"key4"), None); + + } + + #[test] + fn no_open_tx_commit_errors() { + let mut changeset = OverlayedChangeSet::default(); + assert_eq!(changeset.transaction_depth(), 0); + assert_eq!(changeset.commit_transaction(), Err(NoOpenTransaction)); + } + + #[test] + fn no_open_tx_rollback_errors() { + let mut changeset = OverlayedChangeSet::default(); + assert_eq!(changeset.transaction_depth(), 0); + assert_eq!(changeset.rollback_transaction(), Err(NoOpenTransaction)); + } + + #[test] + fn unbalanced_transactions_errors() { + let mut changeset = OverlayedChangeSet::default(); + changeset.start_transaction(); + changeset.commit_transaction().unwrap(); + assert_eq!(changeset.commit_transaction(), Err(NoOpenTransaction)); + } + + #[test] + #[should_panic] + fn drain_with_open_transaction_panics() { + let mut changeset = OverlayedChangeSet::default(); + changeset.start_transaction(); + let _ = changeset.drain_commited(); + } + + #[test] + fn runtime_cannot_close_client_tx() { + let mut changeset = OverlayedChangeSet::default(); + changeset.start_transaction(); + changeset.enter_runtime().unwrap(); + changeset.start_transaction(); + changeset.commit_transaction().unwrap(); + assert_eq!(changeset.commit_transaction(), Err(NoOpenTransaction)); + assert_eq!(changeset.rollback_transaction(), Err(NoOpenTransaction)); + } + + #[test] + fn exit_runtime_closes_runtime_tx() { + let mut changeset = OverlayedChangeSet::default(); + + changeset.start_transaction(); + + changeset.set(b"key0".to_vec(), Some(b"val0".to_vec()), Some(1)); + + changeset.enter_runtime().unwrap(); + changeset.start_transaction(); + changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(2)); + changeset.exit_runtime().unwrap(); + + changeset.commit_transaction().unwrap(); + assert_eq!(changeset.transaction_depth(), 0); + + assert_drained(changeset, vec![ + (b"key0", Some(b"val0")), + ]); + } + + #[test] + fn enter_exit_runtime_fails_when_already_in_requested_mode() { + let mut changeset = OverlayedChangeSet::default(); + + 
assert_eq!(changeset.exit_runtime(), Err(NotInRuntime)); + assert_eq!(changeset.enter_runtime(), Ok(())); + assert_eq!(changeset.enter_runtime(), Err(AlreadyInRuntime)); + assert_eq!(changeset.exit_runtime(), Ok(())); + assert_eq!(changeset.exit_runtime(), Err(NotInRuntime)); + } +} diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes/mod.rs similarity index 50% rename from primitives/state-machine/src/overlayed_changes.rs rename to primitives/state-machine/src/overlayed_changes/mod.rs index b0259c2b85..9a2b1c4197 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -17,6 +17,8 @@ //! The overlayed changes to state. +mod changeset; + use crate::{ backend::Backend, ChangesTrieTransaction, changes_trie::{ @@ -25,14 +27,16 @@ use crate::{ }, stats::StateMachineStats, }; +use self::changeset::OverlayedChangeSet; -use std::{mem, ops, collections::{HashMap, BTreeMap, BTreeSet}}; +use std::collections::HashMap; use codec::{Decode, Encode}; -use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo, ChildType}; +use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo}; use sp_core::offchain::storage::OffchainOverlayedChanges; - use hash_db::Hasher; +pub use self::changeset::{OverlayedValue, NoOpenTransaction, AlreadyInRuntime, NotInRuntime}; + /// Storage key. pub type StorageKey = Vec; @@ -45,43 +49,21 @@ pub type StorageCollection = Vec<(StorageKey, Option)>; /// In memory arrays of storage values for multiple child tries. pub type ChildStorageCollection = Vec<(StorageKey, StorageCollection)>; -/// The overlayed changes to state to be queried on top of the backend. +/// The set of changes that are overlaid onto the backend. /// -/// A transaction shares all prospective changes within an inner overlay -/// that can be cleared. +/// It allows changes to be modified using nestable transactions. #[derive(Debug, Default, Clone)] pub struct OverlayedChanges { - /// Changes that are not yet committed. - prospective: OverlayedChangeSet, - /// Committed changes. - committed: OverlayedChangeSet, + /// Top level storage changes. + top: OverlayedChangeSet, + /// Child storage changes. The map key is the child storage key without the common prefix. + children: HashMap, /// True if extrinsics stats must be collected. collect_extrinsics: bool, /// Collect statistic on this execution. stats: StateMachineStats, } -/// The storage value, used inside OverlayedChanges. -#[derive(Debug, Default, Clone)] -#[cfg_attr(test, derive(PartialEq))] -pub struct OverlayedValue { - /// Current value. None if value has been deleted. - value: Option, - /// The set of extrinsic indices where the values has been changed. - /// Is filled only if runtime has announced changes trie support. - extrinsics: Option>, -} - -/// Prospective or committed overlayed change set. -#[derive(Debug, Default, Clone)] -#[cfg_attr(test, derive(PartialEq))] -pub struct OverlayedChangeSet { - /// Top level storage changes. - top: BTreeMap, - /// Child storage changes. The map key is the child storage key without the common prefix. - children_default: HashMap, ChildInfo)>, -} - /// A storage changes structure that can be generated by the data collected in [`OverlayedChanges`]. 
/// /// This contains all the changes to the storage and transactions to apply these changes to the @@ -174,45 +156,10 @@ impl Default for StorageChanges } }
-#[cfg(test)] -impl std::iter::FromIterator<(StorageKey, OverlayedValue)> for OverlayedChangeSet { - fn from_iter>(iter: T) -> Self { - Self { - top: iter.into_iter().collect(), - children_default: Default::default(), - } - } -} -
-impl OverlayedValue { - /// The most recent value contained in this overlay. - pub fn value(&self) -> Option<&StorageValue> { - self.value.as_ref() - } -
- /// List of indices of extrinsics which modified the value using this overlay. - pub fn extrinsics(&self) -> Option> { - self.extrinsics.as_ref().map(|v| v.iter()) - } -} -
-impl OverlayedChangeSet { - /// Whether the change set is empty. - pub fn is_empty(&self) -> bool { - self.top.is_empty() && self.children_default.is_empty() - } -
- /// Clear the change set. - pub fn clear(&mut self) { - self.top.clear(); - self.children_default.clear(); - } -} -
 impl OverlayedChanges { - /// Whether the overlayed changes are empty. + /// Whether no changes are contained in the top nor in any of the child changes. pub fn is_empty(&self) -> bool { - self.prospective.is_empty() && self.committed.is_empty() + self.top.is_empty() && self.children.is_empty() }
 /// Ask to collect/not to collect extrinsics indices where key(s) has been changed. @@ -224,326 +171,241 @@ impl OverlayedChanges { /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose /// value has been set. pub fn storage(&self, key: &[u8]) -> Option> {
- self.prospective.top.get(key) - .or_else(|| self.committed.top.get(key)) - .map(|x| { - let size_read = x.value.as_ref().map(|x| x.len() as u64).unwrap_or(0); - self.stats.tally_read_modified(size_read); - x.value.as_ref().map(AsRef::as_ref) - }) - } -
- /// Returns mutable reference to current changed value (prospective). - /// If there is no value in the overlay, the default callback is used to initiate - /// the value. - /// Warning this function register a change, so the mutable reference MUST be modified.
+ self.top.get(key).map(|x| { + let value = x.value(); + let size_read = value.map(|x| x.len() as u64).unwrap_or(0); + self.stats.tally_read_modified(size_read); + value.map(AsRef::as_ref) + }) + } +
+ /// Returns mutable reference to current value. + /// If there is no value in the overlay, the given callback is used to initiate the value. + /// Warning: this function registers a change, so the mutable reference MUST be modified. + /// + /// Can be rolled back or committed when called inside a transaction.
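+ ///
+ /// A minimal sketch of the intended call pattern (illustrative only; it is not
+ /// compiled as a doc test by this patch):
+ ///
+ /// ```ignore
+ /// let mut overlay = OverlayedChanges::default();
+ /// // The closure only runs when the key is not yet present in the overlay.
+ /// overlay.value_mut_or_insert_with(b"key", || b"init".to_vec())
+ /// 	.extend_from_slice(b"-suffix");
+ /// assert_eq!(overlay.storage(b"key"), Some(Some(&b"init-suffix"[..])));
+ /// ```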
#[must_use = "A change was registered, so this value MUST be modified."] pub fn value_mut_or_insert_with( &mut self, key: &[u8], init: impl Fn() -> StorageValue, ) -> &mut StorageValue { - let extrinsic_index = self.extrinsic_index(); - let committed = &self.committed.top; - - let mut entry = self.prospective.top.entry(key.to_vec()) - .or_insert_with(|| { - if let Some(overlay_state) = committed.get(key).cloned() { - overlay_state - } else { - OverlayedValue { value: Some(init()), ..Default::default() } - } - }); - - //if was deleted initialise back with empty vec - if entry.value.is_none() { - entry.value = Some(Default::default()); - } - if let Some(extrinsic) = extrinsic_index { - entry.extrinsics.get_or_insert_with(Default::default) - .insert(extrinsic); - } - entry.value.as_mut().expect("Initialized above; qed") + let value = self.top.modify(key.to_owned(), init, self.extrinsic_index()); + + // if the value was deleted initialise it back with an empty vec + value.get_or_insert_with(StorageValue::default) } /// Returns a double-Option: None if the key is unknown (i.e. and the query should be referred /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose /// value has been set. pub fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { - if let Some(map) = self.prospective.children_default.get(child_info.storage_key()) { - if let Some(val) = map.0.get(key) { - let size_read = val.value.as_ref().map(|x| x.len() as u64).unwrap_or(0); - self.stats.tally_read_modified(size_read); - return Some(val.value.as_ref().map(AsRef::as_ref)); - } - } - - if let Some(map) = self.committed.children_default.get(child_info.storage_key()) { - if let Some(val) = map.0.get(key) { - let size_read = val.value.as_ref().map(|x| x.len() as u64).unwrap_or(0); - self.stats.tally_read_modified(size_read); - return Some(val.value.as_ref().map(AsRef::as_ref)); - } - } - - None + let map = self.children.get(child_info.storage_key())?; + let value = map.0.get(key)?.value(); + let size_read = value.map(|x| x.len() as u64).unwrap_or(0); + self.stats.tally_read_modified(size_read); + Some(value.map(AsRef::as_ref)) } - /// Inserts the given key-value pair into the prospective change set. + /// Set a new value for the specified key. /// - /// `None` can be used to delete a value specified by the given key. + /// Can be rolled back or committed when called inside a transaction. pub(crate) fn set_storage(&mut self, key: StorageKey, val: Option) { let size_write = val.as_ref().map(|x| x.len() as u64).unwrap_or(0); self.stats.tally_write_overlay(size_write); - let extrinsic_index = self.extrinsic_index(); - let entry = self.prospective.top.entry(key).or_default(); - entry.value = val; - - if let Some(extrinsic) = extrinsic_index { - entry.extrinsics.get_or_insert_with(Default::default) - .insert(extrinsic); - } + self.top.set(key, val, self.extrinsic_index()); } - /// Inserts the given key-value pair into the prospective child change set. + /// Set a new value for the specified key and child. /// /// `None` can be used to delete a value specified by the given key. + /// + /// Can be rolled back or committed when called inside a transaction. 
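+ ///
+ /// A sketch of how a child value round-trips through the overlay (illustrative
+ /// only; it assumes `ChildInfo::new_default` for building the child info and
+ /// uses crate-internal setters, so it is not a compiled doc test):
+ ///
+ /// ```ignore
+ /// let child_info = ChildInfo::new_default(b"storage_key");
+ /// let mut overlay = OverlayedChanges::default();
+ /// overlay.set_child_storage(&child_info, b"key".to_vec(), Some(b"val".to_vec()));
+ /// assert_eq!(overlay.child_storage(&child_info, b"key"), Some(Some(&b"val"[..])));
+ /// ```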
pub(crate) fn set_child_storage( &mut self, child_info: &ChildInfo, key: StorageKey, val: Option, ) { + let extrinsic_index = self.extrinsic_index(); let size_write = val.as_ref().map(|x| x.len() as u64).unwrap_or(0); self.stats.tally_write_overlay(size_write); - let extrinsic_index = self.extrinsic_index(); let storage_key = child_info.storage_key().to_vec(); - let map_entry = self.prospective.children_default.entry(storage_key) - .or_insert_with(|| (Default::default(), child_info.to_owned())); - let updatable = map_entry.1.try_update(child_info); + let top = &self.top; + let (changeset, info) = self.children.entry(storage_key).or_insert_with(|| + ( + top.spawn_child(), + child_info.to_owned() + ) + ); + let updatable = info.try_update(child_info); debug_assert!(updatable); - - let entry = map_entry.0.entry(key).or_default(); - entry.value = val; - - if let Some(extrinsic) = extrinsic_index { - entry.extrinsics.get_or_insert_with(Default::default) - .insert(extrinsic); - } + changeset.set(key, val, extrinsic_index); } /// Clear child storage of given storage key. /// - /// NOTE that this doesn't take place immediately but written into the prospective - /// change set, and still can be reverted by [`discard_prospective`]. - /// - /// [`discard_prospective`]: #method.discard_prospective + /// Can be rolled back or committed when called inside a transaction. pub(crate) fn clear_child_storage( &mut self, child_info: &ChildInfo, ) { let extrinsic_index = self.extrinsic_index(); - let storage_key = child_info.storage_key(); - let map_entry = self.prospective.children_default.entry(storage_key.to_vec()) - .or_insert_with(|| (Default::default(), child_info.to_owned())); - let updatable = map_entry.1.try_update(child_info); + let storage_key = child_info.storage_key().to_vec(); + let top = &self.top; + let (changeset, info) = self.children.entry(storage_key).or_insert_with(|| + ( + top.spawn_child(), + child_info.to_owned() + ) + ); + let updatable = info.try_update(child_info); debug_assert!(updatable); - - map_entry.0.values_mut().for_each(|e| { - if let Some(extrinsic) = extrinsic_index { - e.extrinsics.get_or_insert_with(Default::default) - .insert(extrinsic); - } - - e.value = None; - }); - - if let Some((committed_map, _child_info)) = self.committed.children_default.get(storage_key) { - for (key, value) in committed_map.iter() { - if !map_entry.0.contains_key(key) { - map_entry.0.insert(key.clone(), OverlayedValue { - value: None, - extrinsics: extrinsic_index.map(|i| { - let mut e = value.extrinsics.clone() - .unwrap_or_else(|| BTreeSet::default()); - e.insert(i); - e - }), - }); - } - } - } + changeset.clear_where(|_, _| true, extrinsic_index); } /// Removes all key-value pairs which keys share the given prefix. /// - /// NOTE that this doesn't take place immediately but written into the prospective - /// change set, and still can be reverted by [`discard_prospective`]. - /// - /// [`discard_prospective`]: #method.discard_prospective + /// Can be rolled back or committed when called inside a transaction. pub(crate) fn clear_prefix(&mut self, prefix: &[u8]) { - let extrinsic_index = self.extrinsic_index(); - - // Iterate over all prospective and mark all keys that share - // the given prefix as removed (None). 
- for (key, entry) in self.prospective.top.iter_mut() { - if key.starts_with(prefix) { - entry.value = None; -
- if let Some(extrinsic) = extrinsic_index { - entry.extrinsics.get_or_insert_with(Default::default) - .insert(extrinsic); - } - } - } -
- // Then do the same with keys from committed changes. - // NOTE that we are making changes in the prospective change set. - for key in self.committed.top.keys() { - if key.starts_with(prefix) { - let entry = self.prospective.top.entry(key.clone()).or_default(); - entry.value = None; -
- if let Some(extrinsic) = extrinsic_index { - entry.extrinsics.get_or_insert_with(Default::default) - .insert(extrinsic); - } - } - }
+ self.top.clear_where(|key, _| key.starts_with(prefix), self.extrinsic_index()); }
+ /// Removes all key-value pairs which keys share the given prefix. + /// + /// Can be rolled back or committed when called inside a transaction.
 pub(crate) fn clear_child_prefix( &mut self, child_info: &ChildInfo, prefix: &[u8], ) { let extrinsic_index = self.extrinsic_index();
- let storage_key = child_info.storage_key(); - let map_entry = self.prospective.children_default.entry(storage_key.to_vec()) - .or_insert_with(|| (Default::default(), child_info.to_owned())); - let updatable = map_entry.1.try_update(child_info);
+ let storage_key = child_info.storage_key().to_vec(); + let top = &self.top; + let (changeset, info) = self.children.entry(storage_key).or_insert_with(|| + ( + top.spawn_child(), + child_info.to_owned() + ) + ); + let updatable = info.try_update(child_info); debug_assert!(updatable);
+ changeset.clear_where(|key, _| key.starts_with(prefix), extrinsic_index); + } +
+ /// Returns the current nesting depth of the transaction stack. + /// + /// A value of zero means that no transaction is open and changes are committed on write. + pub fn transaction_depth(&self) -> usize { + // The top changeset and all child changesets transact in lockstep and are + // therefore always at the same transaction depth. + self.top.transaction_depth() + } +
+ /// Start a new nested transaction. + /// + /// This allows one to either commit or roll back all changes that were made while this + /// transaction was open. Any transaction must be closed by either `rollback_transaction` or + /// `commit_transaction` before this overlay can be converted into storage changes. + /// + /// Changes made without any open transaction are committed immediately. + pub fn start_transaction(&mut self) { + self.top.start_transaction(); + for (_, (changeset, _)) in self.children.iter_mut() { + changeset.start_transaction(); + } + }
- for (key, entry) in map_entry.0.iter_mut() { - if key.starts_with(prefix) { - entry.value = None;
+ /// Roll back the last transaction started by `start_transaction`. + /// + /// Any changes made during that transaction are discarded. Returns an error if + /// there is no open transaction that can be rolled back. + pub fn rollback_transaction(&mut self) -> Result<(), NoOpenTransaction> { + self.top.rollback_transaction()?; + self.children.retain(|_, (changeset, _)| { + changeset.rollback_transaction() + .expect("Top and children changesets are started in lockstep; qed"); + !changeset.is_empty() + }); + Ok(()) + }
- if let Some(extrinsic) = extrinsic_index { - entry.extrinsics.get_or_insert_with(Default::default) - .insert(extrinsic); - } - }
+ /// Commit the last transaction started by `start_transaction`. + /// + /// Any changes made during that transaction are committed. Returns an error if there + /// is no open transaction that can be committed.
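+ ///
+ /// A sketch of the overall flow (illustrative only; `set_storage` is
+ /// crate-internal, so this is not a compiled doc test):
+ ///
+ /// ```ignore
+ /// let mut overlay = OverlayedChanges::default();
+ /// overlay.set_storage(b"key".to_vec(), Some(b"outer".to_vec()));
+ ///
+ /// overlay.start_transaction();
+ /// overlay.set_storage(b"key".to_vec(), Some(b"inner".to_vec()));
+ /// overlay.rollback_transaction().unwrap();
+ /// assert_eq!(overlay.storage(b"key"), Some(Some(&b"outer"[..])));
+ ///
+ /// overlay.start_transaction();
+ /// overlay.set_storage(b"key".to_vec(), Some(b"inner".to_vec()));
+ /// overlay.commit_transaction().unwrap();
+ /// assert_eq!(overlay.storage(b"key"), Some(Some(&b"inner"[..])));
+ /// ```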
+ pub fn commit_transaction(&mut self) -> Result<(), NoOpenTransaction> { + self.top.commit_transaction()?; + for (_, (changeset, _)) in self.children.iter_mut() { + changeset.commit_transaction() + .expect("Top and children changesets are started in lockstep; qed"); } + Ok(()) + }
- if let Some((child_committed, _child_info)) = self.committed.children_default.get(storage_key) { - // Then do the same with keys from committed changes. - // NOTE that we are making changes in the prospective change set. - for key in child_committed.keys() { - if key.starts_with(prefix) { - let entry = map_entry.0.entry(key.clone()).or_default(); - entry.value = None; -
- if let Some(extrinsic) = extrinsic_index { - entry.extrinsics.get_or_insert_with(Default::default) - .insert(extrinsic); - } - } - }
+ /// Call this before transferring control to the runtime. + /// + /// This protects all existing transactions from being removed by the runtime. + /// Calling this while already inside the runtime will return an error. + pub fn enter_runtime(&mut self) -> Result<(), AlreadyInRuntime> { + self.top.enter_runtime()?; + for (_, (changeset, _)) in self.children.iter_mut() { + changeset.enter_runtime() + .expect("Top and children changesets are entering runtime in lockstep; qed") } + Ok(()) }
- /// Discard prospective changes to state. - pub fn discard_prospective(&mut self) { - self.prospective.clear(); - } -
- /// Commit prospective changes to state. - pub fn commit_prospective(&mut self) { - if self.committed.is_empty() { - mem::swap(&mut self.prospective, &mut self.committed); - } else { - let top_to_commit = mem::replace(&mut self.prospective.top, BTreeMap::new()); - for (key, val) in top_to_commit.into_iter() { - let entry = self.committed.top.entry(key).or_default(); - entry.value = val.value; -
- if let Some(prospective_extrinsics) = val.extrinsics { - entry.extrinsics.get_or_insert_with(Default::default) - .extend(prospective_extrinsics); - } - } - for (storage_key, (map, child_info)) in self.prospective.children_default.drain() { - let child_content = self.committed.children_default.entry(storage_key) - .or_insert_with(|| (Default::default(), child_info)); - // No update to child info at this point (will be needed for deletion). - for (key, val) in map.into_iter() { - let entry = child_content.0.entry(key).or_default(); - entry.value = val.value; -
- if let Some(prospective_extrinsics) = val.extrinsics { - entry.extrinsics.get_or_insert_with(Default::default) - .extend(prospective_extrinsics); - } - } - }
+ /// Call this when control returns from the runtime. + /// + /// This commits all dangling transactions left open by the runtime. + /// Calling this while outside the runtime will return an error. + pub fn exit_runtime(&mut self) -> Result<(), NotInRuntime> { + self.top.exit_runtime()?; + for (_, (changeset, _)) in self.children.iter_mut() { + changeset.exit_runtime() + .expect("Top and children changesets are entering runtime in lockstep; qed"); } + Ok(()) }
- /// Consume `OverlayedChanges` and take committed set. + /// Consume all changes (top + children) and return them. + /// + /// After calling this function no more changes are contained in this changeset. /// /// Panics: - /// Will panic if there are any uncommitted prospective changes.
+ /// Panics if `transaction_depth() > 0` fn drain_committed(&mut self) -> ( impl Iterator)>, impl Iterator)>, ChildInfo))>, ) { - assert!(self.prospective.is_empty()); + use std::mem::take; ( - std::mem::take(&mut self.committed.top) - .into_iter() - .map(|(k, v)| (k, v.value)), - std::mem::take(&mut self.committed.children_default) - .into_iter() - .map(|(sk, (v, ci))| (sk, (v.into_iter().map(|(k, v)| (k, v.value)), ci))),
+ take(&mut self.top).drain_commited(), + take(&mut self.children).into_iter() + .map(|(key, (val, info))| ( + key, + (val.drain_commited(), info) + ) + ), ) }
- /// Get an iterator over all pending and committed child tries in the overlay. - pub fn child_infos(&self) -> impl IntoIterator { - self.committed.children_default.iter() - .chain(self.prospective.children_default.iter()) - .map(|(_, v)| &v.1).collect::>() - } -
- /// Get an iterator over all pending and committed changes. - /// - /// Supplying `None` for `child_info` will only return changes that are in the top - /// trie. Specifying some `child_info` will return only the changes in that - /// child trie. - pub fn changes(&self, child_info: Option<&ChildInfo>) - -> impl Iterator - { - let (committed, prospective) = if let Some(child_info) = child_info { - match child_info.child_type() { - ChildType::ParentKeyId => ( - self.committed.children_default.get(child_info.storage_key()).map(|c| &c.0), - self.prospective.children_default.get(child_info.storage_key()).map(|c| &c.0), - ), - } - } else { - (Some(&self.committed.top), Some(&self.prospective.top)) - }; - committed.into_iter().flatten().chain(prospective.into_iter().flatten())
+ /// Get an iterator over all child changes as seen by the current transaction. + pub fn children(&self) + -> impl Iterator, &ChildInfo)> { + self.children.iter().map(|(_, v)| (v.0.changes(), &v.1)) }
- /// Return a clone of the currently pending changes. - pub fn clone_pending(&self) -> OverlayedChangeSet { - self.prospective.clone() + /// Get an iterator over all top changes as seen by the current transaction. + pub fn changes(&self) -> impl Iterator { + self.top.changes() }
- /// Replace the currently pending changes. - pub fn replace_pending(&mut self, pending: OverlayedChangeSet) { - self.prospective = pending; + /// Get an optional iterator over all child changes stored under the supplied key. + pub fn child_changes(&self, key: &[u8]) + -> Option<(impl Iterator, &ChildInfo)> { + self.children.get(key).map(|(overlay, info)| (overlay.changes(), info)) }
 /// Convert this instance with all changes into a [`StorageChanges`] instance. @@ -607,10 +469,7 @@ impl OverlayedChanges { /// Inserts storage entry responsible for current extrinsic index. #[cfg(test)] pub(crate) fn set_extrinsic_index(&mut self, extrinsic_index: u32) { - self.prospective.top.insert(EXTRINSIC_INDEX.to_vec(), OverlayedValue { - value: Some(extrinsic_index.encode()), - extrinsics: None, - }); + self.top.set(EXTRINSIC_INDEX.to_vec(), Some(extrinsic_index.encode()), None); }
 /// Returns current extrinsic index to use in changes trie construction. @@ -629,7 +488,8 @@ impl OverlayedChanges { } }
- /// Generate the storage root using `backend` and all changes from `prospective` and `committed`. + /// Generate the storage root using `backend` and all changes + /// as seen by the current transaction. /// /// Returns the storage root and caches storage transaction in the given `cache`.
pub fn storage_root>( @@ -639,35 +499,13 @@ impl OverlayedChanges { ) -> H::Out where H::Out: Ord + Encode, { - let child_storage_keys = self.prospective.children_default.keys() - .chain(self.committed.children_default.keys()); - let child_delta_iter = child_storage_keys.map(|storage_key| - ( - self.default_child_info(storage_key) - .expect("child info initialized in either committed or prospective"), - self.committed.children_default.get(storage_key) - .into_iter() - .flat_map(|(map, _)| - map.iter().map(|(k, v)| (&k[..], v.value().map(|v| &v[..]))) - ) - .chain( - self.prospective.children_default.get(storage_key) - .into_iter() - .flat_map(|(map, _)| - map.iter().map(|(k, v)| (&k[..], v.value().map(|v| &v[..]))) - ) - ), - ) - ); + let delta = self.changes().map(|(k, v)| (&k[..], v.value().map(|v| &v[..]))); + let child_delta = self.children() + .map(|(changes, info)| (info, changes.map( + |(k, v)| (&k[..], v.value().map(|v| &v[..])) + ))); - // compute and memoize - let delta = self.committed - .top - .iter() - .map(|(k, v)| (&k[..], v.value().map(|v| &v[..]))) - .chain(self.prospective.top.iter().map(|(k, v)| (&k[..], v.value().map(|v| &v[..])))); - - let (root, transaction) = backend.full_storage_root(delta, child_delta_iter); + let (root, transaction) = backend.full_storage_root(delta, child_delta); cache.transaction = Some(transaction); cache.transaction_storage_root = Some(root); @@ -704,41 +542,10 @@ impl OverlayedChanges { }) } - /// Get child info for a storage key. - /// Take the latest value so prospective first. - pub fn default_child_info(&self, storage_key: &[u8]) -> Option<&ChildInfo> { - if let Some((_, ci)) = self.prospective.children_default.get(storage_key) { - return Some(&ci); - } - if let Some((_, ci)) = self.committed.children_default.get(storage_key) { - return Some(&ci); - } - None - } - /// Returns the next (in lexicographic order) storage key in the overlayed alongside its value. /// If no value is next then `None` is returned. 
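+ ///
+ /// For example (illustrative only, mirroring the unit tests further below;
+ /// `set_storage` is crate-internal, so this is not a compiled doc test):
+ ///
+ /// ```ignore
+ /// let mut overlay = OverlayedChanges::default();
+ /// overlay.set_storage(vec![20], Some(vec![20]));
+ /// overlay.set_storage(vec![30], Some(vec![30]));
+ /// // The smallest changed key strictly greater than [20] is [30].
+ /// let (key, value) = overlay.next_storage_key_change(&[20]).unwrap();
+ /// assert_eq!(key.to_vec(), vec![30]);
+ /// assert_eq!(value.value(), Some(&vec![30]));
+ /// ```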
pub fn next_storage_key_change(&self, key: &[u8]) -> Option<(&[u8], &OverlayedValue)> { - let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); - - let next_prospective_key = self.prospective.top - .range::<[u8], _>(range) - .next() - .map(|(k, v)| (&k[..], v)); - - let next_committed_key = self.committed.top - .range::<[u8], _>(range) - .next() - .map(|(k, v)| (&k[..], v)); - - match (next_committed_key, next_prospective_key) { - // Committed is strictly less than prospective - (Some(committed_key), Some(prospective_key)) if committed_key.0 < prospective_key.0 => - Some(committed_key), - (committed_key, None) => committed_key, - // Prospective key is less or equal to committed or committed doesn't exist - (_, prospective_key) => prospective_key, - } + self.top.next_change(key) } /// Returns the next (in lexicographic order) child storage key in the overlayed alongside its @@ -748,48 +555,32 @@ impl OverlayedChanges { storage_key: &[u8], key: &[u8] ) -> Option<(&[u8], &OverlayedValue)> { - let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); - - let next_prospective_key = self.prospective.children_default.get(storage_key) - .and_then(|(map, _)| map.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v))); - - let next_committed_key = self.committed.children_default.get(storage_key) - .and_then(|(map, _)| map.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v))); - - match (next_committed_key, next_prospective_key) { - // Committed is strictly less than prospective - (Some(committed_key), Some(prospective_key)) if committed_key.0 < prospective_key.0 => - Some(committed_key), - (committed_key, None) => committed_key, - // Prospective key is less or equal to committed or committed doesn't exist - (_, prospective_key) => prospective_key, - } - } -} - -#[cfg(test)] -impl From> for OverlayedValue { - fn from(value: Option) -> OverlayedValue { - OverlayedValue { value, ..Default::default() } + self.children + .get(storage_key) + .and_then(|(overlay, _)| + overlay.next_change(key) + ) } } #[cfg(test)] mod tests { use hex_literal::hex; - use sp_core::{ - Blake2Hasher, traits::Externalities, storage::well_known_keys::EXTRINSIC_INDEX, - }; + use sp_core::{Blake2Hasher, traits::Externalities}; use crate::InMemoryBackend; use crate::ext::Ext; use super::*; + use std::collections::BTreeMap; - fn strip_extrinsic_index(map: &BTreeMap) - -> BTreeMap - { - let mut clone = map.clone(); - clone.remove(&EXTRINSIC_INDEX.to_vec()); - clone + fn assert_extrinsics( + overlay: &OverlayedChangeSet, + key: impl AsRef<[u8]>, + expected: Vec, + ) { + assert_eq!( + overlay.get(key.as_ref()).unwrap().extrinsics().cloned().collect::>(), + expected + ) } #[test] @@ -800,23 +591,28 @@ mod tests { assert!(overlayed.storage(&key).is_none()); + overlayed.start_transaction(); + overlayed.set_storage(key.clone(), Some(vec![1, 2, 3])); assert_eq!(overlayed.storage(&key).unwrap(), Some(&[1, 2, 3][..])); - overlayed.commit_prospective(); + overlayed.commit_transaction().unwrap(); + assert_eq!(overlayed.storage(&key).unwrap(), Some(&[1, 2, 3][..])); + overlayed.start_transaction(); + overlayed.set_storage(key.clone(), Some(vec![])); assert_eq!(overlayed.storage(&key).unwrap(), Some(&[][..])); overlayed.set_storage(key.clone(), None); assert!(overlayed.storage(&key).unwrap().is_none()); - overlayed.discard_prospective(); + overlayed.rollback_transaction().unwrap(); + assert_eq!(overlayed.storage(&key).unwrap(), Some(&[1, 2, 3][..])); overlayed.set_storage(key.clone(), None); - overlayed.commit_prospective(); 
assert!(overlayed.storage(&key).unwrap().is_none()); } @@ -829,18 +625,18 @@ mod tests { (b"doug".to_vec(), b"notadog".to_vec()), ].into_iter().collect(); let backend = InMemoryBackend::::from(initial); - let mut overlay = OverlayedChanges { - committed: vec![ - (b"dog".to_vec(), Some(b"puppy".to_vec()).into()), - (b"dogglesworth".to_vec(), Some(b"catYYY".to_vec()).into()), - (b"doug".to_vec(), Some(vec![]).into()), - ].into_iter().collect(), - prospective: vec![ - (b"dogglesworth".to_vec(), Some(b"cat".to_vec()).into()), - (b"doug".to_vec(), None.into()), - ].into_iter().collect(), - ..Default::default() - }; + let mut overlay = OverlayedChanges::default(); + overlay.set_collect_extrinsics(false); + + overlay.start_transaction(); + overlay.set_storage(b"dog".to_vec(), Some(b"puppy".to_vec())); + overlay.set_storage(b"dogglesworth".to_vec(), Some(b"catYYY".to_vec())); + overlay.set_storage(b"doug".to_vec(), Some(vec![])); + overlay.commit_transaction().unwrap(); + + overlay.start_transaction(); + overlay.set_storage(b"dogglesworth".to_vec(), Some(b"cat".to_vec())); + overlay.set_storage(b"doug".to_vec(), None); let mut offchain_overlay = Default::default(); let mut cache = StorageTransactionCache::default(); @@ -862,6 +658,8 @@ mod tests { let mut overlay = OverlayedChanges::default(); overlay.set_collect_extrinsics(true); + overlay.start_transaction(); + overlay.set_storage(vec![100], Some(vec![101])); overlay.set_extrinsic_index(0); @@ -873,17 +671,11 @@ mod tests { overlay.set_extrinsic_index(2); overlay.set_storage(vec![1], Some(vec![6])); - assert_eq!(strip_extrinsic_index(&overlay.prospective.top), - vec![ - (vec![1], OverlayedValue { value: Some(vec![6]), - extrinsics: Some(vec![0, 2].into_iter().collect()) }), - (vec![3], OverlayedValue { value: Some(vec![4]), - extrinsics: Some(vec![1].into_iter().collect()) }), - (vec![100], OverlayedValue { value: Some(vec![101]), - extrinsics: Some(vec![NO_EXTRINSIC_INDEX].into_iter().collect()) }), - ].into_iter().collect()); + assert_extrinsics(&overlay.top, vec![1], vec![0, 2]); + assert_extrinsics(&overlay.top, vec![3], vec![1]); + assert_extrinsics(&overlay.top, vec![100], vec![NO_EXTRINSIC_INDEX]); - overlay.commit_prospective(); + overlay.start_transaction(); overlay.set_extrinsic_index(3); overlay.set_storage(vec![3], Some(vec![7])); @@ -891,75 +683,53 @@ mod tests { overlay.set_extrinsic_index(4); overlay.set_storage(vec![1], Some(vec![8])); - assert_eq!(strip_extrinsic_index(&overlay.committed.top), - vec![ - (vec![1], OverlayedValue { value: Some(vec![6]), - extrinsics: Some(vec![0, 2].into_iter().collect()) }), - (vec![3], OverlayedValue { value: Some(vec![4]), - extrinsics: Some(vec![1].into_iter().collect()) }), - (vec![100], OverlayedValue { value: Some(vec![101]), - extrinsics: Some(vec![NO_EXTRINSIC_INDEX].into_iter().collect()) }), - ].into_iter().collect()); - - assert_eq!(strip_extrinsic_index(&overlay.prospective.top), - vec![ - (vec![1], OverlayedValue { value: Some(vec![8]), - extrinsics: Some(vec![4].into_iter().collect()) }), - (vec![3], OverlayedValue { value: Some(vec![7]), - extrinsics: Some(vec![3].into_iter().collect()) }), - ].into_iter().collect()); - - overlay.commit_prospective(); - - assert_eq!(strip_extrinsic_index(&overlay.committed.top), - vec![ - (vec![1], OverlayedValue { value: Some(vec![8]), - extrinsics: Some(vec![0, 2, 4].into_iter().collect()) }), - (vec![3], OverlayedValue { value: Some(vec![7]), - extrinsics: Some(vec![1, 3].into_iter().collect()) }), - (vec![100], OverlayedValue { value: 
Some(vec![101]), - extrinsics: Some(vec![NO_EXTRINSIC_INDEX].into_iter().collect()) }), - ].into_iter().collect()); - - assert_eq!(overlay.prospective, - Default::default()); + assert_extrinsics(&overlay.top, vec![1], vec![0, 2, 4]); + assert_extrinsics(&overlay.top, vec![3], vec![1, 3]); + assert_extrinsics(&overlay.top, vec![100], vec![NO_EXTRINSIC_INDEX]); + + overlay.rollback_transaction().unwrap(); + + assert_extrinsics(&overlay.top, vec![1], vec![0, 2]); + assert_extrinsics(&overlay.top, vec![3], vec![1]); + assert_extrinsics(&overlay.top, vec![100], vec![NO_EXTRINSIC_INDEX]); } #[test] fn next_storage_key_change_works() { let mut overlay = OverlayedChanges::default(); + overlay.start_transaction(); overlay.set_storage(vec![20], Some(vec![20])); overlay.set_storage(vec![30], Some(vec![30])); overlay.set_storage(vec![40], Some(vec![40])); - overlay.commit_prospective(); + overlay.commit_transaction().unwrap(); overlay.set_storage(vec![10], Some(vec![10])); overlay.set_storage(vec![30], None); // next_prospective < next_committed let next_to_5 = overlay.next_storage_key_change(&[5]).unwrap(); assert_eq!(next_to_5.0.to_vec(), vec![10]); - assert_eq!(next_to_5.1.value, Some(vec![10])); + assert_eq!(next_to_5.1.value(), Some(&vec![10])); // next_committed < next_prospective let next_to_10 = overlay.next_storage_key_change(&[10]).unwrap(); assert_eq!(next_to_10.0.to_vec(), vec![20]); - assert_eq!(next_to_10.1.value, Some(vec![20])); + assert_eq!(next_to_10.1.value(), Some(&vec![20])); // next_committed == next_prospective let next_to_20 = overlay.next_storage_key_change(&[20]).unwrap(); assert_eq!(next_to_20.0.to_vec(), vec![30]); - assert_eq!(next_to_20.1.value, None); + assert_eq!(next_to_20.1.value(), None); // next_committed, no next_prospective let next_to_30 = overlay.next_storage_key_change(&[30]).unwrap(); assert_eq!(next_to_30.0.to_vec(), vec![40]); - assert_eq!(next_to_30.1.value, Some(vec![40])); + assert_eq!(next_to_30.1.value(), Some(&vec![40])); overlay.set_storage(vec![50], Some(vec![50])); // next_prospective, no next_committed let next_to_40 = overlay.next_storage_key_change(&[40]).unwrap(); assert_eq!(next_to_40.0.to_vec(), vec![50]); - assert_eq!(next_to_40.1.value, Some(vec![50])); + assert_eq!(next_to_40.1.value(), Some(&vec![50])); } #[test] @@ -968,37 +738,38 @@ mod tests { let child_info = &child_info; let child = child_info.storage_key(); let mut overlay = OverlayedChanges::default(); + overlay.start_transaction(); overlay.set_child_storage(child_info, vec![20], Some(vec![20])); overlay.set_child_storage(child_info, vec![30], Some(vec![30])); overlay.set_child_storage(child_info, vec![40], Some(vec![40])); - overlay.commit_prospective(); + overlay.commit_transaction().unwrap(); overlay.set_child_storage(child_info, vec![10], Some(vec![10])); overlay.set_child_storage(child_info, vec![30], None); // next_prospective < next_committed let next_to_5 = overlay.next_child_storage_key_change(child, &[5]).unwrap(); assert_eq!(next_to_5.0.to_vec(), vec![10]); - assert_eq!(next_to_5.1.value, Some(vec![10])); + assert_eq!(next_to_5.1.value(), Some(&vec![10])); // next_committed < next_prospective let next_to_10 = overlay.next_child_storage_key_change(child, &[10]).unwrap(); assert_eq!(next_to_10.0.to_vec(), vec![20]); - assert_eq!(next_to_10.1.value, Some(vec![20])); + assert_eq!(next_to_10.1.value(), Some(&vec![20])); // next_committed == next_prospective let next_to_20 = overlay.next_child_storage_key_change(child, &[20]).unwrap(); assert_eq!(next_to_20.0.to_vec(), 
vec![30]); - assert_eq!(next_to_20.1.value, None); + assert_eq!(next_to_20.1.value(), None); // next_committed, no next_prospective let next_to_30 = overlay.next_child_storage_key_change(child, &[30]).unwrap(); assert_eq!(next_to_30.0.to_vec(), vec![40]); - assert_eq!(next_to_30.1.value, Some(vec![40])); + assert_eq!(next_to_30.1.value(), Some(&vec![40])); overlay.set_child_storage(child_info, vec![50], Some(vec![50])); // next_prospective, no next_committed let next_to_40 = overlay.next_child_storage_key_change(child, &[40]).unwrap(); assert_eq!(next_to_40.0.to_vec(), vec![50]); - assert_eq!(next_to_40.1.value, Some(vec![50])); + assert_eq!(next_to_40.1.value(), Some(&vec![50])); } } diff --git a/primitives/state-machine/src/read_only.rs b/primitives/state-machine/src/read_only.rs index 817282f8e7..2a5d7fda36 100644 --- a/primitives/state-machine/src/read_only.rs +++ b/primitives/state-machine/src/read_only.rs @@ -170,6 +170,18 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< unimplemented!("storage_changes_root is not supported in ReadOnlyExternalities") } + fn storage_start_transaction(&mut self) { + unimplemented!("Transactions are not supported by ReadOnlyExternalities"); + } + + fn storage_rollback_transaction(&mut self) -> Result<(), ()> { + unimplemented!("Transactions are not supported by ReadOnlyExternalities"); + } + + fn storage_commit_transaction(&mut self) -> Result<(), ()> { + unimplemented!("Transactions are not supported by ReadOnlyExternalities"); + } + fn wipe(&mut self) {} fn commit(&mut self) {} diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 90da547983..cccb044f7e 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -153,15 +153,15 @@ impl TestExternalities /// Return a new backend with all pending value. pub fn commit_all(&self) -> InMemoryBackend { - let top: Vec<_> = self.overlay.changes(None) + let top: Vec<_> = self.overlay.changes() .map(|(k, v)| (k.clone(), v.value().cloned())) .collect(); let mut transaction = vec![(None, top)]; - for child_info in self.overlay.child_infos() { + for (child_changes, child_info) in self.overlay.children() { transaction.push(( Some(child_info.clone()), - self.overlay.changes(Some(child_info)) + child_changes .map(|(k, v)| (k.clone(), v.value().cloned())) .collect(), )) -- GitLab From fed834c81ca3577dd5bffe426141b9b363f7abdc Mon Sep 17 00:00:00 2001 From: pscott <30843220+pscott@users.noreply.github.com> Date: Tue, 23 Jun 2020 12:09:47 +0200 Subject: [PATCH 054/144] Optimize offchain worker api by re-using http-client (#6454) * Fix typo in offchain's docs * Use Self keyword in AsyncApi::new() * Move httpclient to be part of OffchainWorkers to optimize block import * Fix compilation errors for tests * Add wrapper struct for HyperClient * Use lazy_static share SharedClient amongst OffchainWorkers. Remove the need to raise the fd limit * Revert "Use lazy_static share SharedClient amongst OffchainWorkers. Remove the need to raise the fd limit" This reverts commit 7af97498a2383b5d7405e27823db8fd97245da41. 
* Add lazy_static for tests --- Cargo.lock | 2 +- client/offchain/Cargo.toml | 2 +- client/offchain/src/api.rs | 11 +++++--- client/offchain/src/api/http.rs | 37 +++++++++++++++++++-------- client/offchain/src/api/http_dummy.rs | 12 ++++++++- client/offchain/src/lib.rs | 7 ++++- 6 files changed, 53 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 930cb554c7..4ffae8d562 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6573,12 +6573,12 @@ version = "2.0.0-rc3" dependencies = [ "bytes 0.5.4", "env_logger 0.7.1", - "fdlimit", "fnv", "futures 0.3.4", "futures-timer 3.0.2", "hyper 0.13.4", "hyper-rustls", + "lazy_static", "log", "num_cpus", "parity-scale-codec", diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index a9cd8c4dea..819d6ac3a5 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -37,12 +37,12 @@ hyper-rustls = "0.20" [dev-dependencies] env_logger = "0.7.0" -fdlimit = "0.1.4" sc-client-db = { version = "0.8.0-rc3", default-features = true, path = "../db/" } sc-transaction-pool = { version = "2.0.0-rc3", path = "../../client/transaction-pool" } sp-transaction-pool = { version = "2.0.0-rc3", path = "../../primitives/transaction-pool" } substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../../test-utils/runtime/client" } tokio = "0.2" +lazy_static = "1.4.0" [features] default = [] diff --git a/client/offchain/src/api.rs b/client/offchain/src/api.rs index a7f4ecbc58..0aa5d4ad78 100644 --- a/client/offchain/src/api.rs +++ b/client/offchain/src/api.rs @@ -31,6 +31,7 @@ use sp_core::offchain::{ OpaqueNetworkState, OpaquePeerId, OpaqueMultiaddr, StorageKind, }; pub use sp_offchain::STORAGE_PREFIX; +pub use http::SharedClient; #[cfg(not(target_os = "unknown"))] mod http; @@ -260,8 +261,9 @@ impl AsyncApi { db: S, network_state: Arc, is_validator: bool, - ) -> (Api, AsyncApi) { - let (http_api, http_worker) = http::http(); + shared_client: SharedClient, + ) -> (Api, Self) { + let (http_api, http_worker) = http::http(shared_client); let api = Api { db, @@ -270,7 +272,7 @@ impl AsyncApi { http: http_api, }; - let async_api = AsyncApi { + let async_api = Self { http: Some(http_worker), }; @@ -308,11 +310,14 @@ mod tests { let _ = env_logger::try_init(); let db = LocalStorage::new_test(); let mock = Arc::new(MockNetworkStateInfo()); + let shared_client = SharedClient::new(); + AsyncApi::new( db, mock, false, + shared_client, ) } diff --git a/client/offchain/src/api/http.rs b/client/offchain/src/api/http.rs index 91a673872f..1f542b7c11 100644 --- a/client/offchain/src/api/http.rs +++ b/client/offchain/src/api/http.rs @@ -33,9 +33,22 @@ use log::error; use sp_core::offchain::{HttpRequestId, Timestamp, HttpRequestStatus, HttpError}; use std::{convert::TryFrom, fmt, io::Read as _, pin::Pin, task::{Context, Poll}}; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; +use std::sync::Arc; +use hyper::{Client as HyperClient, Body, client}; +use hyper_rustls::HttpsConnector; + +/// Wrapper struct used for keeping the hyper_rustls client running. +#[derive(Clone)] +pub struct SharedClient(Arc, Body>>); + +impl SharedClient { + pub fn new() -> Self { + Self(Arc::new(HyperClient::builder().build(HttpsConnector::new()))) + } +} /// Creates a pair of [`HttpApi`] and [`HttpWorker`]. 
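+///
+/// The `SharedClient` is meant to be created once and then cloned into every call,
+/// so that all offchain worker runs re-use the same underlying hyper client. An
+/// illustrative sketch (not part of the original patch):
+///
+/// ```ignore
+/// let shared_client = SharedClient::new();
+/// let (api, worker) = http(shared_client.clone());
+/// ```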
-pub fn http() -> (HttpApi, HttpWorker) { +pub fn http(shared_client: SharedClient) -> (HttpApi, HttpWorker) { let (to_worker, from_api) = tracing_unbounded("mpsc_ocw_to_worker"); let (to_api, from_worker) = tracing_unbounded("mpsc_ocw_to_api"); @@ -51,7 +64,7 @@ pub fn http() -> (HttpApi, HttpWorker) { let engine = HttpWorker { to_api, from_api, - http_client: hyper::Client::builder().build(hyper_rustls::HttpsConnector::new()), + http_client: shared_client.0, requests: Vec::new(), }; @@ -551,7 +564,7 @@ pub struct HttpWorker { /// Used to receive messages from the `HttpApi`. from_api: TracingUnboundedReceiver, /// The engine that runs HTTP requests. - http_client: hyper::Client, hyper::Body>, + http_client: Arc, Body>>, /// HTTP requests that are being worked on by the engine. requests: Vec<(HttpRequestId, HttpWorkerRequest)>, } @@ -685,21 +698,23 @@ impl fmt::Debug for HttpWorkerRequest { mod tests { use core::convert::Infallible; use crate::api::timestamp; - use super::http; + use super::{http, SharedClient}; use sp_core::offchain::{HttpError, HttpRequestId, HttpRequestStatus, Duration}; use futures::future; + use lazy_static::lazy_static; + + // Using lazy_static to avoid spawning lots of different SharedClients, + // as spawning a SharedClient is CPU-intensive and opens lots of fds. + lazy_static! { + static ref SHARED_CLIENT: SharedClient = SharedClient::new(); + } // Returns an `HttpApi` whose worker is ran in the background, and a `SocketAddr` to an HTTP // server that runs in the background as well. macro_rules! build_api_server { () => {{ - // We spawn quite a bit of HTTP servers here due to how async API - // works for offchain workers, so be sure to raise the FD limit - // (particularly useful for macOS where the default soft limit may - // not be enough). - fdlimit::raise_fd_limit(); - - let (api, worker) = http(); + let hyper_client = SHARED_CLIENT.clone(); + let (api, worker) = http(hyper_client.clone()); let (addr_tx, addr_rx) = std::sync::mpsc::channel(); std::thread::spawn(move || { diff --git a/client/offchain/src/api/http_dummy.rs b/client/offchain/src/api/http_dummy.rs index 5ff77a1068..1c83325c93 100644 --- a/client/offchain/src/api/http_dummy.rs +++ b/client/offchain/src/api/http_dummy.rs @@ -19,8 +19,18 @@ use sp_core::offchain::{HttpRequestId, Timestamp, HttpRequestStatus, HttpError}; use std::{future::Future, pin::Pin, task::Context, task::Poll}; +/// Wrapper struct (wrapping nothing in case of http_dummy) used for keeping the hyper_rustls client running. +#[derive(Clone)] +pub struct SharedClient; + +impl SharedClient { + pub fn new() -> Self { + Self + } +} + /// Creates a pair of [`HttpApi`] and [`HttpWorker`]. -pub fn http() -> (HttpApi, HttpWorker) { +pub fn http(_: SharedClient) -> (HttpApi, HttpWorker) { (HttpApi, HttpWorker) } diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index a1ea16a72e..7c90065746 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -19,7 +19,7 @@ //! The offchain workers is a special function of the runtime that //! gets executed after block is imported. During execution //! it's able to asynchronously submit extrinsics that will either -//! be propagated to other nodes added to the next block +//! be propagated to other nodes or added to the next block //! produced by the node as unsigned transactions. //! //! 
Offchain workers can be used for computation-heavy tasks @@ -46,6 +46,7 @@ use sp_runtime::{generic::BlockId, traits::{self, Header}}; use futures::{prelude::*, future::ready}; mod api; +use api::SharedClient; pub use sp_offchain::{OffchainWorkerApi, STORAGE_PREFIX}; @@ -55,16 +56,19 @@ pub struct OffchainWorkers { db: Storage, _block: PhantomData, thread_pool: Mutex, + shared_client: SharedClient, } impl OffchainWorkers { /// Creates new `OffchainWorkers`. pub fn new(client: Arc, db: Storage) -> Self { + let shared_client = SharedClient::new(); Self { client, db, _block: PhantomData, thread_pool: Mutex::new(ThreadPool::new(num_cpus::get())), + shared_client, } } } @@ -120,6 +124,7 @@ impl OffchainWorkers< self.db.clone(), network_state.clone(), is_validator, + self.shared_client.clone(), ); debug!("Spawning offchain workers at {:?}", at); let header = header.clone(); -- GitLab From cf7432a5ebef122ab685e0b9256a15c722661116 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Tue, 23 Jun 2020 12:42:28 +0200 Subject: [PATCH 055/144] Remove lingering runtime upgrades (#6476) * Remove lingering runtime upgrades * remove unused warnings * remove tests --- frame/democracy/src/lib.rs | 17 ------- frame/democracy/src/tests.rs | 1 - frame/democracy/src/tests/migration.rs | 45 ------------------ frame/indices/src/lib.rs | 12 +---- frame/multisig/src/lib.rs | 11 ----- frame/staking/src/lib.rs | 28 ++---------- frame/transaction-payment/src/lib.rs | 63 -------------------------- 7 files changed, 4 insertions(+), 173 deletions(-) delete mode 100644 frame/democracy/src/tests/migration.rs diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index 841281c125..79cc136d45 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -160,7 +160,6 @@ use sp_runtime::{ use codec::{Encode, Decode, Input}; use frame_support::{ decl_module, decl_storage, decl_event, decl_error, ensure, Parameter, - storage::IterableStorageMap, weights::{Weight, DispatchClass}, traits::{ Currency, ReservableCurrency, LockableCurrency, WithdrawReason, LockIdentifier, Get, @@ -602,22 +601,6 @@ decl_module! { fn deposit_event() = default; - fn on_runtime_upgrade() -> Weight { - if let None = StorageVersion::get() { - StorageVersion::put(Releases::V1); - - DepositOf::::translate::< - (BalanceOf, Vec), _ - >(|_, (balance, accounts)| { - Some((accounts, balance)) - }); - - T::MaximumBlockWeight::get() - } else { - T::DbWeight::get().reads(1) - } - } - /// Propose a sensitive action to be taken. /// /// The dispatch origin of this call must be _Signed_ and the sender must diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index c1bab3c021..b92f4bd076 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -42,7 +42,6 @@ mod preimage; mod public_proposals; mod scheduling; mod voting; -mod migration; mod decoders; const AYE: Vote = Vote { aye: true, conviction: Conviction::None }; diff --git a/frame/democracy/src/tests/migration.rs b/frame/democracy/src/tests/migration.rs deleted file mode 100644 index cab8f7f5c9..0000000000 --- a/frame/democracy/src/tests/migration.rs +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! The tests for migration. - -use super::*; -use frame_support::{storage::migration, Hashable, traits::OnRuntimeUpgrade}; -use substrate_test_utils::assert_eq_uvec; - -#[test] -fn migration() { - new_test_ext().execute_with(|| { - for i in 0..3 { - let k = i.twox_64_concat(); - let v: (BalanceOf, Vec) = (i * 1000, vec![i]); - migration::put_storage_value(b"Democracy", b"DepositOf", &k, v); - } - StorageVersion::kill(); - - Democracy::on_runtime_upgrade(); - - assert_eq!(StorageVersion::get(), Some(Releases::V1)); - assert_eq_uvec!( - DepositOf::::iter().collect::>(), - vec![ - (0, (vec![0u64], >::from(0u32))), - (1, (vec![1u64], >::from(1000u32))), - (2, (vec![2u64], >::from(2000u32))), - ] - ); - }) -} diff --git a/frame/indices/src/lib.rs b/frame/indices/src/lib.rs index 048a5b9936..e58112403f 100644 --- a/frame/indices/src/lib.rs +++ b/frame/indices/src/lib.rs @@ -26,7 +26,7 @@ use sp_runtime::traits::{ StaticLookup, Member, LookupError, Zero, Saturating, AtLeast32Bit }; use frame_support::{Parameter, decl_module, decl_error, decl_event, decl_storage, ensure}; -use frame_support::dispatch::{DispatchResult, Weight}; +use frame_support::dispatch::DispatchResult; use frame_support::traits::{Currency, ReservableCurrency, Get, BalanceStatus::Reserved}; use frame_support::weights::constants::WEIGHT_PER_MICROS; use frame_system::{ensure_signed, ensure_root}; @@ -104,16 +104,6 @@ decl_module! { pub struct Module for enum Call where origin: T::Origin, system = frame_system { fn deposit_event() = default; - fn on_runtime_upgrade() -> Weight { - use frame_support::migration::{StorageIterator, put_storage_value}; - for (key, value) in StorageIterator::< - (T::AccountId, BalanceOf) - >::new(b"Indices", b"Accounts").drain() { - put_storage_value(b"Indices", b"Accounts", &key, (value.0, value.1, false)); - } - 1_000_000_000 - } - /// Assign an previously unassigned index. /// /// Payment: `Deposit` is reserved from the sender account. diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs index fc7a6c25b3..bcea34f9b3 100644 --- a/frame/multisig/src/lib.rs +++ b/frame/multisig/src/lib.rs @@ -235,17 +235,6 @@ decl_module! { /// Deposit one of this module's events by using the default implementation. fn deposit_event() = default; - fn on_runtime_upgrade() -> Weight { - // Utility.Multisigs -> Multisig.Multisigs - use frame_support::migration::{StorageIterator, put_storage_value}; - for (key, value) in StorageIterator::< - Multisig, T::AccountId> - >::new(b"Utility", b"Multisigs").drain() { - put_storage_value(b"Multisig", b"Multisigs", &key, value); - } - 1_000_000_000 - } - /// Immediately dispatch a multi-signature call using a single approval from the caller. /// /// The dispatch origin for this call must be _Signed_. diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 63b427a5ab..de61b25483 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -1278,9 +1278,9 @@ decl_module! { /// Number of eras that staked funds must remain bonded for. const BondingDuration: EraIndex = T::BondingDuration::get(); - /// Number of eras that slashes are deferred by, after computation. 
+ /// Number of eras that slashes are deferred by, after computation. /// - /// This should be less than the bonding duration. + /// This should be less than the bonding duration. /// Set to 0 if slashes should be applied immediately, without opportunity for /// intervention. const SlashDeferDuration: EraIndex = T::SlashDeferDuration::get(); @@ -1294,7 +1294,7 @@ decl_module! { /// length of a session will be pointless. const ElectionLookahead: T::BlockNumber = T::ElectionLookahead::get(); - /// Maximum number of balancing iterations to run in the offchain submission. + /// Maximum number of balancing iterations to run in the offchain submission. /// /// If set to 0, balance_solution will not be executed at all. const MaxIterations: u32 = T::MaxIterations::get(); @@ -1312,28 +1312,6 @@ decl_module! { fn deposit_event() = default; - fn on_runtime_upgrade() -> Weight { - #[allow(dead_code)] - mod inner { - pub struct Module(sp_std::marker::PhantomData); - frame_support::decl_storage! { - trait Store for Module as Staking { - pub MigrateEra: Option; - } - } - } - - if let Releases::V3_0_0 = StorageVersion::get() { - StorageVersion::put(Releases::V4_0_0); - inner::MigrateEra::kill(); - - T::DbWeight::get().reads_writes(1, 1) - } else { - T::DbWeight::get().reads(1) - } - } - - /// sets `ElectionStatus` to `Open(now)` where `now` is the block number at which the /// election window has opened, if we are at the last session and less blocks than /// `T::ElectionLookahead` is remaining until the next new session schedule. The offchain diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 31d0cfb20d..4d920f8ec5 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -230,38 +230,6 @@ decl_module! 
{ ).unwrap(), ); } - - fn on_runtime_upgrade() -> Weight { - use frame_support::migration::take_storage_value; - use sp_std::convert::TryInto; - use frame_support::debug::native::error; - - type OldMultiplier = sp_runtime::FixedI128; - type OldInner = ::Inner; - type Inner = ::Inner; - - if let Releases::V1Ancient = StorageVersion::get() { - StorageVersion::put(Releases::V2); - - if let Some(old) = take_storage_value::( - b"TransactionPayment", - b"NextFeeMultiplier", - &[], - ) { - let inner = old.into_inner(); - let new_inner = >::try_into(inner) - .unwrap_or_default(); - let new = Multiplier::from_inner(new_inner); - NextFeeMultiplier::put(new); - T::DbWeight::get().reads_writes(1, 1) - } else { - error!("transaction-payment migration failed."); - T::DbWeight::get().reads(1) - } - } else { - T::DbWeight::get().reads(1) - } - } } } @@ -740,37 +708,6 @@ mod tests { PostDispatchInfo { actual_weight: None, } } - #[test] - fn migration_to_v2_works() { - use sp_runtime::FixedI128; - use frame_support::traits::OnRuntimeUpgrade; - - let with_old_multiplier = |mul: FixedI128, expected: FixedU128| { - ExtBuilder::default().build().execute_with(|| { - frame_support::migration::put_storage_value( - b"TransactionPayment", - b"NextFeeMultiplier", - &[], - mul, - ); - - assert_eq!(StorageVersion::get(), Releases::V1Ancient); - - TransactionPayment::on_runtime_upgrade(); - - assert_eq!(StorageVersion::get(), Releases::V2); - assert_eq!(NextFeeMultiplier::get(), expected); - }) - }; - - with_old_multiplier(FixedI128::saturating_from_integer(-1), FixedU128::zero()); - with_old_multiplier(FixedI128::saturating_from_rational(-1, 2), FixedU128::zero()); - with_old_multiplier( - FixedI128::saturating_from_rational(1, 2), - FixedU128::saturating_from_rational(1, 2), - ); - } - #[test] fn signed_extension_transaction_payment_work() { ExtBuilder::default() -- GitLab From 6221146c42ec20288880aab4fd39941920a3151f Mon Sep 17 00:00:00 2001 From: Cecile Tonglet Date: Tue, 23 Jun 2020 12:47:13 +0200 Subject: [PATCH 056/144] impl Debug for sc_service::Configuration (#6400) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Initial commit Forked at: d735e4d0b5378c227f81a5127a1d4544de112fd8 No parent branch. * Make sc_service::Configuration derive Debug * Replace task_executor fn's input by proper TaskExecutor type (cleaner) * impl From for TaskExecutor * Update client/cli/src/runner.rs * Add some doc, examples and tests * Replace Deref by fn spawn as suggested Co-authored-by: Bastian Köcher --- client/chain-spec/src/lib.rs | 6 +++ client/cli/src/config.rs | 9 ++-- client/cli/src/lib.rs | 7 +-- client/cli/src/runner.rs | 26 +++++----- client/db/src/lib.rs | 2 +- client/service/src/config.rs | 76 ++++++++++++++++++++++++++++-- client/service/src/lib.rs | 4 +- client/service/src/task_manager.rs | 15 +++--- client/service/test/src/lib.rs | 44 ++++++++++------- primitives/database/src/lib.rs | 6 +++ utils/browser/src/lib.rs | 3 +- 11 files changed, 139 insertions(+), 59 deletions(-) diff --git a/client/chain-spec/src/lib.rs b/client/chain-spec/src/lib.rs index 6fb2694261..66bce2b136 100644 --- a/client/chain-spec/src/lib.rs +++ b/client/chain-spec/src/lib.rs @@ -158,3 +158,9 @@ pub trait ChainSpec: BuildStorage + Send { /// This will be used as storage at genesis. 
fn set_storage(&mut self, storage: Storage); } + +impl std::fmt::Debug for dyn ChainSpec { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "ChainSpec(name = {:?}, id = {:?})", self.name(), self.id()) + } +} diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 2c3cfe8419..0ff2d96b4c 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -28,15 +28,12 @@ use names::{Generator, Name}; use sc_client_api::execution_extensions::ExecutionStrategies; use sc_service::config::{ BasePath, Configuration, DatabaseConfig, ExtTransport, KeystoreConfig, NetworkConfiguration, - NodeKeyConfig, OffchainWorkerConfig, PrometheusConfig, PruningMode, Role, RpcMethods, TaskType, - TelemetryEndpoints, TransactionPoolOptions, WasmExecutionMethod, + NodeKeyConfig, OffchainWorkerConfig, PrometheusConfig, PruningMode, Role, RpcMethods, + TaskExecutor, TelemetryEndpoints, TransactionPoolOptions, WasmExecutionMethod, }; use sc_service::{ChainSpec, TracingReceiver}; -use std::future::Future; use std::net::SocketAddr; use std::path::PathBuf; -use std::pin::Pin; -use std::sync::Arc; /// The maximum number of characters for a node name. pub(crate) const NODE_NAME_MAX_LENGTH: usize = 32; @@ -409,7 +406,7 @@ pub trait CliConfiguration: Sized { fn create_configuration( &self, cli: &C, - task_executor: Arc + Send>>, TaskType) + Send + Sync>, + task_executor: TaskExecutor, ) -> Result { let is_dev = self.is_dev()?; let chain_id = self.chain_id(is_dev)?; diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index 1acd5ee604..9623b08bfb 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -37,11 +37,8 @@ use log::info; pub use params::*; use regex::Regex; pub use runner::*; -use sc_service::{ChainSpec, Configuration, TaskType}; -use std::future::Future; +use sc_service::{ChainSpec, Configuration, TaskExecutor}; use std::io::Write; -use std::pin::Pin; -use std::sync::Arc; pub use structopt; use structopt::{ clap::{self, AppSettings}, @@ -199,7 +196,7 @@ pub trait SubstrateCli: Sized { fn create_configuration( &self, command: &T, - task_executor: Arc + Send>>, TaskType) + Send + Sync>, + task_executor: TaskExecutor, ) -> error::Result { command.create_configuration(self, task_executor) } diff --git a/client/cli/src/runner.rs b/client/cli/src/runner.rs index b068af0166..51ea2d2186 100644 --- a/client/cli/src/runner.rs +++ b/client/cli/src/runner.rs @@ -29,7 +29,7 @@ use sc_service::{AbstractService, Configuration, Role, ServiceBuilderCommand, Ta use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use sp_utils::metrics::{TOKIO_THREADS_ALIVE, TOKIO_THREADS_TOTAL}; use sp_version::RuntimeVersion; -use std::{fmt::Debug, marker::PhantomData, str::FromStr, sync::Arc}; +use std::{fmt::Debug, marker::PhantomData, str::FromStr}; #[cfg(target_family = "unix")] async fn main(func: F) -> std::result::Result<(), Box> @@ -119,23 +119,21 @@ impl Runner { let tokio_runtime = build_runtime()?; let runtime_handle = tokio_runtime.handle().clone(); - let task_executor = Arc::new( - move |fut, task_type| { - match task_type { - TaskType::Async => { runtime_handle.spawn(fut); } - TaskType::Blocking => { - runtime_handle.spawn( async move { - // `spawn_blocking` is looking for the current runtime, and as such has to be called - // from within `spawn`. 
- tokio::task::spawn_blocking(move || futures::executor::block_on(fut)) - }); - } + let task_executor = move |fut, task_type| { + match task_type { + TaskType::Async => { runtime_handle.spawn(fut); } + TaskType::Blocking => { + runtime_handle.spawn(async move { + // `spawn_blocking` is looking for the current runtime, and as such has to + // be called from within `spawn`. + tokio::task::spawn_blocking(move || futures::executor::block_on(fut)) + }); } } - ); + }; Ok(Runner { - config: command.create_configuration(cli, task_executor)?, + config: command.create_configuration(cli, task_executor.into())?, tokio_runtime, phantom: PhantomData, }) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 3bae234567..b4f4892a04 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -271,7 +271,7 @@ pub struct DatabaseSettings { } /// Where to find the database.. -#[derive(Clone)] +#[derive(Debug, Clone)] pub enum DatabaseSettingsSrc { /// Load a RocksDB database from a given path. Recommended for most uses. RocksDb { diff --git a/client/service/src/config.rs b/client/service/src/config.rs index b79831d57b..618cd19692 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -34,6 +34,7 @@ use prometheus_endpoint::Registry; use tempfile::TempDir; /// Service configuration. +#[derive(Debug)] pub struct Configuration { /// Implementation name pub impl_name: &'static str, @@ -42,7 +43,7 @@ pub struct Configuration { /// Node role. pub role: Role, /// How to spawn background tasks. Mandatory, otherwise creating a `Service` will error. - pub task_executor: Arc + Send>>, TaskType) + Send + Sync>, + pub task_executor: TaskExecutor, /// Extrinsic pool configuration. pub transaction_pool: TransactionPoolOptions, /// Network configuration. @@ -120,7 +121,7 @@ pub enum TaskType { } /// Configuration of the client keystore. -#[derive(Clone)] +#[derive(Debug, Clone)] pub enum KeystoreConfig { /// Keystore at a path on-disk. Recommended for native nodes. Path { @@ -143,7 +144,7 @@ impl KeystoreConfig { } } /// Configuration of the database of the client. -#[derive(Clone, Default)] +#[derive(Debug, Clone, Default)] pub struct OffchainWorkerConfig { /// If this is allowed. pub enabled: bool, @@ -152,7 +153,7 @@ pub struct OffchainWorkerConfig { } /// Configuration of the Prometheus endpoint. -#[derive(Clone)] +#[derive(Debug, Clone)] pub struct PrometheusConfig { /// Port to use. pub port: SocketAddr, @@ -199,6 +200,7 @@ impl Default for RpcMethods { } /// The base path that is used for everything that needs to be write on disk to run a node. +#[derive(Debug)] pub enum BasePath { /// A temporary directory is used as base path and will be deleted when dropped. #[cfg(not(target_os = "unknown"))] @@ -253,3 +255,69 @@ impl std::convert::From for BasePath { BasePath::new(path) } } + +type TaskExecutorInner = Arc + Send>>, TaskType) + Send + Sync>; + +/// Callable object that execute tasks. +/// +/// This struct can be created easily using `Into`. 
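+///
+/// The closure is also given a `TaskType`. The examples below ignore it for
+/// brevity, but an executor will typically want to run `TaskType::Blocking`
+/// futures on a dedicated blocking pool. A rough sketch, assuming a tokio
+/// `handle` as in the example below:
+///
+/// ```ignore
+/// let task_executor: TaskExecutor = (move |future, task_type| match task_type {
+///     TaskType::Async => { handle.spawn(future); }
+///     TaskType::Blocking => {
+///         // `spawn_blocking` resolves the current runtime, so it has to be
+///         // called from within `spawn`.
+///         handle.spawn(async move {
+///             tokio::task::spawn_blocking(move || futures::executor::block_on(future))
+///         });
+///     }
+/// }).into();
+/// ```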
+/// +/// # Examples +/// +/// ## Using tokio +/// +/// ``` +/// # use sc_service::TaskExecutor; +/// # mod tokio { pub mod runtime { +/// # #[derive(Clone)] +/// # pub struct Runtime; +/// # impl Runtime { +/// # pub fn new() -> Result { Ok(Runtime) } +/// # pub fn handle(&self) -> &Self { &self } +/// # pub fn spawn(&self, _: std::pin::Pin + Send>>) {} +/// # } +/// # } } +/// use tokio::runtime::Runtime; +/// +/// let runtime = Runtime::new().unwrap(); +/// let handle = runtime.handle().clone(); +/// let task_executor: TaskExecutor = (move |future, _task_type| { +/// handle.spawn(future); +/// }).into(); +/// ``` +/// +/// ## Using async-std +/// +/// ``` +/// # use sc_service::TaskExecutor; +/// # mod async_std { pub mod task { +/// # pub fn spawn(_: std::pin::Pin + Send>>) {} +/// # } } +/// let task_executor: TaskExecutor = (|future, _task_type| { +/// async_std::task::spawn(future); +/// }).into(); +/// ``` +#[derive(Clone)] +pub struct TaskExecutor(TaskExecutorInner); + +impl std::fmt::Debug for TaskExecutor { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "TaskExecutor") + } +} + +impl std::convert::From for TaskExecutor +where + F: Fn(Pin + Send>>, TaskType) + Send + Sync + 'static, +{ + fn from(x: F) -> Self { + Self(Arc::new(x)) + } +} + +impl TaskExecutor { + /// Spawns a new asynchronous task. + pub fn spawn(&self, future: Pin + Send>>, task_type: TaskType) { + self.0(future, task_type) + } +} diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index bfd048c759..036c957773 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -65,7 +65,9 @@ pub use self::builder::{ ServiceBuilder, ServiceBuilderCommand, TFullClient, TLightClient, TFullBackend, TLightBackend, TFullCallExecutor, TLightCallExecutor, RpcExtensionBuilder, }; -pub use config::{BasePath, Configuration, DatabaseConfig, PruningMode, Role, RpcMethods, TaskType}; +pub use config::{ + BasePath, Configuration, DatabaseConfig, PruningMode, Role, RpcMethods, TaskExecutor, TaskType, +}; pub use sc_chain_spec::{ ChainSpec, GenericChainSpec, Properties, RuntimeGenesis, Extension as ChainSpecExtension, NoExtension, ChainType, diff --git a/client/service/src/task_manager.rs b/client/service/src/task_manager.rs index 5a400f70df..544d76fc47 100644 --- a/client/service/src/task_manager.rs +++ b/client/service/src/task_manager.rs @@ -13,7 +13,7 @@ //! Substrate service tasks management module. -use std::{panic, pin::Pin, result::Result, sync::Arc}; +use std::{panic, result::Result}; use exit_future::Signal; use log::debug; use futures::{ @@ -29,18 +29,15 @@ use prometheus_endpoint::{ }; use sc_client_api::CloneableSpawn; use sp_utils::mpsc::TracingUnboundedSender; -use crate::config::TaskType; +use crate::config::{TaskExecutor, TaskType}; mod prometheus_future; -/// Type alias for service task executor (usually runtime). -pub type ServiceTaskExecutor = Arc + Send>>, TaskType) + Send + Sync>; - /// An handle for spawning tasks in the service. #[derive(Clone)] pub struct SpawnTaskHandle { on_exit: exit_future::Exit, - executor: ServiceTaskExecutor, + executor: TaskExecutor, metrics: Option, } @@ -113,7 +110,7 @@ impl SpawnTaskHandle { } }; - (self.executor)(Box::pin(future), task_type); + self.executor.spawn(Box::pin(future), task_type); } } @@ -216,7 +213,7 @@ pub struct TaskManager { /// A signal that makes the exit future above resolve, fired on service drop. signal: Option, /// How to spawn background tasks. 
- executor: ServiceTaskExecutor, + executor: TaskExecutor, /// Prometheus metric where to report the polling times. metrics: Option, } @@ -225,7 +222,7 @@ impl TaskManager { /// If a Prometheus registry is passed, it will be used to report statistics about the /// service tasks. pub(super) fn new( - executor: ServiceTaskExecutor, + executor: TaskExecutor, prometheus_registry: Option<&Registry> ) -> Result { let (signal, on_exit) = exit_future::signal(); diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index c440b118d5..441680e20c 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -38,7 +38,7 @@ use sc_service::{ RuntimeGenesis, Role, Error, - TaskType, + TaskExecutor, }; use sp_blockchain::HeaderBackend; use sc_network::{multiaddr, Multiaddr}; @@ -142,7 +142,7 @@ fn node_config, role: Role, - task_executor: Arc + Send>>, TaskType) + Send + Sync>, + task_executor: TaskExecutor, key_seed: Option, base_port: u16, root: &TempDir, @@ -256,17 +256,19 @@ impl TestNet where authorities: impl Iterator Result<(F, U), Error>)> ) { let executor = self.runtime.executor(); + let task_executor: TaskExecutor = { + let executor = executor.clone(); + (move |fut: Pin + Send>>, _| { + executor.spawn(fut.unit_error().compat()); + }).into() + }; for (key, authority) in authorities { - let task_executor = { - let executor = executor.clone(); - Arc::new(move |fut: Pin + Send>>, _| executor.spawn(fut.unit_error().compat())) - }; let node_config = node_config( self.nodes, &self.chain_spec, Role::Authority { sentry_nodes: Vec::new() }, - task_executor, + task_executor.clone(), Some(key), self.base_port, &temp, @@ -282,11 +284,15 @@ impl TestNet where } for full in full { - let task_executor = { - let executor = executor.clone(); - Arc::new(move |fut: Pin + Send>>, _| executor.spawn(fut.unit_error().compat())) - }; - let node_config = node_config(self.nodes, &self.chain_spec, Role::Full, task_executor, None, self.base_port, &temp); + let node_config = node_config( + self.nodes, + &self.chain_spec, + Role::Full, + task_executor.clone(), + None, + self.base_port, + &temp, + ); let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); let (service, user_data) = full(node_config).expect("Error creating test node service"); let service = SyncService::from(service); @@ -298,11 +304,15 @@ impl TestNet where } for light in light { - let task_executor = { - let executor = executor.clone(); - Arc::new(move |fut: Pin + Send>>, _| executor.spawn(fut.unit_error().compat())) - }; - let node_config = node_config(self.nodes, &self.chain_spec, Role::Light, task_executor, None, self.base_port, &temp); + let node_config = node_config( + self.nodes, + &self.chain_spec, + Role::Light, + task_executor.clone(), + None, + self.base_port, + &temp, + ); let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); let service = SyncService::from(light(node_config).expect("Error creating test node service")); diff --git a/primitives/database/src/lib.rs b/primitives/database/src/lib.rs index bc4c11f60a..1fb7b15666 100644 --- a/primitives/database/src/lib.rs +++ b/primitives/database/src/lib.rs @@ -165,6 +165,12 @@ pub trait Database: Send + Sync { } } +impl std::fmt::Debug for dyn Database { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "Database") + } +} + /// Call `f` with the value previously stored against `key` and return the result, or `None` if /// `key` is not currently in the database. 
/// diff --git a/utils/browser/src/lib.rs b/utils/browser/src/lib.rs index e804af6094..badb029bfe 100644 --- a/utils/browser/src/lib.rs +++ b/utils/browser/src/lib.rs @@ -17,7 +17,6 @@ use futures01::sync::mpsc as mpsc01; use log::{debug, info}; -use std::sync::Arc; use sc_network::config::TransportConfig; use sc_service::{ AbstractService, RpcSession, Role, Configuration, @@ -64,7 +63,7 @@ where network, telemetry_endpoints: chain_spec.telemetry_endpoints().clone(), chain_spec: Box::new(chain_spec), - task_executor: Arc::new(move |fut, _| wasm_bindgen_futures::spawn_local(fut)), + task_executor: (|fut, _| wasm_bindgen_futures::spawn_local(fut)).into(), telemetry_external_transport: Some(transport), role: Role::Light, database: { -- GitLab From c771821cae2dcb5a8808a19fae8122c0b9ae8499 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 23 Jun 2020 13:46:16 +0200 Subject: [PATCH 057/144] Fix `sp-api` handling of multiple arguments (#6484) With the switch to `decode_all_with_depth_limit` we silently broken support for functions with multiple arguments. The old generated code tried to decode each parameter separately, which does not play well with `decode_all`. This pr adds a test to ensure that this does not happen again and fixes the bug by decoding everything at once by wrapping it into tuples. --- .../api/proc-macro/src/impl_runtime_apis.rs | 19 ++++++++----------- primitives/api/test/tests/runtime_calls.rs | 11 +++++++++++ test-utils/runtime/src/lib.rs | 16 ++++++++++++++++ 3 files changed, 35 insertions(+), 11 deletions(-) diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 8f9927cadc..4b5c1c4706 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -34,7 +34,7 @@ use syn::{ fold::{self, Fold}, parse_quote, }; -use std::{collections::HashSet, iter}; +use std::collections::HashSet; /// Unique identifier used to make the hidden includes unique for this macro. const HIDDEN_INCLUDES_ID: &str = "IMPL_RUNTIME_APIS"; @@ -71,10 +71,8 @@ fn generate_impl_call( let params = extract_parameter_names_types_and_borrows(signature, AllowSelfRefInParameters::No)?; let c = generate_crate_access(HIDDEN_INCLUDES_ID); - let c_iter = iter::repeat(&c); let fn_name = &signature.ident; - let fn_name_str = iter::repeat(fn_name.to_string()); - let input = iter::repeat(input); + let fn_name_str = fn_name.to_string(); let pnames = params.iter().map(|v| &v.0); let pnames2 = params.iter().map(|v| &v.0); let ptypes = params.iter().map(|v| &v.1); @@ -82,15 +80,14 @@ fn generate_impl_call( Ok( quote!( - #( - let #pnames : #ptypes = match #c_iter::DecodeLimit::decode_all_with_depth_limit( - #c_iter::MAX_EXTRINSIC_DEPTH, - &mut #input + let (#( #pnames ),*) : ( #( #ptypes ),* ) = + match #c::DecodeLimit::decode_all_with_depth_limit( + #c::MAX_EXTRINSIC_DEPTH, + &mut #input, ) { - Ok(input) => input, + Ok(res) => res, Err(e) => panic!("Bad input data provided to {}: {}", #fn_name_str, e.what()), }; - )* #[allow(deprecated)] <#runtime as #impl_trait>::#fn_name(#( #pborrow #pnames2 ),*) @@ -138,7 +135,7 @@ fn generate_impl_calls( /// Generate the dispatch function that is used in native to call into the runtime. 
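// Note on the decoding strategy: `decode_all_with_depth_limit` requires the
// whole input to be consumed by a single call, so decoding one parameter at a
// time breaks as soon as a runtime API function takes more than one argument
// (the first call rejects the trailing bytes belonging to the remaining
// parameters). Decoding the full parameter list as one tuple consumes the
// input exactly once. Illustrative sketch with assumed concrete types:
//
//     // one `decode_all` per parameter -- fails for `fn f(a: u32, b: u32)`
//     let a: u32 = DecodeLimit::decode_all_with_depth_limit(MAX_EXTRINSIC_DEPTH, &mut input)?;
//     let b: u32 = DecodeLimit::decode_all_with_depth_limit(MAX_EXTRINSIC_DEPTH, &mut input)?;
//
//     // decode everything at once as a tuple
//     let (a, b): (u32, u32) = DecodeLimit::decode_all_with_depth_limit(MAX_EXTRINSIC_DEPTH, &mut input)?;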
fn generate_dispatch_function(impls: &[ItemImpl]) -> Result { - let data = Ident::new("data", Span::call_site()); + let data = Ident::new("__sp_api__input_data", Span::call_site()); let c = generate_crate_access(HIDDEN_INCLUDES_ID); let impl_calls = generate_impl_calls(impls, &data)? .into_iter() diff --git a/primitives/api/test/tests/runtime_calls.rs b/primitives/api/test/tests/runtime_calls.rs index 555104446a..6717ab7a3b 100644 --- a/primitives/api/test/tests/runtime_calls.rs +++ b/primitives/api/test/tests/runtime_calls.rs @@ -207,3 +207,14 @@ fn record_proof_works() { &runtime_code, ).expect("Executes block while using the proof backend"); } + +#[test] +fn call_runtime_api_with_multiple_arguments() { + let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); + + let data = vec![1, 2, 4, 5, 6, 7, 8, 8, 10, 12]; + let block_id = BlockId::Number(client.chain_info().best_number); + client.runtime_api() + .test_multiple_arguments(&block_id, data.clone(), data.clone(), data.len() as u32) + .unwrap(); +} diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index eaac618b44..1d376a0940 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -313,6 +313,9 @@ cfg_if! { fn test_ecdsa_crypto() -> (ecdsa::AppSignature, ecdsa::AppPublic); /// Run various tests against storage. fn test_storage(); + /// Test that ensures that we can call a function that takes multiple + /// arguments. + fn test_multiple_arguments(data: Vec, other: Vec, num: u32); } } } else { @@ -359,6 +362,9 @@ cfg_if! { fn test_ecdsa_crypto() -> (ecdsa::AppSignature, ecdsa::AppPublic); /// Run various tests against storage. fn test_storage(); + /// Test that ensures that we can call a function that takes multiple + /// arguments. + fn test_multiple_arguments(data: Vec, other: Vec, num: u32); } } } @@ -641,6 +647,11 @@ cfg_if! { test_read_storage(); test_read_child_storage(); } + + fn test_multiple_arguments(data: Vec, other: Vec, num: u32) { + assert_eq!(&data[..], &other[..]); + assert_eq!(data.len(), num as usize); + } } impl sp_consensus_aura::AuraApi for Runtime { @@ -862,6 +873,11 @@ cfg_if! 
{ test_read_storage(); test_read_child_storage(); } + + fn test_multiple_arguments(data: Vec, other: Vec, num: u32) { + assert_eq!(&data[..], &other[..]); + assert_eq!(data.len(), num as usize); + } } impl sp_consensus_aura::AuraApi for Runtime { -- GitLab From ad7b5ef7f2131eadceb1f89a69381cd359ef1eb9 Mon Sep 17 00:00:00 2001 From: Ashley Date: Tue, 23 Jun 2020 16:50:33 +0200 Subject: [PATCH 058/144] Fix the browser node and ensure it doesn't colour the informant output (#6457) * Fix browser informant * Fix documentation * Add an informant_output_format function to the cli config * Wrap informant output format in an option * Revert batch verifier * Remove wasm-timer from primitives io cargo lock * Drop informant_output_format function * derive debug for output format --- Cargo.lock | 1 + client/cli/src/config.rs | 1 + client/informant/src/lib.rs | 29 ++++++++++-- client/service/src/builder.rs | 46 +------------------ client/service/src/config.rs | 2 + client/service/test/src/lib.rs | 1 + primitives/consensus/common/Cargo.toml | 1 + .../consensus/common/src/import_queue.rs | 2 +- .../consensus/common/src/offline_tracker.rs | 3 +- utils/browser/src/lib.rs | 4 ++ 10 files changed, 40 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4ffae8d562..64524d8c52 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7442,6 +7442,7 @@ dependencies = [ "sp-utils", "sp-version", "substrate-prometheus-endpoint", + "wasm-timer", ] [[package]] diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 0ff2d96b4c..598acd0ab9 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -474,6 +474,7 @@ pub trait CliConfiguration: Sized { announce_block: self.announce_block()?, role, base_path: Some(base_path), + informant_output_format: Default::default(), }) } diff --git a/client/informant/src/lib.rs b/client/informant/src/lib.rs index 6a8acbadc3..d56afcf335 100644 --- a/client/informant/src/lib.rs +++ b/client/informant/src/lib.rs @@ -34,14 +34,37 @@ use parking_lot::Mutex; mod display; /// The format to print telemetry output in. -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct OutputFormat { - /// Enable color output in logs. + /// Enable color output in logs. True by default. pub enable_color: bool, - /// Add a prefix before every log line + /// Defines the informant's prefix for the logs. An empty string by default. + /// + /// By default substrate will show logs without a prefix. Example: + /// + /// ```text + /// 2020-05-28 15:11:06 ✨ Imported #2 (0xc21c…2ca8) + /// 2020-05-28 15:11:07 💤 Idle (0 peers), best: #2 (0xc21c…2ca8), finalized #0 (0x7299…e6df), ⬇ 0 ⬆ 0 + /// ``` + /// + /// But you can define a prefix by setting this string. This will output: + /// + /// ```text + /// 2020-05-28 15:11:06 ✨ [Prefix] Imported #2 (0xc21c…2ca8) + /// 2020-05-28 15:11:07 💤 [Prefix] Idle (0 peers), best: #2 (0xc21c…2ca8), finalized #0 (0x7299…e6df), ⬇ 0 ⬆ 0 + /// ``` pub prefix: String, } +impl Default for OutputFormat { + fn default() -> Self { + Self { + enable_color: true, + prefix: String::new(), + } + } +} + /// Marker trait for a type that implements `TransactionPool` and `MallocSizeOf` on `not(target_os = "unknown")`. 
#[cfg(target_os = "unknown")] pub trait TransactionPoolAndMaybeMallogSizeOf: TransactionPool {} diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index f492c5d494..eebc825b21 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -102,7 +102,6 @@ pub struct ServiceBuilder>>, marker: PhantomData<(TBl, TRtApi)>, block_announce_validator_builder: Option) -> Box + Send> + Send>>, - informant_prefix: String, } /// A utility trait for building an RPC extension given a `DenyUnsafe` instance. @@ -366,7 +365,6 @@ impl ServiceBuilder<(), (), (), (), (), (), (), (), (), (), ()> { rpc_extensions_builder: Box::new(|_| ()), remote_backend: None, block_announce_validator_builder: None, - informant_prefix: Default::default(), marker: PhantomData, }) } @@ -450,7 +448,6 @@ impl ServiceBuilder<(), (), (), (), (), (), (), (), (), (), ()> { rpc_extensions_builder: Box::new(|_| ()), remote_backend: Some(remote_blockchain), block_announce_validator_builder: None, - informant_prefix: Default::default(), marker: PhantomData, }) } @@ -545,7 +542,6 @@ impl rpc_extensions_builder: self.rpc_extensions_builder, remote_backend: self.remote_backend, block_announce_validator_builder: self.block_announce_validator_builder, - informant_prefix: self.informant_prefix, marker: self.marker, }) } @@ -591,7 +587,6 @@ impl rpc_extensions_builder: self.rpc_extensions_builder, remote_backend: self.remote_backend, block_announce_validator_builder: self.block_announce_validator_builder, - informant_prefix: self.informant_prefix, marker: self.marker, }) } @@ -630,7 +625,6 @@ impl rpc_extensions_builder: self.rpc_extensions_builder, remote_backend: self.remote_backend, block_announce_validator_builder: self.block_announce_validator_builder, - informant_prefix: self.informant_prefix, marker: self.marker, }) } @@ -697,7 +691,6 @@ impl rpc_extensions_builder: self.rpc_extensions_builder, remote_backend: self.remote_backend, block_announce_validator_builder: self.block_announce_validator_builder, - informant_prefix: self.informant_prefix, marker: self.marker, }) } @@ -754,7 +747,6 @@ impl rpc_extensions_builder: self.rpc_extensions_builder, remote_backend: self.remote_backend, block_announce_validator_builder: self.block_announce_validator_builder, - informant_prefix: self.informant_prefix, marker: self.marker, }) } @@ -792,7 +784,6 @@ impl rpc_extensions_builder: Box::new(rpc_extensions_builder), remote_backend: self.remote_backend, block_announce_validator_builder: self.block_announce_validator_builder, - informant_prefix: self.informant_prefix, marker: self.marker, }) } @@ -838,43 +829,9 @@ impl rpc_extensions_builder: self.rpc_extensions_builder, remote_backend: self.remote_backend, block_announce_validator_builder: Some(Box::new(block_announce_validator_builder)), - informant_prefix: self.informant_prefix, marker: self.marker, }) } - - /// Defines the informant's prefix for the logs. An empty string by default. - /// - /// By default substrate will show logs without a prefix. Example: - /// - /// ```text - /// 2020-05-28 15:11:06 ✨ Imported #2 (0xc21c…2ca8) - /// 2020-05-28 15:11:07 💤 Idle (0 peers), best: #2 (0xc21c…2ca8), finalized #0 (0x7299…e6df), ⬇ 0 ⬆ 0 - /// ``` - /// - /// But you can define a prefix by using this function. 
Example: - /// - /// ```rust,ignore - /// service.with_informant_prefix("[Prefix] ".to_string()); - /// ``` - /// - /// This will output: - /// - /// ```text - /// 2020-05-28 15:11:06 ✨ [Prefix] Imported #2 (0xc21c…2ca8) - /// 2020-05-28 15:11:07 💤 [Prefix] Idle (0 peers), best: #2 (0xc21c…2ca8), finalized #0 (0x7299…e6df), ⬇ 0 ⬆ 0 - /// ``` - pub fn with_informant_prefix( - self, - informant_prefix: String, - ) -> Result, Error> - where TSc: Clone, TFchr: Clone { - Ok(ServiceBuilder { - informant_prefix: informant_prefix, - ..self - }) - } } /// Implemented on `ServiceBuilder`. Allows running block commands, such as import/export/validate @@ -990,7 +947,6 @@ ServiceBuilder< rpc_extensions_builder, remote_backend, block_announce_validator_builder, - informant_prefix, } = self; sp_session::generate_initial_session_keys( @@ -1142,7 +1098,7 @@ ServiceBuilder< client.clone(), network_status_sinks.clone(), transaction_pool.clone(), - sc_informant::OutputFormat { enable_color: true, prefix: informant_prefix }, + config.informant_output_format, )); Ok(Service { diff --git a/client/service/src/config.rs b/client/service/src/config.rs index 618cd19692..fb4dbc666a 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -109,6 +109,8 @@ pub struct Configuration { pub announce_block: bool, /// Base path of the configuration pub base_path: Option, + /// Configuration of the output format that the informant uses. + pub informant_output_format: sc_informant::OutputFormat, } /// Type for tasks spawned by the executor. diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 441680e20c..4ff89f5319 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -212,6 +212,7 @@ fn node_config, Transaction r => return Ok(r), // Any other successful result means that the block is already imported. } - let started = std::time::Instant::now(); + let started = wasm_timer::Instant::now(); let (mut import_block, maybe_keys) = verifier.verify(block_origin, header, justification, block.body) .map_err(|msg| { if let Some(ref peer) = peer { diff --git a/primitives/consensus/common/src/offline_tracker.rs b/primitives/consensus/common/src/offline_tracker.rs index 9269640ffc..b96498041f 100644 --- a/primitives/consensus/common/src/offline_tracker.rs +++ b/primitives/consensus/common/src/offline_tracker.rs @@ -18,7 +18,8 @@ //! Tracks offline validators. use std::collections::HashMap; -use std::time::{Instant, Duration}; +use std::time::Duration; +use wasm_timer::Instant; // time before we report a validator. 
const REPORT_TIME: Duration = Duration::from_secs(60 * 5); diff --git a/utils/browser/src/lib.rs b/utils/browser/src/lib.rs index badb029bfe..799fe9788c 100644 --- a/utils/browser/src/lib.rs +++ b/utils/browser/src/lib.rs @@ -98,6 +98,10 @@ where max_runtime_instances: 8, announce_block: true, base_path: None, + informant_output_format: sc_informant::OutputFormat { + enable_color: false, + prefix: String::new(), + }, }; Ok(config) -- GitLab From 8baaa18f58515c2687cd66553c63799872d91655 Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Tue, 23 Jun 2020 17:09:01 +0200 Subject: [PATCH 059/144] bound some missing bound for elevated trait (#6487) --- frame/balances/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index e882bdf349..f7ccb86e60 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -862,8 +862,8 @@ impl, I: Instance> frame_system::Trait for ElevatedTrait { type BlockHashCount = T::BlockHashCount; type MaximumBlockWeight = T::MaximumBlockWeight; type DbWeight = T::DbWeight; - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); + type BlockExecutionWeight = T::BlockExecutionWeight; + type ExtrinsicBaseWeight = T::ExtrinsicBaseWeight; type MaximumExtrinsicWeight = T::MaximumBlockWeight; type MaximumBlockLength = T::MaximumBlockLength; type AvailableBlockRatio = T::AvailableBlockRatio; -- GitLab From 034055a092bd376c3b7252b58f1777de70dfad59 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 23 Jun 2020 17:25:19 +0200 Subject: [PATCH 060/144] `pallet-scheduler`: Check that `when` is not in the past (#6480) * `pallet-scheduler`: Check that `when` is not in the past * Break some lines --- frame/scheduler/src/lib.rs | 109 +++++++++++++++++++++++++++++------- frame/support/src/traits.rs | 2 +- 2 files changed, 89 insertions(+), 22 deletions(-) diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index 18b4eef0a8..6b47e62587 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -119,6 +119,8 @@ decl_error! { FailedToSchedule, /// Failed to cancel a scheduled call FailedToCancel, + /// Given target block number is in the past. + TargetBlockNumberInPast, } } @@ -145,7 +147,7 @@ decl_module! { call: Box<::Call>, ) { ensure_root(origin)?; - let _ = Self::do_schedule(when, maybe_periodic, priority, *call); + Self::do_schedule(when, maybe_periodic, priority, *call)?; } /// Cancel an anonymously scheduled task. @@ -294,7 +296,11 @@ impl Module { maybe_periodic: Option>, priority: schedule::Priority, call: ::Call - ) -> TaskAddress { + ) -> Result, DispatchError> { + if when <= frame_system::Module::::block_number() { + return Err(Error::::TargetBlockNumberInPast.into()) + } + // sanitize maybe_periodic let maybe_periodic = maybe_periodic .filter(|p| p.1 > 1 && !p.0.is_zero()) @@ -304,7 +310,8 @@ impl Module { Agenda::::append(when, s); let index = Agenda::::decode_len(when).unwrap_or(1) as u32 - 1; Self::deposit_event(RawEvent::Scheduled(when, index)); - (when, index) + + Ok((when, index)) } fn do_cancel((when, index): TaskAddress) -> Result<(), DispatchError> { @@ -331,6 +338,10 @@ impl Module { return Err(Error::::FailedToSchedule)? 
} + if when <= frame_system::Module::::block_number() { + return Err(Error::::TargetBlockNumberInPast.into()) + } + // sanitize maybe_periodic let maybe_periodic = maybe_periodic .filter(|p| p.1 > 1 && !p.0.is_zero()) @@ -343,6 +354,7 @@ impl Module { let address = (when, index); Lookup::::insert(&id, &address); Self::deposit_event(RawEvent::Scheduled(when, index)); + Ok(address) } @@ -366,7 +378,7 @@ impl schedule::Anon::Call> for Module maybe_periodic: Option>, priority: schedule::Priority, call: ::Call - ) -> Self::Address { + ) -> Result { Self::do_schedule(when, maybe_periodic, priority, call) } @@ -399,8 +411,7 @@ mod tests { use frame_support::{ impl_outer_event, impl_outer_origin, impl_outer_dispatch, parameter_types, assert_ok, - traits::{OnInitialize, OnFinalize, Filter}, - weights::constants::RocksDbWeight, + assert_err, traits::{OnInitialize, OnFinalize, Filter}, weights::constants::RocksDbWeight, }; use sp_core::H256; // The testing primitives are very useful for avoiding having to work with signatures @@ -551,7 +562,7 @@ mod tests { new_test_ext().execute_with(|| { let call = Call::Logger(logger::Call::log(42, 1000)); assert!(!::BaseCallFilter::filter(&call)); - Scheduler::do_schedule(4, None, 127, call); + let _ = Scheduler::do_schedule(4, None, 127, call); run_to_block(3); assert!(logger::log().is_empty()); run_to_block(4); @@ -565,7 +576,7 @@ mod tests { fn periodic_scheduling_works() { new_test_ext().execute_with(|| { // at #4, every 3 blocks, 3 times. - Scheduler::do_schedule(4, Some((3, 3)), 127, Call::Logger(logger::Call::log(42, 1000))); + let _ = Scheduler::do_schedule(4, Some((3, 3)), 127, Call::Logger(logger::Call::log(42, 1000))); run_to_block(3); assert!(logger::log().is_empty()); run_to_block(4); @@ -588,7 +599,7 @@ mod tests { new_test_ext().execute_with(|| { // at #4. 
Scheduler::do_schedule_named(1u32.encode(), 4, None, 127, Call::Logger(logger::Call::log(69, 1000))).unwrap(); - let i = Scheduler::do_schedule(4, None, 127, Call::Logger(logger::Call::log(42, 1000))); + let i = Scheduler::do_schedule(4, None, 127, Call::Logger(logger::Call::log(42, 1000))).unwrap(); run_to_block(3); assert!(logger::log().is_empty()); assert_ok!(Scheduler::do_cancel_named(1u32.encode())); @@ -621,8 +632,8 @@ mod tests { #[test] fn scheduler_respects_weight_limits() { new_test_ext().execute_with(|| { - Scheduler::do_schedule(4, None, 127, Call::Logger(logger::Call::log(42, MaximumSchedulerWeight::get() / 2))); - Scheduler::do_schedule(4, None, 127, Call::Logger(logger::Call::log(69, MaximumSchedulerWeight::get() / 2))); + let _ = Scheduler::do_schedule(4, None, 127, Call::Logger(logger::Call::log(42, MaximumSchedulerWeight::get() / 2))); + let _ = Scheduler::do_schedule(4, None, 127, Call::Logger(logger::Call::log(69, MaximumSchedulerWeight::get() / 2))); // 69 and 42 do not fit together run_to_block(4); assert_eq!(logger::log(), vec![42u32]); @@ -634,8 +645,8 @@ mod tests { #[test] fn scheduler_respects_hard_deadlines_more() { new_test_ext().execute_with(|| { - Scheduler::do_schedule(4, None, 0, Call::Logger(logger::Call::log(42, MaximumSchedulerWeight::get() / 2))); - Scheduler::do_schedule(4, None, 0, Call::Logger(logger::Call::log(69, MaximumSchedulerWeight::get() / 2))); + let _ = Scheduler::do_schedule(4, None, 0, Call::Logger(logger::Call::log(42, MaximumSchedulerWeight::get() / 2))); + let _ = Scheduler::do_schedule(4, None, 0, Call::Logger(logger::Call::log(69, MaximumSchedulerWeight::get() / 2))); // With base weights, 69 and 42 should not fit together, but do because of hard deadlines run_to_block(4); assert_eq!(logger::log(), vec![42u32, 69u32]); @@ -645,8 +656,8 @@ mod tests { #[test] fn scheduler_respects_priority_ordering() { new_test_ext().execute_with(|| { - Scheduler::do_schedule(4, None, 1, Call::Logger(logger::Call::log(42, MaximumSchedulerWeight::get() / 2))); - Scheduler::do_schedule(4, None, 0, Call::Logger(logger::Call::log(69, MaximumSchedulerWeight::get() / 2))); + let _ = Scheduler::do_schedule(4, None, 1, Call::Logger(logger::Call::log(42, MaximumSchedulerWeight::get() / 2))); + let _ = Scheduler::do_schedule(4, None, 0, Call::Logger(logger::Call::log(69, MaximumSchedulerWeight::get() / 2))); run_to_block(4); assert_eq!(logger::log(), vec![69u32, 42u32]); }); @@ -655,9 +666,24 @@ mod tests { #[test] fn scheduler_respects_priority_ordering_with_soft_deadlines() { new_test_ext().execute_with(|| { - Scheduler::do_schedule(4, None, 255, Call::Logger(logger::Call::log(42, MaximumSchedulerWeight::get() / 3))); - Scheduler::do_schedule(4, None, 127, Call::Logger(logger::Call::log(69, MaximumSchedulerWeight::get() / 2))); - Scheduler::do_schedule(4, None, 126, Call::Logger(logger::Call::log(2600, MaximumSchedulerWeight::get() / 2))); + let _ = Scheduler::do_schedule( + 4, + None, + 255, + Call::Logger(logger::Call::log(42, MaximumSchedulerWeight::get() / 3)), + ); + let _ = Scheduler::do_schedule( + 4, + None, + 127, + Call::Logger(logger::Call::log(69, MaximumSchedulerWeight::get() / 2)), + ); + let _ = Scheduler::do_schedule( + 4, + None, + 126, + Call::Logger(logger::Call::log(2600, MaximumSchedulerWeight::get() / 2)), + ); // 2600 does not fit with 69 or 42, but has higher priority, so will go through run_to_block(4); @@ -679,11 +705,27 @@ mod tests { // Named assert_ok!(Scheduler::do_schedule_named(1u32.encode(), 1, None, 255, 
Call::Logger(logger::Call::log(3, MaximumSchedulerWeight::get() / 3)))); // Anon Periodic - Scheduler::do_schedule(1, Some((1000, 3)), 128, Call::Logger(logger::Call::log(42, MaximumSchedulerWeight::get() / 3))); + let _ = Scheduler::do_schedule( + 1, + Some((1000, 3)), + 128, + Call::Logger(logger::Call::log(42, MaximumSchedulerWeight::get() / 3)), + ); // Anon - Scheduler::do_schedule(1, None, 127, Call::Logger(logger::Call::log(69, MaximumSchedulerWeight::get() / 2))); + let _ = Scheduler::do_schedule( + 1, + None, + 127, + Call::Logger(logger::Call::log(69, MaximumSchedulerWeight::get() / 2)), + ); // Named Periodic - assert_ok!(Scheduler::do_schedule_named(2u32.encode(), 1, Some((1000, 3)), 126, Call::Logger(logger::Call::log(2600, MaximumSchedulerWeight::get() / 2)))); + assert_ok!(Scheduler::do_schedule_named( + 2u32.encode(), + 1, + Some((1000, 3)), + 126, + Call::Logger(logger::Call::log(2600, MaximumSchedulerWeight::get() / 2)), + )); // Will include the named periodic only let actual_weight = Scheduler::on_initialize(1); @@ -727,4 +769,29 @@ mod tests { assert!(logger::log().is_empty()); }); } + + #[test] + fn fails_to_schedule_task_in_the_past() { + new_test_ext().execute_with(|| { + run_to_block(3); + + let call = Box::new(Call::Logger(logger::Call::log(69, 1000))); + let call2 = Box::new(Call::Logger(logger::Call::log(42, 1000))); + + assert_err!( + Scheduler::schedule_named(Origin::root(), 1u32.encode(), 2, None, 127, call), + Error::::TargetBlockNumberInPast, + ); + + assert_err!( + Scheduler::schedule(Origin::root(), 2, None, 127, call2.clone()), + Error::::TargetBlockNumberInPast, + ); + + assert_err!( + Scheduler::schedule(Origin::root(), 3, None, 127, call2), + Error::::TargetBlockNumberInPast, + ); + }); + } } diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 9a2dbf2b29..625f216b1b 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -1501,7 +1501,7 @@ pub mod schedule { maybe_periodic: Option>, priority: Priority, call: Call - ) -> Self::Address; + ) -> Result; /// Cancel a scheduled task. If periodic, then it will cancel all further instances of that, /// also. -- GitLab From 4c03656ac128b22ae813617727386541a1c10be8 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 23 Jun 2020 17:25:42 +0200 Subject: [PATCH 061/144] client/network/service: Add primary dimension to connection metrics (#6472) * client/network/service: Add primary dimension to connection metrics Two nodes can be interconnected via one or more connections. The first of those connections is called the primary connection. This commit adds another dimension to the `sub_libp2p_connections_{closed,opened}_total` metrics to differentiate primary and non-primary connections being opened / closed. By intuition more than one connection between two nodes is rare. Tracking the fact whether a connection is primary or not will help prove or disprove this intuition. * .maintain/monitoring: Ensure to sum over all connections_closed variants * client/network/service: Rename is_primary to is_first * client/network/service: Split by metric name with two additional metrics * Revert ".maintain/monitoring: Ensure to sum over all connections_closed variants" This reverts commit 2d2f93e414440b9fc9e8f7fae6fe48bd95af6b8f. 
* client/network/service: Remove labels from distinct metrics --- client/network/src/service.rs | 58 ++++++++++++++++++++++------------- 1 file changed, 36 insertions(+), 22 deletions(-) diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 93560a6c0b..8cce3367f7 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -854,6 +854,8 @@ struct Metrics { // This list is ordered alphabetically connections_closed_total: CounterVec, connections_opened_total: CounterVec, + distinct_peers_connections_closed_total: Counter, + distinct_peers_connections_opened_total: Counter, import_queue_blocks_submitted: Counter, import_queue_finality_proofs_submitted: Counter, import_queue_justifications_submitted: Counter, @@ -889,17 +891,25 @@ impl Metrics { connections_closed_total: register(CounterVec::new( Opts::new( "sub_libp2p_connections_closed_total", - "Total number of connections closed, by reason and direction" + "Total number of connections closed, by direction and reason" ), &["direction", "reason"] )?, registry)?, connections_opened_total: register(CounterVec::new( Opts::new( "sub_libp2p_connections_opened_total", - "Total number of connections opened" + "Total number of connections opened by direction" ), &["direction"] )?, registry)?, + distinct_peers_connections_closed_total: register(Counter::new( + "sub_libp2p_distinct_peers_connections_closed_total", + "Total number of connections closed with distinct peers" + )?, registry)?, + distinct_peers_connections_opened_total: register(Counter::new( + "sub_libp2p_distinct_peers_connections_opened_total", + "Total number of connections opened with distinct peers" + )?, registry)?, import_queue_blocks_submitted: register(Counter::new( "import_queue_blocks_submitted", "Number of blocks submitted to the import queue.", @@ -1214,40 +1224,44 @@ impl Future for NetworkWorker { } this.event_streams.send(ev); }, - Poll::Ready(SwarmEvent::ConnectionEstablished { peer_id, endpoint, .. }) => { + Poll::Ready(SwarmEvent::ConnectionEstablished { peer_id, endpoint, num_established }) => { trace!(target: "sub-libp2p", "Libp2p => Connected({:?})", peer_id); + if let Some(metrics) = this.metrics.as_ref() { - match endpoint { - ConnectedPoint::Dialer { .. } => - metrics.connections_opened_total.with_label_values(&["out"]).inc(), - ConnectedPoint::Listener { .. } => - metrics.connections_opened_total.with_label_values(&["in"]).inc(), + let direction = match endpoint { + ConnectedPoint::Dialer { .. } => "out", + ConnectedPoint::Listener { .. } => "in", + }; + metrics.connections_opened_total.with_label_values(&[direction]).inc(); + + if num_established.get() == 1 { + metrics.distinct_peers_connections_opened_total.inc(); } } }, - Poll::Ready(SwarmEvent::ConnectionClosed { peer_id, cause, endpoint, .. }) => { + Poll::Ready(SwarmEvent::ConnectionClosed { peer_id, cause, endpoint, num_established }) => { trace!(target: "sub-libp2p", "Libp2p => Disconnected({:?}, {:?})", peer_id, cause); if let Some(metrics) = this.metrics.as_ref() { - let dir = match endpoint { + let direction = match endpoint { ConnectedPoint::Dialer { .. } => "out", ConnectedPoint::Listener { .. 
} => "in", }; - - match cause { - ConnectionError::IO(_) => - metrics.connections_closed_total.with_label_values(&[dir, "transport-error"]).inc(), + let reason = match cause { + ConnectionError::IO(_) => "transport-error", ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( EitherError::A(EitherError::A(EitherError::B( - EitherError::A(PingFailure::Timeout)))))))) => - metrics.connections_closed_total.with_label_values(&[dir, "ping-timeout"]).inc(), + EitherError::A(PingFailure::Timeout)))))))) => "ping-timeout", ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( EitherError::A(EitherError::A(EitherError::A( - EitherError::B(LegacyConnectionKillError)))))))) => - metrics.connections_closed_total.with_label_values(&[dir, "force-closed"]).inc(), - ConnectionError::Handler(NodeHandlerWrapperError::Handler(_)) => - metrics.connections_closed_total.with_label_values(&[dir, "protocol-error"]).inc(), - ConnectionError::Handler(NodeHandlerWrapperError::KeepAliveTimeout) => - metrics.connections_closed_total.with_label_values(&[dir, "keep-alive-timeout"]).inc(), + EitherError::B(LegacyConnectionKillError)))))))) => "force-closed", + ConnectionError::Handler(NodeHandlerWrapperError::Handler(_)) => "protocol-error", + ConnectionError::Handler(NodeHandlerWrapperError::KeepAliveTimeout) => "keep-alive-timeout", + }; + metrics.connections_closed_total.with_label_values(&[direction, reason]).inc(); + + // `num_established` represents the number of *remaining* connections. + if num_established == 0 { + metrics.distinct_peers_connections_closed_total.inc(); } } }, -- GitLab From d59281fa594df991cf94c4ac32a41de6eea26549 Mon Sep 17 00:00:00 2001 From: Cecile Tonglet Date: Tue, 23 Jun 2020 17:26:00 +0200 Subject: [PATCH 062/144] Ensure the listen addresses are consistent with the transport (#6436) * Initial commit Forked at: 0c42cedaac0b1bf3a608031ee3e494b51bfaa0fe No parent branch. * Ensure the listen addresses are consistent with the transport * Update client/network/src/error.rs * Update client/network/src/service.rs * Better implementation * Fix bad previous impl * add boot_nodes * reserved nodes * test boot nodes * reserved nodes tests * add public_addresses and make specific error type * Update client/network/src/error.rs Co-authored-by: Pierre Krieger Co-authored-by: Pierre Krieger --- client/network/src/error.rs | 15 +++- client/network/src/service.rs | 55 +++++++++++++ client/network/src/service/tests.rs | 118 ++++++++++++++++++++++++++++ 3 files changed, 187 insertions(+), 1 deletion(-) diff --git a/client/network/src/error.rs b/client/network/src/error.rs index fed7a331da..b87e495983 100644 --- a/client/network/src/error.rs +++ b/client/network/src/error.rs @@ -18,6 +18,7 @@ //! Substrate network possible errors. +use crate::config::TransportConfig; use libp2p::{PeerId, Multiaddr}; use std::fmt; @@ -48,7 +49,18 @@ pub enum Error { second_id: PeerId, }, /// Prometheus metrics error. - Prometheus(prometheus_endpoint::PrometheusError) + Prometheus(prometheus_endpoint::PrometheusError), + /// The network addresses are invalid because they don't match the transport. + #[display( + fmt = "The following addresses are invalid because they don't match the transport: {:?}", + addresses, + )] + AddressesForAnotherTransport { + /// Transport used. + transport: TransportConfig, + /// The invalid addresses. + addresses: Vec, + }, } // Make `Debug` use the `Display` implementation. 
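// For illustration (multiaddrs as used in the tests below): with
// `TransportConfig::MemoryOnly` every configured address must use the memory
// transport, while the default TCP-based transport rejects memory addresses:
//
//     // accepted with `TransportConfig::MemoryOnly`
//     let memory_addr = config::build_multiaddr![Memory(rand::random::<u64>())];
//     // rejected with `TransportConfig::MemoryOnly` -> Error::AddressesForAnotherTransport
//     let tcp_addr = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)];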
@@ -65,6 +77,7 @@ impl std::error::Error for Error { Error::Client(ref err) => Some(err), Error::DuplicateBootnode { .. } => None, Error::Prometheus(ref err) => Some(err), + Error::AddressesForAnotherTransport { .. } => None, } } } diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 8cce3367f7..2ef6b7bc21 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -107,6 +107,24 @@ impl NetworkWorker { /// for the network processing to advance. From it, you can extract a `NetworkService` using /// `worker.service()`. The `NetworkService` can be shared through the codebase. pub fn new(params: Params) -> Result, Error> { + // Ensure the listen addresses are consistent with the transport. + ensure_addresses_consistent_with_transport( + params.network_config.listen_addresses.iter(), + ¶ms.network_config.transport, + )?; + ensure_addresses_consistent_with_transport( + params.network_config.boot_nodes.iter().map(|x| &x.multiaddr), + ¶ms.network_config.transport, + )?; + ensure_addresses_consistent_with_transport( + params.network_config.reserved_nodes.iter().map(|x| &x.multiaddr), + ¶ms.network_config.transport, + )?; + ensure_addresses_consistent_with_transport( + params.network_config.public_addresses.iter(), + ¶ms.network_config.transport, + )?; + let (to_worker, from_worker) = tracing_unbounded("mpsc_network_worker"); if let Some(path) = params.network_config.net_config_path { @@ -1469,3 +1487,40 @@ impl<'a, B: BlockT, H: ExHashT> Link for NetworkLink<'a, B, H> { } } } + +fn ensure_addresses_consistent_with_transport<'a>( + addresses: impl Iterator, + transport: &TransportConfig, +) -> Result<(), Error> { + if matches!(transport, TransportConfig::MemoryOnly) { + let addresses: Vec<_> = addresses + .filter(|x| x.iter() + .any(|y| !matches!(y, libp2p::core::multiaddr::Protocol::Memory(_))) + ) + .cloned() + .collect(); + + if !addresses.is_empty() { + return Err(Error::AddressesForAnotherTransport { + transport: transport.clone(), + addresses, + }); + } + } else { + let addresses: Vec<_> = addresses + .filter(|x| x.iter() + .any(|y| matches!(y, libp2p::core::multiaddr::Protocol::Memory(_))) + ) + .cloned() + .collect(); + + if !addresses.is_empty() { + return Err(Error::AddressesForAnotherTransport { + transport: transport.clone(), + addresses, + }); + } + } + + Ok(()) +} diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index b2a91af5bd..c027c3be73 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -18,6 +18,7 @@ use crate::{config, Event, NetworkService, NetworkWorker}; +use libp2p::PeerId; use futures::prelude::*; use sp_runtime::traits::{Block as BlockT, Header as _}; use std::{sync::Arc, time::Duration}; @@ -138,6 +139,7 @@ fn build_nodes_one_proto() let (node2, events_stream2) = build_test_full_node(config::NetworkConfiguration { notifications_protocols: vec![(ENGINE_ID, From::from(&b"/foo"[..]))], + listen_addresses: vec![], reserved_nodes: vec![config::MultiaddrWithPeerId { multiaddr: listen_addr, peer_id: node1.local_peer_id().clone(), @@ -342,3 +344,119 @@ fn lots_of_incoming_peers_works() { future::join_all(background_tasks_to_wait).await }); } + +#[test] +#[should_panic(expected = "don't match the transport")] +fn ensure_listen_addresses_consistent_with_transport_memory() { + let listen_addr = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)]; + + let _ = build_test_full_node(config::NetworkConfiguration { + listen_addresses: 
vec![listen_addr.clone()], + transport: config::TransportConfig::MemoryOnly, + .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + }); +} + +#[test] +#[should_panic(expected = "don't match the transport")] +fn ensure_listen_addresses_consistent_with_transport_not_memory() { + let listen_addr = config::build_multiaddr![Memory(rand::random::())]; + + let _ = build_test_full_node(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + }); +} + +#[test] +#[should_panic(expected = "don't match the transport")] +fn ensure_boot_node_addresses_consistent_with_transport_memory() { + let listen_addr = config::build_multiaddr![Memory(rand::random::())]; + let boot_node = config::MultiaddrWithPeerId { + multiaddr: config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)], + peer_id: PeerId::random(), + }; + + let _ = build_test_full_node(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + transport: config::TransportConfig::MemoryOnly, + boot_nodes: vec![boot_node], + .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + }); +} + +#[test] +#[should_panic(expected = "don't match the transport")] +fn ensure_boot_node_addresses_consistent_with_transport_not_memory() { + let listen_addr = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)]; + let boot_node = config::MultiaddrWithPeerId { + multiaddr: config::build_multiaddr![Memory(rand::random::())], + peer_id: PeerId::random(), + }; + + let _ = build_test_full_node(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + boot_nodes: vec![boot_node], + .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + }); +} + +#[test] +#[should_panic(expected = "don't match the transport")] +fn ensure_reserved_node_addresses_consistent_with_transport_memory() { + let listen_addr = config::build_multiaddr![Memory(rand::random::())]; + let reserved_node = config::MultiaddrWithPeerId { + multiaddr: config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)], + peer_id: PeerId::random(), + }; + + let _ = build_test_full_node(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + transport: config::TransportConfig::MemoryOnly, + reserved_nodes: vec![reserved_node], + .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + }); +} + +#[test] +#[should_panic(expected = "don't match the transport")] +fn ensure_reserved_node_addresses_consistent_with_transport_not_memory() { + let listen_addr = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)]; + let reserved_node = config::MultiaddrWithPeerId { + multiaddr: config::build_multiaddr![Memory(rand::random::())], + peer_id: PeerId::random(), + }; + + let _ = build_test_full_node(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + reserved_nodes: vec![reserved_node], + .. 
config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + }); +} + +#[test] +#[should_panic(expected = "don't match the transport")] +fn ensure_public_addresses_consistent_with_transport_memory() { + let listen_addr = config::build_multiaddr![Memory(rand::random::())]; + let public_address = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)]; + + let _ = build_test_full_node(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + transport: config::TransportConfig::MemoryOnly, + public_addresses: vec![public_address], + .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + }); +} + +#[test] +#[should_panic(expected = "don't match the transport")] +fn ensure_public_addresses_consistent_with_transport_not_memory() { + let listen_addr = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)]; + let public_address = config::build_multiaddr![Memory(rand::random::())]; + + let _ = build_test_full_node(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + public_addresses: vec![public_address], + .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + }); +} -- GitLab From 9a9b248bfd489b8d6d4ee25eac0031630723c876 Mon Sep 17 00:00:00 2001 From: Sergei Shulepov Date: Tue, 23 Jun 2020 19:06:07 +0200 Subject: [PATCH 063/144] pallet-contracts: migrate to nested storage transaction mechanism (#6382) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add a simple direct storage access module * WIP * Completely migrate to the transactional system. * Format * Fix wasm compilation * Get rid of account_db module * Make deposit event eager * Make restore_to eager * It almost compiles. * Make it compile. * Make the tests compile * Get rid of account_db * Drop the result. * Backport the book keeping. * Fix all remaining tests. * Make it compile for std * Remove a stale TODO marker * Remove another stale TODO * Add proof for `terminate` * Remove a stale comment. * Make restoration diverging. * Remove redudnant trait: `ComputeDispatchFee` * Update frame/contracts/src/exec.rs Co-authored-by: Alexander Theißen * Introduce proper errors into the storage module. * Adds comments for contract storage module. * Inline `ExecutionContext::terminate`. * Restore_to should not let sacrifice itself if the contract present on the stack. * Inline `transfer` function * Update doc - add "if succeeded" * Adapt to TransactionOutcome changes * Updates the docs for `ext_restore_to` * Add a proper assert. 
* Update frame/contracts/src/wasm/runtime.rs Co-authored-by: Alexander Theißen Co-authored-by: Alexander Theißen Co-authored-by: Alexander Theißen --- Cargo.lock | 1 + frame/contracts/Cargo.toml | 1 + frame/contracts/fixtures/restoration.wat | 6 +- frame/contracts/src/account_db.rs | 450 --------------------- frame/contracts/src/exec.rs | 495 +++++++++++------------ frame/contracts/src/lib.rs | 148 +------ frame/contracts/src/rent.rs | 91 ++++- frame/contracts/src/storage.rs | 195 +++++++++ frame/contracts/src/tests.rs | 150 ++++--- frame/contracts/src/wasm/mod.rs | 20 +- frame/contracts/src/wasm/runtime.rs | 40 +- 11 files changed, 661 insertions(+), 936 deletions(-) delete mode 100644 frame/contracts/src/account_db.rs create mode 100644 frame/contracts/src/storage.rs diff --git a/Cargo.lock b/Cargo.lock index 64524d8c52..08e5102d34 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4009,6 +4009,7 @@ dependencies = [ "pallet-transaction-payment", "parity-scale-codec", "parity-wasm 0.41.0", + "pretty_assertions", "pwasm-utils", "serde", "sp-core", diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index b2ba8d014a..57c278a3fb 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -31,6 +31,7 @@ pallet-transaction-payment = { version = "2.0.0-rc3", default-features = false, wabt = "0.9.2" assert_matches = "1.3.0" hex-literal = "0.2.1" +pretty_assertions = "0.6.1" pallet-balances = { version = "2.0.0-rc3", path = "../balances" } pallet-timestamp = { version = "2.0.0-rc3", path = "../timestamp" } pallet-randomness-collective-flip = { version = "2.0.0-rc3", path = "../randomness-collective-flip" } diff --git a/frame/contracts/fixtures/restoration.wat b/frame/contracts/fixtures/restoration.wat index 4e11f97d5a..225fdde817 100644 --- a/frame/contracts/fixtures/restoration.wat +++ b/frame/contracts/fixtures/restoration.wat @@ -1,6 +1,10 @@ (module (import "env" "ext_set_storage" (func $ext_set_storage (param i32 i32 i32))) - (import "env" "ext_restore_to" (func $ext_restore_to (param i32 i32 i32 i32 i32 i32 i32 i32))) + (import "env" "ext_restore_to" + (func $ext_restore_to + (param i32 i32 i32 i32 i32 i32 i32 i32) + ) + ) (import "env" "memory" (memory 1 1)) (func (export "call") diff --git a/frame/contracts/src/account_db.rs b/frame/contracts/src/account_db.rs deleted file mode 100644 index 5e1b0c34b5..0000000000 --- a/frame/contracts/src/account_db.rs +++ /dev/null @@ -1,450 +0,0 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Auxiliaries to help with managing partial changes to accounts state. 
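// The overlay/change-set bookkeeping below is superseded by FRAME's nested
// storage transactions. A minimal sketch of that primitive, assuming
// `with_transaction` and `TransactionOutcome` from `frame_support::storage`
// (the storage item and flag are placeholders):
//
//     use frame_support::storage::{with_transaction, TransactionOutcome};
//
//     let res: Result<(), &'static str> = with_transaction(|| {
//         // Writes made inside the closure go into a transactional layer ...
//         SomeStorageItem::put(new_value);
//         if succeeded {
//             // ... and are committed into the parent layer on success ...
//             TransactionOutcome::Commit(Ok(()))
//         } else {
//             // ... or discarded wholesale on failure.
//             TransactionOutcome::Rollback(Err("reverted"))
//         }
//     });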
- -use super::{ - AliveContractInfo, BalanceOf, CodeHash, ContractInfo, ContractInfoOf, Trait, TrieId, - TrieIdGenerator, -}; -use crate::exec::StorageKey; -use sp_std::cell::RefCell; -use sp_std::collections::btree_map::{BTreeMap, Entry}; -use sp_std::prelude::*; -use sp_io::hashing::blake2_256; -use sp_runtime::traits::{Bounded, Zero}; -use frame_support::traits::{Currency, Imbalance, SignedImbalance}; -use frame_support::{storage::child, StorageMap}; -use frame_system; - -// Note: we don't provide Option because we can't create -// the trie_id in the overlay, thus we provide an overlay on the fields -// specifically. -pub struct ChangeEntry { - /// If Some(_), then the account balance is modified to the value. If None and `reset` is false, - /// the balance unmodified. If None and `reset` is true, the balance is reset to 0. - balance: Option>, - /// If Some(_), then a contract is instantiated with the code hash. If None and `reset` is false, - /// then the contract code is unmodified. If None and `reset` is true, the contract is deleted. - code_hash: Option>, - /// If Some(_), then the rent allowance is set to the value. If None and `reset` is false, then - /// the rent allowance is unmodified. If None and `reset` is true, the contract is deleted. - rent_allowance: Option>, - storage: BTreeMap>>, - /// If true, indicates that the existing contract and all its storage entries should be removed - /// and replaced with the fields on this change entry. Otherwise, the fields on this change - /// entry are updates merged into the existing contract info and storage. - reset: bool, -} - -impl ChangeEntry { - fn balance(&self) -> Option> { - self.balance.or_else(|| { - if self.reset { - Some(>::zero()) - } else { - None - } - }) - } - - fn code_hash(&self) -> Option>> { - if self.reset { - Some(self.code_hash) - } else { - self.code_hash.map(Some) - } - } - - fn rent_allowance(&self) -> Option>> { - if self.reset { - Some(self.rent_allowance) - } else { - self.rent_allowance.map(Some) - } - } - - fn storage(&self, location: &StorageKey) -> Option>> { - let value = self.storage.get(location).cloned(); - if self.reset { - Some(value.unwrap_or(None)) - } else { - value - } - } -} - -// Cannot derive(Default) since it erroneously bounds T by Default. -impl Default for ChangeEntry { - fn default() -> Self { - ChangeEntry { - rent_allowance: Default::default(), - balance: Default::default(), - code_hash: Default::default(), - storage: Default::default(), - reset: false, - } - } -} - -pub type ChangeSet = BTreeMap<::AccountId, ChangeEntry>; - -pub trait AccountDb { - /// Account is used when overlayed otherwise trie_id must be provided. - /// This is for performance reason. - /// - /// Trie id is None iff account doesn't have an associated trie id in >. - /// Because DirectAccountDb bypass the lookup for this association. - fn get_storage( - &self, - account: &T::AccountId, - trie_id: Option<&TrieId>, - location: &StorageKey, - ) -> Option>; - /// If account has an alive contract then return the code hash associated. - fn get_code_hash(&self, account: &T::AccountId) -> Option>; - /// If account has an alive contract then return the rent allowance associated. - fn get_rent_allowance(&self, account: &T::AccountId) -> Option>; - /// Returns false iff account has no alive contract nor tombstone. 
- fn contract_exists(&self, account: &T::AccountId) -> bool; - fn get_balance(&self, account: &T::AccountId) -> BalanceOf; - - fn commit(&mut self, change_set: ChangeSet); -} - -pub struct DirectAccountDb; -impl AccountDb for DirectAccountDb { - fn get_storage( - &self, - _account: &T::AccountId, - trie_id: Option<&TrieId>, - location: &StorageKey, - ) -> Option> { - trie_id - .and_then(|id| child::get_raw(&crate::child_trie_info(&id[..]), &blake2_256(location))) - } - fn get_code_hash(&self, account: &T::AccountId) -> Option> { - >::get(account).and_then(|i| i.as_alive().map(|i| i.code_hash)) - } - fn get_rent_allowance(&self, account: &T::AccountId) -> Option> { - >::get(account).and_then(|i| i.as_alive().map(|i| i.rent_allowance)) - } - fn contract_exists(&self, account: &T::AccountId) -> bool { - >::contains_key(account) - } - fn get_balance(&self, account: &T::AccountId) -> BalanceOf { - T::Currency::free_balance(account) - } - fn commit(&mut self, s: ChangeSet) { - let mut total_imbalance = SignedImbalance::zero(); - for (address, changed) in s.into_iter() { - if let Some(balance) = changed.balance() { - let imbalance = T::Currency::make_free_balance_be(&address, balance); - total_imbalance = total_imbalance.merge(imbalance); - } - - if changed.code_hash().is_some() - || changed.rent_allowance().is_some() - || !changed.storage.is_empty() - || changed.reset - { - let old_info = match >::get(&address) { - Some(ContractInfo::Alive(alive)) => Some(alive), - None => None, - // Cannot commit changes to tombstone contract - Some(ContractInfo::Tombstone(_)) => continue, - }; - - let mut new_info = match (changed.reset, old_info.clone(), changed.code_hash) { - // Existing contract is being modified. - (false, Some(info), _) => info, - // Existing contract is being removed. - (true, Some(info), None) => { - child::kill_storage(&info.child_trie_info()); - >::remove(&address); - continue; - } - // Existing contract is being replaced by a new one. - (true, Some(info), Some(code_hash)) => { - child::kill_storage(&info.child_trie_info()); - AliveContractInfo:: { - code_hash, - storage_size: 0, - empty_pair_count: 0, - total_pair_count: 0, - trie_id: ::TrieIdGenerator::trie_id(&address), - deduct_block: >::block_number(), - rent_allowance: >::max_value(), - last_write: None, - } - } - // New contract is being instantiated. - (_, None, Some(code_hash)) => AliveContractInfo:: { - code_hash, - storage_size: 0, - empty_pair_count: 0, - total_pair_count: 0, - trie_id: ::TrieIdGenerator::trie_id(&address), - deduct_block: >::block_number(), - rent_allowance: >::max_value(), - last_write: None, - }, - // There is no existing at the address nor a new one to be instantiated. - (_, None, None) => continue, - }; - - if let Some(rent_allowance) = changed.rent_allowance { - new_info.rent_allowance = rent_allowance; - } - - if let Some(code_hash) = changed.code_hash { - new_info.code_hash = code_hash; - } - - if !changed.storage.is_empty() { - new_info.last_write = Some(>::block_number()); - } - - // NB: this call allocates internally. To keep allocations to the minimum we cache - // the child trie info here. - let child_trie_info = new_info.child_trie_info(); - - // Here we iterate over all storage key-value pairs that were changed throughout the - // execution of a contract and apply them to the substrate storage. 
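The commit loop that follows (and its reimplementation in the new `storage.rs` later in this patch) maintains three per-contract counters: total key-value pairs, empty pairs, and total storage size. A standalone sketch of just those update rules, over plain values (a simplification for illustration, not the pallet code):

    /// Simplified per-contract bookkeeping, mirroring the `AliveContractInfo` fields.
    #[derive(Debug, Default, PartialEq)]
    struct Bookkeeping {
        total_pair_count: u32,
        empty_pair_count: u32,
        storage_size: u32,
    }

    impl Bookkeeping {
        /// Applies one storage write: `prev` is the old value, `new` the value being
        /// written (`None` means the key is absent / being deleted).
        fn apply(&mut self, prev: Option<&[u8]>, new: Option<&[u8]>) {
            match (prev, new) {
                (Some(p), None) => {
                    self.total_pair_count -= 1;
                    if p.is_empty() { self.empty_pair_count -= 1; }
                }
                (None, Some(n)) => {
                    self.total_pair_count += 1;
                    if n.is_empty() { self.empty_pair_count += 1; }
                }
                (Some(p), Some(n)) => {
                    if p.is_empty() { self.empty_pair_count -= 1; }
                    if n.is_empty() { self.empty_pair_count += 1; }
                }
                (None, None) => {}
            }
            // Storage size uses saturating arithmetic, as in the pallet code.
            let prev_len = prev.map(|v| v.len() as u32).unwrap_or(0);
            let new_len = new.map(|v| v.len() as u32).unwrap_or(0);
            self.storage_size = self.storage_size
                .saturating_add(new_len)
                .saturating_sub(prev_len);
        }
    }

    fn main() {
        let mut info = Bookkeeping::default();
        info.apply(None, Some(b"hello".as_slice()));                  // new non-empty pair
        info.apply(None, Some(b"".as_slice()));                       // new empty pair
        info.apply(Some(b"hello".as_slice()), Some(b"".as_slice()));  // overwrite with empty value
        assert_eq!(info, Bookkeeping { total_pair_count: 2, empty_pair_count: 2, storage_size: 0 });
    }
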
- for (key, opt_new_value) in changed.storage.into_iter() { - let hashed_key = blake2_256(&key); - - // In order to correctly update the book keeping we need to fetch the previous - // value of the key-value pair. - // - // It might be a bit more clean if we had an API that supported getting the size - // of the value without going through the loading of it. But at the moment of - // writing, there is no such API. - // - // That's not a show stopper in any case, since the performance cost is - // dominated by the trie traversal anyway. - let opt_prev_value = child::get_raw(&child_trie_info, &hashed_key); - - // Update the total number of KV pairs and the number of empty pairs. - match (&opt_prev_value, &opt_new_value) { - (Some(prev_value), None) => { - new_info.total_pair_count -= 1; - if prev_value.is_empty() { - new_info.empty_pair_count -= 1; - } - }, - (None, Some(new_value)) => { - new_info.total_pair_count += 1; - if new_value.is_empty() { - new_info.empty_pair_count += 1; - } - }, - (Some(prev_value), Some(new_value)) => { - if prev_value.is_empty() { - new_info.empty_pair_count -= 1; - } - if new_value.is_empty() { - new_info.empty_pair_count += 1; - } - } - (None, None) => {} - } - - // Update the total storage size. - let prev_value_len = opt_prev_value - .as_ref() - .map(|old_value| old_value.len() as u32) - .unwrap_or(0); - let new_value_len = opt_new_value - .as_ref() - .map(|new_value| new_value.len() as u32) - .unwrap_or(0); - new_info.storage_size = new_info - .storage_size - .saturating_add(new_value_len) - .saturating_sub(prev_value_len); - - // Finally, perform the change on the storage. - match opt_new_value { - Some(new_value) => child::put_raw(&child_trie_info, &hashed_key, &new_value[..]), - None => child::kill(&child_trie_info, &hashed_key), - } - } - - if old_info - .map(|old_info| old_info != new_info) - .unwrap_or(true) - { - >::insert(&address, ContractInfo::Alive(new_info)); - } - } - } - - match total_imbalance { - // If we've detected a positive imbalance as a result of our contract-level machinations - // then it's indicative of a buggy contracts system. - // Panicking is far from ideal as it opens up a DoS attack on block validators, however - // it's a less bad option than allowing arbitrary value to be created. 
- SignedImbalance::Positive(ref p) if !p.peek().is_zero() => { - panic!("contract subsystem resulting in positive imbalance!") - } - _ => {} - } - } -} - -pub struct OverlayAccountDb<'a, T: Trait + 'a> { - local: RefCell>, - underlying: &'a dyn AccountDb, -} -impl<'a, T: Trait> OverlayAccountDb<'a, T> { - pub fn new(underlying: &'a dyn AccountDb) -> OverlayAccountDb<'a, T> { - OverlayAccountDb { - local: RefCell::new(ChangeSet::new()), - underlying, - } - } - - pub fn into_change_set(self) -> ChangeSet { - self.local.into_inner() - } - - pub fn set_storage( - &mut self, - account: &T::AccountId, - location: StorageKey, - value: Option>, - ) { - self.local - .borrow_mut() - .entry(account.clone()) - .or_insert(Default::default()) - .storage - .insert(location, value); - } - - /// Return an error if contract already exists (either if it is alive or tombstone) - pub fn instantiate_contract( - &mut self, - account: &T::AccountId, - code_hash: CodeHash, - ) -> Result<(), &'static str> { - if self.contract_exists(account) { - return Err("Alive contract or tombstone already exists"); - } - - let mut local = self.local.borrow_mut(); - let contract = local.entry(account.clone()).or_default(); - - contract.code_hash = Some(code_hash); - contract.rent_allowance = Some(>::max_value()); - - Ok(()) - } - - /// Mark a contract as deleted. - pub fn destroy_contract(&mut self, account: &T::AccountId) { - let mut local = self.local.borrow_mut(); - local.insert( - account.clone(), - ChangeEntry { - reset: true, - ..Default::default() - }, - ); - } - - /// Assume contract exists - pub fn set_rent_allowance(&mut self, account: &T::AccountId, rent_allowance: BalanceOf) { - self.local - .borrow_mut() - .entry(account.clone()) - .or_insert(Default::default()) - .rent_allowance = Some(rent_allowance); - } - pub fn set_balance(&mut self, account: &T::AccountId, balance: BalanceOf) { - self.local - .borrow_mut() - .entry(account.clone()) - .or_insert(Default::default()) - .balance = Some(balance); - } -} - -impl<'a, T: Trait> AccountDb for OverlayAccountDb<'a, T> { - fn get_storage( - &self, - account: &T::AccountId, - trie_id: Option<&TrieId>, - location: &StorageKey, - ) -> Option> { - self.local - .borrow() - .get(account) - .and_then(|changes| changes.storage(location)) - .unwrap_or_else(|| self.underlying.get_storage(account, trie_id, location)) - } - fn get_code_hash(&self, account: &T::AccountId) -> Option> { - self.local - .borrow() - .get(account) - .and_then(|changes| changes.code_hash()) - .unwrap_or_else(|| self.underlying.get_code_hash(account)) - } - fn get_rent_allowance(&self, account: &T::AccountId) -> Option> { - self.local - .borrow() - .get(account) - .and_then(|changes| changes.rent_allowance()) - .unwrap_or_else(|| self.underlying.get_rent_allowance(account)) - } - fn contract_exists(&self, account: &T::AccountId) -> bool { - self.local - .borrow() - .get(account) - .and_then(|changes| changes.code_hash().map(|code_hash| code_hash.is_some())) - .unwrap_or_else(|| self.underlying.contract_exists(account)) - } - fn get_balance(&self, account: &T::AccountId) -> BalanceOf { - self.local - .borrow() - .get(account) - .and_then(|changes| changes.balance()) - .unwrap_or_else(|| self.underlying.get_balance(account)) - } - fn commit(&mut self, s: ChangeSet) { - let mut local = self.local.borrow_mut(); - - for (address, changed) in s.into_iter() { - match local.entry(address) { - Entry::Occupied(e) => { - let mut value = e.into_mut(); - if changed.reset { - *value = changed; - } else { - value.balance = 
changed.balance.or(value.balance); - value.code_hash = changed.code_hash.or(value.code_hash); - value.rent_allowance = changed.rent_allowance.or(value.rent_allowance); - value.storage.extend(changed.storage.into_iter()); - } - } - Entry::Vacant(e) => { - e.insert(changed); - } - } - } - } -} diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 9cc1c50260..ff0d4d9dc0 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -15,16 +15,16 @@ // along with Substrate. If not, see . use super::{CodeHash, Config, ContractAddressFor, Event, RawEvent, Trait, - TrieId, BalanceOf, ContractInfo}; -use crate::account_db::{AccountDb, DirectAccountDb, OverlayAccountDb}; + TrieId, BalanceOf, ContractInfo, TrieIdGenerator}; use crate::gas::{Gas, GasMeter, Token}; use crate::rent; +use crate::storage; use sp_std::prelude::*; -use sp_runtime::traits::{Bounded, CheckedAdd, CheckedSub, Zero}; +use sp_runtime::traits::{Bounded, Zero}; use frame_support::{ storage::unhashed, dispatch::DispatchError, - traits::{WithdrawReason, Currency, Time, Randomness}, + traits::{ExistenceRequirement, Currency, Time, Randomness}, }; pub type AccountIdOf = ::AccountId; @@ -105,8 +105,8 @@ pub trait Ext { fn get_storage(&self, key: &StorageKey) -> Option>; /// Sets the storage entry by the given key to the specified value. If `value` is `None` then - /// the storage entry is deleted. Returns an Err if the value size is too large. - fn set_storage(&mut self, key: StorageKey, value: Option>) -> Result<(), &'static str>; + /// the storage entry is deleted. + fn set_storage(&mut self, key: StorageKey, value: Option>); /// Instantiate a contract from the given code. /// @@ -129,6 +129,12 @@ pub trait Ext { ) -> Result<(), DispatchError>; /// Transfer all funds to `beneficiary` and delete the contract. + /// + /// Since this function removes the self contract eagerly, if succeeded, no further actions should + /// be performed on this `Ext` instance. + /// + /// This function will fail if the same contract is present on the contract + /// call stack. fn terminate( &mut self, beneficiary: &AccountIdOf, @@ -147,14 +153,20 @@ pub trait Ext { /// Notes a call dispatch. fn note_dispatch_call(&mut self, call: CallOf); - /// Notes a restoration request. - fn note_restore_to( + /// Restores the given destination contract sacrificing the current one. + /// + /// Since this function removes the self contract eagerly, if succeeded, no further actions should + /// be performed on this `Ext` instance. + /// + /// This function will fail if the same contract is present + /// on the contract call stack. + fn restore_to( &mut self, dest: AccountIdOf, code_hash: CodeHash, rent_allowance: BalanceOf, delta: Vec, - ); + ) -> Result<(), &'static str>; /// Returns a reference to the account id of the caller. fn caller(&self) -> &AccountIdOf; @@ -264,38 +276,18 @@ impl Token for ExecFeeToken { #[cfg_attr(any(feature = "std", test), derive(PartialEq, Eq, Clone))] #[derive(sp_runtime::RuntimeDebug)] pub enum DeferredAction { - DepositEvent { - /// A list of topics this event will be deposited with. - topics: Vec, - /// The event to deposit. - event: Event, - }, DispatchRuntimeCall { /// The account id of the contract who dispatched this call. origin: T::AccountId, /// The call to dispatch. call: ::Call, }, - RestoreTo { - /// The account id of the contract which is removed during the restoration and transfers - /// its storage to the restored contract. 
- donor: T::AccountId, - /// The account id of the restored contract. - dest: T::AccountId, - /// The code hash of the restored contract. - code_hash: CodeHash, - /// The initial rent allowance to set. - rent_allowance: BalanceOf, - /// The keys to delete upon restoration. - delta: Vec, - }, } pub struct ExecutionContext<'a, T: Trait + 'a, V, L> { pub caller: Option<&'a ExecutionContext<'a, T, V, L>>, pub self_account: T::AccountId, pub self_trie_id: Option, - pub overlay: OverlayAccountDb<'a, T>, pub depth: usize, pub deferred: Vec>, pub config: &'a Config, @@ -320,7 +312,6 @@ where caller: None, self_trie_id: None, self_account: origin, - overlay: OverlayAccountDb::::new(&DirectAccountDb), depth: 0, deferred: Vec::new(), config: &cfg, @@ -338,7 +329,6 @@ where caller: Some(self), self_trie_id: trie_id, self_account: dest, - overlay: OverlayAccountDb::new(&self.overlay), depth: self.depth + 1, deferred: Vec::new(), config: self.config, @@ -349,23 +339,6 @@ where } } - /// Transfer balance to `dest` without calling any contract code. - pub fn transfer( - &mut self, - dest: T::AccountId, - value: BalanceOf, - gas_meter: &mut GasMeter - ) -> Result<(), DispatchError> { - transfer( - gas_meter, - TransferCause::Call, - &self.self_account.clone(), - &dest, - value, - self, - ) - } - /// Make a call to the specified address, optionally transferring some funds. pub fn call( &mut self, @@ -424,8 +397,8 @@ where // If code_hash is not none, then the destination account is a live contract, otherwise // it is a regular account since tombstone accounts have already been rejected. - match nested.overlay.get_code_hash(&dest) { - Some(dest_code_hash) => { + match storage::code_hash::(&dest) { + Ok(dest_code_hash) => { let executable = try_or_exec_error!( nested.loader.load_main(&dest_code_hash), input_data @@ -437,10 +410,9 @@ where input_data, gas_meter, )?; - Ok(output) } - None => Ok(ExecReturnValue { status: STATUS_SUCCESS, data: Vec::new() }), + Err(storage::ContractAbsentError) => Ok(ExecReturnValue { status: STATUS_SUCCESS, data: Vec::new() }), } }) } @@ -477,11 +449,20 @@ where ); // TrieId has not been generated yet and storage is empty since contract is new. - let dest_trie_id = None; + // + // Generate it now. + let dest_trie_id = ::TrieIdGenerator::trie_id(&dest); - let output = self.with_nested_context(dest.clone(), dest_trie_id, |nested| { + let output = self.with_nested_context(dest.clone(), Some(dest_trie_id), |nested| { try_or_exec_error!( - nested.overlay.instantiate_contract(&dest, code_hash.clone()), + storage::place_contract::( + &dest, + nested + .self_trie_id + .clone() + .expect("the nested context always has to have self_trie_id"), + code_hash.clone() + ), input_data ); @@ -512,7 +493,7 @@ where )?; // Error out if insufficient remaining balance. - if nested.overlay.get_balance(&dest) < nested.config.existential_deposit { + if T::Currency::free_balance(&dest) < nested.config.existential_deposit { return Err(ExecError { reason: "insufficient remaining balance".into(), buffer: output.data, @@ -520,10 +501,7 @@ where } // Deposit an instantiation event. 
- nested.deferred.push(DeferredAction::DepositEvent { - event: RawEvent::Instantiated(caller.clone(), dest.clone()), - topics: Vec::new(), - }); + deposit_event::(vec![], RawEvent::Instantiated(caller.clone(), dest.clone())); Ok(output) })?; @@ -531,32 +509,6 @@ where Ok((dest, output)) } - pub fn terminate( - &mut self, - beneficiary: &T::AccountId, - gas_meter: &mut GasMeter, - ) -> Result<(), DispatchError> { - let self_id = self.self_account.clone(); - let value = self.overlay.get_balance(&self_id); - if let Some(caller) = self.caller { - if caller.is_live(&self_id) { - return Err(DispatchError::Other( - "Cannot terminate a contract that is present on the call stack", - )); - } - } - transfer( - gas_meter, - TransferCause::Terminate, - &self_id, - beneficiary, - value, - self, - )?; - self.overlay.destroy_contract(&self_id); - Ok(()) - } - fn new_call_context<'b>( &'b mut self, caller: T::AccountId, @@ -573,21 +525,26 @@ where } } + /// Execute the given closure within a nested execution context. fn with_nested_context(&mut self, dest: T::AccountId, trie_id: Option, func: F) -> ExecResult where F: FnOnce(&mut ExecutionContext) -> ExecResult { - let (output, change_set, deferred) = { + use frame_support::storage::TransactionOutcome::*; + let (output, deferred) = { let mut nested = self.nested(dest, trie_id); - let output = func(&mut nested)?; - (output, nested.overlay.into_change_set(), nested.deferred) + let output = frame_support::storage::with_transaction(|| { + let output = func(&mut nested); + match output { + Ok(ref rv) if rv.is_success() => Commit(output), + _ => Rollback(output), + } + })?; + (output, nested.deferred) }; - if output.is_success() { - self.overlay.commit(change_set); self.deferred.extend(deferred); } - Ok(output) } @@ -676,48 +633,27 @@ fn transfer<'a, T: Trait, V: Vm, L: Loader>( Err("not enough gas to pay transfer fee")? } - // We allow balance to go below the existential deposit here: - let from_balance = ctx.overlay.get_balance(transactor); - let new_from_balance = match from_balance.checked_sub(&value) { - Some(b) => b, - None => Err("balance too low to send value")?, - }; - let to_balance = ctx.overlay.get_balance(dest); - if to_balance.is_zero() && value < ctx.config.existential_deposit { - Err("value too low to create account")? - } - // Only ext_terminate is allowed to bring the sender below the existential deposit - let required_balance = match cause { - Terminate => 0.into(), - _ => ctx.config.existential_deposit - }; - - T::Currency::ensure_can_withdraw( - transactor, - value, - WithdrawReason::Transfer.into(), - new_from_balance.checked_sub(&required_balance) - .ok_or("brings sender below existential deposit")?, - )?; - - let new_to_balance = match to_balance.checked_add(&value) { - Some(b) => b, - None => Err("destination balance too high to receive value")?, + let existence_requirement = match cause { + Terminate => ExistenceRequirement::AllowDeath, + _ => ExistenceRequirement::KeepAlive, }; - - if transactor != dest { - ctx.overlay.set_balance(transactor, new_from_balance); - ctx.overlay.set_balance(dest, new_to_balance); - ctx.deferred.push(DeferredAction::DepositEvent { - event: RawEvent::Transfer(transactor.clone(), dest.clone(), value), - topics: Vec::new(), - }); - } + T::Currency::transfer(transactor, dest, value, existence_requirement)?; Ok(()) } +/// A context that is active within a call. +/// +/// This context has some invariants that must be held at all times. Specifically: +///`ctx` always points to a context of an alive contract. 
That implies that it has an existent +/// `self_trie_id`. +/// +/// Be advised that there are brief time spans where these invariants could be invalidated. +/// For example, when a contract requests self-termination the contract is removed eagerly. That +/// implies that the control won't be returned to the contract anymore, but there is still some code +/// on the path of the return from that call context. Therefore, care must be taken in these +/// situations. struct CallContext<'a, 'b: 'a, T: Trait + 'b, V: Vm + 'b, L: Loader> { ctx: &'a mut ExecutionContext<'b, T, V, L>, caller: T::AccountId, @@ -735,20 +671,32 @@ where type T = T; fn get_storage(&self, key: &StorageKey) -> Option> { - self.ctx.overlay.get_storage(&self.ctx.self_account, self.ctx.self_trie_id.as_ref(), key) + let trie_id = self.ctx.self_trie_id.as_ref().expect( + "`ctx.self_trie_id` points to an alive contract within the `CallContext`;\ + it cannot be `None`;\ + expect can't fail;\ + qed", + ); + storage::read_contract_storage(trie_id, key) } - fn set_storage(&mut self, key: StorageKey, value: Option>) -> Result<(), &'static str> { - if let Some(ref value) = value { - if self.max_value_size() < value.len() as u32 { - return Err("value size exceeds maximum"); - } + fn set_storage(&mut self, key: StorageKey, value: Option>) { + let trie_id = self.ctx.self_trie_id.as_ref().expect( + "`ctx.self_trie_id` points to an alive contract within the `CallContext`;\ + it cannot be `None`;\ + expect can't fail;\ + qed", + ); + if let Err(storage::ContractAbsentError) = + storage::write_contract_storage::(&self.ctx.self_account, trie_id, &key, value) + { + panic!( + "the contract must be in the alive state within the `CallContext`;\ + the contract cannot be absent in storage; + write_contract_storage cannot return `None`; + qed" + ); } - - self.ctx - .overlay - .set_storage(&self.ctx.self_account, key, value); - Ok(()) } fn instantiate( @@ -767,7 +715,14 @@ where value: BalanceOf, gas_meter: &mut GasMeter, ) -> Result<(), DispatchError> { - self.ctx.transfer(to.clone(), value, gas_meter) + transfer( + gas_meter, + TransferCause::Call, + &self.ctx.self_account.clone(), + &to, + value, + self.ctx, + ) } fn terminate( @@ -775,7 +730,30 @@ where beneficiary: &AccountIdOf, gas_meter: &mut GasMeter, ) -> Result<(), DispatchError> { - self.ctx.terminate(beneficiary, gas_meter) + let self_id = self.ctx.self_account.clone(); + let value = T::Currency::free_balance(&self_id); + if let Some(caller_ctx) = self.ctx.caller { + if caller_ctx.is_live(&self_id) { + return Err(DispatchError::Other( + "Cannot terminate a contract that is present on the call stack", + )); + } + } + transfer( + gas_meter, + TransferCause::Terminate, + &self_id, + beneficiary, + value, + self.ctx, + )?; + let self_trie_id = self.ctx.self_trie_id.as_ref().expect( + "this function is only invoked by in the context of a contract;\ + a contract has a trie id;\ + this can't be None; qed", + ); + storage::destroy_contract::(&self_id, self_trie_id); + Ok(()) } fn call( @@ -795,20 +773,40 @@ where }); } - fn note_restore_to( + fn restore_to( &mut self, dest: AccountIdOf, code_hash: CodeHash, rent_allowance: BalanceOf, delta: Vec, - ) { - self.ctx.deferred.push(DeferredAction::RestoreTo { - donor: self.ctx.self_account.clone(), - dest, - code_hash, + ) -> Result<(), &'static str> { + if let Some(caller_ctx) = self.ctx.caller { + if caller_ctx.is_live(&self.ctx.self_account) { + return Err( + "Cannot perform restoration of a contract that is present on the call stack", + ); + } + } 
+ + let result = crate::rent::restore_to::( + self.ctx.self_account.clone(), + dest.clone(), + code_hash.clone(), rent_allowance, delta, - }); + ); + if let Ok(_) = result { + deposit_event::( + vec![], + RawEvent::Restored( + self.ctx.self_account.clone(), + dest, + code_hash, + rent_allowance, + ), + ); + } + result } fn address(&self) -> &T::AccountId { @@ -820,7 +818,7 @@ where } fn balance(&self) -> BalanceOf { - self.ctx.overlay.get_balance(&self.ctx.self_account) + T::Currency::free_balance(&self.ctx.self_account) } fn value_transferred(&self) -> BalanceOf { @@ -844,18 +842,25 @@ where } fn deposit_event(&mut self, topics: Vec, data: Vec) { - self.ctx.deferred.push(DeferredAction::DepositEvent { + deposit_event::( topics, - event: RawEvent::ContractExecution(self.ctx.self_account.clone(), data), - }); + RawEvent::ContractExecution(self.ctx.self_account.clone(), data) + ); } fn set_rent_allowance(&mut self, rent_allowance: BalanceOf) { - self.ctx.overlay.set_rent_allowance(&self.ctx.self_account, rent_allowance) + if let Err(storage::ContractAbsentError) = + storage::set_rent_allowance::(&self.ctx.self_account, rent_allowance) + { + panic!( + "`self_account` points to an alive contract within the `CallContext`; + set_rent_allowance cannot return `Err`; qed" + ); + } } fn rent_allowance(&self) -> BalanceOf { - self.ctx.overlay.get_rent_allowance(&self.ctx.self_account) + storage::rent_allowance::(&self.ctx.self_account) .unwrap_or(>::max_value()) // Must never be triggered actually } @@ -877,30 +882,37 @@ where } } +fn deposit_event( + topics: Vec, + event: Event, +) { + >::deposit_event_indexed( + &*topics, + ::Event::from(event).into(), + ) +} + /// These tests exercise the executive layer. /// /// In these tests the VM/loader are mocked. Instead of dealing with wasm bytecode they use simple closures. /// This allows you to tackle executive logic more thoroughly without writing a /// wasm VM code. -/// -/// Because it's the executive layer: -/// -/// - no gas meter setup and teardown logic. All balances are *AFTER* gas purchase. -/// - executive layer doesn't alter any storage! #[cfg(test)] mod tests { use super::{ - BalanceOf, ExecFeeToken, ExecutionContext, Ext, Loader, TransferFeeKind, TransferFeeToken, - Vm, ExecResult, RawEvent, DeferredAction, + BalanceOf, Event, ExecFeeToken, ExecResult, ExecutionContext, Ext, Loader, + RawEvent, TransferFeeKind, TransferFeeToken, Vm, }; use crate::{ - account_db::AccountDb, gas::GasMeter, tests::{ExtBuilder, Test}, + gas::GasMeter, tests::{ExtBuilder, Test, MetaEvent}, exec::{ExecReturnValue, ExecError, STATUS_SUCCESS}, CodeHash, Config, gas::Gas, + storage, }; - use std::{cell::RefCell, rc::Rc, collections::HashMap, marker::PhantomData}; - use assert_matches::assert_matches; + use crate::tests::test_utils::{place_contract, set_balance, get_balance}; use sp_runtime::DispatchError; + use assert_matches::assert_matches; + use std::{cell::RefCell, collections::HashMap, marker::PhantomData, rc::Rc}; const ALICE: u64 = 1; const BOB: u64 = 2; @@ -908,19 +920,14 @@ mod tests { const GAS_LIMIT: Gas = 10_000_000_000; - impl<'a, T, V, L> ExecutionContext<'a, T, V, L> - where T: crate::Trait - { - fn events(&self) -> Vec> { - self.deferred - .iter() - .filter(|action| match *action { - DeferredAction::DepositEvent { .. 
} => true, - _ => false, - }) - .cloned() - .collect() - } + fn events() -> Vec> { + >::events() + .into_iter() + .filter_map(|meta| match meta.event { + MetaEvent::contracts(contract_event) => Some(contract_event), + _ => None, + }) + .collect() } struct MockCtx<'a> { @@ -1029,7 +1036,7 @@ mod tests { ExtBuilder::default().build().execute_with(|| { let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.instantiate_contract(&BOB, exec_ch).unwrap(); + place_contract(&BOB, exec_ch); assert_matches!( ctx.call(BOB, value, &mut gas_meter, data), @@ -1051,8 +1058,8 @@ mod tests { let loader = MockLoader::empty(); let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.set_balance(&origin, 100); - ctx.overlay.set_balance(&dest, 0); + set_balance(&origin, 100); + set_balance(&dest, 0); let mut gas_meter = GasMeter::::new(GAS_LIMIT); @@ -1072,7 +1079,7 @@ mod tests { let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.set_balance(&origin, 100); + set_balance(&origin, 100); let mut gas_meter = GasMeter::::new(GAS_LIMIT); @@ -1097,8 +1104,8 @@ mod tests { ExtBuilder::default().build().execute_with(|| { let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.set_balance(&origin, 100); - ctx.overlay.set_balance(&dest, 0); + set_balance(&origin, 100); + set_balance(&dest, 0); let output = ctx.call( dest, @@ -1108,15 +1115,15 @@ mod tests { ).unwrap(); assert!(output.is_success()); - assert_eq!(ctx.overlay.get_balance(&origin), 45); - assert_eq!(ctx.overlay.get_balance(&dest), 55); + assert_eq!(get_balance(&origin), 45); + assert_eq!(get_balance(&dest), 55); }); } #[test] fn changes_are_reverted_on_failing_call() { - // This test verifies that a contract is able to transfer - // some funds to another account. + // This test verifies that changes are reverted on a call which fails (or equally, returns + // a non-zero status code). 
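The rollback this test relies on no longer comes from discarding an overlay change set: `with_nested_context`, earlier in this file's diff, now wraps the nested call in `frame_support::storage::with_transaction` and returns `Commit` only for a successful `ExecReturnValue`, `Rollback` otherwise. The following self-contained snippet models those commit/rollback semantics with a toy state; the real pallet uses the `frame_support` primitive shown above, not this code:

    use std::collections::BTreeMap;

    /// Mirrors the shape of `frame_support::storage::TransactionOutcome` for illustration.
    enum TransactionOutcome<R> {
        Commit(R),
        Rollback(R),
    }

    type State = BTreeMap<&'static str, u64>;

    /// Runs `f` against a copy of the state and keeps the copy only on `Commit`.
    fn with_transaction<R>(
        state: &mut State,
        f: impl FnOnce(&mut State) -> TransactionOutcome<R>,
    ) -> R {
        let mut snapshot = state.clone();
        match f(&mut snapshot) {
            TransactionOutcome::Commit(r) => {
                *state = snapshot; // keep the changes
                r
            }
            TransactionOutcome::Rollback(r) => r, // drop `snapshot`; state unchanged
        }
    }

    fn main() {
        let mut state = State::new();
        state.insert("balance", 100);

        // A "successful call": its writes are committed.
        with_transaction(&mut state, |s| {
            s.insert("balance", 90);
            TransactionOutcome::Commit(())
        });
        assert_eq!(state["balance"], 90);

        // A "failing call": its writes are rolled back.
        with_transaction(&mut state, |s| {
            s.insert("balance", 0);
            TransactionOutcome::Rollback(())
        });
        assert_eq!(state["balance"], 90);
    }

In the pallet the "success" predicate is `ExecReturnValue::is_success()`, so a contract that returns a non-zero status also has its state changes reverted, which is exactly what this rewritten test asserts.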
let origin = ALICE; let dest = BOB; @@ -1129,9 +1136,9 @@ mod tests { ExtBuilder::default().build().execute_with(|| { let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.instantiate_contract(&BOB, return_ch).unwrap(); - ctx.overlay.set_balance(&origin, 100); - ctx.overlay.set_balance(&dest, 0); + place_contract(&BOB, return_ch); + set_balance(&origin, 100); + set_balance(&dest, 0); let output = ctx.call( dest, @@ -1141,8 +1148,8 @@ mod tests { ).unwrap(); assert!(!output.is_success()); - assert_eq!(ctx.overlay.get_balance(&origin), 100); - assert_eq!(ctx.overlay.get_balance(&dest), 0); + assert_eq!(get_balance(&origin), 100); + assert_eq!(get_balance(&dest), 0); }); } @@ -1159,8 +1166,8 @@ mod tests { let loader = MockLoader::empty(); let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.set_balance(&origin, 100); - ctx.overlay.set_balance(&dest, 0); + set_balance(&origin, 100); + set_balance(&dest, 0); let mut gas_meter = GasMeter::::new(GAS_LIMIT); @@ -1184,8 +1191,8 @@ mod tests { let loader = MockLoader::empty(); let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.set_balance(&origin, 100); - ctx.overlay.set_balance(&dest, 15); + set_balance(&origin, 100); + set_balance(&dest, 15); let mut gas_meter = GasMeter::::new(GAS_LIMIT); @@ -1212,8 +1219,8 @@ mod tests { let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.set_balance(&origin, 100); - ctx.overlay.set_balance(&dest, 15); + set_balance(&origin, 100); + set_balance(&dest, 15); let mut gas_meter = GasMeter::::new(GAS_LIMIT); @@ -1244,7 +1251,7 @@ mod tests { ExtBuilder::default().build().execute_with(|| { let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.set_balance(&origin, 0); + set_balance(&origin, 0); let result = ctx.call( dest, @@ -1256,12 +1263,12 @@ mod tests { assert_matches!( result, Err(ExecError { - reason: DispatchError::Other("balance too low to send value"), + reason: DispatchError::Module { message: Some("InsufficientBalance"), .. 
}, buffer: _, }) ); - assert_eq!(ctx.overlay.get_balance(&origin), 0); - assert_eq!(ctx.overlay.get_balance(&dest), 0); + assert_eq!(get_balance(&origin), 0); + assert_eq!(get_balance(&dest), 0); }); } @@ -1281,7 +1288,7 @@ mod tests { ExtBuilder::default().build().execute_with(|| { let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.instantiate_contract(&BOB, return_ch).unwrap(); + place_contract(&BOB, return_ch); let result = ctx.call( dest, @@ -1312,7 +1319,7 @@ mod tests { ExtBuilder::default().build().execute_with(|| { let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.instantiate_contract(&BOB, return_ch).unwrap(); + place_contract(&BOB, return_ch); let result = ctx.call( dest, @@ -1340,7 +1347,7 @@ mod tests { ExtBuilder::default().build().execute_with(|| { let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.instantiate_contract(&BOB, input_data_ch).unwrap(); + place_contract(&BOB, input_data_ch); let result = ctx.call( BOB, @@ -1366,7 +1373,7 @@ mod tests { let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.set_balance(&ALICE, 100); + set_balance(&ALICE, 100); let result = ctx.instantiate( 1, @@ -1414,8 +1421,8 @@ mod tests { ExtBuilder::default().build().execute_with(|| { let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.set_balance(&BOB, 1); - ctx.overlay.instantiate_contract(&BOB, recurse_ch).unwrap(); + set_balance(&BOB, 1); + place_contract(&BOB, recurse_ch); let result = ctx.call( BOB, @@ -1460,8 +1467,8 @@ mod tests { let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.instantiate_contract(&dest, bob_ch).unwrap(); - ctx.overlay.instantiate_contract(&CHARLIE, charlie_ch).unwrap(); + place_contract(&dest, bob_ch); + place_contract(&CHARLIE, charlie_ch); let result = ctx.call( dest, @@ -1501,8 +1508,8 @@ mod tests { ExtBuilder::default().build().execute_with(|| { let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.instantiate_contract(&BOB, bob_ch).unwrap(); - ctx.overlay.instantiate_contract(&CHARLIE, charlie_ch).unwrap(); + place_contract(&BOB, bob_ch); + place_contract(&CHARLIE, charlie_ch); let result = ctx.call( BOB, @@ -1550,7 +1557,7 @@ mod tests { ExtBuilder::default().existential_deposit(15).build().execute_with(|| { let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.set_balance(&ALICE, 1000); + set_balance(&ALICE, 1000); let instantiated_contract_address = assert_matches!( ctx.instantiate( @@ -1564,16 +1571,9 @@ mod tests { // Check that the newly created account has the expected code hash and // there are instantiation event. 
- assert_eq!(ctx.overlay.get_code_hash(&instantiated_contract_address).unwrap(), dummy_ch); - assert_eq!(&ctx.events(), &[ - DeferredAction::DepositEvent { - event: RawEvent::Transfer(ALICE, instantiated_contract_address, 100), - topics: Vec::new(), - }, - DeferredAction::DepositEvent { - event: RawEvent::Instantiated(ALICE, instantiated_contract_address), - topics: Vec::new(), - } + assert_eq!(storage::code_hash::(&instantiated_contract_address).unwrap(), dummy_ch); + assert_eq!(&events(), &[ + RawEvent::Instantiated(ALICE, instantiated_contract_address) ]); }); } @@ -1590,7 +1590,7 @@ mod tests { ExtBuilder::default().existential_deposit(15).build().execute_with(|| { let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.set_balance(&ALICE, 1000); + set_balance(&ALICE, 1000); let instantiated_contract_address = assert_matches!( ctx.instantiate( @@ -1603,8 +1603,8 @@ mod tests { ); // Check that the account has not been created. - assert!(ctx.overlay.get_code_hash(&instantiated_contract_address).is_none()); - assert!(ctx.events().is_empty()); + assert!(storage::code_hash::(&instantiated_contract_address).is_err()); + assert!(events().is_empty()); }); } @@ -1635,9 +1635,9 @@ mod tests { ExtBuilder::default().existential_deposit(15).build().execute_with(|| { let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.set_balance(&ALICE, 1000); - ctx.overlay.set_balance(&BOB, 100); - ctx.overlay.instantiate_contract(&BOB, instantiator_ch).unwrap(); + set_balance(&ALICE, 1000); + set_balance(&BOB, 100); + place_contract(&BOB, instantiator_ch); assert_matches!( ctx.call(BOB, 20, &mut GasMeter::::new(GAS_LIMIT), vec![]), @@ -1648,20 +1648,9 @@ mod tests { // Check that the newly created account has the expected code hash and // there are instantiation event. - assert_eq!(ctx.overlay.get_code_hash(&instantiated_contract_address).unwrap(), dummy_ch); - assert_eq!(&ctx.events(), &[ - DeferredAction::DepositEvent { - event: RawEvent::Transfer(ALICE, BOB, 20), - topics: Vec::new(), - }, - DeferredAction::DepositEvent { - event: RawEvent::Transfer(BOB, instantiated_contract_address, 15), - topics: Vec::new(), - }, - DeferredAction::DepositEvent { - event: RawEvent::Instantiated(BOB, instantiated_contract_address), - topics: Vec::new(), - }, + assert_eq!(storage::code_hash::(&instantiated_contract_address).unwrap(), dummy_ch); + assert_eq!(&events(), &[ + RawEvent::Instantiated(BOB, instantiated_contract_address) ]); }); } @@ -1695,9 +1684,9 @@ mod tests { ExtBuilder::default().existential_deposit(15).build().execute_with(|| { let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.set_balance(&ALICE, 1000); - ctx.overlay.set_balance(&BOB, 100); - ctx.overlay.instantiate_contract(&BOB, instantiator_ch).unwrap(); + set_balance(&ALICE, 1000); + set_balance(&BOB, 100); + place_contract(&BOB, instantiator_ch); assert_matches!( ctx.call(BOB, 20, &mut GasMeter::::new(GAS_LIMIT), vec![]), @@ -1706,12 +1695,7 @@ mod tests { // The contract wasn't instantiated so we don't expect to see an instantiation // event here. 
- assert_eq!(&ctx.events(), &[ - DeferredAction::DepositEvent { - event: RawEvent::Transfer(ALICE, BOB, 20), - topics: Vec::new(), - }, - ]); + assert_eq!(&events(), &[]); }); } @@ -1732,7 +1716,7 @@ mod tests { .execute_with(|| { let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.set_balance(&ALICE, 1000); + set_balance(&ALICE, 1000); assert_matches!( ctx.instantiate( @@ -1748,7 +1732,7 @@ mod tests { ); assert_eq!( - &ctx.events(), + &events(), &[] ); }); @@ -1768,8 +1752,7 @@ mod tests { ExtBuilder::default().build().execute_with(|| { let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - - ctx.overlay.set_balance(&ALICE, 100); + set_balance(&ALICE, 100); let result = ctx.instantiate( 1, diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 245c95a4fa..c12029a856 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -81,8 +81,7 @@ #[macro_use] mod gas; - -mod account_db; +mod storage; mod exec; mod wasm; mod rent; @@ -91,7 +90,6 @@ mod rent; mod tests; use crate::exec::ExecutionContext; -use crate::account_db::{AccountDb, DirectAccountDb}; use crate::wasm::{WasmLoader, WasmVm}; pub use crate::gas::{Gas, GasMeter}; @@ -102,7 +100,6 @@ use serde::{Serialize, Deserialize}; use sp_core::crypto::UncheckedFrom; use sp_std::{prelude::*, marker::PhantomData, fmt::Debug}; use codec::{Codec, Encode, Decode}; -use sp_io::hashing::blake2_256; use sp_runtime::{ traits::{ Hash, StaticLookup, Zero, MaybeSerializeDeserialize, Member, @@ -114,7 +111,7 @@ use frame_support::dispatch::{ }; use frame_support::{ Parameter, decl_module, decl_event, decl_storage, decl_error, - parameter_types, IsSubType, storage::child::{self, ChildInfo}, + parameter_types, IsSubType, storage::child::ChildInfo, }; use frame_support::traits::{OnUnbalanced, Currency, Get, Time, Randomness}; use frame_support::weights::GetDispatchInfo; @@ -129,11 +126,6 @@ pub trait ContractAddressFor { fn contract_address_for(code_hash: &CodeHash, data: &[u8], origin: &AccountId) -> AccountId; } -/// A function that returns the fee for dispatching a `Call`. -pub trait ComputeDispatchFee { - fn compute_dispatch_fee(call: &Call) -> Balance; -} - /// Information for managing an account and its sub trie abstraction. /// This is the required info to cache for an account #[derive(Encode, Decode, RuntimeDebug)] @@ -255,6 +247,12 @@ where } } +impl From> for ContractInfo { + fn from(alive_info: AliveContractInfo) -> Self { + Self::Alive(alive_info) + } +} + /// Get a trie id (trie id must be unique and collision resistant depending upon its context). /// Note that it is different than encode because trie id should be collision resistant /// (being a proper unique identifier). 
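The next hunk switches `Module::get_storage` from the `AccountDb` abstraction to the free functions of the new `storage.rs` module added later in this patch. As a rough usage sketch of that API: `put_and_fetch` below is a hypothetical helper (not part of the patch), and the snippet assumes it lives inside the pallet crate so that `Trait`, `ContractInfoOf` and the `storage` module are importable, much like the `test_utils` module added to `tests.rs` further down.

    use frame_support::StorageMap;
    use crate::{exec::StorageKey, storage, ContractInfoOf, Trait};

    /// Hypothetical helper: write a value into a live contract's child trie and
    /// read it back through the new free-function API.
    fn put_and_fetch<T: Trait>(
        who: &T::AccountId,
        key: &StorageKey,
        value: Vec<u8>,
    ) -> Option<Vec<u8>> {
        // Storage access needs the contract's trie id, looked up via `ContractInfoOf`.
        let info = <ContractInfoOf<T>>::get(who)?.get_alive()?;

        // `write_contract_storage` also maintains the pair-count/size bookkeeping and
        // fails with `ContractAbsentError` if `who` is not a live contract.
        storage::write_contract_storage::<T>(who, &info.trie_id, key, Some(value)).ok()?;

        // Reads only need the trie id; no account lookup is involved.
        storage::read_contract_storage(&info.trie_id, key)
    }
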
@@ -612,12 +610,7 @@ impl Module { .get_alive() .ok_or(ContractAccessError::IsTombstone)?; - let maybe_value = AccountDb::::get_storage( - &DirectAccountDb, - &address, - Some(&contract_info.trie_id), - &key, - ); + let maybe_value = storage::read_contract_storage(&contract_info.trie_id, &key); Ok(maybe_value) } @@ -636,7 +629,7 @@ impl Module { fn execute_wasm( origin: T::AccountId, gas_meter: &mut GasMeter, - func: impl FnOnce(&mut ExecutionContext, &mut GasMeter) -> ExecResult + func: impl FnOnce(&mut ExecutionContext, &mut GasMeter) -> ExecResult, ) -> ExecResult { let cfg = Config::preload(); let vm = WasmVm::new(&cfg.schedule); @@ -645,22 +638,10 @@ impl Module { let result = func(&mut ctx, gas_meter); - if result.as_ref().map(|output| output.is_success()).unwrap_or(false) { - // Commit all changes that made it thus far into the persistent storage. - DirectAccountDb.commit(ctx.overlay.into_change_set()); - } - // Execute deferred actions. ctx.deferred.into_iter().for_each(|deferred| { use self::exec::DeferredAction::*; match deferred { - DepositEvent { - topics, - event, - } => >::deposit_event_indexed( - &*topics, - ::Event::from(event).into(), - ), DispatchRuntimeCall { origin: who, call, @@ -674,112 +655,11 @@ impl Module { gas_meter.refund(post_info.calc_unspent(&info)); Self::deposit_event(RawEvent::Dispatched(who, result.is_ok())); } - RestoreTo { - donor, - dest, - code_hash, - rent_allowance, - delta, - } => { - let result = Self::restore_to( - donor.clone(), dest.clone(), code_hash.clone(), rent_allowance.clone(), delta - ); - Self::deposit_event( - RawEvent::Restored(donor, dest, code_hash, rent_allowance, result.is_ok()) - ); - } } }); result } - - fn restore_to( - origin: T::AccountId, - dest: T::AccountId, - code_hash: CodeHash, - rent_allowance: BalanceOf, - delta: Vec, - ) -> DispatchResult { - let mut origin_contract = >::get(&origin) - .and_then(|c| c.get_alive()) - .ok_or(Error::::InvalidSourceContract)?; - - let current_block = >::block_number(); - - if origin_contract.last_write == Some(current_block) { - Err(Error::::InvalidContractOrigin)? - } - - let dest_tombstone = >::get(&dest) - .and_then(|c| c.get_tombstone()) - .ok_or(Error::::InvalidDestinationContract)?; - - let last_write = if !delta.is_empty() { - Some(current_block) - } else { - origin_contract.last_write - }; - - let key_values_taken = delta.iter() - .filter_map(|key| { - child::get_raw( - &origin_contract.child_trie_info(), - &blake2_256(key), - ).map(|value| { - child::kill( - &origin_contract.child_trie_info(), - &blake2_256(key), - ); - - (key, value) - }) - }) - .collect::>(); - - let tombstone = >::new( - // This operation is cheap enough because last_write (delta not included) - // is not this block as it has been checked earlier. 
- &child::root( - &origin_contract.child_trie_info(), - )[..], - code_hash, - ); - - if tombstone != dest_tombstone { - for (key, value) in key_values_taken { - child::put_raw( - &origin_contract.child_trie_info(), - &blake2_256(key), - &value, - ); - } - - return Err(Error::::InvalidTombstone.into()); - } - - origin_contract.storage_size -= key_values_taken.iter() - .map(|(_, value)| value.len() as u32) - .sum::(); - - >::remove(&origin); - >::insert(&dest, ContractInfo::Alive(RawAliveContractInfo { - trie_id: origin_contract.trie_id, - storage_size: origin_contract.storage_size, - empty_pair_count: origin_contract.empty_pair_count, - total_pair_count: origin_contract.total_pair_count, - code_hash, - rent_allowance, - deduct_block: current_block, - last_write, - })); - - let origin_free_balance = T::Currency::free_balance(&origin); - T::Currency::make_free_balance_be(&origin, >::zero()); - T::Currency::deposit_creating(&dest, origin_free_balance); - - Ok(()) - } } decl_event! { @@ -789,9 +669,6 @@ decl_event! { ::AccountId, ::Hash { - /// Transfer happened `from` to `to` with given `value` as part of a `call` or `instantiate`. - Transfer(AccountId, AccountId, Balance), - /// Contract deployed by address at the specified address. Instantiated(AccountId, AccountId), @@ -803,7 +680,7 @@ decl_event! { /// - `tombstone`: `bool`: True if the evicted contract left behind a tombstone. Evicted(AccountId, bool), - /// Restoration for a contract has been initiated. + /// Restoration for a contract has been successful. /// /// # Params /// @@ -811,8 +688,7 @@ decl_event! { /// - `dest`: `AccountId`: Account ID of the restored contract /// - `code_hash`: `Hash`: Code hash of the restored contract /// - `rent_allowance: `Balance`: Rent allowance of the restored contract - /// - `success`: `bool`: True if the restoration was successful - Restored(AccountId, AccountId, Hash, Balance, bool), + Restored(AccountId, AccountId, Hash, Balance), /// Code with the specified hash has been stored. CodeStored(Hash), diff --git a/frame/contracts/src/rent.rs b/frame/contracts/src/rent.rs index 1d8f474627..6afd85aa8e 100644 --- a/frame/contracts/src/rent.rs +++ b/frame/contracts/src/rent.rs @@ -18,8 +18,10 @@ use crate::{ AliveContractInfo, BalanceOf, ContractInfo, ContractInfoOf, Module, RawEvent, - TombstoneContractInfo, Trait, + TombstoneContractInfo, Trait, CodeHash, }; +use sp_std::prelude::*; +use sp_io::hashing::blake2_256; use frame_support::storage::child; use frame_support::traits::{Currency, ExistenceRequirement, Get, OnUnbalanced, WithdrawReason}; use frame_support::StorageMap; @@ -396,3 +398,90 @@ pub fn compute_rent_projection( current_block_number + blocks_left, )) } + +/// Restores the destination account using the origin as prototype. +/// +/// The restoration will be performed iff: +/// - origin exists and is alive, +/// - the origin's storage is not written in the current block +/// - the restored account has tombstone +/// - the tombstone matches the hash of the origin storage root, and code hash. +/// +/// Upon succesful restoration, `origin` will be destroyed, all its funds are transferred to +/// the restored account. The restored account will inherit the last write block and its last +/// deduct block will be set to the current block. 
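The conditions listed above map directly onto the early returns of `restore_to` below. As a toy model of that control flow only (simplified types, no child-trie hashing; in the real code the comparison builds a `TombstoneContractInfo` from the origin's child-trie root together with the code hash, and the function also moves the storage delta, rent allowance and the origin's balance):

    #[derive(Clone, PartialEq)]
    struct Tombstone { storage_root: Vec<u8>, code_hash: [u8; 32] }

    enum ContractInfo {
        Alive { last_write: Option<u64>, storage_root: Vec<u8> },
        Tombstone(Tombstone),
    }

    /// Toy model of the precondition checks performed by `rent::restore_to`.
    fn restore_to(
        origin: &ContractInfo,
        dest: &ContractInfo,
        code_hash: [u8; 32],
        current_block: u64,
    ) -> Result<(), &'static str> {
        // 1. The origin must exist and be alive.
        let (last_write, storage_root) = match origin {
            ContractInfo::Alive { last_write, storage_root } => (last_write, storage_root),
            _ => return Err("Cannot restore from inexisting or tombstone contract"),
        };

        // 2. The origin's storage must not have been written in the current block.
        if *last_write == Some(current_block) {
            return Err("Origin TrieId written in the current block");
        }

        // 3. The destination must be a tombstone ...
        let dest_tombstone = match dest {
            ContractInfo::Tombstone(t) => t,
            _ => return Err("Cannot restore to inexisting or alive contract"),
        };

        // 4. ... and that tombstone must match the origin's storage root and code hash.
        let candidate = Tombstone { storage_root: storage_root.clone(), code_hash };
        if candidate != *dest_tombstone {
            return Err("Tombstones don't match");
        }

        Ok(())
    }

The error strings are the ones returned by the actual function that follows.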
+pub fn restore_to( + origin: T::AccountId, + dest: T::AccountId, + code_hash: CodeHash, + rent_allowance: BalanceOf, + delta: Vec, +) -> Result<(), &'static str> { + let mut origin_contract = >::get(&origin) + .and_then(|c| c.get_alive()) + .ok_or("Cannot restore from inexisting or tombstone contract")?; + + let child_trie_info = origin_contract.child_trie_info(); + + let current_block = >::block_number(); + + if origin_contract.last_write == Some(current_block) { + return Err("Origin TrieId written in the current block"); + } + + let dest_tombstone = >::get(&dest) + .and_then(|c| c.get_tombstone()) + .ok_or("Cannot restore to inexisting or alive contract")?; + + let last_write = if !delta.is_empty() { + Some(current_block) + } else { + origin_contract.last_write + }; + + let key_values_taken = delta.iter() + .filter_map(|key| { + child::get_raw(&child_trie_info, &blake2_256(key)).map(|value| { + child::kill(&child_trie_info, &blake2_256(key)); + (key, value) + }) + }) + .collect::>(); + + let tombstone = >::new( + // This operation is cheap enough because last_write (delta not included) + // is not this block as it has been checked earlier. + &child::root(&child_trie_info)[..], + code_hash, + ); + + if tombstone != dest_tombstone { + for (key, value) in key_values_taken { + child::put_raw(&child_trie_info, &blake2_256(key), &value); + } + + return Err("Tombstones don't match"); + } + + origin_contract.storage_size -= key_values_taken.iter() + .map(|(_, value)| value.len() as u32) + .sum::(); + + >::remove(&origin); + >::insert(&dest, ContractInfo::Alive(AliveContractInfo:: { + trie_id: origin_contract.trie_id, + storage_size: origin_contract.storage_size, + empty_pair_count: origin_contract.empty_pair_count, + total_pair_count: origin_contract.total_pair_count, + code_hash, + rent_allowance, + deduct_block: current_block, + last_write, + })); + + let origin_free_balance = T::Currency::free_balance(&origin); + T::Currency::make_free_balance_be(&origin, >::zero()); + T::Currency::deposit_creating(&dest, origin_free_balance); + + Ok(()) +} diff --git a/frame/contracts/src/storage.rs b/frame/contracts/src/storage.rs new file mode 100644 index 0000000000..4c5ad892a9 --- /dev/null +++ b/frame/contracts/src/storage.rs @@ -0,0 +1,195 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! This module contains routines for accessing and altering a contract related state. + +use crate::{ + exec::{AccountIdOf, StorageKey}, + AliveContractInfo, BalanceOf, CodeHash, ContractInfo, ContractInfoOf, Trait, TrieId, +}; +use sp_std::prelude::*; +use sp_io::hashing::blake2_256; +use sp_runtime::traits::Bounded; +use frame_support::{storage::child, StorageMap}; + +/// An error that means that the account requested either doesn't exist or represents a tombstone +/// account. 
+#[cfg_attr(test, derive(PartialEq, Eq, Debug))] +pub struct ContractAbsentError; + +/// Reads a storage kv pair of a contract. +/// +/// The read is performed from the `trie_id` only. The `address` is not necessary. If the contract +/// doesn't store under the given `key` `None` is returned. +pub fn read_contract_storage(trie_id: &TrieId, key: &StorageKey) -> Option> { + child::get_raw(&crate::child_trie_info(&trie_id), &blake2_256(key)) +} + +/// Update a storage entry into a contract's kv storage. +/// +/// If the `opt_new_value` is `None` then the kv pair is removed. +/// +/// This function also updates the bookkeeping info such as: number of total non-empty pairs a +/// contract owns, the last block the storage was written to, etc. That's why, in contrast to +/// `read_contract_storage`, this function also requires the `account` ID. +/// +/// If the contract specified by the id `account` doesn't exist `Err` is returned.` +pub fn write_contract_storage( + account: &AccountIdOf, + trie_id: &TrieId, + key: &StorageKey, + opt_new_value: Option>, +) -> Result<(), ContractAbsentError> { + let mut new_info = match >::get(account) { + Some(ContractInfo::Alive(alive)) => alive, + None | Some(ContractInfo::Tombstone(_)) => return Err(ContractAbsentError), + }; + + let hashed_key = blake2_256(key); + let child_trie_info = &crate::child_trie_info(&trie_id); + + // In order to correctly update the book keeping we need to fetch the previous + // value of the key-value pair. + // + // It might be a bit more clean if we had an API that supported getting the size + // of the value without going through the loading of it. But at the moment of + // writing, there is no such API. + // + // That's not a show stopper in any case, since the performance cost is + // dominated by the trie traversal anyway. + let opt_prev_value = child::get_raw(&child_trie_info, &hashed_key); + + // Update the total number of KV pairs and the number of empty pairs. + match (&opt_prev_value, &opt_new_value) { + (Some(prev_value), None) => { + new_info.total_pair_count -= 1; + if prev_value.is_empty() { + new_info.empty_pair_count -= 1; + } + }, + (None, Some(new_value)) => { + new_info.total_pair_count += 1; + if new_value.is_empty() { + new_info.empty_pair_count += 1; + } + }, + (Some(prev_value), Some(new_value)) => { + if prev_value.is_empty() { + new_info.empty_pair_count -= 1; + } + if new_value.is_empty() { + new_info.empty_pair_count += 1; + } + } + (None, None) => {} + } + + // Update the total storage size. + let prev_value_len = opt_prev_value + .as_ref() + .map(|old_value| old_value.len() as u32) + .unwrap_or(0); + let new_value_len = opt_new_value + .as_ref() + .map(|new_value| new_value.len() as u32) + .unwrap_or(0); + new_info.storage_size = new_info + .storage_size + .saturating_add(new_value_len) + .saturating_sub(prev_value_len); + + new_info.last_write = Some(>::block_number()); + >::insert(&account, ContractInfo::Alive(new_info)); + + // Finally, perform the change on the storage. + match opt_new_value { + Some(new_value) => child::put_raw(&child_trie_info, &hashed_key, &new_value[..]), + None => child::kill(&child_trie_info, &hashed_key), + } + + Ok(()) +} + +/// Returns the rent allowance set for the contract give by the account id. +pub fn rent_allowance( + account: &AccountIdOf, +) -> Result, ContractAbsentError> { + >::get(account) + .and_then(|i| i.as_alive().map(|i| i.rent_allowance)) + .ok_or(ContractAbsentError) +} + +/// Set the rent allowance for the contract given by the account id. 
+/// +/// Returns `Err` if the contract doesn't exist or is a tombstone. +pub fn set_rent_allowance( + account: &AccountIdOf, + rent_allowance: BalanceOf, +) -> Result<(), ContractAbsentError> { + >::mutate(account, |maybe_contract_info| match maybe_contract_info { + Some(ContractInfo::Alive(ref mut alive_info)) => { + alive_info.rent_allowance = rent_allowance; + Ok(()) + } + _ => Err(ContractAbsentError), + }) +} + +/// Returns the code hash of the contract specified by `account` ID. +pub fn code_hash(account: &AccountIdOf) -> Result, ContractAbsentError> { + >::get(account) + .and_then(|i| i.as_alive().map(|i| i.code_hash)) + .ok_or(ContractAbsentError) +} + +/// Creates a new contract descriptor in the storage with the given code hash at the given address. +/// +/// Returns `Err` if there is already a contract (or a tombstone) exists at the given address. +pub fn place_contract( + account: &AccountIdOf, + trie_id: TrieId, + ch: CodeHash, +) -> Result<(), &'static str> { + >::mutate(account, |maybe_contract_info| { + if maybe_contract_info.is_some() { + return Err("Alive contract or tombstone already exists"); + } + + *maybe_contract_info = Some( + AliveContractInfo:: { + code_hash: ch, + storage_size: 0, + trie_id, + deduct_block: >::block_number(), + rent_allowance: >::max_value(), + empty_pair_count: 0, + total_pair_count: 0, + last_write: None, + } + .into(), + ); + + Ok(()) + }) +} + +/// Removes the contract and all the storage associated with it. +/// +/// This function doesn't affect the account. +pub fn destroy_contract(address: &AccountIdOf, trie_id: &TrieId) { + >::remove(address); + child::kill_storage(&crate::child_trie_info(&trie_id)); +} diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index a98fdf2d25..df6afa8ac5 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -16,9 +16,7 @@ use crate::{ BalanceOf, ContractAddressFor, ContractInfo, ContractInfoOf, GenesisConfig, Module, - RawAliveContractInfo, RawEvent, Trait, TrieId, Schedule, TrieIdGenerator, - account_db::{AccountDb, DirectAccountDb, OverlayAccountDb}, - gas::Gas, + RawAliveContractInfo, RawEvent, Trait, TrieId, Schedule, TrieIdGenerator, gas::Gas, }; use assert_matches::assert_matches; use hex_literal::*; @@ -64,6 +62,34 @@ impl_outer_dispatch! { } } +pub mod test_utils { + use super::{Test, Balances}; + use crate::{ContractInfoOf, TrieIdGenerator, CodeHash}; + use crate::storage::{write_contract_storage, read_contract_storage}; + use crate::exec::StorageKey; + use frame_support::{StorageMap, traits::Currency}; + + pub fn set_storage(addr: &u64, key: &StorageKey, value: Option>) { + let contract_info = >::get(&addr).unwrap().get_alive().unwrap(); + write_contract_storage::(&1, &contract_info.trie_id, key, value).unwrap(); + } + pub fn get_storage(addr: &u64, key: &StorageKey) -> Option> { + let contract_info = >::get(&addr).unwrap().get_alive().unwrap(); + read_contract_storage(&contract_info.trie_id, key) + } + pub fn place_contract(address: &u64, code_hash: CodeHash) { + let trie_id = ::TrieIdGenerator::trie_id(address); + crate::storage::place_contract::(&address, trie_id, code_hash).unwrap() + } + pub fn set_balance(who: &u64, amount: u64) { + let imbalance = Balances::deposit_creating(who, amount); + drop(imbalance); + } + pub fn get_balance(who: &u64) -> u64 { + Balances::free_balance(who) + } +} + thread_local! 
{ static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); } @@ -280,6 +306,8 @@ fn returns_base_call_cost() { #[test] fn account_removal_does_not_remove_storage() { + use self::test_utils::{set_storage, get_storage}; + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { let trie_id1 = ::TrieIdGenerator::trie_id(&1); let trie_id2 = ::TrieIdGenerator::trie_id(&2); @@ -288,8 +316,7 @@ fn account_removal_does_not_remove_storage() { // Set up two accounts with free balance above the existential threshold. { - let _ = Balances::deposit_creating(&1, 110); - ContractInfoOf::::insert(1, &ContractInfo::Alive(RawAliveContractInfo { + let alice_contract_info = ContractInfo::Alive(RawAliveContractInfo { trie_id: trie_id1.clone(), storage_size: 0, empty_pair_count: 0, @@ -298,15 +325,13 @@ fn account_removal_does_not_remove_storage() { code_hash: H256::repeat_byte(1), rent_allowance: 40, last_write: None, - })); + }); + let _ = Balances::deposit_creating(&ALICE, 110); + ContractInfoOf::::insert(ALICE, &alice_contract_info); + set_storage(&ALICE, &key1, Some(b"1".to_vec())); + set_storage(&ALICE, &key2, Some(b"2".to_vec())); - let mut overlay = OverlayAccountDb::::new(&DirectAccountDb); - overlay.set_storage(&1, key1.clone(), Some(b"1".to_vec())); - overlay.set_storage(&1, key2.clone(), Some(b"2".to_vec())); - DirectAccountDb.commit(overlay.into_change_set()); - - let _ = Balances::deposit_creating(&2, 110); - ContractInfoOf::::insert(2, &ContractInfo::Alive(RawAliveContractInfo { + let bob_contract_info = ContractInfo::Alive(RawAliveContractInfo { trie_id: trie_id2.clone(), storage_size: 0, empty_pair_count: 0, @@ -315,40 +340,39 @@ fn account_removal_does_not_remove_storage() { code_hash: H256::repeat_byte(2), rent_allowance: 40, last_write: None, - })); - - let mut overlay = OverlayAccountDb::::new(&DirectAccountDb); - overlay.set_storage(&2, key1.clone(), Some(b"3".to_vec())); - overlay.set_storage(&2, key2.clone(), Some(b"4".to_vec())); - DirectAccountDb.commit(overlay.into_change_set()); + }); + let _ = Balances::deposit_creating(&BOB, 110); + ContractInfoOf::::insert(BOB, &bob_contract_info); + set_storage(&BOB, &key1, Some(b"3".to_vec())); + set_storage(&BOB, &key2, Some(b"4".to_vec())); } - // Transfer funds from account 1 of such amount that after this transfer - // the balance of account 1 will be below the existential threshold. + // Transfer funds from ALICE account of such amount that after this transfer + // the balance of the ALICE account will be below the existential threshold. // // This does not remove the contract storage as we are not notified about a // account removal. This cannot happen in reality because a contract can only // remove itself by `ext_terminate`. There is no external event that can remove // the account appart from that. - assert_ok!(Balances::transfer(Origin::signed(1), 2, 20)); + assert_ok!(Balances::transfer(Origin::signed(ALICE), BOB, 20)); // Verify that no entries are removed. 
{ assert_eq!( - >::get_storage(&DirectAccountDb, &1, Some(&trie_id1), key1), + get_storage(&ALICE, key1), Some(b"1".to_vec()) ); assert_eq!( - >::get_storage(&DirectAccountDb, &1, Some(&trie_id1), key2), + get_storage(&ALICE, key2), Some(b"2".to_vec()) ); assert_eq!( - >::get_storage(&DirectAccountDb, &2, Some(&trie_id2), key1), + get_storage(&BOB, key1), Some(b"3".to_vec()) ); assert_eq!( - >::get_storage(&DirectAccountDb, &2, Some(&trie_id2), key2), + get_storage(&BOB, key2), Some(b"4".to_vec()) ); } @@ -376,7 +400,7 @@ fn instantiate_and_call_and_deposit_event() { vec![], ); - assert_eq!(System::events(), vec![ + pretty_assertions::assert_eq!(System::events(), vec![ EventRecord { phase: Phase::Initialization, event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), @@ -406,7 +430,9 @@ fn instantiate_and_call_and_deposit_event() { }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Transfer(ALICE, BOB, 100)), + event: MetaEvent::balances( + pallet_balances::RawEvent::Transfer(ALICE, BOB, 100) + ), topics: vec![], }, EventRecord { @@ -479,7 +505,7 @@ fn dispatch_call() { vec![], )); - assert_eq!(System::events(), vec![ + pretty_assertions::assert_eq!(System::events(), vec![ EventRecord { phase: Phase::Initialization, event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), @@ -509,7 +535,9 @@ fn dispatch_call() { }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Transfer(ALICE, BOB, 100)), + event: MetaEvent::balances( + pallet_balances::RawEvent::Transfer(ALICE, BOB, 100) + ), topics: vec![], }, EventRecord { @@ -606,7 +634,7 @@ fn dispatch_call_not_dispatched_after_top_level_transaction_failure() { ), "contract trapped during execution" ); - assert_eq!(System::events(), vec![ + pretty_assertions::assert_eq!(System::events(), vec![ EventRecord { phase: Phase::Initialization, event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), @@ -636,7 +664,9 @@ fn dispatch_call_not_dispatched_after_top_level_transaction_failure() { }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Transfer(ALICE, BOB, 100)), + event: MetaEvent::balances( + pallet_balances::RawEvent::Transfer(ALICE, BOB, 100) + ), topics: vec![], }, EventRecord { @@ -1323,9 +1353,6 @@ fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: // Advance 4 blocks, to the 5th. initialize_block(5); - // Preserve `BOB`'s code hash for later introspection. - let bob_code_hash = ContractInfoOf::::get(BOB).unwrap() - .get_alive().unwrap().code_hash; // Call `BOB`, which makes it pay rent. Since the rent allowance is set to 0 // we expect that it will get removed leaving tombstone. assert_err_ignore_postinfo!( @@ -1367,17 +1394,25 @@ fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: // Perform a call to `DJANGO`. This should either perform restoration successfully or // fail depending on the test parameters. - assert_ok!(Contracts::call( - Origin::signed(ALICE), - DJANGO, - 0, - GAS_LIMIT, - vec![], - )); + let perform_the_restoration = || { + Contracts::call( + Origin::signed(ALICE), + DJANGO, + 0, + GAS_LIMIT, + vec![], + ) + }; if test_different_storage || test_restore_to_with_dirty_storage { // Parametrization of the test imply restoration failure. Check that `DJANGO` aka // restoration contract is still in place and also that `BOB` doesn't exist. 
+ + assert_err_ignore_postinfo!( + perform_the_restoration(), + "contract trapped during execution" + ); + assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); let django_contract = ContractInfoOf::::get(DJANGO).unwrap() .get_alive().unwrap(); @@ -1386,18 +1421,10 @@ fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: assert_eq!(django_contract.deduct_block, System::block_number()); match (test_different_storage, test_restore_to_with_dirty_storage) { (true, false) => { - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts( - RawEvent::Restored(DJANGO, BOB, bob_code_hash, 50, false) - ), - topics: vec![], - }, - ]); + assert_eq!(System::events(), vec![]); } (_, true) => { - assert_eq!(System::events(), vec![ + pretty_assertions::assert_eq!(System::events(), vec![ EventRecord { phase: Phase::Initialization, event: MetaEvent::contracts(RawEvent::Evicted(BOB, true)), @@ -1425,7 +1452,9 @@ fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Transfer(CHARLIE, DJANGO, 30_000)), + event: MetaEvent::balances( + pallet_balances::RawEvent::Transfer(CHARLIE, DJANGO, 30_000) + ), topics: vec![], }, EventRecord { @@ -1433,22 +1462,13 @@ fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: event: MetaEvent::contracts(RawEvent::Instantiated(CHARLIE, DJANGO)), topics: vec![], }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Restored( - DJANGO, - BOB, - bob_code_hash, - 50, - false, - )), - topics: vec![], - }, ]); } _ => unreachable!(), } } else { + assert_ok!(perform_the_restoration()); + // Here we expect that the restoration is succeeded. Check that the restoration // contract `DJANGO` ceased to exist and that `BOB` returned back. 
println!("{:?}", ContractInfoOf::::get(BOB)); @@ -1468,7 +1488,7 @@ fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: EventRecord { phase: Phase::Initialization, event: MetaEvent::contracts( - RawEvent::Restored(DJANGO, BOB, bob_contract.code_hash, 50, true) + RawEvent::Restored(DJANGO, BOB, bob_contract.code_hash, 50) ), topics: vec![], }, diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index cb69cd689b..890915a793 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -229,11 +229,8 @@ mod tests { fn get_storage(&self, key: &StorageKey) -> Option> { self.storage.get(key).cloned() } - fn set_storage(&mut self, key: StorageKey, value: Option>) - -> Result<(), &'static str> - { + fn set_storage(&mut self, key: StorageKey, value: Option>) { *self.storage.entry(key).or_insert(Vec::new()) = value.unwrap_or(Vec::new()); - Ok(()) } fn instantiate( &mut self, @@ -304,19 +301,20 @@ mod tests { fn note_dispatch_call(&mut self, call: Call) { self.dispatches.push(DispatchEntry(call)); } - fn note_restore_to( + fn restore_to( &mut self, dest: u64, code_hash: H256, rent_allowance: u64, delta: Vec, - ) { + ) -> Result<(), &'static str> { self.restores.push(RestoreEntry { dest, code_hash, rent_allowance, delta, }); + Ok(()) } fn caller(&self) -> &u64 { &42 @@ -386,9 +384,7 @@ mod tests { fn get_storage(&self, key: &[u8; 32]) -> Option> { (**self).get_storage(key) } - fn set_storage(&mut self, key: [u8; 32], value: Option>) - -> Result<(), &'static str> - { + fn set_storage(&mut self, key: [u8; 32], value: Option>) { (**self).set_storage(key, value) } fn instantiate( @@ -427,14 +423,14 @@ mod tests { fn note_dispatch_call(&mut self, call: Call) { (**self).note_dispatch_call(call) } - fn note_restore_to( + fn restore_to( &mut self, dest: u64, code_hash: H256, rent_allowance: u64, delta: Vec, - ) { - (**self).note_restore_to( + ) -> Result<(), &'static str> { + (**self).restore_to( dest, code_hash, rent_allowance, diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index f87f5d1ef5..b393898835 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -51,6 +51,8 @@ enum SpecialTrap { /// Signals that a trap was generated in response to a succesful call to the /// `ext_terminate` host function. Termination, + /// Signals that a trap was generated because of a successful restoration. + Restoration, } /// Can only be used for one call. 
@@ -100,6 +102,12 @@ pub(crate) fn to_execution_result( data: Vec::new(), }) }, + Some(SpecialTrap::Restoration) => { + return Ok(ExecReturnValue { + status: STATUS_SUCCESS, + data: Vec::new(), + }) + } Some(SpecialTrap::OutOfGas) => { return Err(ExecError { reason: "ran out of gas during contract execution".into(), @@ -387,7 +395,7 @@ define_env!(Env, , let mut key: StorageKey = [0; 32]; read_sandbox_memory_into_buf(ctx, key_ptr, &mut key)?; let value = Some(read_sandbox_memory(ctx, value_ptr, value_len)?); - ctx.ext.set_storage(key, value).map_err(|_| sp_sandbox::HostError)?; + ctx.ext.set_storage(key, value); Ok(()) }, @@ -399,7 +407,7 @@ define_env!(Env, , ext_clear_storage(ctx, key_ptr: u32) => { let mut key: StorageKey = [0; 32]; read_sandbox_memory_into_buf(ctx, key_ptr, &mut key)?; - ctx.ext.set_storage(key, None).map_err(|_| sp_sandbox::HostError)?; + ctx.ext.set_storage(key, None); Ok(()) }, @@ -799,17 +807,18 @@ define_env!(Env, , Ok(()) }, - // Record a request to restore the caller contract to the specified contract. + // Try to restore the given destination contract sacrificing the caller. // - // At the finalization stage, i.e. when all changes from the extrinsic that invoked this - // contract are committed, this function will compute a tombstone hash from the caller's - // storage and the given code hash and if the hash matches the hash found in the tombstone at - // the specified address - kill the caller contract and restore the destination contract and set - // the specified `rent_allowance`. All caller's funds are transferred to the destination. + // This function will compute a tombstone hash from the caller's storage and the given code hash + // and if the hash matches the hash found in the tombstone at the specified address - kill + // the caller contract and restore the destination contract and set the specified `rent_allowance`. + // All caller's funds are transfered to the destination. // - // This function doesn't perform restoration right away but defers it to the end of the - // transaction. If there is no tombstone in the destination address or if the hashes don't match - // then restoration is cancelled and no changes are made. + // If there is no tombstone at the destination address, the hashes don't match or this contract + // instance is already present on the contract call stack, a trap is generated. + // + // Otherwise, the destination contract is restored. This function is diverging and stops execution + // even on success. // // `dest_ptr`, `dest_len` - the pointer and the length of a buffer that encodes `T::AccountId` // with the address of the to be restored contract. @@ -857,14 +866,15 @@ define_env!(Env, , delta }; - ctx.ext.note_restore_to( + if let Ok(()) = ctx.ext.restore_to( dest, code_hash, rent_allowance, delta, - ); - - Ok(()) + ) { + ctx.special_trap = Some(SpecialTrap::Restoration); + } + Err(sp_sandbox::HostError) }, // Returns the size of the scratch buffer. 
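[Editorial note, not part of the patch] The hunks above replace the deferred `note_restore_to` bookkeeping with an eager `restore_to` call whose success is signalled through `SpecialTrap::Restoration` and then surfaced as a successful, diverging execution. A minimal sketch of that control-flow pattern follows; the names and types are hypothetical stand-ins for illustration only, not the pallet's real ones.

    /// Miniature of the "special trap" pattern: the host function always traps,
    /// but a side flag records whether the trap should be reported as success.
    #[derive(Debug, PartialEq)]
    enum SpecialTrap {
        Restoration,
    }

    struct Ctx {
        special_trap: Option<SpecialTrap>,
    }

    fn ext_restore_to(ctx: &mut Ctx, restoration_succeeded: bool) -> Result<(), &'static str> {
        if restoration_succeeded {
            // Mark the trap as benign; result-mapping code can later translate
            // it into a success value instead of an error.
            ctx.special_trap = Some(SpecialTrap::Restoration);
        }
        // Never continue past this point: on success the calling contract has
        // been removed, so execution must stop either way.
        Err("host trap")
    }

    fn main() {
        let mut ctx = Ctx { special_trap: None };
        assert!(ext_restore_to(&mut ctx, true).is_err());
        assert_eq!(ctx.special_trap, Some(SpecialTrap::Restoration));
    }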
-- GitLab From 971e52fb70cc3f615da471436469c04b1b99bb3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Wed, 24 Jun 2020 12:52:49 +0200 Subject: [PATCH 064/144] seal: Refactor ext_gas_price (#6478) * seal: Refactor ext_gas_price * Remove seals dependency on pallet_transaction_payment * Add weight as an argument to ext_gas_price * Fixed documentation nits from review * Do not use unchecked math even in test code --- Cargo.lock | 1 - bin/node/runtime/src/lib.rs | 2 ++ frame/contracts/Cargo.toml | 2 -- frame/contracts/src/exec.rs | 14 +++++------- frame/contracts/src/lib.rs | 17 ++++++++++---- frame/contracts/src/tests.rs | 12 +++------- frame/contracts/src/wasm/mod.rs | 13 ++++++----- frame/contracts/src/wasm/runtime.rs | 8 ++++--- frame/transaction-payment/src/lib.rs | 33 +++++++++++++--------------- 9 files changed, 51 insertions(+), 51 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 08e5102d34..c1ea4a479c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4006,7 +4006,6 @@ dependencies = [ "pallet-contracts-primitives", "pallet-randomness-collective-flip", "pallet-timestamp", - "pallet-transaction-payment", "parity-scale-codec", "parity-wasm 0.41.0", "pretty_assertions", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index cf1b0de8f7..5acaafcab4 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -581,6 +581,7 @@ parameter_types! { impl pallet_contracts::Trait for Runtime { type Time = Timestamp; type Randomness = RandomnessCollectiveFlip; + type Currency = Balances; type Call = Call; type Event = Event; type DetermineContractAddress = pallet_contracts::SimpleAddressDeterminer; @@ -594,6 +595,7 @@ impl pallet_contracts::Trait for Runtime { type SurchargeReward = SurchargeReward; type MaxDepth = pallet_contracts::DefaultMaxDepth; type MaxValueSize = pallet_contracts::DefaultMaxValueSize; + type WeightPrice = pallet_transaction_payment::Module; } impl pallet_sudo::Trait for Runtime { diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index 57c278a3fb..2dee486fcf 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -25,7 +25,6 @@ sp-sandbox = { version = "0.8.0-rc3", default-features = false, path = "../../pr frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } pallet-contracts-primitives = { version = "2.0.0-rc3", default-features = false, path = "common" } -pallet-transaction-payment = { version = "2.0.0-rc3", default-features = false, path = "../transaction-payment" } [dev-dependencies] wabt = "0.9.2" @@ -52,5 +51,4 @@ std = [ "pwasm-utils/std", "wasmi-validation/std", "pallet-contracts-primitives/std", - "pallet-transaction-payment/std", ] diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index ff0d4d9dc0..ba3619195d 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -21,10 +21,11 @@ use crate::rent; use crate::storage; use sp_std::prelude::*; -use sp_runtime::traits::{Bounded, Zero}; +use sp_runtime::traits::{Bounded, Zero, Convert}; use frame_support::{ storage::unhashed, dispatch::DispatchError, traits::{ExistenceRequirement, Currency, Time, Randomness}, + weights::Weight, }; pub type AccountIdOf = ::AccountId; @@ -216,8 +217,8 @@ pub trait Ext { /// Returns `None` if the value doesn't exist. fn get_runtime_storage(&self, key: &[u8]) -> Option>; - /// Returns the price of one weight unit. 
- fn get_weight_price(&self) -> BalanceOf; + /// Returns the price for the specified amount of weight. + fn get_weight_price(&self, weight: Weight) -> BalanceOf; } /// Loader is a companion of the `Vm` trait. It loads an appropriate abstract @@ -874,11 +875,8 @@ where unhashed::get_raw(&key) } - fn get_weight_price(&self) -> BalanceOf { - use pallet_transaction_payment::Module as Payment; - use sp_runtime::SaturatedConversion; - let price = Payment::::weight_to_fee_with_adjustment::(1); - price.saturated_into() + fn get_weight_price(&self, weight: Weight) -> BalanceOf { + T::WeightPrice::convert(weight) } } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index c12029a856..63de1ee164 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -102,7 +102,7 @@ use sp_std::{prelude::*, marker::PhantomData, fmt::Debug}; use codec::{Codec, Encode, Decode}; use sp_runtime::{ traits::{ - Hash, StaticLookup, Zero, MaybeSerializeDeserialize, Member, + Hash, StaticLookup, Zero, MaybeSerializeDeserialize, Member, Convert, }, RuntimeDebug, }; @@ -117,6 +117,7 @@ use frame_support::traits::{OnUnbalanced, Currency, Get, Time, Randomness}; use frame_support::weights::GetDispatchInfo; use frame_system::{self as system, ensure_signed, RawOrigin, ensure_root}; use pallet_contracts_primitives::{RentProjection, ContractAccessError}; +use frame_support::weights::Weight; pub type CodeHash = ::Hash; pub type TrieId = Vec; @@ -289,9 +290,10 @@ where } } -pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +pub type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; pub type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; + <::Currency as Currency<::AccountId>>::NegativeImbalance; parameter_types! { /// A reasonable default value for [`Trait::SignedClaimedHandicap`]. @@ -312,10 +314,13 @@ parameter_types! { pub const DefaultMaxValueSize: u32 = 16_384; } -pub trait Trait: frame_system::Trait + pallet_transaction_payment::Trait { +pub trait Trait: frame_system::Trait { type Time: Time; type Randomness: Randomness; + /// The currency in which fees are paid and contract balances are held. + type Currency: Currency; + /// The outer call dispatch type. type Call: Parameter + @@ -371,6 +376,10 @@ pub trait Trait: frame_system::Trait + pallet_transaction_payment::Trait { /// The maximum size of a storage value in bytes. type MaxValueSize: Get; + + /// Used to answer contracts's queries regarding the current weight price. This is **not** + /// used to calculate the actual fee and is only for informational purposes. + type WeightPrice: Convert>; } /// Simple contract address determiner. 
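[Editorial note, not part of the patch] The new `WeightPrice` associated type introduced above only has to implement `Convert<Weight, Balance>`; the node runtime wires it to `pallet_transaction_payment::Module` so the stored fee multiplier is applied. As a rough, assumed illustration of the required shape (not how the real runtime prices weight), a toy converter with a hard-coded `u64` balance type could look like this:

    use frame_support::weights::Weight;
    use sp_runtime::traits::Convert;

    /// Toy converter: every unit of weight costs exactly one unit of balance.
    pub struct OneToOneWeightPrice;

    impl Convert<Weight, u64> for OneToOneWeightPrice {
        fn convert(weight: Weight) -> u64 {
            // `Weight` is an alias for `u64` in this Substrate version, so this
            // identity conversion type-checks; a real implementation would apply
            // a fee multiplier instead.
            weight
        }
    }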
diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index df6afa8ac5..ae81a83be7 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -30,7 +30,7 @@ use frame_support::{ assert_ok, assert_err_ignore_postinfo, impl_outer_dispatch, impl_outer_event, impl_outer_origin, parameter_types, StorageMap, StorageValue, traits::{Currency, Get}, - weights::{Weight, PostDispatchInfo, IdentityFee}, + weights::{Weight, PostDispatchInfo}, }; use std::cell::RefCell; use frame_system::{self as system, EventRecord, Phase}; @@ -169,17 +169,10 @@ impl Convert> for Test { } } -impl pallet_transaction_payment::Trait for Test { - type Currency = Balances; - type OnTransactionPayment = (); - type TransactionByteFee = TransactionByteFee; - type WeightToFee = IdentityFee>; - type FeeMultiplierUpdate = (); -} - impl Trait for Test { type Time = Timestamp; type Randomness = Randomness; + type Currency = Balances; type Call = Call; type DetermineContractAddress = DummyContractAddressFor; type Event = MetaEvent; @@ -193,6 +186,7 @@ impl Trait for Test { type SurchargeReward = SurchargeReward; type MaxDepth = MaxDepth; type MaxValueSize = MaxValueSize; + type WeightPrice = Self; } type Balances = pallet_balances::Module; diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 890915a793..a4814a1b22 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -162,6 +162,7 @@ mod tests { use hex_literal::hex; use assert_matches::assert_matches; use sp_runtime::DispatchError; + use frame_support::weights::Weight; const GAS_LIMIT: Gas = 10_000_000_000; @@ -373,8 +374,8 @@ mod tests { ) ) } - fn get_weight_price(&self) -> BalanceOf { - 1312_u32.into() + fn get_weight_price(&self, weight: Weight) -> BalanceOf { + BalanceOf::::from(1312_u32).saturating_mul(weight.into()) } } @@ -479,8 +480,8 @@ mod tests { fn get_runtime_storage(&self, key: &[u8]) -> Option> { (**self).get_runtime_storage(key) } - fn get_weight_price(&self) -> BalanceOf { - (**self).get_weight_price() + fn get_weight_price(&self, weight: Weight) -> BalanceOf { + (**self).get_weight_price(weight) } } @@ -1056,7 +1057,7 @@ mod tests { const CODE_GAS_PRICE: &str = r#" (module - (import "env" "ext_gas_price" (func $ext_gas_price)) + (import "env" "ext_gas_price" (func $ext_gas_price (param i64))) (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) @@ -1072,7 +1073,7 @@ mod tests { (func (export "call") ;; This stores the gas price in the scratch buffer - (call $ext_gas_price) + (call $ext_gas_price (i64.const 1)) ;; assert $ext_scratch_size == 8 (call $assert diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index b393898835..8c4d1bfb99 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -696,12 +696,14 @@ define_env!(Env, , Ok(()) }, - // Stores the gas price for the current transaction into the scratch buffer. + // Stores the price for the specified amount of gas in scratch buffer. // // The data is encoded as T::Balance. The current contents of the scratch buffer are overwritten. - ext_gas_price(ctx) => { + // It is recommended to avoid specifying very small values for `gas` as the prices for a single + // gas can be smaller than one. 
+ ext_gas_price(ctx, gas: u64) => { ctx.scratch_buf.clear(); - ctx.ext.get_weight_price().encode_to(&mut ctx.scratch_buf); + ctx.ext.get_weight_price(gas).encode_to(&mut ctx.scratch_buf); Ok(()) }, diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 4d920f8ec5..b993a85da3 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -51,7 +51,7 @@ use sp_runtime::{ }, traits::{ Zero, Saturating, SignedExtension, SaturatedConversion, Convert, Dispatchable, - DispatchInfoOf, PostDispatchInfoOf, UniqueSaturatedFrom, UniqueSaturatedInto, + DispatchInfoOf, PostDispatchInfoOf, }, }; use pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; @@ -340,23 +340,6 @@ impl Module where tip } } -} - -impl Module { - /// Compute the fee for the specified weight. - /// - /// This fee is already adjusted by the per block fee adjustment factor and is therefore the - /// share that the weight contributes to the overall fee of a transaction. - /// - /// This function is generic in order to supply the contracts module with a way to calculate the - /// gas price. The contracts module is not able to put the necessary `BalanceOf` constraints - /// on its trait. This function is not to be used by this module. - pub fn weight_to_fee_with_adjustment(weight: Weight) -> Balance where - Balance: UniqueSaturatedFrom - { - let fee: u128 = Self::weight_to_fee(weight).unique_saturated_into(); - Balance::unique_saturated_from(NextFeeMultiplier::get().saturating_mul_acc_int(fee)) - } fn weight_to_fee(weight: Weight) -> BalanceOf { // cap the weight to the maximum defined in runtime, otherwise it will be the @@ -366,6 +349,20 @@ impl Module { } } +impl Convert> for Module where + T: Trait, + BalanceOf: FixedPointOperand, +{ + /// Compute the fee for the specified weight. + /// + /// This fee is already adjusted by the per block fee adjustment factor and is therefore the + /// share that the weight contributes to the overall fee of a transaction. It is mainly + /// for informational purposes and not used in the actual fee calculation. + fn convert(weight: Weight) -> BalanceOf { + NextFeeMultiplier::get().saturating_mul_int(Self::weight_to_fee(weight)) + } +} + /// Require the transactor pay for themselves and maybe include a tip to gain additional priority /// in the queue. #[derive(Encode, Decode, Clone, Eq, PartialEq)] -- GitLab From 19b4b70e7c7cf966cb5f5669a5e153485943095a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Wed, 24 Jun 2020 13:53:40 +0200 Subject: [PATCH 065/144] seal: Remove ext_dispatch_call and ext_get_runtime_storage (#6464) Those are way too hard to audit and make only sense with specific chains. They shouldn't be in the core API. 
--- bin/node/runtime/src/lib.rs | 1 - frame/contracts/fixtures/dispatch_call.wat | 14 - .../fixtures/dispatch_call_then_trap.wat | 15 - .../fixtures/get_runtime_storage.wat | 74 ----- frame/contracts/fixtures/restoration.wat | 4 +- frame/contracts/fixtures/set_rent.wat | 19 +- frame/contracts/src/exec.rs | 48 +--- frame/contracts/src/lib.rs | 44 +-- frame/contracts/src/tests.rs | 260 ------------------ frame/contracts/src/wasm/mod.rs | 144 ---------- frame/contracts/src/wasm/runtime.rs | 54 ---- 11 files changed, 28 insertions(+), 649 deletions(-) delete mode 100644 frame/contracts/fixtures/dispatch_call.wat delete mode 100644 frame/contracts/fixtures/dispatch_call_then_trap.wat delete mode 100644 frame/contracts/fixtures/get_runtime_storage.wat diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 5acaafcab4..90bb63874e 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -582,7 +582,6 @@ impl pallet_contracts::Trait for Runtime { type Time = Timestamp; type Randomness = RandomnessCollectiveFlip; type Currency = Balances; - type Call = Call; type Event = Event; type DetermineContractAddress = pallet_contracts::SimpleAddressDeterminer; type TrieIdGenerator = pallet_contracts::TrieIdFromParentCounter; diff --git a/frame/contracts/fixtures/dispatch_call.wat b/frame/contracts/fixtures/dispatch_call.wat deleted file mode 100644 index db0995bd6c..0000000000 --- a/frame/contracts/fixtures/dispatch_call.wat +++ /dev/null @@ -1,14 +0,0 @@ -(module - (import "env" "ext_dispatch_call" (func $ext_dispatch_call (param i32 i32))) - (import "env" "memory" (memory 1 1)) - - (func (export "call") - (call $ext_dispatch_call - (i32.const 8) ;; Pointer to the start of encoded call buffer - (i32.const 11) ;; Length of the buffer - ) - ) - (func (export "deploy")) - - (data (i32.const 8) "\00\00\03\00\00\00\00\00\00\00\C8") -) diff --git a/frame/contracts/fixtures/dispatch_call_then_trap.wat b/frame/contracts/fixtures/dispatch_call_then_trap.wat deleted file mode 100644 index ce949d6823..0000000000 --- a/frame/contracts/fixtures/dispatch_call_then_trap.wat +++ /dev/null @@ -1,15 +0,0 @@ -(module - (import "env" "ext_dispatch_call" (func $ext_dispatch_call (param i32 i32))) - (import "env" "memory" (memory 1 1)) - - (func (export "call") - (call $ext_dispatch_call - (i32.const 8) ;; Pointer to the start of encoded call buffer - (i32.const 11) ;; Length of the buffer - ) - (unreachable) ;; trap so that the top level transaction fails - ) - (func (export "deploy")) - - (data (i32.const 8) "\00\00\03\00\00\00\00\00\00\00\C8") -) diff --git a/frame/contracts/fixtures/get_runtime_storage.wat b/frame/contracts/fixtures/get_runtime_storage.wat deleted file mode 100644 index 6148f1c408..0000000000 --- a/frame/contracts/fixtures/get_runtime_storage.wat +++ /dev/null @@ -1,74 +0,0 @@ -(module - (import "env" "ext_get_runtime_storage" - (func $ext_get_runtime_storage (param i32 i32) (result i32)) - ) - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) - (import "env" "ext_scratch_write" (func $ext_scratch_write (param i32 i32))) - (import "env" "memory" (memory 1 1)) - - (func (export "deploy")) - - (func $assert (param i32) - (block $ok - (br_if $ok - (get_local 0) - ) - (unreachable) - ) - ) - - (func $call (export "call") - ;; Load runtime storage for the first key and assert that it exists. 
- (call $assert - (i32.eq - (call $ext_get_runtime_storage - (i32.const 16) - (i32.const 4) - ) - (i32.const 0) - ) - ) - - ;; assert $ext_scratch_size == 4 - (call $assert - (i32.eq - (call $ext_scratch_size) - (i32.const 4) - ) - ) - - ;; copy contents of the scratch buffer into the contract's memory. - (call $ext_scratch_read - (i32.const 4) ;; Pointer in memory to the place where to copy. - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 4) ;; Count of bytes to copy. - ) - - ;; assert that contents of the buffer is equal to the i32 value of 0x14144020. - (call $assert - (i32.eq - (i32.load - (i32.const 4) - ) - (i32.const 0x14144020) - ) - ) - - ;; Load the second key and assert that it doesn't exist. - (call $assert - (i32.eq - (call $ext_get_runtime_storage - (i32.const 20) - (i32.const 4) - ) - (i32.const 1) - ) - ) - ) - - ;; The first key, 4 bytes long. - (data (i32.const 16) "\01\02\03\04") - ;; The second key, 4 bytes long. - (data (i32.const 20) "\02\03\04\05") -) diff --git a/frame/contracts/fixtures/restoration.wat b/frame/contracts/fixtures/restoration.wat index 225fdde817..07e11e9d38 100644 --- a/frame/contracts/fixtures/restoration.wat +++ b/frame/contracts/fixtures/restoration.wat @@ -51,8 +51,8 @@ ;; Code hash of SET_RENT (data (i32.const 264) - "\c2\1c\41\10\a5\22\d8\59\1c\4c\77\35\dd\2d\bf\a1" - "\13\0b\50\93\76\9b\92\31\97\b7\c5\74\26\aa\38\2a" + "\ab\d6\58\65\1e\83\6e\4a\18\0d\f2\6d\bc\42\ba\e9" + "\3d\64\76\e5\30\5b\33\46\bb\4d\43\99\38\21\ee\32" ) ;; Rent allowance diff --git a/frame/contracts/fixtures/set_rent.wat b/frame/contracts/fixtures/set_rent.wat index d1affa0d74..3e6bd491bc 100644 --- a/frame/contracts/fixtures/set_rent.wat +++ b/frame/contracts/fixtures/set_rent.wat @@ -1,5 +1,5 @@ (module - (import "env" "ext_dispatch_call" (func $ext_dispatch_call (param i32 i32))) + (import "env" "ext_transfer" (func $ext_transfer (param i32 i32 i32 i32) (result i32))) (import "env" "ext_set_storage" (func $ext_set_storage (param i32 i32 i32))) (import "env" "ext_clear_storage" (func $ext_clear_storage (param i32))) (import "env" "ext_set_rent_allowance" (func $ext_set_rent_allowance (param i32 i32))) @@ -23,11 +23,13 @@ ) ) - ;; transfer 50 to ALICE + ;; transfer 50 to CHARLIE (func $call_2 - (call $ext_dispatch_call - (i32.const 68) - (i32.const 11) + (call $assert + (i32.eq + (call $ext_transfer (i32.const 68) (i32.const 8) (i32.const 76) (i32.const 8)) + (i32.const 0) + ) ) ) @@ -96,6 +98,9 @@ ;; Encoding of 10 in balance (data (i32.const 0) "\28") - ;; Encoding of call transfer 50 to CHARLIE - (data (i32.const 68) "\00\00\03\00\00\00\00\00\00\00\C8") + ;; encoding of Charlies's account id + (data (i32.const 68) "\03") + + ;; encoding of 50 balance + (data (i32.const 76) "\32") ) diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index ba3619195d..4e68aac615 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -29,7 +29,6 @@ use frame_support::{ }; pub type AccountIdOf = ::AccountId; -pub type CallOf = ::Call; pub type MomentOf = <::Time as Time>::Moment; pub type SeedOf = ::Hash; pub type BlockNumberOf = ::BlockNumber; @@ -151,9 +150,6 @@ pub trait Ext { input_data: Vec, ) -> ExecResult; - /// Notes a call dispatch. - fn note_dispatch_call(&mut self, call: CallOf); - /// Restores the given destination contract sacrificing the current one. 
/// /// Since this function removes the self contract eagerly, if succeeded, no further actions should @@ -274,23 +270,11 @@ impl Token for ExecFeeToken { } } -#[cfg_attr(any(feature = "std", test), derive(PartialEq, Eq, Clone))] -#[derive(sp_runtime::RuntimeDebug)] -pub enum DeferredAction { - DispatchRuntimeCall { - /// The account id of the contract who dispatched this call. - origin: T::AccountId, - /// The call to dispatch. - call: ::Call, - }, -} - pub struct ExecutionContext<'a, T: Trait + 'a, V, L> { pub caller: Option<&'a ExecutionContext<'a, T, V, L>>, pub self_account: T::AccountId, pub self_trie_id: Option, pub depth: usize, - pub deferred: Vec>, pub config: &'a Config, pub vm: &'a V, pub loader: &'a L, @@ -314,7 +298,6 @@ where self_trie_id: None, self_account: origin, depth: 0, - deferred: Vec::new(), config: &cfg, vm: &vm, loader: &loader, @@ -331,7 +314,6 @@ where self_trie_id: trie_id, self_account: dest, depth: self.depth + 1, - deferred: Vec::new(), config: self.config, vm: self.vm, loader: self.loader, @@ -532,21 +514,14 @@ where where F: FnOnce(&mut ExecutionContext) -> ExecResult { use frame_support::storage::TransactionOutcome::*; - let (output, deferred) = { - let mut nested = self.nested(dest, trie_id); - let output = frame_support::storage::with_transaction(|| { - let output = func(&mut nested); - match output { - Ok(ref rv) if rv.is_success() => Commit(output), - _ => Rollback(output), - } - })?; - (output, nested.deferred) - }; - if output.is_success() { - self.deferred.extend(deferred); - } - Ok(output) + let mut nested = self.nested(dest, trie_id); + frame_support::storage::with_transaction(|| { + let output = func(&mut nested); + match output { + Ok(ref rv) if rv.is_success() => Commit(output), + _ => Rollback(output), + } + }) } /// Returns whether a contract, identified by address, is currently live in the execution @@ -767,13 +742,6 @@ where self.ctx.call(to.clone(), value, gas_meter, input_data) } - fn note_dispatch_call(&mut self, call: CallOf) { - self.ctx.deferred.push(DeferredAction::DispatchRuntimeCall { - origin: self.ctx.self_account.clone(), - call, - }); - } - fn restore_to( &mut self, dest: AccountIdOf, diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 63de1ee164..4db77a078e 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -106,16 +106,13 @@ use sp_runtime::{ }, RuntimeDebug, }; -use frame_support::dispatch::{ - PostDispatchInfo, DispatchResult, Dispatchable, DispatchResultWithPostInfo -}; use frame_support::{ - Parameter, decl_module, decl_event, decl_storage, decl_error, - parameter_types, IsSubType, storage::child::ChildInfo, + decl_module, decl_event, decl_storage, decl_error, + parameter_types, storage::child::ChildInfo, + dispatch::{DispatchResult, DispatchResultWithPostInfo}, + traits::{OnUnbalanced, Currency, Get, Time, Randomness}, }; -use frame_support::traits::{OnUnbalanced, Currency, Get, Time, Randomness}; -use frame_support::weights::GetDispatchInfo; -use frame_system::{self as system, ensure_signed, RawOrigin, ensure_root}; +use frame_system::{self as system, ensure_signed, ensure_root}; use pallet_contracts_primitives::{RentProjection, ContractAccessError}; use frame_support::weights::Weight; @@ -321,12 +318,6 @@ pub trait Trait: frame_system::Trait { /// The currency in which fees are paid and contract balances are held. type Currency: Currency; - /// The outer call dispatch type. 
- type Call: - Parameter + - Dispatchable::Origin> + - IsSubType, Self> + GetDispatchInfo; - /// The overarching event type. type Event: From> + Into<::Event>; @@ -644,30 +635,7 @@ impl Module { let vm = WasmVm::new(&cfg.schedule); let loader = WasmLoader::new(&cfg.schedule); let mut ctx = ExecutionContext::top_level(origin.clone(), &cfg, &vm, &loader); - - let result = func(&mut ctx, gas_meter); - - // Execute deferred actions. - ctx.deferred.into_iter().for_each(|deferred| { - use self::exec::DeferredAction::*; - match deferred { - DispatchRuntimeCall { - origin: who, - call, - } => { - let info = call.get_dispatch_info(); - let result = call.dispatch(RawOrigin::Signed(who.clone()).into()); - let post_info = match result { - Ok(post_info) => post_info, - Err(err) => err.post_info, - }; - gas_meter.refund(post_info.calc_unspent(&info)); - Self::deposit_event(RawEvent::Dispatched(who, result.is_ok())); - } - } - }); - - result + func(&mut ctx, gas_meter) } } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index ae81a83be7..5303375e01 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -173,7 +173,6 @@ impl Trait for Test { type Time = Timestamp; type Randomness = Randomness; type Currency = Balances; - type Call = Call; type DetermineContractAddress = DummyContractAddressFor; type Event = MetaEvent; type TrieIdGenerator = DummyTrieIdGenerator; @@ -446,233 +445,6 @@ fn instantiate_and_call_and_deposit_event() { }); } -#[test] -fn dispatch_call() { - // This test can fail due to the encoding changes. In case it becomes too annoying - // let's rewrite so as we use this module controlled call or we serialize it in runtime. - let encoded = Encode::encode(&Call::Balances(pallet_balances::Call::transfer(CHARLIE, 50))); - assert_eq!(&encoded[..], &hex!("00000300000000000000C8")[..]); - - let (wasm, code_hash) = compile_module::("dispatch_call").unwrap(); - - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - - // Let's keep this assert even though it's redundant. If you ever need to update the - // wasm source this test will fail and will show you the actual hash. 
- assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), - topics: vec![], - }, - ]); - - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 100, - GAS_LIMIT, - code_hash.into(), - vec![], - )); - - assert_ok!(Contracts::call( - Origin::signed(ALICE), - BOB, // newly created account - 0, - GAS_LIMIT, - vec![], - )); - - pretty_assertions::assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(BOB)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances( - pallet_balances::RawEvent::Endowed(BOB, 100) - ), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances( - pallet_balances::RawEvent::Transfer(ALICE, BOB, 100) - ), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Instantiated(ALICE, BOB)), - topics: vec![], - }, - - // Dispatching the call. - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(CHARLIE)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances( - pallet_balances::RawEvent::Endowed(CHARLIE, 50) - ), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances( - pallet_balances::RawEvent::Transfer(BOB, CHARLIE, 50) - ), - topics: vec![], - }, - - // Event emitted as a result of dispatch. - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Dispatched(BOB, true)), - topics: vec![], - } - ]); - }); -} - -#[test] -fn dispatch_call_not_dispatched_after_top_level_transaction_failure() { - // This test can fail due to the encoding changes. In case it becomes too annoying - // let's rewrite so as we use this module controlled call or we serialize it in runtime. - let encoded = Encode::encode(&Call::Balances(pallet_balances::Call::transfer(CHARLIE, 50))); - assert_eq!(&encoded[..], &hex!("00000300000000000000C8")[..]); - - let (wasm, code_hash) = compile_module::("dispatch_call_then_trap").unwrap(); - - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - - // Let's keep this assert even though it's redundant. If you ever need to update the - // wasm source this test will fail and will show you the actual hash. 
- assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), - topics: vec![], - }, - ]); - - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 100, - GAS_LIMIT, - code_hash.into(), - vec![], - )); - - // Call the newly instantiated contract. The contract is expected to dispatch a call - // and then trap. - assert_err_ignore_postinfo!( - Contracts::call( - Origin::signed(ALICE), - BOB, // newly created account - 0, - GAS_LIMIT, - vec![], - ), - "contract trapped during execution" - ); - pretty_assertions::assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(BOB)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances( - pallet_balances::RawEvent::Endowed(BOB, 100) - ), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances( - pallet_balances::RawEvent::Transfer(ALICE, BOB, 100) - ), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Instantiated(ALICE, BOB)), - topics: vec![], - }, - // ABSENCE of events which would be caused by dispatched Balances::transfer call - ]); - }); -} - #[test] fn run_out_of_gas() { let (wasm, code_hash) = compile_module::("run_out_of_gas").unwrap(); @@ -1773,38 +1545,6 @@ fn cannot_self_destruct_in_constructor() { }); } -#[test] -fn get_runtime_storage() { - let (wasm, code_hash) = compile_module::("get_runtime_storage").unwrap(); - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - - frame_support::storage::unhashed::put_raw( - &[1, 2, 3, 4], - 0x14144020u32.to_le_bytes().to_vec().as_ref() - ); - - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 100, - GAS_LIMIT, - code_hash.into(), - vec![], - )); - assert_ok!(Contracts::call( - Origin::signed(ALICE), - BOB, - 0, - GAS_LIMIT, - vec![], - )); - }); -} - #[test] fn crypto_hashes() { let (wasm, code_hash) = compile_module::("crypto_hashes").unwrap(); diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index a4814a1b22..3d2f5b154f 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -206,7 +206,6 @@ mod tests { instantiates: Vec, terminations: Vec, transfers: Vec, - dispatches: Vec, restores: Vec, // (topics, data) events: Vec<(Vec, Vec)>, @@ -299,9 +298,6 @@ mod tests { }); Ok(()) } - fn note_dispatch_call(&mut self, call: Call) { - self.dispatches.push(DispatchEntry(call)); - } fn restore_to( &mut self, dest: u64, @@ -421,9 +417,6 @@ mod 
tests { ) -> ExecResult { (**self).call(to, value, gas_meter, input_data) } - fn note_dispatch_call(&mut self, call: Call) { - (**self).note_dispatch_call(call) - } fn restore_to( &mut self, dest: u64, @@ -1238,44 +1231,6 @@ mod tests { ).unwrap(); } - const CODE_DISPATCH_CALL: &str = r#" -(module - (import "env" "ext_dispatch_call" (func $ext_dispatch_call (param i32 i32))) - (import "env" "memory" (memory 1 1)) - - (func (export "call") - (call $ext_dispatch_call - (i32.const 8) ;; Pointer to the start of encoded call buffer - (i32.const 13) ;; Length of the buffer - ) - ) - (func (export "deploy")) - - (data (i32.const 8) "\00\01\2A\00\00\00\00\00\00\00\E5\14\00") -) -"#; - - #[test] - fn dispatch_call() { - // This test can fail due to the encoding changes. In case it becomes too annoying - // let's rewrite so as we use this module controlled call or we serialize it in runtime. - - let mut mock_ext = MockExt::default(); - let _ = execute( - CODE_DISPATCH_CALL, - vec![], - &mut mock_ext, - &mut GasMeter::new(GAS_LIMIT), - ).unwrap(); - - assert_eq!( - &mock_ext.dispatches, - &[DispatchEntry( - Call::Balances(pallet_balances::Call::set_balance(42, 1337, 0)), - )] - ); - } - const CODE_RETURN_FROM_START_FN: &str = r#" (module (import "env" "ext_return" (func $ext_return (param i32 i32))) @@ -1883,103 +1838,4 @@ mod tests { assert_eq!(output, ExecReturnValue { status: 17, data: hex!("5566778899").to_vec() }); assert!(!output.is_success()); } - - const CODE_GET_RUNTIME_STORAGE: &str = r#" -(module - (import "env" "ext_get_runtime_storage" - (func $ext_get_runtime_storage (param i32 i32) (result i32)) - ) - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) - (import "env" "ext_scratch_write" (func $ext_scratch_write (param i32 i32))) - (import "env" "memory" (memory 1 1)) - - (func (export "deploy")) - - (func $assert (param i32) - (block $ok - (br_if $ok - (get_local 0) - ) - (unreachable) - ) - ) - - (func $call (export "call") - ;; Load runtime storage for the first key and assert that it exists. - (call $assert - (i32.eq - (call $ext_get_runtime_storage - (i32.const 16) - (i32.const 4) - ) - (i32.const 0) - ) - ) - - ;; assert $ext_scratch_size == 4 - (call $assert - (i32.eq - (call $ext_scratch_size) - (i32.const 4) - ) - ) - - ;; copy contents of the scratch buffer into the contract's memory. - (call $ext_scratch_read - (i32.const 4) ;; Pointer in memory to the place where to copy. - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 4) ;; Count of bytes to copy. - ) - - ;; assert that contents of the buffer is equal to the i32 value of 0x14144020. - (call $assert - (i32.eq - (i32.load - (i32.const 4) - ) - (i32.const 0x14144020) - ) - ) - - ;; Load the second key and assert that it doesn't exist. - (call $assert - (i32.eq - (call $ext_get_runtime_storage - (i32.const 20) - (i32.const 4) - ) - (i32.const 1) - ) - ) - ) - - ;; The first key, 4 bytes long. - (data (i32.const 16) "\01\02\03\04") - ;; The second key, 4 bytes long. 
- (data (i32.const 20) "\02\03\04\05") -) -"#; - - #[test] - fn get_runtime_storage() { - let mut gas_meter = GasMeter::new(GAS_LIMIT); - let mock_ext = MockExt::default(); - - // "\01\02\03\04" - Some(0x14144020) - // "\02\03\04\05" - None - *mock_ext.runtime_storage_keys.borrow_mut() = [ - ([1, 2, 3, 4].to_vec(), Some(0x14144020u32.to_le_bytes().to_vec())), - ([2, 3, 4, 5].to_vec().to_vec(), None), - ] - .iter() - .cloned() - .collect(); - let _ = execute( - CODE_GET_RUNTIME_STORAGE, - vec![], - mock_ext, - &mut gas_meter, - ).unwrap(); - } } diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 8c4d1bfb99..7b64117cd2 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -32,7 +32,6 @@ use sp_io::hashing::{ blake2_128, sha2_256, }; -use frame_support::weights::GetDispatchInfo; /// The value returned from ext_call and ext_instantiate contract external functions if the call or /// instantiation traps. This value is chosen as if the execution does not trap, the return value @@ -162,8 +161,6 @@ pub enum RuntimeToken { /// The given number of bytes is read from the sandbox memory and /// is returned as the return data buffer of the call. ReturnData(u32), - /// Dispatched a call with the given weight. - DispatchWithWeight(Gas), /// (topic_count, data_bytes): A buffer of the given size is posted as an event indexed with the /// given number of topics. DepositEvent(u32, u32), @@ -204,7 +201,6 @@ impl Token for RuntimeToken { data_and_topics_cost.checked_add(metadata.event_base_cost) ) }, - DispatchWithWeight(gas) => gas.checked_add(metadata.dispatch_base_cost), }; value.unwrap_or_else(|| Bounded::max_value()) @@ -785,30 +781,6 @@ define_env!(Env, , Ok(()) }, - // Decodes the given buffer as a `T::Call` and adds it to the list - // of to-be-dispatched calls. - // - // All calls made it to the top-level context will be dispatched before - // finishing the execution of the calling extrinsic. - ext_dispatch_call(ctx, call_ptr: u32, call_len: u32) => { - let call: <::T as Trait>::Call = - read_sandbox_memory_as(ctx, call_ptr, call_len)?; - - // We already deducted the len costs when reading from the sandbox. - // Bill on the actual weight of the dispatched call. - let info = call.get_dispatch_info(); - charge_gas( - &mut ctx.gas_meter, - ctx.schedule, - &mut ctx.special_trap, - RuntimeToken::DispatchWithWeight(info.weight) - )?; - - ctx.ext.note_dispatch_call(call); - - Ok(()) - }, - // Try to restore the given destination contract sacrificing the caller. // // This function will compute a tombstone hash from the caller's storage and the given code hash @@ -1005,32 +977,6 @@ define_env!(Env, , Ok(()) }, - // Retrieve the value under the given key from the **runtime** storage and return 0. - // If there is no entry under the given key then this function will return 1 and - // clear the scratch buffer. - // - // - key_ptr: the pointer into the linear memory where the requested value is placed. - // - key_len: the length of the key in bytes. - ext_get_runtime_storage(ctx, key_ptr: u32, key_len: u32) -> u32 => { - // Steal the scratch buffer so that we hopefully save an allocation for the `key_buf`. - read_sandbox_memory_into_scratch(ctx, key_ptr, key_len)?; - let key_buf = mem::replace(&mut ctx.scratch_buf, Vec::new()); - - match ctx.ext.get_runtime_storage(&key_buf) { - Some(value_buf) => { - // The given value exists. 
- ctx.scratch_buf = value_buf; - Ok(0) - } - None => { - // Put back the `key_buf` and allow its allocation to be reused. - ctx.scratch_buf = key_buf; - ctx.scratch_buf.clear(); - Ok(1) - } - } - }, - // Computes the SHA2 256-bit hash on the given input buffer. // // Returns the result directly into the given output buffer. -- GitLab From 357279d9554b8bd996055683aa84ea9d5fadd477 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Wed, 24 Jun 2020 15:32:50 +0200 Subject: [PATCH 066/144] Generic Normalize impl for arithmetic and npos-elections (#6374) * add normalize * better api for normalize * Some grumbles * Update primitives/arithmetic/src/lib.rs Co-authored-by: Guillaume Thiolliere * More great review grumbles * Way better doc for everything. * Some improvement * Update primitives/arithmetic/src/lib.rs Co-authored-by: Bernhard Schuster Co-authored-by: Guillaume Thiolliere Co-authored-by: Bernhard Schuster --- frame/staking/fuzzer/src/submit_solution.rs | 17 +- frame/staking/src/offchain_election.rs | 3 +- frame/staking/src/testing_utils.rs | 7 +- primitives/arithmetic/fuzzer/Cargo.toml | 4 + primitives/arithmetic/fuzzer/src/normalize.rs | 62 +++ .../fuzzer/src/per_thing_rational.rs | 2 +- primitives/arithmetic/src/lib.rs | 364 ++++++++++++++++- primitives/arithmetic/src/per_things.rs | 11 +- primitives/arithmetic/src/traits.rs | 2 +- primitives/npos-elections/benches/phragmen.rs | 4 +- primitives/npos-elections/src/helpers.rs | 57 ++- primitives/npos-elections/src/lib.rs | 114 +++--- primitives/npos-elections/src/tests.rs | 366 +++++++++++------- primitives/runtime/src/lib.rs | 2 +- test-utils/src/lib.rs | 2 +- 15 files changed, 790 insertions(+), 227 deletions(-) create mode 100644 primitives/arithmetic/fuzzer/src/normalize.rs diff --git a/frame/staking/fuzzer/src/submit_solution.rs b/frame/staking/fuzzer/src/submit_solution.rs index 7094c7ed88..7293cf2389 100644 --- a/frame/staking/fuzzer/src/submit_solution.rs +++ b/frame/staking/fuzzer/src/submit_solution.rs @@ -44,7 +44,9 @@ enum Mode { } pub fn new_test_ext(iterations: u32) -> sp_io::TestExternalities { - let mut ext: sp_io::TestExternalities = frame_system::GenesisConfig::default().build_storage::().map(Into::into) + let mut ext: sp_io::TestExternalities = frame_system::GenesisConfig::default() + .build_storage::() + .map(Into::into) .expect("Failed to create test externalities."); let (offchain, offchain_state) = TestOffchainExt::new(); @@ -70,26 +72,29 @@ fn main() { loop { fuzz!(|data: (u32, u32, u32, u32, u32)| { let (mut num_validators, mut num_nominators, mut edge_per_voter, mut to_elect, mode_u32) = data; + // always run with 5 iterations. 
let mut ext = new_test_ext(5); let mode: Mode = unsafe { std::mem::transmute(mode_u32) }; num_validators = to_range(num_validators, 50, 1000); num_nominators = to_range(num_nominators, 50, 2000); edge_per_voter = to_range(edge_per_voter, 1, 16); to_elect = to_range(to_elect, 20, num_validators); + let do_reduce = true; - println!("+++ instance with params {} / {} / {} / {:?}({}) / {}", + println!("+++ instance with params {} / {} / {} / {} / {:?}({})", num_nominators, num_validators, edge_per_voter, + to_elect, mode, mode_u32, - to_elect, ); ext.execute_with(|| { // initial setup init_active_era(); + assert_ok!(create_validators_with_nominators_for_era::( num_validators, num_nominators, @@ -97,11 +102,11 @@ fn main() { true, None, )); + >::put(ElectionStatus::Open(1)); assert!(>::create_stakers_snapshot().0); - let origin = RawOrigin::Signed(create_funded_user::("fuzzer", 0, 100)); - println!("++ Chain setup done."); + let origin = RawOrigin::Signed(create_funded_user::("fuzzer", 0, 100)); // stuff to submit let (winners, compact, score, size) = match mode { @@ -141,8 +146,6 @@ fn main() { } }; - println!("++ Submission ready. Score = {:?}", score); - // must have chosen correct number of winners. assert_eq!(winners.len() as u32, >::validator_count()); diff --git a/frame/staking/src/offchain_election.rs b/frame/staking/src/offchain_election.rs index 23453e0524..79f3a5c2d9 100644 --- a/frame/staking/src/offchain_election.rs +++ b/frame/staking/src/offchain_election.rs @@ -203,7 +203,8 @@ pub fn prepare_submission( } // Convert back to ratio assignment. This takes less space. - let low_accuracy_assignment = sp_npos_elections::assignment_staked_to_ratio(staked); + let low_accuracy_assignment = sp_npos_elections::assignment_staked_to_ratio_normalized(staked) + .map_err(|e| OffchainElectionError::from(e))?; // convert back to staked to compute the score in the receiver's accuracy. This can be done // nicer, for now we do it as such since this code is not time-critical. This ensure that the diff --git a/frame/staking/src/testing_utils.rs b/frame/staking/src/testing_utils.rs index 86d137ac30..a73073bb1f 100644 --- a/frame/staking/src/testing_utils.rs +++ b/frame/staking/src/testing_utils.rs @@ -201,11 +201,8 @@ pub fn get_weak_solution( }; // convert back to ratio assignment. This takes less space. - let low_accuracy_assignment: Vec> = - staked_assignments - .into_iter() - .map(|sa| sa.into_assignment(true)) - .collect(); + let low_accuracy_assignment = assignment_staked_to_ratio_normalized(staked_assignments) + .expect("Failed to normalize"); // re-calculate score based on what the chain will decode. let score = { diff --git a/primitives/arithmetic/fuzzer/Cargo.toml b/primitives/arithmetic/fuzzer/Cargo.toml index a37ab876ef..b6bbe3d8a6 100644 --- a/primitives/arithmetic/fuzzer/Cargo.toml +++ b/primitives/arithmetic/fuzzer/Cargo.toml @@ -24,6 +24,10 @@ num-traits = "0.2" name = "biguint" path = "src/biguint.rs" +[[bin]] +name = "normalize" +path = "src/normalize.rs" + [[bin]] name = "per_thing_rational" path = "src/per_thing_rational.rs" diff --git a/primitives/arithmetic/fuzzer/src/normalize.rs b/primitives/arithmetic/fuzzer/src/normalize.rs new file mode 100644 index 0000000000..34c4ef9cb0 --- /dev/null +++ b/primitives/arithmetic/fuzzer/src/normalize.rs @@ -0,0 +1,62 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +//! # Running +//! Running this fuzzer can be done with `cargo hfuzz run normalize`. `honggfuzz` CLI options can +//! be used by setting `HFUZZ_RUN_ARGS`, such as `-n 4` to use 4 threads. +//! +//! # Debugging a panic +//! Once a panic is found, it can be debugged with +//! `cargo hfuzz run-debug normalize hfuzz_workspace/normalize/*.fuzz`. + +use honggfuzz::fuzz; +use sp_arithmetic::Normalizable; +use std::convert::TryInto; + +fn main() { + let sum_limit = u32::max_value() as u128; + let len_limit: usize = u32::max_value().try_into().unwrap(); + + loop { + fuzz!(|data: (Vec, u32)| { + let (data, norm) = data; + if data.len() == 0 { return; } + let pre_sum: u128 = data.iter().map(|x| *x as u128).sum(); + + let normalized = data.normalize(norm); + // error cases. + if pre_sum > sum_limit || data.len() > len_limit { + assert!(normalized.is_err()) + } else { + if let Ok(normalized) = normalized { + // if sum goes beyond u128, panic. + let sum: u128 = normalized.iter().map(|x| *x as u128).sum(); + + // if this function returns Ok(), then it will ALWAYS be accurate. + assert_eq!( + sum, + norm as u128, + "sums don't match {:?}, {}", + normalized, + norm, + ); + } + } + }) + } +} diff --git a/primitives/arithmetic/fuzzer/src/per_thing_rational.rs b/primitives/arithmetic/fuzzer/src/per_thing_rational.rs index 0820a35100..fc22eacc9e 100644 --- a/primitives/arithmetic/fuzzer/src/per_thing_rational.rs +++ b/primitives/arithmetic/fuzzer/src/per_thing_rational.rs @@ -114,7 +114,7 @@ fn main() { } } -fn assert_per_thing_equal_error(a: T, b: T, err: u128) { +fn assert_per_thing_equal_error(a: P, b: P, err: u128) { let a_abs = a.deconstruct().saturated_into::(); let b_abs = b.deconstruct().saturated_into::(); let diff = a_abs.max(b_abs) - a_abs.min(b_abs); diff --git a/primitives/arithmetic/src/lib.rs b/primitives/arithmetic/src/lib.rs index 9fdfe4b5e1..5c0d2baa51 100644 --- a/primitives/arithmetic/src/lib.rs +++ b/primitives/arithmetic/src/lib.rs @@ -41,10 +41,11 @@ mod fixed_point; mod rational128; pub use fixed_point::{FixedPointNumber, FixedPointOperand, FixedI64, FixedI128, FixedU128}; -pub use per_things::{PerThing, InnerOf, Percent, PerU16, Permill, Perbill, Perquintill}; +pub use per_things::{PerThing, InnerOf, UpperOf, Percent, PerU16, Permill, Perbill, Perquintill}; pub use rational128::Rational128; -use sp_std::cmp::Ordering; +use sp_std::{prelude::*, cmp::Ordering, fmt::Debug, convert::TryInto}; +use traits::{BaseArithmetic, One, Zero, SaturatedConversion, Unsigned}; /// Trait for comparing two numbers with an threshold. /// @@ -85,8 +86,365 @@ where } } +/// A collection-like object that is made of values of type `T` and can normalize its individual +/// values around a centric point. +/// +/// Note that the order of items in the collection may affect the result. +pub trait Normalizable { + /// Normalize self around `targeted_sum`. 
+	///
+	/// Only returns `Ok` if the new sum of results is guaranteed to be equal to `targeted_sum`.
+	/// Else, returns an error explaining why it failed to do so.
+	fn normalize(&self, targeted_sum: T) -> Result<Vec<T>, &'static str>;
+}
+
+macro_rules! impl_normalize_for_numeric {
+	($($numeric:ty),*) => {
+		$(
+			impl Normalizable<$numeric> for Vec<$numeric> {
+				fn normalize(&self, targeted_sum: $numeric) -> Result<Vec<$numeric>, &'static str> {
+					normalize(self.as_ref(), targeted_sum)
+				}
+			}
+		)*
+	};
+}
+
+impl_normalize_for_numeric!(u8, u16, u32, u64, u128);
+
+impl<P: PerThing> Normalizable<P> for Vec<P> {
+	fn normalize(&self, targeted_sum: P) -> Result<Vec<P>, &'static str> {
+		let inners = self.iter().map(|p| p.clone().deconstruct().into()).collect::<Vec<_>>();
+		let normalized = normalize(inners.as_ref(), targeted_sum.deconstruct().into())?;
+		Ok(normalized.into_iter().map(|i: UpperOf<P>| P::from_parts(i.saturated_into())).collect())
+	}
+}
+
+
+/// Normalize `input` so that the sum of all elements reaches `targeted_sum`.
+///
+/// This implementation is currently in a balanced position between being performant and accurate.
+///
+/// 1. We prefer storing original indices, and sorting the `input` only once. This will save the
+///    cost of sorting per round at the cost of a little bit of memory.
+/// 2. The granularity of increments/decrements is determined by the number of elements in `input`
+///    and their sum difference with `targeted_sum`, namely `diff = diff(sum(input), target_sum)`.
+///    This value is then distributed into `per_round = diff / input.len()` and
+///    `leftover = diff % input.len()`. First, `per_round` is applied to all elements of input, and
+///    then we move to leftover, in which case we add/subtract 1 by 1 until `leftover` is depleted.
+///
+/// When the sum is less than the target, the above approach always holds. In this case, each
+/// individual element is also less than target. Thus, by adding `per_round` to each item, none
+/// of them can overflow the numeric bound of `T`. In fact, none of them can go beyond
+/// `target_sum`*.
+///
+/// If the sum is more than the target, there is a small twist. The subtraction of `per_round`
+/// from each element might go below zero. In this case, we saturate and add the error to the
+/// `leftover` value. This ensures that the result will always stay accurate, yet it might cause the
+/// execution to become increasingly slow, since leftovers are applied one by one.
+///
+/// All in all, the complicated case above is rare in Substrate use cases, hence we opt for it due
+/// to its simplicity.
+///
+/// This function will return an error if the length of `input` cannot fit in `T`, or if
+/// `sum(input)` cannot fit inside `T`.
+///
+/// * This proof is used in the implementation as well.
+pub fn normalize<T>(input: &[T], targeted_sum: T) -> Result<Vec<T>, &'static str>
+	where T: Clone + Copy + Ord + BaseArithmetic + Unsigned + Debug,
+{
+	// compute sum and return error if failed.
+	let mut sum = T::zero();
+	for t in input.iter() {
+		sum = sum.checked_add(t).ok_or("sum of input cannot fit in `T`")?;
+	}
+
+	// convert count and return error if failed.
+	let count = input.len();
+	let count_t: T = count.try_into().map_err(|_| "length of `inputs` cannot fit in `T`")?;
+
+	// Nothing to do here.
+	if count.is_zero() {
+		return Ok(Vec::<T>::new());
+	}
+
+	let diff = targeted_sum.max(sum) - targeted_sum.min(sum);
+	if diff.is_zero() {
+		return Ok(input.to_vec());
+	}
+
+	let needs_bump = targeted_sum > sum;
+	let per_round = diff / count_t;
+	let mut leftover = diff % count_t;
+
+	// sort output once based on diff. This will require more data transfer and saving original
+	// index, but we sort only twice instead: once now and once at the very end.
+	let mut output_with_idx = input.iter().cloned().enumerate().collect::<Vec<_>>();
+	output_with_idx.sort_unstable_by_key(|x| x.1);
+
+	if needs_bump {
+		// must increase the values a bit. Bump from the min element. Index of minimum is now zero
+		// because we did a sort. If at any point the min goes greater than or equal to the
+		// `max_threshold`, we move to the next minimum.
+		let mut min_index = 0;
+		// at this threshold we move to next index.
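	// Illustrative trace of the scheme described above, mirroring the `normalize_works_all_le`
	// test further down: for `input = [7, 7, 7, 10]` and `targeted_sum = 40`, `sum = 31`, so
	// `diff = 9`, `per_round = 9 / 4 = 2` and `leftover = 9 % 4 = 1`. The `per_round` increments
	// keep bumping the current minimum, moving on once it reaches or passes the
	// `targeted_sum / count` threshold of 10, and the single leftover unit is then added one by
	// one, giving `[11, 11, 8, 10]`, which sums to exactly 40.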
+ let threshold = targeted_sum / count_t; + + if !per_round.is_zero() { + for _ in 0..count { + output_with_idx[min_index].1 = output_with_idx[min_index].1 + .checked_add(&per_round) + .expect("Proof provided in the module doc; qed."); + if output_with_idx[min_index].1 >= threshold { + min_index += 1; + min_index = min_index % count; + } + } + } + + // continue with the previous min_index + while !leftover.is_zero() { + output_with_idx[min_index].1 = output_with_idx[min_index].1 + .checked_add(&T::one()) + .expect("Proof provided in the module doc; qed."); + if output_with_idx[min_index].1 >= threshold { + min_index += 1; + min_index = min_index % count; + } + leftover -= One::one() + } + } else { + // must decrease the stakes a bit. decrement from the max element. index of maximum is now + // last. if at any point the max goes less or equal the `min_threshold`, we move to the next + // maximum. + let mut max_index = count - 1; + // at this threshold we move to next index. + let threshold = output_with_idx + .first() + .expect("length of input is greater than zero; it must have a first; qed") + .1; + + if !per_round.is_zero() { + for _ in 0..count { + output_with_idx[max_index].1 = output_with_idx[max_index].1 + .checked_sub(&per_round) + .unwrap_or_else(|| { + let remainder = per_round - output_with_idx[max_index].1; + leftover += remainder; + output_with_idx[max_index].1.saturating_sub(per_round) + }); + if output_with_idx[max_index].1 <= threshold { + max_index = max_index.checked_sub(1).unwrap_or(count - 1); + } + } + } + + // continue with the previous max_index + while !leftover.is_zero() { + if let Some(next) = output_with_idx[max_index].1.checked_sub(&One::one()) { + output_with_idx[max_index].1 = next; + if output_with_idx[max_index].1 <= threshold { + max_index = max_index.checked_sub(1).unwrap_or(count - 1); + } + leftover -= One::one() + } else { + max_index = max_index.checked_sub(1).unwrap_or(count - 1); + } + } + } + + debug_assert_eq!( + output_with_idx.iter().fold(T::zero(), |acc, (_, x)| acc + *x), + targeted_sum, + "sum({:?}) != {:?}", + output_with_idx, + targeted_sum, + ); + + // sort again based on the original index. + output_with_idx.sort_unstable_by_key(|x| x.0); + Ok(output_with_idx.into_iter().map(|(_, t)| t).collect()) +} + +#[cfg(test)] +mod normalize_tests { + use super::*; + + #[test] + fn work_for_all_types() { + macro_rules! test_for { + ($type:ty) => { + assert_eq!( + normalize(vec![8 as $type, 9, 7, 10].as_ref(), 40).unwrap(), + vec![10, 10, 10, 10], + ); + } + } + // it should work for all types as long as the length of vector can be converted to T. 
+ test_for!(u128); + test_for!(u64); + test_for!(u32); + test_for!(u16); + test_for!(u8); + } + + #[test] + fn fails_on_if_input_sum_large() { + assert!(normalize(vec![1u8; 255].as_ref(), 10).is_ok()); + assert_eq!( + normalize(vec![1u8; 256].as_ref(), 10), + Err("sum of input cannot fit in `T`"), + ); + } + + #[test] + fn does_not_fail_on_subtraction_overflow() { + assert_eq!( + normalize(vec![1u8, 100, 100].as_ref(), 10).unwrap(), + vec![1, 9, 0], + ); + assert_eq!( + normalize(vec![1u8, 8, 9].as_ref(), 1).unwrap(), + vec![0, 1, 0], + ); + } + + #[test] + fn works_for_vec() { + assert_eq!(vec![8u32, 9, 7, 10].normalize(40).unwrap(), vec![10u32, 10, 10, 10]); + } + + #[test] + fn works_for_per_thing() { + assert_eq!( + vec![ + Perbill::from_percent(33), + Perbill::from_percent(33), + Perbill::from_percent(33) + ].normalize(Perbill::one()).unwrap(), + vec![ + Perbill::from_parts(333333334), + Perbill::from_parts(333333333), + Perbill::from_parts(333333333), + ] + ); + + assert_eq!( + vec![ + Perbill::from_percent(20), + Perbill::from_percent(15), + Perbill::from_percent(30) + ].normalize(Perbill::one()).unwrap(), + vec![ + Perbill::from_parts(316666668), + Perbill::from_parts(383333332), + Perbill::from_parts(300000000), + ] + ); + } + + #[test] + fn can_work_for_peru16() { + // Peru16 is a rather special case; since inner type is exactly the same as capacity, we + // could have a situation where the sum cannot be calculated in the inner type. Calculating + // using the upper type of the per_thing should assure this to be okay. + assert_eq!( + vec![ + PerU16::from_percent(40), + PerU16::from_percent(40), + PerU16::from_percent(40), + ].normalize(PerU16::one()).unwrap(), + vec![ + PerU16::from_parts(21845), // 33% + PerU16::from_parts(21845), // 33% + PerU16::from_parts(21845), // 33% + ] + ); + } + + #[test] + fn normalize_works_all_le() { + assert_eq!( + normalize(vec![8u32, 9, 7, 10].as_ref(), 40).unwrap(), + vec![10, 10, 10, 10], + ); + + assert_eq!( + normalize(vec![7u32, 7, 7, 7].as_ref(), 40).unwrap(), + vec![10, 10, 10, 10], + ); + + assert_eq!( + normalize(vec![7u32, 7, 7, 10].as_ref(), 40).unwrap(), + vec![11, 11, 8, 10], + ); + + assert_eq!( + normalize(vec![7u32, 8, 7, 10].as_ref(), 40).unwrap(), + vec![11, 8, 11, 10], + ); + + assert_eq!( + normalize(vec![7u32, 7, 8, 10].as_ref(), 40).unwrap(), + vec![11, 11, 8, 10], + ); + } + + #[test] + fn normalize_works_some_ge() { + assert_eq!( + normalize(vec![8u32, 11, 9, 10].as_ref(), 40).unwrap(), + vec![10, 11, 9, 10], + ); + } + + #[test] + fn always_inc_min() { + assert_eq!( + normalize(vec![10u32, 7, 10, 10].as_ref(), 40).unwrap(), + vec![10, 10, 10, 10], + ); + assert_eq!( + normalize(vec![10u32, 10, 7, 10].as_ref(), 40).unwrap(), + vec![10, 10, 10, 10], + ); + assert_eq!( + normalize(vec![10u32, 10, 10, 7].as_ref(), 40).unwrap(), + vec![10, 10, 10, 10], + ); + } + + #[test] + fn normalize_works_all_ge() { + assert_eq!( + normalize(vec![12u32, 11, 13, 10].as_ref(), 40).unwrap(), + vec![10, 10, 10, 10], + ); + + assert_eq!( + normalize(vec![13u32, 13, 13, 13].as_ref(), 40).unwrap(), + vec![10, 10, 10, 10], + ); + + assert_eq!( + normalize(vec![13u32, 13, 13, 10].as_ref(), 40).unwrap(), + vec![12, 9, 9, 10], + ); + + assert_eq!( + normalize(vec![13u32, 12, 13, 10].as_ref(), 40).unwrap(), + vec![9, 12, 9, 10], + ); + + assert_eq!( + normalize(vec![13u32, 13, 12, 10].as_ref(), 40).unwrap(), + vec![9, 9, 12, 10], + ); + } +} + #[cfg(test)] -mod tests { +mod threshold_compare_tests { use super::*; use crate::traits::Saturating; use 
sp_std::cmp::Ordering; diff --git a/primitives/arithmetic/src/per_things.rs b/primitives/arithmetic/src/per_things.rs index 50b87d5076..521f4d1074 100644 --- a/primitives/arithmetic/src/per_things.rs +++ b/primitives/arithmetic/src/per_things.rs @@ -21,24 +21,29 @@ use serde::{Serialize, Deserialize}; use sp_std::{ops, fmt, prelude::*, convert::TryInto}; use codec::{Encode, CompactAs}; use crate::traits::{ - SaturatedConversion, UniqueSaturatedInto, Saturating, BaseArithmetic, Bounded, Zero, + SaturatedConversion, UniqueSaturatedInto, Saturating, BaseArithmetic, Bounded, Zero, Unsigned, }; use sp_debug_derive::RuntimeDebug; /// Get the inner type of a `PerThing`. pub type InnerOf

<P> = <P as PerThing>::Inner;
+/// Get the upper type of a `PerThing`.
+pub type UpperOf<P> = <P as PerThing>
::Upper; + /// Something that implements a fixed point ration with an arbitrary granularity `X`, as _parts per /// `X`_. pub trait PerThing: Sized + Saturating + Copy + Default + Eq + PartialEq + Ord + PartialOrd + Bounded + fmt::Debug { /// The data type used to build this per-thingy. - type Inner: BaseArithmetic + Copy + fmt::Debug; + type Inner: BaseArithmetic + Unsigned + Copy + fmt::Debug; /// A data type larger than `Self::Inner`, used to avoid overflow in some computations. /// It must be able to compute `ACCURACY^2`. - type Upper: BaseArithmetic + Copy + From + TryInto + fmt::Debug; + type Upper: + BaseArithmetic + Copy + From + TryInto + + UniqueSaturatedInto + Unsigned + fmt::Debug; /// The accuracy of this type. const ACCURACY: Self::Inner; diff --git a/primitives/arithmetic/src/traits.rs b/primitives/arithmetic/src/traits.rs index 3921d253da..29b8e419ef 100644 --- a/primitives/arithmetic/src/traits.rs +++ b/primitives/arithmetic/src/traits.rs @@ -22,7 +22,7 @@ use codec::HasCompact; pub use integer_sqrt::IntegerSquareRoot; pub use num_traits::{ Zero, One, Bounded, CheckedAdd, CheckedSub, CheckedMul, CheckedDiv, CheckedNeg, - CheckedShl, CheckedShr, checked_pow, Signed + CheckedShl, CheckedShr, checked_pow, Signed, Unsigned, }; use sp_std::ops::{ Add, Sub, Mul, Div, Rem, AddAssign, SubAssign, MulAssign, DivAssign, diff --git a/primitives/npos-elections/benches/phragmen.rs b/primitives/npos-elections/benches/phragmen.rs index 7e46b9dce1..e2385665bf 100644 --- a/primitives/npos-elections/benches/phragmen.rs +++ b/primitives/npos-elections/benches/phragmen.rs @@ -59,8 +59,8 @@ mod bench_closure_and_slice { } /// Converts a vector of ratio assignments into ones with absolute budget value. - pub fn assignment_ratio_to_staked_slice( - ratio: Vec>, + pub fn assignment_ratio_to_staked_slice( + ratio: Vec>, stakes: &[VoteWeight], ) -> Vec> where diff --git a/primitives/npos-elections/src/helpers.rs b/primitives/npos-elections/src/helpers.rs index 1c96300c66..063eac70c5 100644 --- a/primitives/npos-elections/src/helpers.rs +++ b/primitives/npos-elections/src/helpers.rs @@ -17,37 +17,72 @@ //! Helper methods for npos-elections. -use crate::{Assignment, ExtendedBalance, VoteWeight, IdentifierT, StakedAssignment, WithApprovalOf}; -use sp_arithmetic::PerThing; +use crate::{Assignment, ExtendedBalance, VoteWeight, IdentifierT, StakedAssignment, WithApprovalOf, Error}; +use sp_arithmetic::{PerThing, InnerOf}; use sp_std::prelude::*; /// Converts a vector of ratio assignments into ones with absolute budget value. -pub fn assignment_ratio_to_staked( - ratio: Vec>, +/// +/// Note that this will NOT attempt at normalizing the result. +pub fn assignment_ratio_to_staked( + ratio: Vec>, stake_of: FS, ) -> Vec> where for<'r> FS: Fn(&'r A) -> VoteWeight, - T: sp_std::ops::Mul, - ExtendedBalance: From<::Inner>, + P: sp_std::ops::Mul, + ExtendedBalance: From>, { ratio .into_iter() .map(|a| { let stake = stake_of(&a.who); - a.into_staked(stake.into(), true) + a.into_staked(stake.into()) }) .collect() } +/// Same as [`assignment_ratio_to_staked`] and try and do normalization. 
+pub fn assignment_ratio_to_staked_normalized( + ratio: Vec>, + stake_of: FS, +) -> Result>, Error> +where + for<'r> FS: Fn(&'r A) -> VoteWeight, + P: sp_std::ops::Mul, + ExtendedBalance: From>, +{ + let mut staked = assignment_ratio_to_staked(ratio, &stake_of); + staked.iter_mut().map(|a| + a.try_normalize(stake_of(&a.who).into()).map_err(|err| Error::ArithmeticError(err)) + ).collect::>()?; + Ok(staked) +} + /// Converts a vector of staked assignments into ones with ratio values. -pub fn assignment_staked_to_ratio( +/// +/// Note that this will NOT attempt at normalizing the result. +pub fn assignment_staked_to_ratio( + staked: Vec>, +) -> Vec> +where + ExtendedBalance: From>, +{ + staked.into_iter().map(|a| a.into_assignment()).collect() +} + +/// Same as [`assignment_staked_to_ratio`] and try and do normalization. +pub fn assignment_staked_to_ratio_normalized( staked: Vec>, -) -> Vec> +) -> Result>, Error> where - ExtendedBalance: From<::Inner>, + ExtendedBalance: From>, { - staked.into_iter().map(|a| a.into_assignment(true)).collect() + let mut ratio = staked.into_iter().map(|a| a.into_assignment()).collect::>(); + ratio.iter_mut().map(|a| + a.try_normalize().map_err(|err| Error::ArithmeticError(err)) + ).collect::>()?; + Ok(ratio) } /// consumes a vector of winners with backing stake to just winners. diff --git a/primitives/npos-elections/src/lib.rs b/primitives/npos-elections/src/lib.rs index 72eddf9a1d..592ed3b717 100644 --- a/primitives/npos-elections/src/lib.rs +++ b/primitives/npos-elections/src/lib.rs @@ -30,7 +30,7 @@ use sp_std::{prelude::*, collections::btree_map::BTreeMap, fmt::Debug, cmp::Ordering, convert::TryFrom}; use sp_arithmetic::{ - PerThing, Rational128, ThresholdOrd, + PerThing, Rational128, ThresholdOrd, InnerOf, Normalizable, helpers_128bit::multiply_by_rational, traits::{Zero, Saturating, Bounded, SaturatedConversion}, }; @@ -84,6 +84,8 @@ pub enum Error { CompactTargetOverflow, /// One of the index functions returned none. CompactInvalidIndex, + /// An error occurred in some arithmetic operation. + ArithmeticError(&'static str), } /// A type which is used in the API of this crate as a numeric weight of a vote, most often the @@ -155,16 +157,16 @@ pub struct ElectionResult { /// A voter's stake assignment among a set of targets, represented as ratios. #[derive(Debug, Clone, Default)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Encode, Decode))] -pub struct Assignment { +pub struct Assignment { /// Voter's identifier. pub who: AccountId, /// The distribution of the voter's stake. - pub distribution: Vec<(AccountId, T)>, + pub distribution: Vec<(AccountId, P)>, } -impl Assignment +impl Assignment where - ExtendedBalance: From<::Inner>, + ExtendedBalance: From>, { /// Convert from a ratio assignment into one with absolute values aka. [`StakedAssignment`]. /// @@ -173,50 +175,49 @@ where /// distribution's sum is exactly equal to the total budget, by adding or subtracting the /// remainder from the last distribution. /// - /// If an edge ratio is [`Bounded::max_value()`], it is dropped. This edge can never mean + /// If an edge ratio is [`Bounded::min_value()`], it is dropped. This edge can never mean /// anything useful. 
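// Since the plain conversions no longer round-fill the last edge, callers that need an exact sum
// are expected to go through the `_normalized` helpers above (which use `try_normalize` per
// voter). A rough usage sketch, with a placeholder stake closure and account type that are
// illustrative rather than taken from the patch:
//
//     let stake_of = |_who: &AccountId| -> VoteWeight { 1_000 };
//     let staked = assignment_ratio_to_staked_normalized(ratio_assignments, &stake_of)?;
//     let ratio_again = assignment_staked_to_ratio_normalized(staked)?;
//
// Any per-voter normalization failure surfaces as `Error::ArithmeticError`.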
- pub fn into_staked(self, stake: ExtendedBalance, fill: bool) -> StakedAssignment + pub fn into_staked(self, stake: ExtendedBalance) -> StakedAssignment where - T: sp_std::ops::Mul, + P: sp_std::ops::Mul, { - let mut sum: ExtendedBalance = Bounded::min_value(); - let mut distribution = self - .distribution + let distribution = self.distribution .into_iter() .filter_map(|(target, p)| { // if this ratio is zero, then skip it. - if p == Bounded::min_value() { + if p.is_zero() { None } else { // NOTE: this mul impl will always round to the nearest number, so we might both // overflow and underflow. let distribution_stake = p * stake; - // defensive only. We assume that balance cannot exceed extended balance. - sum = sum.saturating_add(distribution_stake); Some((target, distribution_stake)) } }) .collect::>(); - if fill { - // NOTE: we can do this better. - // https://revs.runtime-revolution.com/getting-100-with-rounded-percentages-273ffa70252b - if let Some(leftover) = stake.checked_sub(sum) { - if let Some(last) = distribution.last_mut() { - last.1 = last.1.saturating_add(leftover); - } - } else if let Some(excess) = sum.checked_sub(stake) { - if let Some(last) = distribution.last_mut() { - last.1 = last.1.saturating_sub(excess); - } - } - } - StakedAssignment { who: self.who, distribution, } } + + /// Try and normalize this assignment. + /// + /// If `Ok(())` is returned, then the assignment MUST have been successfully normalized to 100%. + pub fn try_normalize(&mut self) -> Result<(), &'static str> { + self.distribution + .iter() + .map(|(_, p)| *p) + .collect::>() + .normalize(P::one()) + .map(|normalized_ratios| + self.distribution + .iter_mut() + .zip(normalized_ratios) + .for_each(|((_, old), corrected)| { *old = corrected; }) + ) + } } /// A voter's stake assignment among a set of targets, represented as absolute values in the scale @@ -243,42 +244,23 @@ impl StakedAssignment { /// /// If an edge stake is so small that it cannot be represented in `T`, it is ignored. This edge /// can never be re-created and does not mean anything useful anymore. - pub fn into_assignment(self, fill: bool) -> Assignment + pub fn into_assignment(self) -> Assignment where - ExtendedBalance: From<::Inner>, + ExtendedBalance: From>, + AccountId: IdentifierT, { - let accuracy: u128 = T::ACCURACY.saturated_into(); - let mut sum: u128 = Zero::zero(); - let stake = self.distribution.iter().map(|x| x.1).sum(); - let mut distribution = self - .distribution + let stake = self.total(); + let distribution = self.distribution .into_iter() .filter_map(|(target, w)| { - let per_thing = T::from_rational_approximation(w, stake); + let per_thing = P::from_rational_approximation(w, stake); if per_thing == Bounded::min_value() { None } else { - sum += per_thing.clone().deconstruct().saturated_into(); Some((target, per_thing)) } }) - .collect::>(); - - if fill { - if let Some(leftover) = accuracy.checked_sub(sum) { - if let Some(last) = distribution.last_mut() { - last.1 = last.1.saturating_add( - T::from_parts(leftover.saturated_into()) - ); - } - } else if let Some(excess) = sum.checked_sub(accuracy) { - if let Some(last) = distribution.last_mut() { - last.1 = last.1.saturating_sub( - T::from_parts(excess.saturated_into()) - ); - } - } - } + .collect::>(); Assignment { who: self.who, @@ -286,6 +268,30 @@ impl StakedAssignment { } } + /// Try and normalize this assignment. + /// + /// If `Ok(())` is returned, then the assignment MUST have been successfully normalized to + /// `stake`. 
+ /// + /// NOTE: current implementation of `.normalize` is almost safe to `expect()` upon. The only + /// error case is when the input cannot fit in `T`, or the sum of input cannot fit in `T`. + /// Sadly, both of these are dependent upon the implementation of `VoteLimit`, i.e. the limit + /// of edges per voter which is enforced from upstream. Hence, at this crate, we prefer + /// returning a result and a use the name prefix `try_`. + pub fn try_normalize(&mut self, stake: ExtendedBalance) -> Result<(), &'static str> { + self.distribution + .iter() + .map(|(_, ref weight)| *weight) + .collect::>() + .normalize(stake) + .map(|normalized_weights| + self.distribution + .iter_mut() + .zip(normalized_weights.into_iter()) + .for_each(|((_, weight), corrected)| { *weight = corrected; }) + ) + } + /// Get the total stake of this assignment (aka voter budget). pub fn total(&self) -> ExtendedBalance { self.distribution.iter().fold(Zero::zero(), |a, b| a.saturating_add(b.1)) diff --git a/primitives/npos-elections/src/tests.rs b/primitives/npos-elections/src/tests.rs index 08923c6949..80c742117d 100644 --- a/primitives/npos-elections/src/tests.rs +++ b/primitives/npos-elections/src/tests.rs @@ -588,184 +588,276 @@ fn self_votes_should_be_kept() { ); } -#[test] -fn assignment_convert_works() { - let staked = StakedAssignment { - who: 1 as AccountId, - distribution: vec![ - (20, 100 as ExtendedBalance), - (30, 25), - ], - }; - - let assignment = staked.clone().into_assignment(true); - assert_eq!( - assignment, - Assignment { - who: 1, +mod assignment_convert_normalize { + use super::*; + #[test] + fn assignment_convert_works() { + let staked = StakedAssignment { + who: 1 as AccountId, distribution: vec![ - (20, Perbill::from_percent(80)), - (30, Perbill::from_percent(20)), - ] - } - ); - - assert_eq!( - assignment.into_staked(125, true), - staked, - ); -} - -#[test] -fn score_comparison_is_lexicographical_no_epsilon() { - let epsilon = Perbill::zero(); - // only better in the fist parameter, worse in the other two ✅ - assert_eq!( - is_score_better([12, 10, 35], [10, 20, 30], epsilon), - true, - ); - - // worse in the first, better in the other two ❌ - assert_eq!( - is_score_better([9, 30, 10], [10, 20, 30], epsilon), - false, - ); - - // equal in the first, the second one dictates. - assert_eq!( - is_score_better([10, 25, 40], [10, 20, 30], epsilon), - true, - ); - - // equal in the first two, the last one dictates. - assert_eq!( - is_score_better([10, 20, 40], [10, 20, 30], epsilon), - false, - ); -} + (20, 100 as ExtendedBalance), + (30, 25), + ], + }; -#[test] -fn score_comparison_with_epsilon() { - let epsilon = Perbill::from_percent(1); + let assignment = staked.clone().into_assignment(); + assert_eq!( + assignment, + Assignment { + who: 1, + distribution: vec![ + (20, Perbill::from_percent(80)), + (30, Perbill::from_percent(20)), + ] + } + ); - { - // no more than 1 percent (10) better in the first param. assert_eq!( - is_score_better([1009, 5000, 100000], [1000, 5000, 100000], epsilon), - false, + assignment.into_staked(125), + staked, ); + } - // now equal, still not better. + #[test] + fn assignment_convert_will_not_normalize() { assert_eq!( - is_score_better([1010, 5000, 100000], [1000, 5000, 100000], epsilon), - false, + Assignment { + who: 1, + distribution: vec![ + (2, Perbill::from_percent(33)), + (3, Perbill::from_percent(66)), + ] + }.into_staked(100), + StakedAssignment { + who: 1, + distribution: vec![ + (2, 33), + (3, 66), + // sum is not 100! + ], + }, ); - // now it is. 
assert_eq!( - is_score_better([1011, 5000, 100000], [1000, 5000, 100000], epsilon), - true, + StakedAssignment { + who: 1, + distribution: vec![ + (2, 333_333_333_333_333), + (3, 333_333_333_333_333), + (4, 666_666_666_666_333), + ], + }.into_assignment(), + Assignment { + who: 1, + distribution: vec![ + (2, Perbill::from_parts(250000000)), + (3, Perbill::from_parts(250000000)), + (4, Perbill::from_parts(499999999)), + // sum is not 100%! + ] + }, + ) + } + + #[test] + fn assignment_can_normalize() { + let mut a = Assignment { + who: 1, + distribution: vec![ + (2, Perbill::from_parts(330000000)), + (3, Perbill::from_parts(660000000)), + // sum is not 100%! + ] + }; + a.try_normalize().unwrap(); + assert_eq!( + a, + Assignment { + who: 1, + distribution: vec![ + (2, Perbill::from_parts(340000000)), + (3, Perbill::from_parts(660000000)), + ] + }, ); } - { - // First score score is epsilon better, but first score is no longer `ge`. Then this is - // still not a good solution. + #[test] + fn staked_assignment_can_normalize() { + let mut a = StakedAssignment { + who: 1, + distribution: vec![ + (2, 33), + (3, 66), + ] + }; + a.try_normalize(100).unwrap(); assert_eq!( - is_score_better([999, 6000, 100000], [1000, 5000, 100000], epsilon), - false, + a, + StakedAssignment { + who: 1, + distribution: vec![ + (2, 34), + (3, 66), + ] + }, ); } +} - { - // first score is equal or better, but not epsilon. Then second one is the determinant. +mod score { + use super::*; + #[test] + fn score_comparison_is_lexicographical_no_epsilon() { + let epsilon = Perbill::zero(); + // only better in the fist parameter, worse in the other two ✅ assert_eq!( - is_score_better([1005, 5000, 100000], [1000, 5000, 100000], epsilon), - false, + is_score_better([12, 10, 35], [10, 20, 30], epsilon), + true, ); + // worse in the first, better in the other two ❌ assert_eq!( - is_score_better([1005, 5050, 100000], [1000, 5000, 100000], epsilon), + is_score_better([9, 30, 10], [10, 20, 30], epsilon), false, ); + // equal in the first, the second one dictates. assert_eq!( - is_score_better([1005, 5051, 100000], [1000, 5000, 100000], epsilon), + is_score_better([10, 25, 40], [10, 20, 30], epsilon), true, ); - } - { - // first score and second are equal or less than epsilon more, third is determinant. + // equal in the first two, the last one dictates. assert_eq!( - is_score_better([1005, 5025, 100000], [1000, 5000, 100000], epsilon), + is_score_better([10, 20, 40], [10, 20, 30], epsilon), false, ); + } + + #[test] + fn score_comparison_with_epsilon() { + let epsilon = Perbill::from_percent(1); + + { + // no more than 1 percent (10) better in the first param. + assert_eq!( + is_score_better([1009, 5000, 100000], [1000, 5000, 100000], epsilon), + false, + ); + + // now equal, still not better. + assert_eq!( + is_score_better([1010, 5000, 100000], [1000, 5000, 100000], epsilon), + false, + ); + + // now it is. + assert_eq!( + is_score_better([1011, 5000, 100000], [1000, 5000, 100000], epsilon), + true, + ); + } + + { + // First score score is epsilon better, but first score is no longer `ge`. Then this is + // still not a good solution. + assert_eq!( + is_score_better([999, 6000, 100000], [1000, 5000, 100000], epsilon), + false, + ); + } + + { + // first score is equal or better, but not epsilon. Then second one is the determinant. 
+ assert_eq!( + is_score_better([1005, 5000, 100000], [1000, 5000, 100000], epsilon), + false, + ); + + assert_eq!( + is_score_better([1005, 5050, 100000], [1000, 5000, 100000], epsilon), + false, + ); + + assert_eq!( + is_score_better([1005, 5051, 100000], [1000, 5000, 100000], epsilon), + true, + ); + } + + { + // first score and second are equal or less than epsilon more, third is determinant. + assert_eq!( + is_score_better([1005, 5025, 100000], [1000, 5000, 100000], epsilon), + false, + ); + + assert_eq!( + is_score_better([1005, 5025, 99_000], [1000, 5000, 100000], epsilon), + false, + ); + + assert_eq!( + is_score_better([1005, 5025, 98_999], [1000, 5000, 100000], epsilon), + true, + ); + } + } + + #[test] + fn score_comparison_large_value() { + // some random value taken from eras in kusama. + let initial = [12488167277027543u128, 5559266368032409496, 118749283262079244270992278287436446]; + // this claim is 0.04090% better in the third component. It should be accepted as better if + // epsilon is smaller than 5/10_0000 + let claim = [12488167277027543u128, 5559266368032409496, 118700736389524721358337889258988054]; assert_eq!( - is_score_better([1005, 5025, 99_000], [1000, 5000, 100000], epsilon), - false, + is_score_better( + claim.clone(), + initial.clone(), + Perbill::from_rational_approximation(1u32, 10_000), + ), + true, ); assert_eq!( - is_score_better([1005, 5025, 98_999], [1000, 5000, 100000], epsilon), + is_score_better( + claim.clone(), + initial.clone(), + Perbill::from_rational_approximation(2u32, 10_000), + ), true, ); - } -} - -#[test] -fn score_comparison_large_value() { - // some random value taken from eras in kusama. - let initial = [12488167277027543u128, 5559266368032409496, 118749283262079244270992278287436446]; - // this claim is 0.04090% better in the third component. It should be accepted as better if - // epsilon is smaller than 5/10_0000 - let claim = [12488167277027543u128, 5559266368032409496, 118700736389524721358337889258988054]; - - assert_eq!( - is_score_better( - claim.clone(), - initial.clone(), - Perbill::from_rational_approximation(1u32, 10_000), - ), - true, - ); - - assert_eq!( - is_score_better( - claim.clone(), - initial.clone(), - Perbill::from_rational_approximation(2u32, 10_000), - ), - true, - ); - assert_eq!( - is_score_better( - claim.clone(), - initial.clone(), - Perbill::from_rational_approximation(3u32, 10_000), - ), - true, - ); + assert_eq!( + is_score_better( + claim.clone(), + initial.clone(), + Perbill::from_rational_approximation(3u32, 10_000), + ), + true, + ); - assert_eq!( - is_score_better( - claim.clone(), - initial.clone(), - Perbill::from_rational_approximation(4u32, 10_000), - ), - true, - ); + assert_eq!( + is_score_better( + claim.clone(), + initial.clone(), + Perbill::from_rational_approximation(4u32, 10_000), + ), + true, + ); - assert_eq!( - is_score_better( - claim.clone(), - initial.clone(), - Perbill::from_rational_approximation(5u32, 10_000), - ), - false, - ); + assert_eq!( + is_score_better( + claim.clone(), + initial.clone(), + Perbill::from_rational_approximation(5u32, 10_000), + ), + false, + ); + } } mod compact { diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index a8a518fd7b..881ba3d724 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -71,7 +71,7 @@ pub use sp_core::RuntimeDebug; /// Re-export top-level arithmetic stuff. 
pub use sp_arithmetic::{ - PerThing, traits::SaturatedConversion, Perquintill, Perbill, Permill, Percent, PerU16, + PerThing, traits::SaturatedConversion, Perquintill, Perbill, Permill, Percent, PerU16, InnerOf, Rational128, FixedI64, FixedI128, FixedU128, FixedPointNumber, FixedPointOperand, }; /// Re-export 128 bit helpers. diff --git a/test-utils/src/lib.rs b/test-utils/src/lib.rs index e600ab9fce..8163460df7 100644 --- a/test-utils/src/lib.rs +++ b/test-utils/src/lib.rs @@ -38,7 +38,7 @@ /// ``` #[macro_export] macro_rules! assert_eq_uvec { - ( $x:expr, $y:expr ) => { + ( $x:expr, $y:expr $(,)? ) => { $crate::__assert_eq_uvec!($x, $y); $crate::__assert_eq_uvec!($y, $x); } -- GitLab From d17396cebe11dce352aeeaac0b2645354cb2b328 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Wed, 24 Jun 2020 17:01:42 +0200 Subject: [PATCH 067/144] Extract frame_system SignedExtensions into separate files. (#6474) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Split the code. * Restructure. * Split tests. * Self-review. * Break lines. * Move tests out. * Rename CheckEra -> CheckMortality but keep backwards compatibility * Update frame/system/src/extensions/check_mortality.rs * Don't rename the IDENTIFIER for now. Co-authored-by: Bastian Köcher --- frame/system/src/extensions/check_genesis.rs | 58 + .../system/src/extensions/check_mortality.rs | 124 ++ frame/system/src/extensions/check_nonce.rs | 145 ++ .../src/extensions/check_spec_version.rs | 58 + .../system/src/extensions/check_tx_version.rs | 58 + frame/system/src/extensions/check_weight.rs | 644 +++++++ frame/system/src/extensions/mod.rs | 24 + frame/system/src/lib.rs | 1484 +---------------- frame/system/src/mock.rs | 124 ++ frame/system/src/offchain.rs | 2 +- frame/system/src/tests.rs | 424 +++++ frame/system/src/weights.rs | 76 + 12 files changed, 1757 insertions(+), 1464 deletions(-) create mode 100644 frame/system/src/extensions/check_genesis.rs create mode 100644 frame/system/src/extensions/check_mortality.rs create mode 100644 frame/system/src/extensions/check_nonce.rs create mode 100644 frame/system/src/extensions/check_spec_version.rs create mode 100644 frame/system/src/extensions/check_tx_version.rs create mode 100644 frame/system/src/extensions/check_weight.rs create mode 100644 frame/system/src/extensions/mod.rs create mode 100644 frame/system/src/mock.rs create mode 100644 frame/system/src/tests.rs create mode 100644 frame/system/src/weights.rs diff --git a/frame/system/src/extensions/check_genesis.rs b/frame/system/src/extensions/check_genesis.rs new file mode 100644 index 0000000000..d0a346519c --- /dev/null +++ b/frame/system/src/extensions/check_genesis.rs @@ -0,0 +1,58 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
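// For orientation: after this split, runtimes keep composing these extensions exactly as before,
// typically as a `SignedExtra` tuple roughly like the sketch below (the surrounding runtime type
// names are assumptions, and `CheckEra` stays usable as the backwards-compatible name for the
// renamed `CheckMortality`, per the commit message):
//
//     pub type SignedExtra = (
//         frame_system::CheckSpecVersion<Runtime>,
//         frame_system::CheckTxVersion<Runtime>,
//         frame_system::CheckGenesis<Runtime>,
//         frame_system::CheckEra<Runtime>,
//         frame_system::CheckNonce<Runtime>,
//         frame_system::CheckWeight<Runtime>,
//     );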
+ +use codec::{Encode, Decode}; +use crate::{Trait, Module}; +use sp_runtime::{ + traits::{SignedExtension, Zero}, + transaction_validity::TransactionValidityError, +}; + +/// Genesis hash check to provide replay protection between different networks. +#[derive(Encode, Decode, Clone, Eq, PartialEq)] +pub struct CheckGenesis(sp_std::marker::PhantomData); + +impl sp_std::fmt::Debug for CheckGenesis { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "CheckGenesis") + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } +} + +impl CheckGenesis { + /// Creates new `SignedExtension` to check genesis hash. + pub fn new() -> Self { + Self(sp_std::marker::PhantomData) + } +} + +impl SignedExtension for CheckGenesis { + type AccountId = T::AccountId; + type Call = ::Call; + type AdditionalSigned = T::Hash; + type Pre = (); + const IDENTIFIER: &'static str = "CheckGenesis"; + + fn additional_signed(&self) -> Result { + Ok(>::block_hash(T::BlockNumber::zero())) + } +} diff --git a/frame/system/src/extensions/check_mortality.rs b/frame/system/src/extensions/check_mortality.rs new file mode 100644 index 0000000000..cc7496df9a --- /dev/null +++ b/frame/system/src/extensions/check_mortality.rs @@ -0,0 +1,124 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use codec::{Encode, Decode}; +use crate::{Trait, Module, BlockHash}; +use frame_support::StorageMap; +use sp_runtime::{ + generic::Era, + traits::{SignedExtension, DispatchInfoOf, SaturatedConversion}, + transaction_validity::{ + ValidTransaction, TransactionValidityError, InvalidTransaction, TransactionValidity, + }, +}; + +/// Check for transaction mortality. +#[derive(Encode, Decode, Clone, Eq, PartialEq)] +pub struct CheckMortality(Era, sp_std::marker::PhantomData); + +impl CheckMortality { + /// utility constructor. Used only in client/factory code. 
+ pub fn from(era: Era) -> Self { + Self(era, sp_std::marker::PhantomData) + } +} + +impl sp_std::fmt::Debug for CheckMortality { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "CheckMortality({:?})", self.0) + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } +} + +impl SignedExtension for CheckMortality { + type AccountId = T::AccountId; + type Call = T::Call; + type AdditionalSigned = T::Hash; + type Pre = (); + // TODO [#6483] rename to CheckMortality + const IDENTIFIER: &'static str = "CheckEra"; + + fn validate( + &self, + _who: &Self::AccountId, + _call: &Self::Call, + _info: &DispatchInfoOf, + _len: usize, + ) -> TransactionValidity { + let current_u64 = >::block_number().saturated_into::(); + let valid_till = self.0.death(current_u64); + Ok(ValidTransaction { + longevity: valid_till.saturating_sub(current_u64), + ..Default::default() + }) + } + + fn additional_signed(&self) -> Result { + let current_u64 = >::block_number().saturated_into::(); + let n = self.0.birth(current_u64).saturated_into::(); + if !>::contains_key(n) { + Err(InvalidTransaction::AncientBirthBlock.into()) + } else { + Ok(>::block_hash(n)) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::{Test, new_test_ext, System, CALL}; + use frame_support::weights::{DispatchClass, DispatchInfo, Pays}; + use sp_core::H256; + + #[test] + fn signed_ext_check_era_should_work() { + new_test_ext().execute_with(|| { + // future + assert_eq!( + CheckMortality::::from(Era::mortal(4, 2)).additional_signed().err().unwrap(), + InvalidTransaction::AncientBirthBlock.into(), + ); + + // correct + System::set_block_number(13); + >::insert(12, H256::repeat_byte(1)); + assert!(CheckMortality::::from(Era::mortal(4, 12)).additional_signed().is_ok()); + }) + } + + #[test] + fn signed_ext_check_era_should_change_longevity() { + new_test_ext().execute_with(|| { + let normal = DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes }; + let len = 0_usize; + let ext = ( + crate::CheckWeight::::default(), + CheckMortality::::from(Era::mortal(16, 256)), + ); + System::set_block_number(17); + >::insert(16, H256::repeat_byte(1)); + + assert_eq!(ext.validate(&1, CALL, &normal, len).unwrap().longevity, 15); + }) + } +} diff --git a/frame/system/src/extensions/check_nonce.rs b/frame/system/src/extensions/check_nonce.rs new file mode 100644 index 0000000000..1af3a1210a --- /dev/null +++ b/frame/system/src/extensions/check_nonce.rs @@ -0,0 +1,145 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
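// A concrete reading of the `CheckMortality` tests above: a client attaches, say,
// `CheckMortality::<Runtime>::from(Era::mortal(16, 256))` to its extra data (the runtime name is
// illustrative); `additional_signed` then requires the era's birth block hash to still be present
// in `BlockHash` (otherwise `AncientBirthBlock`), while `validate` reports the remaining lifetime
// as the transaction's longevity, e.g. 15 blocks when evaluated at block 17 in the test above.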
+ +use codec::{Encode, Decode}; +use crate::Trait; +use frame_support::{ + weights::DispatchInfo, + StorageMap, +}; +use sp_runtime::{ + traits::{SignedExtension, DispatchInfoOf, Dispatchable, One}, + transaction_validity::{ + ValidTransaction, TransactionValidityError, InvalidTransaction, TransactionValidity, + TransactionLongevity, TransactionPriority, + }, +}; +use sp_std::vec; + +/// Nonce check and increment to give replay protection for transactions. +#[derive(Encode, Decode, Clone, Eq, PartialEq)] +pub struct CheckNonce(#[codec(compact)] T::Index); + +impl CheckNonce { + /// utility constructor. Used only in client/factory code. + pub fn from(nonce: T::Index) -> Self { + Self(nonce) + } +} + +impl sp_std::fmt::Debug for CheckNonce { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "CheckNonce({})", self.0) + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } +} + +impl SignedExtension for CheckNonce where + T::Call: Dispatchable +{ + type AccountId = T::AccountId; + type Call = T::Call; + type AdditionalSigned = (); + type Pre = (); + const IDENTIFIER: &'static str = "CheckNonce"; + + fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } + + fn pre_dispatch( + self, + who: &Self::AccountId, + _call: &Self::Call, + _info: &DispatchInfoOf, + _len: usize, + ) -> Result<(), TransactionValidityError> { + let mut account = crate::Account::::get(who); + if self.0 != account.nonce { + return Err( + if self.0 < account.nonce { + InvalidTransaction::Stale + } else { + InvalidTransaction::Future + }.into() + ) + } + account.nonce += T::Index::one(); + crate::Account::::insert(who, account); + Ok(()) + } + + fn validate( + &self, + who: &Self::AccountId, + _call: &Self::Call, + info: &DispatchInfoOf, + _len: usize, + ) -> TransactionValidity { + // check index + let account = crate::Account::::get(who); + if self.0 < account.nonce { + return InvalidTransaction::Stale.into() + } + + let provides = vec![Encode::encode(&(who, self.0))]; + let requires = if account.nonce < self.0 { + vec![Encode::encode(&(who, self.0 - One::one()))] + } else { + vec![] + }; + + Ok(ValidTransaction { + priority: info.weight as TransactionPriority, + requires, + provides, + longevity: TransactionLongevity::max_value(), + propagate: true, + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::{Test, new_test_ext, CALL}; + + #[test] + fn signed_ext_check_nonce_works() { + new_test_ext().execute_with(|| { + crate::Account::::insert(1, crate::AccountInfo { + nonce: 1, + refcount: 0, + data: 0, + }); + let info = DispatchInfo::default(); + let len = 0_usize; + // stale + assert!(CheckNonce::(0).validate(&1, CALL, &info, len).is_err()); + assert!(CheckNonce::(0).pre_dispatch(&1, CALL, &info, len).is_err()); + // correct + assert!(CheckNonce::(1).validate(&1, CALL, &info, len).is_ok()); + assert!(CheckNonce::(1).pre_dispatch(&1, CALL, &info, len).is_ok()); + // future + assert!(CheckNonce::(5).validate(&1, CALL, &info, len).is_ok()); + assert!(CheckNonce::(5).pre_dispatch(&1, CALL, &info, len).is_err()); + }) + } +} diff --git a/frame/system/src/extensions/check_spec_version.rs b/frame/system/src/extensions/check_spec_version.rs new file mode 100644 index 0000000000..8dc4d8d9ce --- /dev/null +++ b/frame/system/src/extensions/check_spec_version.rs @@ -0,0 +1,58 @@ +// This file is part of Substrate. 
+ +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{Trait, Module}; +use codec::{Encode, Decode}; +use sp_runtime::{ + traits::SignedExtension, + transaction_validity::TransactionValidityError, +}; + +/// Ensure the runtime version registered in the transaction is the same as at present. +#[derive(Encode, Decode, Clone, Eq, PartialEq)] +pub struct CheckSpecVersion(sp_std::marker::PhantomData); + +impl sp_std::fmt::Debug for CheckSpecVersion { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "CheckSpecVersion") + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } +} + +impl CheckSpecVersion { + /// Create new `SignedExtension` to check runtime version. + pub fn new() -> Self { + Self(sp_std::marker::PhantomData) + } +} + +impl SignedExtension for CheckSpecVersion { + type AccountId = T::AccountId; + type Call = ::Call; + type AdditionalSigned = u32; + type Pre = (); + const IDENTIFIER: &'static str = "CheckSpecVersion"; + + fn additional_signed(&self) -> Result { + Ok(>::runtime_version().spec_version) + } +} diff --git a/frame/system/src/extensions/check_tx_version.rs b/frame/system/src/extensions/check_tx_version.rs new file mode 100644 index 0000000000..ee6f334936 --- /dev/null +++ b/frame/system/src/extensions/check_tx_version.rs @@ -0,0 +1,58 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{Trait, Module}; +use codec::{Encode, Decode}; +use sp_runtime::{ + traits::SignedExtension, + transaction_validity::TransactionValidityError, +}; + +/// Ensure the transaction version registered in the transaction is the same as at present. +#[derive(Encode, Decode, Clone, Eq, PartialEq)] +pub struct CheckTxVersion(sp_std::marker::PhantomData); + +impl sp_std::fmt::Debug for CheckTxVersion { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "CheckTxVersion") + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } +} + +impl CheckTxVersion { + /// Create new `SignedExtension` to check transaction version. 
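	// (Both version checks work purely through `additional_signed`: the current `spec_version`
	// and `transaction_version` are mixed into the payload that gets signed, so a transaction
	// built against an older runtime simply fails signature verification after an upgrade
	// instead of being dispatched under changed semantics.)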
+ pub fn new() -> Self { + Self(sp_std::marker::PhantomData) + } +} + +impl SignedExtension for CheckTxVersion { + type AccountId = T::AccountId; + type Call = ::Call; + type AdditionalSigned = u32; + type Pre = (); + const IDENTIFIER: &'static str = "CheckTxVersion"; + + fn additional_signed(&self) -> Result { + Ok(>::runtime_version().transaction_version) + } +} diff --git a/frame/system/src/extensions/check_weight.rs b/frame/system/src/extensions/check_weight.rs new file mode 100644 index 0000000000..d52138b1e3 --- /dev/null +++ b/frame/system/src/extensions/check_weight.rs @@ -0,0 +1,644 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{Trait, Module}; +use codec::{Encode, Decode}; +use sp_runtime::{ + traits::{SignedExtension, DispatchInfoOf, Dispatchable, PostDispatchInfoOf, Printable}, + transaction_validity::{ + ValidTransaction, TransactionValidityError, InvalidTransaction, TransactionValidity, + TransactionPriority, + }, + Perbill, DispatchResult, +}; +use frame_support::{ + traits::{Get}, + weights::{PostDispatchInfo, DispatchInfo, DispatchClass}, + StorageValue, +}; + +/// Block resource (weight) limit check. +#[derive(Encode, Decode, Clone, Eq, PartialEq, Default)] +pub struct CheckWeight(sp_std::marker::PhantomData); + +impl CheckWeight where + T::Call: Dispatchable +{ + /// Get the quota ratio of each dispatch class type. This indicates that all operational and mandatory + /// dispatches can use the full capacity of any resource, while user-triggered ones can consume + /// a portion. + fn get_dispatch_limit_ratio(class: DispatchClass) -> Perbill { + match class { + DispatchClass::Operational | DispatchClass::Mandatory + => ::one(), + DispatchClass::Normal => T::AvailableBlockRatio::get(), + } + } + + /// Checks if the current extrinsic does not exceed `MaximumExtrinsicWeight` limit. + fn check_extrinsic_weight( + info: &DispatchInfoOf, + ) -> Result<(), TransactionValidityError> { + match info.class { + // Mandatory transactions are included in a block unconditionally, so + // we don't verify weight. + DispatchClass::Mandatory => Ok(()), + // Normal transactions must not exceed `MaximumExtrinsicWeight`. + DispatchClass::Normal => { + let maximum_weight = T::MaximumExtrinsicWeight::get(); + let extrinsic_weight = info.weight.saturating_add(T::ExtrinsicBaseWeight::get()); + if extrinsic_weight > maximum_weight { + Err(InvalidTransaction::ExhaustsResources.into()) + } else { + Ok(()) + } + }, + // For operational transactions we make sure it doesn't exceed + // the space alloted for `Operational` class. 
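			// (Concretely: an operational extrinsic is rejected with `ExhaustsResources` once
			// `info.weight + ExtrinsicBaseWeight` exceeds the operational share of
			// `MaximumBlockWeight` minus `BlockExecutionWeight`; the
			// `operational_extrinsic_limited_by_operational_space_limit` test below exercises
			// exactly this boundary.)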
+ DispatchClass::Operational => { + let maximum_weight = T::MaximumBlockWeight::get(); + let operational_limit = + Self::get_dispatch_limit_ratio(DispatchClass::Operational) * maximum_weight; + let operational_limit = + operational_limit.saturating_sub(T::BlockExecutionWeight::get()); + let extrinsic_weight = info.weight.saturating_add(T::ExtrinsicBaseWeight::get()); + if extrinsic_weight > operational_limit { + Err(InvalidTransaction::ExhaustsResources.into()) + } else { + Ok(()) + } + }, + } + } + + /// Checks if the current extrinsic can fit into the block with respect to block weight limits. + /// + /// Upon successes, it returns the new block weight as a `Result`. + fn check_block_weight( + info: &DispatchInfoOf, + ) -> Result { + let maximum_weight = T::MaximumBlockWeight::get(); + let mut all_weight = Module::::block_weight(); + match info.class { + // If we have a dispatch that must be included in the block, it ignores all the limits. + DispatchClass::Mandatory => { + let extrinsic_weight = info.weight.saturating_add(T::ExtrinsicBaseWeight::get()); + all_weight.add(extrinsic_weight, DispatchClass::Mandatory); + Ok(all_weight) + }, + // If we have a normal dispatch, we follow all the normal rules and limits. + DispatchClass::Normal => { + let normal_limit = Self::get_dispatch_limit_ratio(DispatchClass::Normal) * maximum_weight; + let extrinsic_weight = info.weight.checked_add(T::ExtrinsicBaseWeight::get()) + .ok_or(InvalidTransaction::ExhaustsResources)?; + all_weight.checked_add(extrinsic_weight, DispatchClass::Normal) + .map_err(|_| InvalidTransaction::ExhaustsResources)?; + if all_weight.get(DispatchClass::Normal) > normal_limit { + Err(InvalidTransaction::ExhaustsResources.into()) + } else { + Ok(all_weight) + } + }, + // If we have an operational dispatch, allow it if we have not used our full + // "operational space" (independent of existing fullness). + DispatchClass::Operational => { + let operational_limit = Self::get_dispatch_limit_ratio(DispatchClass::Operational) * maximum_weight; + let normal_limit = Self::get_dispatch_limit_ratio(DispatchClass::Normal) * maximum_weight; + let operational_space = operational_limit.saturating_sub(normal_limit); + + let extrinsic_weight = info.weight.checked_add(T::ExtrinsicBaseWeight::get()) + .ok_or(InvalidTransaction::ExhaustsResources)?; + all_weight.checked_add(extrinsic_weight, DispatchClass::Operational) + .map_err(|_| InvalidTransaction::ExhaustsResources)?; + + // If it would fit in normally, its okay + if all_weight.total() <= maximum_weight || + // If we have not used our operational space + all_weight.get(DispatchClass::Operational) <= operational_space { + Ok(all_weight) + } else { + Err(InvalidTransaction::ExhaustsResources.into()) + } + } + } + } + + /// Checks if the current extrinsic can fit into the block with respect to block length limits. + /// + /// Upon successes, it returns the new block length as a `Result`. + fn check_block_length( + info: &DispatchInfoOf, + len: usize, + ) -> Result { + let current_len = Module::::all_extrinsics_len(); + let maximum_len = T::MaximumBlockLength::get(); + let limit = Self::get_dispatch_limit_ratio(info.class) * maximum_len; + let added_len = len as u32; + let next_len = current_len.saturating_add(added_len); + if next_len > limit { + Err(InvalidTransaction::ExhaustsResources.into()) + } else { + Ok(next_len) + } + } + + /// get the priority of an extrinsic denoted by `info`. 
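	// (Putting the two block-fullness checks above into numbers, using the mock referenced in the
	// tests further down: with `MaximumBlockWeight = 1024` and 75% of it available to `Normal`
	// dispatches, normal extrinsics may fill the block up to 768 weight, while operational ones
	// additionally have the reserved 1024 - 768 = 256 of operational space at their disposal.)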
+ fn get_priority(info: &DispatchInfoOf) -> TransactionPriority { + match info.class { + DispatchClass::Normal => info.weight.into(), + // Don't use up the whole priority space, to allow things like `tip` + // to be taken into account as well. + DispatchClass::Operational => TransactionPriority::max_value() / 2, + // Mandatory extrinsics are only for inherents; never transactions. + DispatchClass::Mandatory => TransactionPriority::min_value(), + } + } + + /// Creates new `SignedExtension` to check weight of the extrinsic. + pub fn new() -> Self { + Self(Default::default()) + } + + /// Do the pre-dispatch checks. This can be applied to both signed and unsigned. + /// + /// It checks and notes the new weight and length. + fn do_pre_dispatch( + info: &DispatchInfoOf, + len: usize, + ) -> Result<(), TransactionValidityError> { + let next_len = Self::check_block_length(info, len)?; + let next_weight = Self::check_block_weight(info)?; + Self::check_extrinsic_weight(info)?; + + crate::AllExtrinsicsLen::put(next_len); + crate::BlockWeight::put(next_weight); + Ok(()) + } + + /// Do the validate checks. This can be applied to both signed and unsigned. + /// + /// It only checks that the block weight and length limit will not exceed. + fn do_validate( + info: &DispatchInfoOf, + len: usize, + ) -> TransactionValidity { + // ignore the next length. If they return `Ok`, then it is below the limit. + let _ = Self::check_block_length(info, len)?; + // during validation we skip block limit check. Since the `validate_transaction` + // call runs on an empty block anyway, by this we prevent `on_initialize` weight + // consumption from causing false negatives. + Self::check_extrinsic_weight(info)?; + + Ok(ValidTransaction { priority: Self::get_priority(info), ..Default::default() }) + } +} + +impl SignedExtension for CheckWeight where + T::Call: Dispatchable +{ + type AccountId = T::AccountId; + type Call = T::Call; + type AdditionalSigned = (); + type Pre = (); + const IDENTIFIER: &'static str = "CheckWeight"; + + fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } + + fn pre_dispatch( + self, + _who: &Self::AccountId, + _call: &Self::Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result<(), TransactionValidityError> { + if info.class == DispatchClass::Mandatory { + Err(InvalidTransaction::MandatoryDispatch)? + } + Self::do_pre_dispatch(info, len) + } + + fn validate( + &self, + _who: &Self::AccountId, + _call: &Self::Call, + info: &DispatchInfoOf, + len: usize, + ) -> TransactionValidity { + if info.class == DispatchClass::Mandatory { + Err(InvalidTransaction::MandatoryDispatch)? + } + Self::do_validate(info, len) + } + + fn pre_dispatch_unsigned( + _call: &Self::Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result<(), TransactionValidityError> { + Self::do_pre_dispatch(info, len) + } + + fn validate_unsigned( + _call: &Self::Call, + info: &DispatchInfoOf, + len: usize, + ) -> TransactionValidity { + Self::do_validate(info, len) + } + + fn post_dispatch( + _pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + _len: usize, + result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + // Since mandatory dispatched do not get validated for being overweight, we are sensitive + // to them actually being useful. Block producers are thus not allowed to include mandatory + // extrinsics that result in error. 
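		// (Spelled out: a block author who includes a mandatory extrinsic that returns `Err` hits
		// the `BadMandatory` rejection below and thus produces an invalid block; afterwards any
		// weight reported as unspent via `post_info` is refunded from the `BlockWeight` tracker.)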
+ if let (DispatchClass::Mandatory, Err(e)) = (info.class, result) { + "Bad mandantory".print(); + e.print(); + + Err(InvalidTransaction::BadMandatory)? + } + + let unspent = post_info.calc_unspent(info); + if unspent > 0 { + crate::BlockWeight::mutate(|current_weight| { + current_weight.sub(unspent, info.class); + }) + } + + Ok(()) + } +} + +impl sp_std::fmt::Debug for CheckWeight { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "CheckWeight") + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{BlockWeight, AllExtrinsicsLen}; + use crate::mock::{Test, CALL, new_test_ext, System}; + use sp_std::marker::PhantomData; + use frame_support::{assert_ok, assert_noop}; + use frame_support::weights::{Weight, Pays}; + + fn normal_weight_limit() -> Weight { + ::AvailableBlockRatio::get() * ::MaximumBlockWeight::get() + } + + fn normal_length_limit() -> u32 { + ::AvailableBlockRatio::get() * ::MaximumBlockLength::get() + } + + #[test] + fn mandatory_extrinsic_doesnt_care_about_limits() { + fn check(call: impl FnOnce(&DispatchInfo, usize)) { + new_test_ext().execute_with(|| { + let max = DispatchInfo { + weight: Weight::max_value(), + class: DispatchClass::Mandatory, + ..Default::default() + }; + let len = 0_usize; + + call(&max, len); + }); + } + + check(|max, len| { + assert_ok!(CheckWeight::::do_pre_dispatch(max, len)); + assert_eq!(System::block_weight().total(), Weight::max_value()); + assert!(System::block_weight().total() > ::MaximumBlockWeight::get()); + }); + check(|max, len| { + assert_ok!(CheckWeight::::do_validate(max, len)); + }); + } + + #[test] + fn normal_extrinsic_limited_by_maximum_extrinsic_weight() { + new_test_ext().execute_with(|| { + let max = DispatchInfo { + weight: ::MaximumExtrinsicWeight::get() + 1, + class: DispatchClass::Normal, + ..Default::default() + }; + let len = 0_usize; + + assert_noop!( + CheckWeight::::do_validate(&max, len), + InvalidTransaction::ExhaustsResources + ); + }); + } + + #[test] + fn operational_extrinsic_limited_by_operational_space_limit() { + new_test_ext().execute_with(|| { + let operational_limit = CheckWeight::::get_dispatch_limit_ratio( + DispatchClass::Operational + ) * ::MaximumBlockWeight::get(); + let base_weight = ::ExtrinsicBaseWeight::get(); + let block_base = ::BlockExecutionWeight::get(); + + let weight = operational_limit - base_weight - block_base; + let okay = DispatchInfo { + weight, + class: DispatchClass::Operational, + ..Default::default() + }; + let max = DispatchInfo { + weight: weight + 1, + class: DispatchClass::Operational, + ..Default::default() + }; + let len = 0_usize; + + assert_eq!( + CheckWeight::::do_validate(&okay, len), + Ok(ValidTransaction { + priority: CheckWeight::::get_priority(&okay), + ..Default::default() + }) + ); + assert_noop!( + CheckWeight::::do_validate(&max, len), + InvalidTransaction::ExhaustsResources + ); + }); + } + + #[test] + fn register_extra_weight_unchecked_doesnt_care_about_limits() { + new_test_ext().execute_with(|| { + System::register_extra_weight_unchecked(Weight::max_value(), DispatchClass::Normal); + assert_eq!(System::block_weight().total(), Weight::max_value()); + assert!(System::block_weight().total() > ::MaximumBlockWeight::get()); + }); + } + + #[test] + fn full_block_with_normal_and_operational() { + new_test_ext().execute_with(|| { + // Max block is 1024 + // Max normal is 768 (75%) + 
// 10 is taken for block execution weight + // So normal extrinsic can be 758 weight (-5 for base extrinsic weight) + // And Operational can be 256 to produce a full block (-5 for base) + let max_normal = DispatchInfo { weight: 753, ..Default::default() }; + let rest_operational = DispatchInfo { weight: 251, class: DispatchClass::Operational, ..Default::default() }; + + let len = 0_usize; + + assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); + assert_eq!(System::block_weight().total(), 768); + assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); + assert_eq!(::MaximumBlockWeight::get(), 1024); + assert_eq!(System::block_weight().total(), ::MaximumBlockWeight::get()); + // Checking single extrinsic should not take current block weight into account. + assert_eq!(CheckWeight::::check_extrinsic_weight(&rest_operational), Ok(())); + }); + } + + #[test] + fn dispatch_order_does_not_effect_weight_logic() { + new_test_ext().execute_with(|| { + // We switch the order of `full_block_with_normal_and_operational` + let max_normal = DispatchInfo { weight: 753, ..Default::default() }; + let rest_operational = DispatchInfo { weight: 251, class: DispatchClass::Operational, ..Default::default() }; + + let len = 0_usize; + + assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); + // Extra 15 here from block execution + base extrinsic weight + assert_eq!(System::block_weight().total(), 266); + assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); + assert_eq!(::MaximumBlockWeight::get(), 1024); + assert_eq!(System::block_weight().total(), ::MaximumBlockWeight::get()); + }); + } + + #[test] + fn operational_works_on_full_block() { + new_test_ext().execute_with(|| { + // An on_initialize takes up the whole block! (Every time!) + System::register_extra_weight_unchecked(Weight::max_value(), DispatchClass::Mandatory); + let dispatch_normal = DispatchInfo { weight: 251, class: DispatchClass::Normal, ..Default::default() }; + let dispatch_operational = DispatchInfo { weight: 251, class: DispatchClass::Operational, ..Default::default() }; + let len = 0_usize; + + assert_noop!( + CheckWeight::::do_pre_dispatch(&dispatch_normal, len), + InvalidTransaction::ExhaustsResources + ); + // Thank goodness we can still do an operational transaction to possibly save the blockchain. + assert_ok!(CheckWeight::::do_pre_dispatch(&dispatch_operational, len)); + // Not too much though + assert_noop!( + CheckWeight::::do_pre_dispatch(&dispatch_operational, len), + InvalidTransaction::ExhaustsResources + ); + // Even with full block, validity of single transaction should be correct. + assert_eq!(CheckWeight::::check_extrinsic_weight(&dispatch_operational), Ok(())); + }); + } + + #[test] + fn signed_ext_check_weight_works_operational_tx() { + new_test_ext().execute_with(|| { + let normal = DispatchInfo { weight: 100, ..Default::default() }; + let op = DispatchInfo { weight: 100, class: DispatchClass::Operational, pays_fee: Pays::Yes }; + let len = 0_usize; + let normal_limit = normal_weight_limit(); + + // given almost full block + BlockWeight::mutate(|current_weight| { + current_weight.put(normal_limit, DispatchClass::Normal) + }); + // will not fit. + assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len).is_err()); + // will fit. + assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &op, len).is_ok()); + + // likewise for length limit. 
+ let len = 100_usize; + AllExtrinsicsLen::put(normal_length_limit()); + assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len).is_err()); + assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &op, len).is_ok()); + }) + } + + #[test] + fn signed_ext() { + new_test_ext().execute_with(|| { + let normal = DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes }; + let op = DispatchInfo { weight: 100, class: DispatchClass::Operational, pays_fee: Pays::Yes }; + let len = 0_usize; + + let priority = CheckWeight::(PhantomData) + .validate(&1, CALL, &normal, len) + .unwrap() + .priority; + assert_eq!(priority, 100); + + let priority = CheckWeight::(PhantomData) + .validate(&1, CALL, &op, len) + .unwrap() + .priority; + assert_eq!(priority, u64::max_value() / 2); + }) + } + + #[test] + fn signed_ext_check_weight_block_size_works() { + new_test_ext().execute_with(|| { + let normal = DispatchInfo::default(); + let normal_limit = normal_weight_limit() as usize; + let reset_check_weight = |tx, s, f| { + AllExtrinsicsLen::put(0); + let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, tx, s); + if f { assert!(r.is_err()) } else { assert!(r.is_ok()) } + }; + + reset_check_weight(&normal, normal_limit - 1, false); + reset_check_weight(&normal, normal_limit, false); + reset_check_weight(&normal, normal_limit + 1, true); + + // Operational ones don't have this limit. + let op = DispatchInfo { weight: 0, class: DispatchClass::Operational, pays_fee: Pays::Yes }; + reset_check_weight(&op, normal_limit, false); + reset_check_weight(&op, normal_limit + 100, false); + reset_check_weight(&op, 1024, false); + reset_check_weight(&op, 1025, true); + }) + } + + + #[test] + fn signed_ext_check_weight_works_normal_tx() { + new_test_ext().execute_with(|| { + let normal_limit = normal_weight_limit(); + let small = DispatchInfo { weight: 100, ..Default::default() }; + let medium = DispatchInfo { + weight: normal_limit - ::ExtrinsicBaseWeight::get(), + ..Default::default() + }; + let big = DispatchInfo { + weight: normal_limit - ::ExtrinsicBaseWeight::get() + 1, + ..Default::default() + }; + let len = 0_usize; + + let reset_check_weight = |i, f, s| { + BlockWeight::mutate(|current_weight| { + current_weight.put(s, DispatchClass::Normal) + }); + let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, i, len); + if f { assert!(r.is_err()) } else { assert!(r.is_ok()) } + }; + + reset_check_weight(&small, false, 0); + reset_check_weight(&medium, false, 0); + reset_check_weight(&big, true, 1); + }) + } + + #[test] + fn signed_ext_check_weight_refund_works() { + new_test_ext().execute_with(|| { + // This is half of the max block weight + let info = DispatchInfo { weight: 512, ..Default::default() }; + let post_info = PostDispatchInfo { actual_weight: Some(128), }; + let len = 0_usize; + + // We allow 75% for normal transaction, so we put 25% - extrinsic base weight + BlockWeight::mutate(|current_weight| { + current_weight.put(256 - ::ExtrinsicBaseWeight::get(), DispatchClass::Normal) + }); + + let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); + assert_eq!(BlockWeight::get().total(), info.weight + 256); + + assert!( + CheckWeight::::post_dispatch(pre, &info, &post_info, len, &Ok(())) + .is_ok() + ); + assert_eq!( + BlockWeight::get().total(), + post_info.actual_weight.unwrap() + 256, + ); + }) + } + + #[test] + fn signed_ext_check_weight_actual_weight_higher_than_max_is_capped() { + new_test_ext().execute_with(|| { + let info = DispatchInfo { 
weight: 512, ..Default::default() }; + let post_info = PostDispatchInfo { actual_weight: Some(700), }; + let len = 0_usize; + + BlockWeight::mutate(|current_weight| { + current_weight.put(128, DispatchClass::Normal) + }); + + let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); + assert_eq!( + BlockWeight::get().total(), + info.weight + 128 + ::ExtrinsicBaseWeight::get(), + ); + + assert!( + CheckWeight::::post_dispatch(pre, &info, &post_info, len, &Ok(())) + .is_ok() + ); + assert_eq!( + BlockWeight::get().total(), + info.weight + 128 + ::ExtrinsicBaseWeight::get(), + ); + }) + } + + #[test] + fn zero_weight_extrinsic_still_has_base_weight() { + new_test_ext().execute_with(|| { + let free = DispatchInfo { weight: 0, ..Default::default() }; + let len = 0_usize; + + // Initial weight from `BlockExecutionWeight` + assert_eq!(System::block_weight().total(), ::BlockExecutionWeight::get()); + let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &free, len); + assert!(r.is_ok()); + assert_eq!( + System::block_weight().total(), + ::ExtrinsicBaseWeight::get() + ::BlockExecutionWeight::get() + ); + }) + } +} diff --git a/frame/system/src/extensions/mod.rs b/frame/system/src/extensions/mod.rs new file mode 100644 index 0000000000..ff61353e2d --- /dev/null +++ b/frame/system/src/extensions/mod.rs @@ -0,0 +1,24 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
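The new `extensions` module gathers the signed extensions that previously lived in `lib.rs`; they are re-exported from the crate root further down, together with a backward-compatible `CheckEra` alias for `CheckMortality`. As a rough sketch only (the runtime name and the ordering are assumptions, not taken from this patch), a downstream runtime would keep wiring them into its `SignedExtra` tuple through the same `frame_system` paths as before:

    // Illustrative only: `Runtime` is a hypothetical runtime implementing the system `Trait`.
    pub type SignedExtra = (
        frame_system::CheckSpecVersion<Runtime>,
        frame_system::CheckTxVersion<Runtime>,
        frame_system::CheckGenesis<Runtime>,
        frame_system::CheckEra<Runtime>, // alias for `CheckMortality`
        frame_system::CheckNonce<Runtime>,
        frame_system::CheckWeight<Runtime>,
    );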
+ +pub mod check_genesis; +pub mod check_mortality; +pub mod check_nonce; +pub mod check_spec_version; +pub mod check_tx_version; +pub mod check_weight; + diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 8eec6a2c37..18723fff29 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -102,17 +102,12 @@ use sp_std::marker::PhantomData; use sp_std::fmt::Debug; use sp_version::RuntimeVersion; use sp_runtime::{ - RuntimeDebug, Perbill, DispatchError, DispatchResult, Either, - generic::{self, Era}, - transaction_validity::{ - ValidTransaction, TransactionPriority, TransactionLongevity, TransactionValidityError, - InvalidTransaction, TransactionValidity, - }, + RuntimeDebug, Perbill, DispatchError, Either, generic, traits::{ - self, CheckEqual, AtLeast32Bit, Zero, SignedExtension, Lookup, LookupError, - SimpleBitOps, Hash, Member, MaybeDisplay, BadOrigin, SaturatedConversion, + self, CheckEqual, AtLeast32Bit, Zero, Lookup, LookupError, + SimpleBitOps, Hash, Member, MaybeDisplay, BadOrigin, MaybeSerialize, MaybeSerializeDeserialize, MaybeMallocSizeOf, StaticLookup, One, Bounded, - Dispatchable, DispatchInfoOf, PostDispatchInfoOf, Printable, + Dispatchable, }, offchain::storage_lock::BlockNumberProvider, }; @@ -126,7 +121,7 @@ use frame_support::{ StoredMap, EnsureOrigin, OriginTrait, Filter, }, weights::{ - Weight, RuntimeDbWeight, DispatchInfo, PostDispatchInfo, DispatchClass, + Weight, RuntimeDbWeight, DispatchInfo, DispatchClass, extract_actual_weight, }, dispatch::DispatchResultWithPostInfo, @@ -137,6 +132,21 @@ use codec::{Encode, Decode, FullCodec, EncodeLike}; use sp_io::TestExternalities; pub mod offchain; +#[cfg(test)] +pub(crate) mod mock; + +mod extensions; +mod weights; +#[cfg(test)] +mod tests; + +pub use extensions::{ + check_mortality::CheckMortality, check_genesis::CheckGenesis, check_nonce::CheckNonce, + check_spec_version::CheckSpecVersion, check_tx_version::CheckTxVersion, + check_weight::CheckWeight, +}; +// Backward compatible re-export. +pub use extensions::check_mortality::CheckMortality as CheckEra; /// Compute the trie root of a list of extrinsics. pub fn extrinsics_root(extrinsics: &[E]) -> H::Output { @@ -372,60 +382,6 @@ impl From for LastRuntimeUpgradeInfo { } } -/// An object to track the currently used extrinsic weight in a block. -#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode)] -pub struct ExtrinsicsWeight { - normal: Weight, - operational: Weight, -} - -impl ExtrinsicsWeight { - /// Returns the total weight consumed by all extrinsics in the block. - pub fn total(&self) -> Weight { - self.normal.saturating_add(self.operational) - } - - /// Add some weight of a specific dispatch class, saturating at the numeric bounds of `Weight`. - pub fn add(&mut self, weight: Weight, class: DispatchClass) { - let value = self.get_mut(class); - *value = value.saturating_add(weight); - } - - /// Try to add some weight of a specific dispatch class, returning Err(()) if overflow would occur. - pub fn checked_add(&mut self, weight: Weight, class: DispatchClass) -> Result<(), ()> { - let value = self.get_mut(class); - *value = value.checked_add(weight).ok_or(())?; - Ok(()) - } - - /// Subtract some weight of a specific dispatch class, saturating at the numeric bounds of `Weight`. - pub fn sub(&mut self, weight: Weight, class: DispatchClass) { - let value = self.get_mut(class); - *value = value.saturating_sub(weight); - } - - /// Get the current weight of a specific dispatch class. 
- pub fn get(&self, class: DispatchClass) -> Weight { - match class { - DispatchClass::Operational => self.operational, - DispatchClass::Normal | DispatchClass::Mandatory => self.normal, - } - } - - /// Get a mutable reference to the current weight of a specific dispatch class. - fn get_mut(&mut self, class: DispatchClass) -> &mut Weight { - match class { - DispatchClass::Operational => &mut self.operational, - DispatchClass::Normal | DispatchClass::Mandatory => &mut self.normal, - } - } - - /// Set the weight of a specific dispatch class. - pub fn put(&mut self, new: Weight, class: DispatchClass) { - *self.get_mut(class) = new; - } -} - decl_storage! { trait Store for Module as System { /// The full account information for a particular account ID. @@ -436,7 +392,7 @@ decl_storage! { ExtrinsicCount: Option; /// The current weight for the block. - BlockWeight get(fn block_weight): ExtrinsicsWeight; + BlockWeight get(fn block_weight): weights::ExtrinsicsWeight; /// Total length (in bytes) for all extrinsics put together, for the current block. AllExtrinsicsLen: Option; @@ -1372,360 +1328,6 @@ pub fn split_inner(option: Option, splitter: impl FnOnce(T) -> (R, S } } -/// resource limit check. -#[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct CheckWeight(PhantomData); - -impl CheckWeight where - T::Call: Dispatchable -{ - /// Get the quota ratio of each dispatch class type. This indicates that all operational and mandatory - /// dispatches can use the full capacity of any resource, while user-triggered ones can consume - /// a portion. - fn get_dispatch_limit_ratio(class: DispatchClass) -> Perbill { - match class { - DispatchClass::Operational | DispatchClass::Mandatory - => ::one(), - DispatchClass::Normal => T::AvailableBlockRatio::get(), - } - } - - /// Checks if the current extrinsic does not exceed `MaximumExtrinsicWeight` limit. - fn check_extrinsic_weight( - info: &DispatchInfoOf, - ) -> Result<(), TransactionValidityError> { - match info.class { - // Mandatory transactions are included in a block unconditionally, so - // we don't verify weight. - DispatchClass::Mandatory => Ok(()), - // Normal transactions must not exceed `MaximumExtrinsicWeight`. - DispatchClass::Normal => { - let maximum_weight = T::MaximumExtrinsicWeight::get(); - let extrinsic_weight = info.weight.saturating_add(T::ExtrinsicBaseWeight::get()); - if extrinsic_weight > maximum_weight { - Err(InvalidTransaction::ExhaustsResources.into()) - } else { - Ok(()) - } - }, - // For operational transactions we make sure it doesn't exceed - // the space alloted for `Operational` class. - DispatchClass::Operational => { - let maximum_weight = T::MaximumBlockWeight::get(); - let operational_limit = - Self::get_dispatch_limit_ratio(DispatchClass::Operational) * maximum_weight; - let operational_limit = - operational_limit.saturating_sub(T::BlockExecutionWeight::get()); - let extrinsic_weight = info.weight.saturating_add(T::ExtrinsicBaseWeight::get()); - if extrinsic_weight > operational_limit { - Err(InvalidTransaction::ExhaustsResources.into()) - } else { - Ok(()) - } - }, - } - } - - /// Checks if the current extrinsic can fit into the block with respect to block weight limits. - /// - /// Upon successes, it returns the new block weight as a `Result`. 
- fn check_block_weight( - info: &DispatchInfoOf, - ) -> Result { - let maximum_weight = T::MaximumBlockWeight::get(); - let mut all_weight = Module::::block_weight(); - match info.class { - // If we have a dispatch that must be included in the block, it ignores all the limits. - DispatchClass::Mandatory => { - let extrinsic_weight = info.weight.saturating_add(T::ExtrinsicBaseWeight::get()); - all_weight.add(extrinsic_weight, DispatchClass::Mandatory); - Ok(all_weight) - }, - // If we have a normal dispatch, we follow all the normal rules and limits. - DispatchClass::Normal => { - let normal_limit = Self::get_dispatch_limit_ratio(DispatchClass::Normal) * maximum_weight; - let extrinsic_weight = info.weight.checked_add(T::ExtrinsicBaseWeight::get()) - .ok_or(InvalidTransaction::ExhaustsResources)?; - all_weight.checked_add(extrinsic_weight, DispatchClass::Normal) - .map_err(|_| InvalidTransaction::ExhaustsResources)?; - if all_weight.get(DispatchClass::Normal) > normal_limit { - Err(InvalidTransaction::ExhaustsResources.into()) - } else { - Ok(all_weight) - } - }, - // If we have an operational dispatch, allow it if we have not used our full - // "operational space" (independent of existing fullness). - DispatchClass::Operational => { - let operational_limit = Self::get_dispatch_limit_ratio(DispatchClass::Operational) * maximum_weight; - let normal_limit = Self::get_dispatch_limit_ratio(DispatchClass::Normal) * maximum_weight; - let operational_space = operational_limit.saturating_sub(normal_limit); - - let extrinsic_weight = info.weight.checked_add(T::ExtrinsicBaseWeight::get()) - .ok_or(InvalidTransaction::ExhaustsResources)?; - all_weight.checked_add(extrinsic_weight, DispatchClass::Operational) - .map_err(|_| InvalidTransaction::ExhaustsResources)?; - - // If it would fit in normally, its okay - if all_weight.total() <= maximum_weight || - // If we have not used our operational space - all_weight.get(DispatchClass::Operational) <= operational_space { - Ok(all_weight) - } else { - Err(InvalidTransaction::ExhaustsResources.into()) - } - } - } - } - - /// Checks if the current extrinsic can fit into the block with respect to block length limits. - /// - /// Upon successes, it returns the new block length as a `Result`. - fn check_block_length( - info: &DispatchInfoOf, - len: usize, - ) -> Result { - let current_len = Module::::all_extrinsics_len(); - let maximum_len = T::MaximumBlockLength::get(); - let limit = Self::get_dispatch_limit_ratio(info.class) * maximum_len; - let added_len = len as u32; - let next_len = current_len.saturating_add(added_len); - if next_len > limit { - Err(InvalidTransaction::ExhaustsResources.into()) - } else { - Ok(next_len) - } - } - - /// get the priority of an extrinsic denoted by `info`. - fn get_priority(info: &DispatchInfoOf) -> TransactionPriority { - match info.class { - DispatchClass::Normal => info.weight.into(), - // Don't use up the whole priority space, to allow things like `tip` - // to be taken into account as well. - DispatchClass::Operational => TransactionPriority::max_value() / 2, - // Mandatory extrinsics are only for inherents; never transactions. - DispatchClass::Mandatory => TransactionPriority::min_value(), - } - } - - /// Creates new `SignedExtension` to check weight of the extrinsic. - pub fn new() -> Self { - Self(PhantomData) - } - - /// Do the pre-dispatch checks. This can be applied to both signed and unsigned. - /// - /// It checks and notes the new weight and length. 
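For orientation, the arithmetic behind the magic numbers in the `CheckWeight` tests of this patch (768, 753, 251, 266), using the mock constants defined later (`MaximumBlockWeight = 1024`, `AvailableBlockRatio = 75%`, `BlockExecutionWeight = 10`, `ExtrinsicBaseWeight = 5`), works out as follows; this is a reader's aid, not code from the patch:

    // normal limit             = 75% of 1024                = 768
    // operational space        = 1024 - 768                  = 256
    // block execution weight   = 10  (charged once per block, as Mandatory)
    // largest normal dispatch  = 768 - 10 - 5 (base weight)  = 753
    // remaining operational    = 256 - 5 (base weight)       = 251
    // operational-first total  = 10 + 251 + 5                = 266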
- fn do_pre_dispatch( - info: &DispatchInfoOf, - len: usize, - ) -> Result<(), TransactionValidityError> { - let next_len = Self::check_block_length(info, len)?; - let next_weight = Self::check_block_weight(info)?; - Self::check_extrinsic_weight(info)?; - - AllExtrinsicsLen::put(next_len); - BlockWeight::put(next_weight); - Ok(()) - } - - /// Do the validate checks. This can be applied to both signed and unsigned. - /// - /// It only checks that the block weight and length limit will not exceed. - fn do_validate( - info: &DispatchInfoOf, - len: usize, - ) -> TransactionValidity { - // ignore the next length. If they return `Ok`, then it is below the limit. - let _ = Self::check_block_length(info, len)?; - // during validation we skip block limit check. Since the `validate_transaction` - // call runs on an empty block anyway, by this we prevent `on_initialize` weight - // consumption from causing false negatives. - Self::check_extrinsic_weight(info)?; - - Ok(ValidTransaction { priority: Self::get_priority(info), ..Default::default() }) - } -} - -impl SignedExtension for CheckWeight where - T::Call: Dispatchable -{ - type AccountId = T::AccountId; - type Call = T::Call; - type AdditionalSigned = (); - type Pre = (); - const IDENTIFIER: &'static str = "CheckWeight"; - - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } - - fn pre_dispatch( - self, - _who: &Self::AccountId, - _call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result<(), TransactionValidityError> { - if info.class == DispatchClass::Mandatory { - Err(InvalidTransaction::MandatoryDispatch)? - } - Self::do_pre_dispatch(info, len) - } - - fn validate( - &self, - _who: &Self::AccountId, - _call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> TransactionValidity { - if info.class == DispatchClass::Mandatory { - Err(InvalidTransaction::MandatoryDispatch)? - } - Self::do_validate(info, len) - } - - fn pre_dispatch_unsigned( - _call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result<(), TransactionValidityError> { - Self::do_pre_dispatch(info, len) - } - - fn validate_unsigned( - _call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> TransactionValidity { - Self::do_validate(info, len) - } - - fn post_dispatch( - _pre: Self::Pre, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, - _len: usize, - result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - // Since mandatory dispatched do not get validated for being overweight, we are sensitive - // to them actually being useful. Block producers are thus not allowed to include mandatory - // extrinsics that result in error. - if let (DispatchClass::Mandatory, Err(e)) = (info.class, result) { - "Bad mandantory".print(); - e.print(); - - Err(InvalidTransaction::BadMandatory)? - } - - let unspent = post_info.calc_unspent(info); - if unspent > 0 { - BlockWeight::mutate(|current_weight| { - current_weight.sub(unspent, info.class); - }) - } - - Ok(()) - } -} - -impl Debug for CheckWeight { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "CheckWeight") - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - -/// Nonce check and increment to give replay protection for transactions. -#[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct CheckNonce(#[codec(compact)] T::Index); - -impl CheckNonce { - /// utility constructor. 
Used only in client/factory code. - pub fn from(nonce: T::Index) -> Self { - Self(nonce) - } -} - -impl Debug for CheckNonce { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "CheckNonce({})", self.0) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - -impl SignedExtension for CheckNonce where - T::Call: Dispatchable -{ - type AccountId = T::AccountId; - type Call = T::Call; - type AdditionalSigned = (); - type Pre = (); - const IDENTIFIER: &'static str = "CheckNonce"; - - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } - - fn pre_dispatch( - self, - who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> Result<(), TransactionValidityError> { - let mut account = Account::::get(who); - if self.0 != account.nonce { - return Err( - if self.0 < account.nonce { - InvalidTransaction::Stale - } else { - InvalidTransaction::Future - }.into() - ) - } - account.nonce += T::Index::one(); - Account::::insert(who, account); - Ok(()) - } - - fn validate( - &self, - who: &Self::AccountId, - _call: &Self::Call, - info: &DispatchInfoOf, - _len: usize, - ) -> TransactionValidity { - // check index - let account = Account::::get(who); - if self.0 < account.nonce { - return InvalidTransaction::Stale.into() - } - - let provides = vec![Encode::encode(&(who, self.0))]; - let requires = if account.nonce < self.0 { - vec![Encode::encode(&(who, self.0 - One::one()))] - } else { - vec![] - }; - - Ok(ValidTransaction { - priority: info.weight as TransactionPriority, - requires, - provides, - longevity: TransactionLongevity::max_value(), - propagate: true, - }) - } -} impl IsDeadAccount for Module { fn is_dead_account(who: &T::AccountId) -> bool { @@ -1733,167 +1335,6 @@ impl IsDeadAccount for Module { } } -/// Check for transaction mortality. -#[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct CheckEra(Era, sp_std::marker::PhantomData); - -impl CheckEra { - /// utility constructor. Used only in client/factory code. - pub fn from(era: Era) -> Self { - Self(era, sp_std::marker::PhantomData) - } -} - -impl Debug for CheckEra { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "CheckEra({:?})", self.0) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - -impl SignedExtension for CheckEra { - type AccountId = T::AccountId; - type Call = T::Call; - type AdditionalSigned = T::Hash; - type Pre = (); - const IDENTIFIER: &'static str = "CheckEra"; - - fn validate( - &self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> TransactionValidity { - let current_u64 = >::block_number().saturated_into::(); - let valid_till = self.0.death(current_u64); - Ok(ValidTransaction { - longevity: valid_till.saturating_sub(current_u64), - ..Default::default() - }) - } - - fn additional_signed(&self) -> Result { - let current_u64 = >::block_number().saturated_into::(); - let n = self.0.birth(current_u64).saturated_into::(); - if !>::contains_key(n) { - Err(InvalidTransaction::AncientBirthBlock.into()) - } else { - Ok(>::block_hash(n)) - } - } -} - -/// Nonce check and increment to give replay protection for transactions. 
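The mortality check above caps a transaction's longevity at the distance to its era's death block. A small worked example, with the numbers taken from the `signed_ext_check_era_should_change_longevity` test further down (nothing new is assumed):

    // Era::mortal(16, 256), validated at block 17:
    // birth(17) = 16, so `additional_signed` needs block hash 16 to still be available;
    // death(17) = 32, so `validate` returns longevity = 32 - 17 = 15.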
-#[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct CheckGenesis(sp_std::marker::PhantomData); - -impl Debug for CheckGenesis { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "CheckGenesis") - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - -impl CheckGenesis { - /// Creates new `SignedExtension` to check genesis hash. - pub fn new() -> Self { - Self(sp_std::marker::PhantomData) - } -} - -impl SignedExtension for CheckGenesis { - type AccountId = T::AccountId; - type Call = ::Call; - type AdditionalSigned = T::Hash; - type Pre = (); - const IDENTIFIER: &'static str = "CheckGenesis"; - - fn additional_signed(&self) -> Result { - Ok(>::block_hash(T::BlockNumber::zero())) - } -} - -/// Ensure the transaction version registered in the transaction is the same as at present. -#[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct CheckTxVersion(sp_std::marker::PhantomData); - -impl Debug for CheckTxVersion { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "CheckTxVersion") - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - -impl CheckTxVersion { - /// Create new `SignedExtension` to check transaction version. - pub fn new() -> Self { - Self(sp_std::marker::PhantomData) - } -} - -impl SignedExtension for CheckTxVersion { - type AccountId = T::AccountId; - type Call = ::Call; - type AdditionalSigned = u32; - type Pre = (); - const IDENTIFIER: &'static str = "CheckTxVersion"; - - fn additional_signed(&self) -> Result { - Ok(>::runtime_version().transaction_version) - } -} - -/// Ensure the runtime version registered in the transaction is the same as at present. -#[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct CheckSpecVersion(sp_std::marker::PhantomData); - -impl Debug for CheckSpecVersion { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "CheckSpecVersion") - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - -impl CheckSpecVersion { - /// Create new `SignedExtension` to check runtime version. - pub fn new() -> Self { - Self(sp_std::marker::PhantomData) - } -} - -impl SignedExtension for CheckSpecVersion { - type AccountId = T::AccountId; - type Call = ::Call; - type AdditionalSigned = u32; - type Pre = (); - const IDENTIFIER: &'static str = "CheckSpecVersion"; - - fn additional_signed(&self) -> Result { - Ok(>::runtime_version().spec_version) - } -} - pub struct ChainContext(sp_std::marker::PhantomData); impl Default for ChainContext { fn default() -> Self { @@ -1909,886 +1350,3 @@ impl Lookup for ChainContext { ::lookup(s) } } - -#[cfg(test)] -pub(crate) mod tests { - use super::*; - use sp_std::cell::RefCell; - use sp_core::H256; - use sp_runtime::{traits::{BlakeTwo256, IdentityLookup, SignedExtension}, testing::Header, DispatchError}; - use frame_support::{ - impl_outer_origin, parameter_types, assert_ok, assert_noop, - weights::{WithPostDispatchInfo, Pays}, - }; - - impl_outer_origin! { - pub enum Origin for Test where system = super {} - } - - #[derive(Clone, Eq, PartialEq, Debug)] - pub struct Test; - - parameter_types! 
{ - pub const BlockHashCount: u64 = 10; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumExtrinsicWeight: Weight = 768; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); - pub const MaximumBlockLength: u32 = 1024; - pub Version: RuntimeVersion = RuntimeVersion { - spec_name: sp_version::create_runtime_str!("test"), - impl_name: sp_version::create_runtime_str!("system-test"), - authoring_version: 1, - spec_version: 1, - impl_version: 1, - apis: sp_version::create_apis_vec!([]), - transaction_version: 1, - }; - pub const BlockExecutionWeight: Weight = 10; - pub const ExtrinsicBaseWeight: Weight = 5; - pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 10, - write: 100, - }; - } - - thread_local!{ - pub static KILLED: RefCell> = RefCell::new(vec![]); - } - - pub struct RecordKilled; - impl OnKilledAccount for RecordKilled { - fn on_killed_account(who: &u64) { KILLED.with(|r| r.borrow_mut().push(*who)) } - } - - #[derive(Debug, codec::Encode, codec::Decode)] - pub struct Call; - - impl Dispatchable for Call { - type Origin = Origin; - type Trait = (); - type Info = DispatchInfo; - type PostInfo = PostDispatchInfo; - fn dispatch(self, _origin: Self::Origin) - -> sp_runtime::DispatchResultWithInfo { - panic!("Do not use dummy implementation for dispatch."); - } - } - - impl Trait for Test { - type BaseCallFilter = (); - type Origin = Origin; - type Call = Call; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = Event; - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = DbWeight; - type BlockExecutionWeight = BlockExecutionWeight; - type ExtrinsicBaseWeight = ExtrinsicBaseWeight; - type MaximumExtrinsicWeight = MaximumExtrinsicWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; - type Version = Version; - type ModuleToIndex = (); - type AccountData = u32; - type OnNewAccount = (); - type OnKilledAccount = RecordKilled; - } - - type System = Module; - type SysEvent = ::Event; - - const CALL: &::Call = &Call; - - fn new_test_ext() -> sp_io::TestExternalities { - let mut ext: sp_io::TestExternalities = GenesisConfig::default().build_storage::().unwrap().into(); - // Add to each test the initial weight of a block - ext.execute_with(|| System::register_extra_weight_unchecked(::BlockExecutionWeight::get(), DispatchClass::Mandatory)); - ext - } - - fn normal_weight_limit() -> Weight { - ::AvailableBlockRatio::get() * ::MaximumBlockWeight::get() - } - - fn normal_length_limit() -> u32 { - ::AvailableBlockRatio::get() * ::MaximumBlockLength::get() - } - - #[test] - fn origin_works() { - let o = Origin::from(RawOrigin::::Signed(1u64)); - let x: Result, Origin> = o.into(); - assert_eq!(x.unwrap(), RawOrigin::::Signed(1u64)); - } - - #[test] - fn stored_map_works() { - new_test_ext().execute_with(|| { - System::insert(&0, 42); - assert!(System::allow_death(&0)); - - System::inc_ref(&0); - assert!(!System::allow_death(&0)); - - System::insert(&0, 69); - assert!(!System::allow_death(&0)); - - System::dec_ref(&0); - assert!(System::allow_death(&0)); - - assert!(KILLED.with(|r| r.borrow().is_empty())); - System::kill_account(&0); - assert_eq!(KILLED.with(|r| r.borrow().clone()), vec![0u64]); - }); - } - - #[test] - fn deposit_event_should_work() { - new_test_ext().execute_with(|| { - System::initialize( - &1, - &[0u8; 
32].into(), - &[0u8; 32].into(), - &Default::default(), - InitKind::Full, - ); - System::note_finished_extrinsics(); - System::deposit_event(SysEvent::CodeUpdated); - System::finalize(); - assert_eq!( - System::events(), - vec![ - EventRecord { - phase: Phase::Finalization, - event: SysEvent::CodeUpdated, - topics: vec![], - } - ] - ); - - System::initialize( - &2, - &[0u8; 32].into(), - &[0u8; 32].into(), - &Default::default(), - InitKind::Full, - ); - System::deposit_event(SysEvent::NewAccount(32)); - System::note_finished_initialize(); - System::deposit_event(SysEvent::KilledAccount(42)); - System::note_applied_extrinsic(&Ok(().into()), Default::default()); - System::note_applied_extrinsic( - &Err(DispatchError::BadOrigin.into()), - Default::default() - ); - System::note_finished_extrinsics(); - System::deposit_event(SysEvent::NewAccount(3)); - System::finalize(); - assert_eq!( - System::events(), - vec![ - EventRecord { - phase: Phase::Initialization, - event: SysEvent::NewAccount(32), - topics: vec![], - }, - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: SysEvent::KilledAccount(42), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: SysEvent::ExtrinsicSuccess(Default::default()), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(1), - event: SysEvent::ExtrinsicFailed( - DispatchError::BadOrigin.into(), - Default::default() - ), - topics: vec![] - }, - EventRecord { - phase: Phase::Finalization, - event: SysEvent::NewAccount(3), - topics: vec![] - }, - ] - ); - }); - } - - #[test] - fn deposit_event_uses_actual_weight() { - new_test_ext().execute_with(|| { - System::initialize( - &1, - &[0u8; 32].into(), - &[0u8; 32].into(), - &Default::default(), - InitKind::Full, - ); - System::note_finished_initialize(); - - let pre_info = DispatchInfo { - weight: 1000, - .. Default::default() - }; - System::note_applied_extrinsic( - &Ok(Some(300).into()), - pre_info, - ); - System::note_applied_extrinsic( - &Ok(Some(1000).into()), - pre_info, - ); - System::note_applied_extrinsic( - // values over the pre info should be capped at pre dispatch value - &Ok(Some(1200).into()), - pre_info, - ); - System::note_applied_extrinsic( - &Err(DispatchError::BadOrigin.with_weight(999)), - pre_info, - ); - - assert_eq!( - System::events(), - vec![ - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: SysEvent::ExtrinsicSuccess( - DispatchInfo { - weight: 300, - .. Default::default() - }, - ), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(1), - event: SysEvent::ExtrinsicSuccess( - DispatchInfo { - weight: 1000, - .. Default::default() - }, - ), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(2), - event: SysEvent::ExtrinsicSuccess( - DispatchInfo { - weight: 1000, - .. Default::default() - }, - ), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(3), - event: SysEvent::ExtrinsicFailed( - DispatchError::BadOrigin.into(), - DispatchInfo { - weight: 999, - .. Default::default() - }, - ), - topics: vec![] - }, - ] - ); - }); - } - - #[test] - fn deposit_event_topics() { - new_test_ext().execute_with(|| { - const BLOCK_NUMBER: u64 = 1; - - System::initialize( - &BLOCK_NUMBER, - &[0u8; 32].into(), - &[0u8; 32].into(), - &Default::default(), - InitKind::Full, - ); - System::note_finished_extrinsics(); - - let topics = vec![ - H256::repeat_byte(1), - H256::repeat_byte(2), - H256::repeat_byte(3), - ]; - - // We deposit a few events with different sets of topics. 
- System::deposit_event_indexed(&topics[0..3], SysEvent::NewAccount(1)); - System::deposit_event_indexed(&topics[0..1], SysEvent::NewAccount(2)); - System::deposit_event_indexed(&topics[1..2], SysEvent::NewAccount(3)); - - System::finalize(); - - // Check that topics are reflected in the event record. - assert_eq!( - System::events(), - vec![ - EventRecord { - phase: Phase::Finalization, - event: SysEvent::NewAccount(1), - topics: topics[0..3].to_vec(), - }, - EventRecord { - phase: Phase::Finalization, - event: SysEvent::NewAccount(2), - topics: topics[0..1].to_vec(), - }, - EventRecord { - phase: Phase::Finalization, - event: SysEvent::NewAccount(3), - topics: topics[1..2].to_vec(), - } - ] - ); - - // Check that the topic-events mapping reflects the deposited topics. - // Note that these are indexes of the events. - assert_eq!( - System::event_topics(&topics[0]), - vec![(BLOCK_NUMBER, 0), (BLOCK_NUMBER, 1)], - ); - assert_eq!( - System::event_topics(&topics[1]), - vec![(BLOCK_NUMBER, 0), (BLOCK_NUMBER, 2)], - ); - assert_eq!( - System::event_topics(&topics[2]), - vec![(BLOCK_NUMBER, 0)], - ); - }); - } - - #[test] - fn prunes_block_hash_mappings() { - new_test_ext().execute_with(|| { - // simulate import of 15 blocks - for n in 1..=15 { - System::initialize( - &n, - &[n as u8 - 1; 32].into(), - &[0u8; 32].into(), - &Default::default(), - InitKind::Full, - ); - - System::finalize(); - } - - // first 5 block hashes are pruned - for n in 0..5 { - assert_eq!( - System::block_hash(n), - H256::zero(), - ); - } - - // the remaining 10 are kept - for n in 5..15 { - assert_eq!( - System::block_hash(n), - [n as u8; 32].into(), - ); - } - }) - } - - #[test] - fn signed_ext_check_nonce_works() { - new_test_ext().execute_with(|| { - Account::::insert(1, AccountInfo { nonce: 1, refcount: 0, data: 0 }); - let info = DispatchInfo::default(); - let len = 0_usize; - // stale - assert!(CheckNonce::(0).validate(&1, CALL, &info, len).is_err()); - assert!(CheckNonce::(0).pre_dispatch(&1, CALL, &info, len).is_err()); - // correct - assert!(CheckNonce::(1).validate(&1, CALL, &info, len).is_ok()); - assert!(CheckNonce::(1).pre_dispatch(&1, CALL, &info, len).is_ok()); - // future - assert!(CheckNonce::(5).validate(&1, CALL, &info, len).is_ok()); - assert!(CheckNonce::(5).pre_dispatch(&1, CALL, &info, len).is_err()); - }) - } - - #[test] - fn signed_ext_check_weight_works_normal_tx() { - new_test_ext().execute_with(|| { - let normal_limit = normal_weight_limit(); - let small = DispatchInfo { weight: 100, ..Default::default() }; - let medium = DispatchInfo { - weight: normal_limit - ::ExtrinsicBaseWeight::get(), - ..Default::default() - }; - let big = DispatchInfo { - weight: normal_limit - ::ExtrinsicBaseWeight::get() + 1, - ..Default::default() - }; - let len = 0_usize; - - let reset_check_weight = |i, f, s| { - BlockWeight::mutate(|current_weight| { - current_weight.put(s, DispatchClass::Normal) - }); - let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, i, len); - if f { assert!(r.is_err()) } else { assert!(r.is_ok()) } - }; - - reset_check_weight(&small, false, 0); - reset_check_weight(&medium, false, 0); - reset_check_weight(&big, true, 1); - }) - } - - #[test] - fn signed_ext_check_weight_refund_works() { - new_test_ext().execute_with(|| { - // This is half of the max block weight - let info = DispatchInfo { weight: 512, ..Default::default() }; - let post_info = PostDispatchInfo { actual_weight: Some(128), }; - let len = 0_usize; - - // We allow 75% for normal transaction, so we put 25% - extrinsic 
base weight - BlockWeight::mutate(|current_weight| { - current_weight.put(256 - ::ExtrinsicBaseWeight::get(), DispatchClass::Normal) - }); - - let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); - assert_eq!(BlockWeight::get().total(), info.weight + 256); - - assert!( - CheckWeight::::post_dispatch(pre, &info, &post_info, len, &Ok(())) - .is_ok() - ); - assert_eq!( - BlockWeight::get().total(), - post_info.actual_weight.unwrap() + 256, - ); - }) - } - - #[test] - fn signed_ext_check_weight_actual_weight_higher_than_max_is_capped() { - new_test_ext().execute_with(|| { - let info = DispatchInfo { weight: 512, ..Default::default() }; - let post_info = PostDispatchInfo { actual_weight: Some(700), }; - let len = 0_usize; - - BlockWeight::mutate(|current_weight| { - current_weight.put(128, DispatchClass::Normal) - }); - - let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); - assert_eq!( - BlockWeight::get().total(), - info.weight + 128 + ::ExtrinsicBaseWeight::get(), - ); - - assert!( - CheckWeight::::post_dispatch(pre, &info, &post_info, len, &Ok(())) - .is_ok() - ); - assert_eq!( - BlockWeight::get().total(), - info.weight + 128 + ::ExtrinsicBaseWeight::get(), - ); - }) - } - - #[test] - fn zero_weight_extrinsic_still_has_base_weight() { - new_test_ext().execute_with(|| { - let free = DispatchInfo { weight: 0, ..Default::default() }; - let len = 0_usize; - - // Initial weight from `BlockExecutionWeight` - assert_eq!(System::block_weight().total(), ::BlockExecutionWeight::get()); - let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &free, len); - assert!(r.is_ok()); - assert_eq!( - System::block_weight().total(), - ::ExtrinsicBaseWeight::get() + ::BlockExecutionWeight::get() - ); - }) - } - - #[test] - fn mandatory_extrinsic_doesnt_care_about_limits() { - fn check(call: impl FnOnce(&DispatchInfo, usize)) { - new_test_ext().execute_with(|| { - let max = DispatchInfo { - weight: Weight::max_value(), - class: DispatchClass::Mandatory, - ..Default::default() - }; - let len = 0_usize; - - call(&max, len); - }); - } - - check(|max, len| { - assert_ok!(CheckWeight::::do_pre_dispatch(max, len)); - assert_eq!(System::block_weight().total(), Weight::max_value()); - assert!(System::block_weight().total() > ::MaximumBlockWeight::get()); - }); - check(|max, len| { - assert_ok!(CheckWeight::::do_validate(max, len)); - }); - } - - #[test] - fn normal_extrinsic_limited_by_maximum_extrinsic_weight() { - new_test_ext().execute_with(|| { - let max = DispatchInfo { - weight: MaximumExtrinsicWeight::get() + 1, - class: DispatchClass::Normal, - ..Default::default() - }; - let len = 0_usize; - - assert_noop!( - CheckWeight::::do_validate(&max, len), - InvalidTransaction::ExhaustsResources - ); - }); - } - - #[test] - fn operational_extrinsic_limited_by_operational_space_limit() { - new_test_ext().execute_with(|| { - let operational_limit = CheckWeight::::get_dispatch_limit_ratio( - DispatchClass::Operational - ) * ::MaximumBlockWeight::get(); - let base_weight = ::ExtrinsicBaseWeight::get(); - let block_base = ::BlockExecutionWeight::get(); - - let weight = operational_limit - base_weight - block_base; - let okay = DispatchInfo { - weight, - class: DispatchClass::Operational, - ..Default::default() - }; - let max = DispatchInfo { - weight: weight + 1, - class: DispatchClass::Operational, - ..Default::default() - }; - let len = 0_usize; - - assert_eq!( - CheckWeight::::do_validate(&okay, len), - Ok(ValidTransaction { - priority: 
CheckWeight::::get_priority(&okay), - ..Default::default() - }) - ); - assert_noop!( - CheckWeight::::do_validate(&max, len), - InvalidTransaction::ExhaustsResources - ); - }); - } - - #[test] - fn register_extra_weight_unchecked_doesnt_care_about_limits() { - new_test_ext().execute_with(|| { - System::register_extra_weight_unchecked(Weight::max_value(), DispatchClass::Normal); - assert_eq!(System::block_weight().total(), Weight::max_value()); - assert!(System::block_weight().total() > ::MaximumBlockWeight::get()); - }); - } - - #[test] - fn full_block_with_normal_and_operational() { - new_test_ext().execute_with(|| { - // Max block is 1024 - // Max normal is 768 (75%) - // 10 is taken for block execution weight - // So normal extrinsic can be 758 weight (-5 for base extrinsic weight) - // And Operational can be 256 to produce a full block (-5 for base) - let max_normal = DispatchInfo { weight: 753, ..Default::default() }; - let rest_operational = DispatchInfo { weight: 251, class: DispatchClass::Operational, ..Default::default() }; - - let len = 0_usize; - - assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); - assert_eq!(System::block_weight().total(), 768); - assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); - assert_eq!(::MaximumBlockWeight::get(), 1024); - assert_eq!(System::block_weight().total(), ::MaximumBlockWeight::get()); - // Checking single extrinsic should not take current block weight into account. - assert_eq!(CheckWeight::::check_extrinsic_weight(&rest_operational), Ok(())); - }); - } - - #[test] - fn dispatch_order_does_not_effect_weight_logic() { - new_test_ext().execute_with(|| { - // We switch the order of `full_block_with_normal_and_operational` - let max_normal = DispatchInfo { weight: 753, ..Default::default() }; - let rest_operational = DispatchInfo { weight: 251, class: DispatchClass::Operational, ..Default::default() }; - - let len = 0_usize; - - assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); - // Extra 15 here from block execution + base extrinsic weight - assert_eq!(System::block_weight().total(), 266); - assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); - assert_eq!(::MaximumBlockWeight::get(), 1024); - assert_eq!(System::block_weight().total(), ::MaximumBlockWeight::get()); - }); - } - - #[test] - fn operational_works_on_full_block() { - new_test_ext().execute_with(|| { - // An on_initialize takes up the whole block! (Every time!) - System::register_extra_weight_unchecked(Weight::max_value(), DispatchClass::Mandatory); - let dispatch_normal = DispatchInfo { weight: 251, class: DispatchClass::Normal, ..Default::default() }; - let dispatch_operational = DispatchInfo { weight: 251, class: DispatchClass::Operational, ..Default::default() }; - let len = 0_usize; - - assert_noop!(CheckWeight::::do_pre_dispatch(&dispatch_normal, len), InvalidTransaction::ExhaustsResources); - // Thank goodness we can still do an operational transaction to possibly save the blockchain. - assert_ok!(CheckWeight::::do_pre_dispatch(&dispatch_operational, len)); - // Not too much though - assert_noop!(CheckWeight::::do_pre_dispatch(&dispatch_operational, len), InvalidTransaction::ExhaustsResources); - // Even with full block, validity of single transaction should be correct. 
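The first operational dispatch above succeeds even though `on_initialize` has already filled the block because of the `check_block_weight` rule quoted earlier: an operational extrinsic is accepted if either the whole block still fits, or the operational class alone remains within its reserved space. With the mock constants (a reader's aid, not patch code):

    // operational space           = 1024 - 768 = 256
    // first operational dispatch  = 251 + 5    = 256  -> within the space, accepted
    // second operational dispatch = 256 + 256  = 512  -> exceeds 256, ExhaustsResources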
- assert_eq!(CheckWeight::::check_extrinsic_weight(&dispatch_operational), Ok(())); - }); - } - - #[test] - fn signed_ext_check_weight_works_operational_tx() { - new_test_ext().execute_with(|| { - let normal = DispatchInfo { weight: 100, ..Default::default() }; - let op = DispatchInfo { weight: 100, class: DispatchClass::Operational, pays_fee: Pays::Yes }; - let len = 0_usize; - let normal_limit = normal_weight_limit(); - - // given almost full block - BlockWeight::mutate(|current_weight| { - current_weight.put(normal_limit, DispatchClass::Normal) - }); - // will not fit. - assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len).is_err()); - // will fit. - assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &op, len).is_ok()); - - // likewise for length limit. - let len = 100_usize; - AllExtrinsicsLen::put(normal_length_limit()); - assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len).is_err()); - assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &op, len).is_ok()); - }) - } - - #[test] - fn signed_ext() { - new_test_ext().execute_with(|| { - let normal = DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes }; - let op = DispatchInfo { weight: 100, class: DispatchClass::Operational, pays_fee: Pays::Yes }; - let len = 0_usize; - - let priority = CheckWeight::(PhantomData) - .validate(&1, CALL, &normal, len) - .unwrap() - .priority; - assert_eq!(priority, 100); - - let priority = CheckWeight::(PhantomData) - .validate(&1, CALL, &op, len) - .unwrap() - .priority; - assert_eq!(priority, u64::max_value() / 2); - }) - } - - #[test] - fn signed_ext_check_weight_block_size_works() { - new_test_ext().execute_with(|| { - let normal = DispatchInfo::default(); - let normal_limit = normal_weight_limit() as usize; - let reset_check_weight = |tx, s, f| { - AllExtrinsicsLen::put(0); - let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, tx, s); - if f { assert!(r.is_err()) } else { assert!(r.is_ok()) } - }; - - reset_check_weight(&normal, normal_limit - 1, false); - reset_check_weight(&normal, normal_limit, false); - reset_check_weight(&normal, normal_limit + 1, true); - - // Operational ones don't have this limit. 
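The length checks mirror the weight checks: with the mock's `MaximumBlockLength = 1024` and `AvailableBlockRatio = 75%`, normal extrinsics may occupy at most 768 bytes of the block in total, while operational (and mandatory) ones may use the full 1024 bytes, which is why a length of 1024 passes and 1025 fails just below. As a reader's aid only:

    // normal length limit      = 75% of 1024 bytes  = 768
    // operational length limit = 100% of 1024 bytes = 1024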
- let op = DispatchInfo { weight: 0, class: DispatchClass::Operational, pays_fee: Pays::Yes }; - reset_check_weight(&op, normal_limit, false); - reset_check_weight(&op, normal_limit + 100, false); - reset_check_weight(&op, 1024, false); - reset_check_weight(&op, 1025, true); - }) - } - - #[test] - fn signed_ext_check_era_should_work() { - new_test_ext().execute_with(|| { - // future - assert_eq!( - CheckEra::::from(Era::mortal(4, 2)).additional_signed().err().unwrap(), - InvalidTransaction::AncientBirthBlock.into(), - ); - - // correct - System::set_block_number(13); - >::insert(12, H256::repeat_byte(1)); - assert!(CheckEra::::from(Era::mortal(4, 12)).additional_signed().is_ok()); - }) - } - - #[test] - fn signed_ext_check_era_should_change_longevity() { - new_test_ext().execute_with(|| { - let normal = DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes }; - let len = 0_usize; - let ext = ( - CheckWeight::(PhantomData), - CheckEra::::from(Era::mortal(16, 256)), - ); - System::set_block_number(17); - >::insert(16, H256::repeat_byte(1)); - - assert_eq!(ext.validate(&1, CALL, &normal, len).unwrap().longevity, 15); - }) - } - - - #[test] - fn set_code_checks_works() { - struct CallInWasm(Vec); - - impl sp_core::traits::CallInWasm for CallInWasm { - fn call_in_wasm( - &self, - _: &[u8], - _: Option>, - _: &str, - _: &[u8], - _: &mut dyn sp_externalities::Externalities, - _: sp_core::traits::MissingHostFunctions, - ) -> Result, String> { - Ok(self.0.clone()) - } - } - - let test_data = vec![ - ("test", 1, 2, Err(Error::::SpecVersionNeedsToIncrease)), - ("test", 1, 1, Err(Error::::SpecVersionNeedsToIncrease)), - ("test2", 1, 1, Err(Error::::InvalidSpecName)), - ("test", 2, 1, Ok(())), - ("test", 0, 1, Err(Error::::SpecVersionNeedsToIncrease)), - ("test", 1, 0, Err(Error::::SpecVersionNeedsToIncrease)), - ]; - - for (spec_name, spec_version, impl_version, expected) in test_data.into_iter() { - let version = RuntimeVersion { - spec_name: spec_name.into(), - spec_version, - impl_version, - ..Default::default() - }; - let call_in_wasm = CallInWasm(version.encode()); - - let mut ext = new_test_ext(); - ext.register_extension(sp_core::traits::CallInWasmExt::new(call_in_wasm)); - ext.execute_with(|| { - let res = System::set_code( - RawOrigin::Root.into(), - vec![1, 2, 3, 4], - ); - - assert_eq!(expected.map_err(DispatchError::from), res); - }); - } - } - - #[test] - fn set_code_with_real_wasm_blob() { - let executor = substrate_test_runtime_client::new_native_executor(); - let mut ext = new_test_ext(); - ext.register_extension(sp_core::traits::CallInWasmExt::new(executor)); - ext.execute_with(|| { - System::set_block_number(1); - System::set_code( - RawOrigin::Root.into(), - substrate_test_runtime_client::runtime::WASM_BINARY.to_vec(), - ).unwrap(); - - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: SysEvent::CodeUpdated, - topics: vec![], - }], - ); - }); - } - - #[test] - fn runtime_upgraded_with_set_storage() { - let executor = substrate_test_runtime_client::new_native_executor(); - let mut ext = new_test_ext(); - ext.register_extension(sp_core::traits::CallInWasmExt::new(executor)); - ext.execute_with(|| { - System::set_storage( - RawOrigin::Root.into(), - vec![( - well_known_keys::CODE.to_vec(), - substrate_test_runtime_client::runtime::WASM_BINARY.to_vec() - )], - ).unwrap(); - }); - } - - #[test] - fn events_not_emitted_during_genesis() { - new_test_ext().execute_with(|| { - // Block Number is zero at genesis - 
assert!(System::block_number().is_zero()); - System::on_created_account(Default::default()); - assert!(System::events().is_empty()); - // Events will be emitted starting on block 1 - System::set_block_number(1); - System::on_created_account(Default::default()); - assert!(System::events().len() == 1); - }); - } - - #[test] - fn ensure_one_of_works() { - fn ensure_root_or_signed(o: RawOrigin) -> Result, Origin> { - EnsureOneOf::, EnsureSigned>::try_origin(o.into()) - } - - assert_eq!(ensure_root_or_signed(RawOrigin::Root).unwrap(), Either::Left(())); - assert_eq!(ensure_root_or_signed(RawOrigin::Signed(0)).unwrap(), Either::Right(0)); - assert!(ensure_root_or_signed(RawOrigin::None).is_err()) - } -} diff --git a/frame/system/src/mock.rs b/frame/system/src/mock.rs new file mode 100644 index 0000000000..0484b34ba3 --- /dev/null +++ b/frame/system/src/mock.rs @@ -0,0 +1,124 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; +use sp_std::cell::RefCell; +use sp_core::H256; +use sp_runtime::{ + traits::{BlakeTwo256, IdentityLookup}, + testing::Header, +}; +use frame_support::{ + impl_outer_origin, parameter_types, + weights::PostDispatchInfo, +}; + +impl_outer_origin! { + pub enum Origin for Test where system = super {} +} + +#[derive(Clone, Eq, PartialEq, Debug, Default)] +pub struct Test; + +parameter_types! 
{ + pub const BlockHashCount: u64 = 10; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumExtrinsicWeight: Weight = 768; + pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); + pub const MaximumBlockLength: u32 = 1024; + pub Version: RuntimeVersion = RuntimeVersion { + spec_name: sp_version::create_runtime_str!("test"), + impl_name: sp_version::create_runtime_str!("system-test"), + authoring_version: 1, + spec_version: 1, + impl_version: 1, + apis: sp_version::create_apis_vec!([]), + transaction_version: 1, + }; + pub const BlockExecutionWeight: Weight = 10; + pub const ExtrinsicBaseWeight: Weight = 5; + pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 10, + write: 100, + }; +} + +thread_local!{ + pub static KILLED: RefCell> = RefCell::new(vec![]); +} + +pub struct RecordKilled; +impl OnKilledAccount for RecordKilled { + fn on_killed_account(who: &u64) { KILLED.with(|r| r.borrow_mut().push(*who)) } +} + +#[derive(Debug, codec::Encode, codec::Decode)] +pub struct Call; + +impl Dispatchable for Call { + type Origin = Origin; + type Trait = (); + type Info = DispatchInfo; + type PostInfo = PostDispatchInfo; + fn dispatch(self, _origin: Self::Origin) + -> sp_runtime::DispatchResultWithInfo { + panic!("Do not use dummy implementation for dispatch."); + } +} + +impl Trait for Test { + type BaseCallFilter = (); + type Origin = Origin; + type Call = Call; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = DbWeight; + type BlockExecutionWeight = BlockExecutionWeight; + type ExtrinsicBaseWeight = ExtrinsicBaseWeight; + type MaximumExtrinsicWeight = MaximumExtrinsicWeight; + type AvailableBlockRatio = AvailableBlockRatio; + type MaximumBlockLength = MaximumBlockLength; + type Version = Version; + type ModuleToIndex = (); + type AccountData = u32; + type OnNewAccount = (); + type OnKilledAccount = RecordKilled; +} + +pub type System = Module; +pub type SysEvent = ::Event; + +pub const CALL: &::Call = &Call; + +/// Create new externalities for `System` module tests. +pub fn new_test_ext() -> sp_io::TestExternalities { + let mut ext: sp_io::TestExternalities = GenesisConfig::default().build_storage::().unwrap().into(); + // Add to each test the initial weight of a block + ext.execute_with(|| System::register_extra_weight_unchecked( + ::BlockExecutionWeight::get(), + DispatchClass::Mandatory + )); + ext +} diff --git a/frame/system/src/offchain.rs b/frame/system/src/offchain.rs index 42699362a3..1290ca6378 100644 --- a/frame/system/src/offchain.rs +++ b/frame/system/src/offchain.rs @@ -638,7 +638,7 @@ pub trait SignedPayload: Encode { mod tests { use super::*; use codec::Decode; - use crate::tests::{Test as TestRuntime, Call}; + use crate::mock::{Test as TestRuntime, Call}; use sp_core::offchain::{testing, TransactionPoolExt}; use sp_runtime::testing::{UintAuthorityId, TestSignature, TestXt}; diff --git a/frame/system/src/tests.rs b/frame/system/src/tests.rs new file mode 100644 index 0000000000..2f93dc858f --- /dev/null +++ b/frame/system/src/tests.rs @@ -0,0 +1,424 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; +use mock::{*, Origin}; +use sp_core::H256; +use sp_runtime::DispatchError; +use frame_support::weights::WithPostDispatchInfo; + +#[test] +fn origin_works() { + let o = Origin::from(RawOrigin::::Signed(1u64)); + let x: Result, Origin> = o.into(); + assert_eq!(x.unwrap(), RawOrigin::::Signed(1u64)); +} + +#[test] +fn stored_map_works() { + new_test_ext().execute_with(|| { + System::insert(&0, 42); + assert!(System::allow_death(&0)); + + System::inc_ref(&0); + assert!(!System::allow_death(&0)); + + System::insert(&0, 69); + assert!(!System::allow_death(&0)); + + System::dec_ref(&0); + assert!(System::allow_death(&0)); + + assert!(KILLED.with(|r| r.borrow().is_empty())); + System::kill_account(&0); + assert_eq!(KILLED.with(|r| r.borrow().clone()), vec![0u64]); + }); +} + +#[test] +fn deposit_event_should_work() { + new_test_ext().execute_with(|| { + System::initialize( + &1, + &[0u8; 32].into(), + &[0u8; 32].into(), + &Default::default(), + InitKind::Full, + ); + System::note_finished_extrinsics(); + System::deposit_event(SysEvent::CodeUpdated); + System::finalize(); + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Finalization, + event: SysEvent::CodeUpdated, + topics: vec![], + } + ] + ); + + System::initialize( + &2, + &[0u8; 32].into(), + &[0u8; 32].into(), + &Default::default(), + InitKind::Full, + ); + System::deposit_event(SysEvent::NewAccount(32)); + System::note_finished_initialize(); + System::deposit_event(SysEvent::KilledAccount(42)); + System::note_applied_extrinsic(&Ok(().into()), Default::default()); + System::note_applied_extrinsic( + &Err(DispatchError::BadOrigin.into()), + Default::default() + ); + System::note_finished_extrinsics(); + System::deposit_event(SysEvent::NewAccount(3)); + System::finalize(); + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Initialization, + event: SysEvent::NewAccount(32), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: SysEvent::KilledAccount(42), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: SysEvent::ExtrinsicSuccess(Default::default()), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(1), + event: SysEvent::ExtrinsicFailed( + DispatchError::BadOrigin.into(), + Default::default() + ), + topics: vec![] + }, + EventRecord { + phase: Phase::Finalization, + event: SysEvent::NewAccount(3), + topics: vec![] + }, + ] + ); + }); +} + +#[test] +fn deposit_event_uses_actual_weight() { + new_test_ext().execute_with(|| { + System::initialize( + &1, + &[0u8; 32].into(), + &[0u8; 32].into(), + &Default::default(), + InitKind::Full, + ); + System::note_finished_initialize(); + + let pre_info = DispatchInfo { + weight: 1000, + .. 
Default::default() + }; + System::note_applied_extrinsic( + &Ok(Some(300).into()), + pre_info, + ); + System::note_applied_extrinsic( + &Ok(Some(1000).into()), + pre_info, + ); + System::note_applied_extrinsic( + // values over the pre info should be capped at pre dispatch value + &Ok(Some(1200).into()), + pre_info, + ); + System::note_applied_extrinsic( + &Err(DispatchError::BadOrigin.with_weight(999)), + pre_info, + ); + + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: SysEvent::ExtrinsicSuccess( + DispatchInfo { + weight: 300, + .. Default::default() + }, + ), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(1), + event: SysEvent::ExtrinsicSuccess( + DispatchInfo { + weight: 1000, + .. Default::default() + }, + ), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(2), + event: SysEvent::ExtrinsicSuccess( + DispatchInfo { + weight: 1000, + .. Default::default() + }, + ), + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(3), + event: SysEvent::ExtrinsicFailed( + DispatchError::BadOrigin.into(), + DispatchInfo { + weight: 999, + .. Default::default() + }, + ), + topics: vec![] + }, + ] + ); + }); +} + +#[test] +fn deposit_event_topics() { + new_test_ext().execute_with(|| { + const BLOCK_NUMBER: u64 = 1; + + System::initialize( + &BLOCK_NUMBER, + &[0u8; 32].into(), + &[0u8; 32].into(), + &Default::default(), + InitKind::Full, + ); + System::note_finished_extrinsics(); + + let topics = vec![ + H256::repeat_byte(1), + H256::repeat_byte(2), + H256::repeat_byte(3), + ]; + + // We deposit a few events with different sets of topics. + System::deposit_event_indexed(&topics[0..3], SysEvent::NewAccount(1)); + System::deposit_event_indexed(&topics[0..1], SysEvent::NewAccount(2)); + System::deposit_event_indexed(&topics[1..2], SysEvent::NewAccount(3)); + + System::finalize(); + + // Check that topics are reflected in the event record. + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Finalization, + event: SysEvent::NewAccount(1), + topics: topics[0..3].to_vec(), + }, + EventRecord { + phase: Phase::Finalization, + event: SysEvent::NewAccount(2), + topics: topics[0..1].to_vec(), + }, + EventRecord { + phase: Phase::Finalization, + event: SysEvent::NewAccount(3), + topics: topics[1..2].to_vec(), + } + ] + ); + + // Check that the topic-events mapping reflects the deposited topics. + // Note that these are indexes of the events. 
+ assert_eq!( + System::event_topics(&topics[0]), + vec![(BLOCK_NUMBER, 0), (BLOCK_NUMBER, 1)], + ); + assert_eq!( + System::event_topics(&topics[1]), + vec![(BLOCK_NUMBER, 0), (BLOCK_NUMBER, 2)], + ); + assert_eq!( + System::event_topics(&topics[2]), + vec![(BLOCK_NUMBER, 0)], + ); + }); +} + +#[test] +fn prunes_block_hash_mappings() { + new_test_ext().execute_with(|| { + // simulate import of 15 blocks + for n in 1..=15 { + System::initialize( + &n, + &[n as u8 - 1; 32].into(), + &[0u8; 32].into(), + &Default::default(), + InitKind::Full, + ); + + System::finalize(); + } + + // first 5 block hashes are pruned + for n in 0..5 { + assert_eq!( + System::block_hash(n), + H256::zero(), + ); + } + + // the remaining 10 are kept + for n in 5..15 { + assert_eq!( + System::block_hash(n), + [n as u8; 32].into(), + ); + } + }) +} + +#[test] +fn set_code_checks_works() { + struct CallInWasm(Vec); + + impl sp_core::traits::CallInWasm for CallInWasm { + fn call_in_wasm( + &self, + _: &[u8], + _: Option>, + _: &str, + _: &[u8], + _: &mut dyn sp_externalities::Externalities, + _: sp_core::traits::MissingHostFunctions, + ) -> Result, String> { + Ok(self.0.clone()) + } + } + + let test_data = vec![ + ("test", 1, 2, Err(Error::::SpecVersionNeedsToIncrease)), + ("test", 1, 1, Err(Error::::SpecVersionNeedsToIncrease)), + ("test2", 1, 1, Err(Error::::InvalidSpecName)), + ("test", 2, 1, Ok(())), + ("test", 0, 1, Err(Error::::SpecVersionNeedsToIncrease)), + ("test", 1, 0, Err(Error::::SpecVersionNeedsToIncrease)), + ]; + + for (spec_name, spec_version, impl_version, expected) in test_data.into_iter() { + let version = RuntimeVersion { + spec_name: spec_name.into(), + spec_version, + impl_version, + ..Default::default() + }; + let call_in_wasm = CallInWasm(version.encode()); + + let mut ext = new_test_ext(); + ext.register_extension(sp_core::traits::CallInWasmExt::new(call_in_wasm)); + ext.execute_with(|| { + let res = System::set_code( + RawOrigin::Root.into(), + vec![1, 2, 3, 4], + ); + + assert_eq!(expected.map_err(DispatchError::from), res); + }); + } +} + +#[test] +fn set_code_with_real_wasm_blob() { + let executor = substrate_test_runtime_client::new_native_executor(); + let mut ext = new_test_ext(); + ext.register_extension(sp_core::traits::CallInWasmExt::new(executor)); + ext.execute_with(|| { + System::set_block_number(1); + System::set_code( + RawOrigin::Root.into(), + substrate_test_runtime_client::runtime::WASM_BINARY.to_vec(), + ).unwrap(); + + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: SysEvent::CodeUpdated, + topics: vec![], + }], + ); + }); +} + +#[test] +fn runtime_upgraded_with_set_storage() { + let executor = substrate_test_runtime_client::new_native_executor(); + let mut ext = new_test_ext(); + ext.register_extension(sp_core::traits::CallInWasmExt::new(executor)); + ext.execute_with(|| { + System::set_storage( + RawOrigin::Root.into(), + vec![( + well_known_keys::CODE.to_vec(), + substrate_test_runtime_client::runtime::WASM_BINARY.to_vec() + )], + ).unwrap(); + }); +} + +#[test] +fn events_not_emitted_during_genesis() { + new_test_ext().execute_with(|| { + // Block Number is zero at genesis + assert!(System::block_number().is_zero()); + System::on_created_account(Default::default()); + assert!(System::events().is_empty()); + // Events will be emitted starting on block 1 + System::set_block_number(1); + System::on_created_account(Default::default()); + assert!(System::events().len() == 1); + }); +} + +#[test] +fn ensure_one_of_works() { + fn 
ensure_root_or_signed(o: RawOrigin) -> Result, Origin> { + EnsureOneOf::, EnsureSigned>::try_origin(o.into()) + } + + assert_eq!(ensure_root_or_signed(RawOrigin::Root).unwrap(), Either::Left(())); + assert_eq!(ensure_root_or_signed(RawOrigin::Signed(0)).unwrap(), Either::Right(0)); + assert!(ensure_root_or_signed(RawOrigin::None).is_err()) +} diff --git a/frame/system/src/weights.rs b/frame/system/src/weights.rs new file mode 100644 index 0000000000..93295093c4 --- /dev/null +++ b/frame/system/src/weights.rs @@ -0,0 +1,76 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use codec::{Encode, Decode}; +use frame_support::weights::{Weight, DispatchClass}; +use sp_runtime::RuntimeDebug; + +/// An object to track the currently used extrinsic weight in a block. +#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode)] +pub struct ExtrinsicsWeight { + normal: Weight, + operational: Weight, +} + +impl ExtrinsicsWeight { + /// Returns the total weight consumed by all extrinsics in the block. + pub fn total(&self) -> Weight { + self.normal.saturating_add(self.operational) + } + + /// Add some weight of a specific dispatch class, saturating at the numeric bounds of `Weight`. + pub fn add(&mut self, weight: Weight, class: DispatchClass) { + let value = self.get_mut(class); + *value = value.saturating_add(weight); + } + + /// Try to add some weight of a specific dispatch class, returning Err(()) if overflow would + /// occur. + pub fn checked_add(&mut self, weight: Weight, class: DispatchClass) -> Result<(), ()> { + let value = self.get_mut(class); + *value = value.checked_add(weight).ok_or(())?; + Ok(()) + } + + /// Subtract some weight of a specific dispatch class, saturating at the numeric bounds of + /// `Weight`. + pub fn sub(&mut self, weight: Weight, class: DispatchClass) { + let value = self.get_mut(class); + *value = value.saturating_sub(weight); + } + + /// Get the current weight of a specific dispatch class. + pub fn get(&self, class: DispatchClass) -> Weight { + match class { + DispatchClass::Operational => self.operational, + DispatchClass::Normal | DispatchClass::Mandatory => self.normal, + } + } + + /// Get a mutable reference to the current weight of a specific dispatch class. + fn get_mut(&mut self, class: DispatchClass) -> &mut Weight { + match class { + DispatchClass::Operational => &mut self.operational, + DispatchClass::Normal | DispatchClass::Mandatory => &mut self.normal, + } + } + + /// Set the weight of a specific dispatch class. 
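The `ExtrinsicsWeight` tracker above folds `Mandatory` weight into the `Normal` bucket, saturates in `add`/`sub`, and surfaces overflow through `checked_add`. A sketch of a unit test that could sit at the bottom of this file, relying only on the `Weight` and `DispatchClass` imports already present (illustrative, not part of the patch):

```rust
// Illustrative unit test for `ExtrinsicsWeight` (a sketch, not part of the
// patch); it relies only on the `Weight`/`DispatchClass` imports above.
#[cfg(test)]
mod extrinsics_weight_sketch {
    use super::*;

    #[test]
    fn tracks_weight_per_dispatch_class() {
        let mut used = ExtrinsicsWeight::default();

        used.add(100, DispatchClass::Normal);
        used.add(50, DispatchClass::Mandatory); // shares the `normal` bucket
        used.add(25, DispatchClass::Operational);

        assert_eq!(used.get(DispatchClass::Normal), 150);
        assert_eq!(used.get(DispatchClass::Mandatory), 150);
        assert_eq!(used.get(DispatchClass::Operational), 25);
        assert_eq!(used.total(), 175);

        // `add` saturates; `checked_add` reports the overflow instead.
        used.add(Weight::max_value(), DispatchClass::Normal);
        assert_eq!(used.get(DispatchClass::Normal), Weight::max_value());
        assert!(used.checked_add(Weight::max_value(), DispatchClass::Operational).is_err());
    }
}
```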
+ pub fn put(&mut self, new: Weight, class: DispatchClass) { + *self.get_mut(class) = new; + } +} -- GitLab From e8378e84ab6355ffe3792b41b9de4481db9353f6 Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Wed, 24 Jun 2020 17:24:05 +0200 Subject: [PATCH 068/144] Allow where clause in benchmarking (#6461) * WIP * handle where clause in benchmarking * doc * maybe better syntax * line width --- frame/benchmarking/src/lib.rs | 351 ++++++++++++++++++++------------ frame/benchmarking/src/tests.rs | 45 ++-- 2 files changed, 253 insertions(+), 143 deletions(-) diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index 47e83cffbc..7cbac3397a 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -85,6 +85,8 @@ pub use paste; /// Example: /// ```ignore /// benchmarks! { +/// where_clause { where T::A: From } // Optional line to give additional bound on `T`. +/// /// // common parameter; just one for this example. /// // will be `1`, `MAX_LENGTH` or any value inbetween /// _ { @@ -173,6 +175,7 @@ pub use paste; #[macro_export] macro_rules! benchmarks { ( + $( where_clause { where $( $where_ty:ty: $where_bound:path ),* $(,)? } )? _ { $( let $common:ident in $common_from:tt .. $common_to:expr => $common_instancer:expr; @@ -182,6 +185,7 @@ macro_rules! benchmarks { ) => { $crate::benchmarks_iter!( NO_INSTANCE + { $( $( $where_ty: $where_bound ),* )? } { $( { $common , $common_from , $common_to , $common_instancer } )* } ( ) $( $rest )* @@ -189,9 +193,11 @@ macro_rules! benchmarks { } } +/// Same as [`benchmarks`] but for instantiable module. #[macro_export] macro_rules! benchmarks_instance { ( + $( where_clause { where $( $where_ty:ty: $where_bound:path ),* $(,)? } )? _ { $( let $common:ident in $common_from:tt .. $common_to:expr => $common_instancer:expr; @@ -201,6 +207,7 @@ macro_rules! benchmarks_instance { ) => { $crate::benchmarks_iter!( INSTANCE + { $( $( $where_ty: $where_bound ),* )? } { $( { $common , $common_from , $common_to , $common_instancer } )* } ( ) $( $rest )* @@ -209,11 +216,12 @@ macro_rules! benchmarks_instance { } #[macro_export] -#[allow(missing_docs)] +#[doc(hidden)] macro_rules! benchmarks_iter { // mutation arm: ( $instance:ident + { $( $where_clause:tt )* } { $( $common:tt )* } ( $( $names:ident )* ) $name:ident { $( $code:tt )* }: _ ( $origin:expr $( , $arg:expr )* ) @@ -222,6 +230,7 @@ macro_rules! benchmarks_iter { ) => { $crate::benchmarks_iter! { $instance + { $( $where_clause )* } { $( $common )* } ( $( $names )* ) $name { $( $code )* }: $name ( $origin $( , $arg )* ) @@ -232,6 +241,7 @@ macro_rules! benchmarks_iter { // no instance mutation arm: ( NO_INSTANCE + { $( $where_clause:tt )* } { $( $common:tt )* } ( $( $names:ident )* ) $name:ident { $( $code:tt )* }: $dispatch:ident ( $origin:expr $( , $arg:expr )* ) @@ -240,6 +250,7 @@ macro_rules! benchmarks_iter { ) => { $crate::benchmarks_iter! { NO_INSTANCE + { $( $where_clause )* } { $( $common )* } ( $( $names )* ) $name { $( $code )* }: { @@ -254,6 +265,7 @@ macro_rules! benchmarks_iter { // instance mutation arm: ( INSTANCE + { $( $where_clause:tt )* } { $( $common:tt )* } ( $( $names:ident )* ) $name:ident { $( $code:tt )* }: $dispatch:ident ( $origin:expr $( , $arg:expr )* ) @@ -262,6 +274,7 @@ macro_rules! benchmarks_iter { ) => { $crate::benchmarks_iter! { INSTANCE + { $( $where_clause )* } { $( $common )* } ( $( $names )* ) $name { $( $code )* }: { @@ -276,6 +289,7 @@ macro_rules! 
benchmarks_iter { // iteration arm: ( $instance:ident + { $( $where_clause:tt )* } { $( $common:tt )* } ( $( $names:ident )* ) $name:ident { $( $code:tt )* }: $eval:block @@ -285,29 +299,34 @@ macro_rules! benchmarks_iter { $crate::benchmark_backend! { $instance $name + { $( $where_clause )* } { $( $common )* } { } { $eval } { $( $code )* } $postcode } + + #[cfg(test)] + $crate::impl_benchmark_test!( { $( $where_clause )* } $instance $name ); + $crate::benchmarks_iter!( $instance + { $( $where_clause )* } { $( $common )* } ( $( $names )* $name ) $( $rest )* ); }; // iteration-exit arm - ( $instance:ident { $( $common:tt )* } ( $( $names:ident )* ) ) => { - $crate::selected_benchmark!( $instance $( $names ),* ); - $crate::impl_benchmark!( $instance $( $names ),* ); - #[cfg(test)] - $crate::impl_benchmark_tests!( $instance $( $names ),* ); + ( $instance:ident { $( $where_clause:tt )* } { $( $common:tt )* } ( $( $names:ident )* ) ) => { + $crate::selected_benchmark!( { $( $where_clause)* } $instance $( $names ),* ); + $crate::impl_benchmark!( { $( $where_clause )* } $instance $( $names ),* ); }; // add verify block to _() format ( $instance:ident + { $( $where_clause:tt )* } { $( $common:tt )* } ( $( $names:ident )* ) $name:ident { $( $code:tt )* }: _ ( $origin:expr $( , $arg:expr )* ) @@ -315,6 +334,7 @@ macro_rules! benchmarks_iter { ) => { $crate::benchmarks_iter! { $instance + { $( $where_clause )* } { $( $common )* } ( $( $names )* ) $name { $( $code )* }: _ ( $origin $( , $arg )* ) @@ -325,6 +345,7 @@ macro_rules! benchmarks_iter { // add verify block to name() format ( $instance:ident + { $( $where_clause:tt )* } { $( $common:tt )* } ( $( $names:ident )* ) $name:ident { $( $code:tt )* }: $dispatch:ident ( $origin:expr $( , $arg:expr )* ) @@ -332,6 +353,7 @@ macro_rules! benchmarks_iter { ) => { $crate::benchmarks_iter! { $instance + { $( $where_clause )* } { $( $common )* } ( $( $names )* ) $name { $( $code )* }: $dispatch ( $origin $( , $arg )* ) @@ -342,6 +364,7 @@ macro_rules! benchmarks_iter { // add verify block to {} format ( $instance:ident + { $( $where_clause:tt )* } { $( $common:tt )* } ( $( $names:ident )* ) $name:ident { $( $code:tt )* }: $eval:block @@ -349,6 +372,7 @@ macro_rules! benchmarks_iter { ) => { $crate::benchmarks_iter!( $instance + { $( $where_clause )* } { $( $common )* } ( $( $names )* ) $name { $( $code )* }: $eval @@ -359,10 +383,12 @@ macro_rules! benchmarks_iter { } #[macro_export] -#[allow(missing_docs)] +#[doc(hidden)] macro_rules! benchmark_backend { // parsing arms ($instance:ident $name:ident { + $( $where_clause:tt )* + } { $( $common:tt )* } { $( PRE { $( $pre_parsed:tt )* } )* @@ -371,13 +397,15 @@ macro_rules! benchmark_backend { $( $rest:tt )* } $postcode:block) => { $crate::benchmark_backend! { - $instance $name { $( $common )* } { + $instance $name { $( $where_clause )* } { $( $common )* } { $( PRE { $( $pre_parsed )* } )* PRE { $pre_id , $pre_ty , $pre_ex } } { $eval } { $( $rest )* } $postcode } }; ($instance:ident $name:ident { + $( $where_clause:tt )* + } { $( $common:tt )* } { $( $parsed:tt )* @@ -386,7 +414,7 @@ macro_rules! benchmark_backend { $( $rest:tt )* } $postcode:block) => { $crate::benchmark_backend! { - $instance $name { $( $common )* } { + $instance $name { $( $where_clause )* } { $( $common )* } { $( $parsed )* PARAM { $param , $param_from , $param_to , $param_instancer } } { $eval } { $( $rest )* } $postcode @@ -394,6 +422,8 @@ macro_rules! 
benchmark_backend { }; // mutation arm to look after defaulting to a common param ($instance:ident $name:ident { + $( $where_clause:tt )* + } { $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* } { $( $parsed:tt )* @@ -402,7 +432,7 @@ macro_rules! benchmark_backend { $( $rest:tt )* } $postcode:block) => { $crate::benchmark_backend! { - $instance $name { + $instance $name { $( $where_clause )* } { $( { $common , $common_from , $common_to , $common_instancer } )* } { $( $parsed )* @@ -417,6 +447,8 @@ macro_rules! benchmark_backend { }; // mutation arm to look after defaulting only the range to common param ($instance:ident $name:ident { + $( $where_clause:tt )* + } { $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* } { $( $parsed:tt )* @@ -425,7 +457,7 @@ macro_rules! benchmark_backend { $( $rest:tt )* } $postcode:block) => { $crate::benchmark_backend! { - $instance $name { + $instance $name { $( $where_clause )* } { $( { $common , $common_from , $common_to , $common_instancer } )* } { $( $parsed )* @@ -440,6 +472,8 @@ macro_rules! benchmark_backend { }; // mutation arm to look after a single tt for param_from. ($instance:ident $name:ident { + $( $where_clause:tt )* + } { $( $common:tt )* } { $( $parsed:tt )* @@ -448,7 +482,7 @@ macro_rules! benchmark_backend { $( $rest:tt )* } $postcode:block) => { $crate::benchmark_backend! { - $instance $name { $( $common )* } { $( $parsed )* } { $eval } { + $instance $name { $( $where_clause )* } { $( $common )* } { $( $parsed )* } { $eval } { let $param in ( $param_from ) .. $param_to => $param_instancer; $( $rest )* } $postcode @@ -456,6 +490,8 @@ macro_rules! benchmark_backend { }; // mutation arm to look after the default tail of `=> ()` ($instance:ident $name:ident { + $( $where_clause:tt )* + } { $( $common:tt )* } { $( $parsed:tt )* @@ -464,7 +500,7 @@ macro_rules! benchmark_backend { $( $rest:tt )* } $postcode:block) => { $crate::benchmark_backend! { - $instance $name { $( $common )* } { $( $parsed )* } { $eval } { + $instance $name { $( $where_clause )* } { $( $common )* } { $( $parsed )* } { $eval } { let $param in $param_from .. $param_to => (); $( $rest )* } $postcode @@ -472,6 +508,8 @@ macro_rules! benchmark_backend { }; // mutation arm to look after `let _ =` ($instance:ident $name:ident { + $( $where_clause:tt )* + } { $( $common:tt )* } { $( $parsed:tt )* @@ -480,7 +518,7 @@ macro_rules! benchmark_backend { $( $rest:tt )* } $postcode:block) => { $crate::benchmark_backend! { - $instance $name { $( $common )* } { $( $parsed )* } { $eval } { + $instance $name { $( $where_clause )* } { $( $common )* } { $( $parsed )* } { $eval } { let $pre_id : _ = $pre_ex; $( $rest )* } $postcode @@ -488,6 +526,8 @@ macro_rules! benchmark_backend { }; // no instance actioning arm (NO_INSTANCE $name:ident { + $( $where_clause:tt )* + } { $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* } { $( PRE { $pre_id:tt , $pre_ty:ty , $pre_ex:expr } )* @@ -496,7 +536,9 @@ macro_rules! benchmark_backend { #[allow(non_camel_case_types)] struct $name; #[allow(unused_variables)] - impl $crate::BenchmarkingSetup for $name { + impl $crate::BenchmarkingSetup for $name + where $( $where_clause )* + { fn components(&self) -> Vec<($crate::BenchmarkParameter, u32, u32)> { vec! [ $( @@ -513,7 +555,9 @@ macro_rules! 
benchmark_backend { )* $( // Prepare instance - let $param = components.iter().find(|&c| c.0 == $crate::BenchmarkParameter::$param).unwrap().1; + let $param = components.iter() + .find(|&c| c.0 == $crate::BenchmarkParameter::$param) + .unwrap().1; )* $( let $pre_id : $pre_ty = $pre_ex; @@ -532,7 +576,9 @@ macro_rules! benchmark_backend { )* $( // Prepare instance - let $param = components.iter().find(|&c| c.0 == $crate::BenchmarkParameter::$param).unwrap().1; + let $param = components.iter() + .find(|&c| c.0 == $crate::BenchmarkParameter::$param) + .unwrap().1; )* $( let $pre_id : $pre_ty = $pre_ex; @@ -546,6 +592,8 @@ macro_rules! benchmark_backend { }; // instance actioning arm (INSTANCE $name:ident { + $( $where_clause:tt )* + } { $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* } { $( PRE { $pre_id:tt , $pre_ty:ty , $pre_ex:expr } )* @@ -554,7 +602,9 @@ macro_rules! benchmark_backend { #[allow(non_camel_case_types)] struct $name; #[allow(unused_variables)] - impl, I: Instance> $crate::BenchmarkingSetupInstance for $name { + impl, I: Instance> $crate::BenchmarkingSetupInstance for $name + where $( $where_clause )* + { fn components(&self) -> Vec<($crate::BenchmarkParameter, u32, u32)> { vec! [ $( @@ -571,7 +621,9 @@ macro_rules! benchmark_backend { )* $( // Prepare instance - let $param = components.iter().find(|&c| c.0 == $crate::BenchmarkParameter::$param).unwrap().1; + let $param = components.iter() + .find(|&c| c.0 == $crate::BenchmarkParameter::$param) + .unwrap().1; )* $( let $pre_id : $pre_ty = $pre_ex; @@ -590,7 +642,9 @@ macro_rules! benchmark_backend { )* $( // Prepare instance - let $param = components.iter().find(|&c| c.0 == $crate::BenchmarkParameter::$param).unwrap().1; + let $param = components.iter() + .find(|&c| c.0 == $crate::BenchmarkParameter::$param) + .unwrap().1; )* $( let $pre_id : $pre_ty = $pre_ex; @@ -604,23 +658,25 @@ macro_rules! benchmark_backend { } } -/// Creates a `SelectedBenchmark` enum implementing `BenchmarkingSetup`. -/// -/// Every variant must implement [`BenchmarkingSetup`]. -/// -/// ```nocompile -/// -/// struct Transfer; -/// impl BenchmarkingSetup for Transfer { ... } -/// -/// struct SetBalance; -/// impl BenchmarkingSetup for SetBalance { ... } -/// -/// selected_benchmark!(Transfer, SetBalance); -/// ``` +// Creates a `SelectedBenchmark` enum implementing `BenchmarkingSetup`. +// +// Every variant must implement [`BenchmarkingSetup`]. +// +// ```nocompile +// +// struct Transfer; +// impl BenchmarkingSetup for Transfer { ... } +// +// struct SetBalance; +// impl BenchmarkingSetup for SetBalance { ... } +// +// selected_benchmark!(Transfer, SetBalance); +// ``` #[macro_export] +#[doc(hidden)] macro_rules! selected_benchmark { ( + { $( $where_clause:tt )* } NO_INSTANCE $( $bench:ident ),* ) => { // The list of available benchmarks for this pallet. @@ -630,7 +686,9 @@ macro_rules! selected_benchmark { } // Allow us to select a benchmark from the list of available benchmarks. - impl $crate::BenchmarkingSetup for SelectedBenchmark { + impl $crate::BenchmarkingSetup for SelectedBenchmark + where $( $where_clause )* + { fn components(&self) -> Vec<($crate::BenchmarkParameter, u32, u32)> { match self { $( Self::$bench => <$bench as $crate::BenchmarkingSetup>::components(&$bench), )* @@ -655,6 +713,7 @@ macro_rules! selected_benchmark { } }; ( + { $( $where_clause:tt )* } INSTANCE $( $bench:ident ),* ) => { // The list of available benchmarks for this pallet. @@ -664,7 +723,9 @@ macro_rules! 
selected_benchmark { } // Allow us to select a benchmark from the list of available benchmarks. - impl, I: Instance> $crate::BenchmarkingSetupInstance for SelectedBenchmark { + impl, I: Instance> $crate::BenchmarkingSetupInstance for SelectedBenchmark + where $( $where_clause )* + { fn components(&self) -> Vec<($crate::BenchmarkParameter, u32, u32)> { match self { $( Self::$bench => <$bench as $crate::BenchmarkingSetupInstance>::components(&$bench), )* @@ -691,12 +752,14 @@ macro_rules! selected_benchmark { } #[macro_export] +#[doc(hidden)] macro_rules! impl_benchmark { ( + { $( $where_clause:tt )* } NO_INSTANCE $( $name:ident ),* ) => { impl $crate::Benchmarking<$crate::BenchmarkResults> for Module - where T: frame_system::Trait + where T: frame_system::Trait, $( $where_clause )* { fn benchmarks() -> Vec<&'static [u8]> { vec![ $( stringify!($name).as_ref() ),* ] @@ -763,8 +826,11 @@ macro_rules! impl_benchmark { // Run the benchmark `repeat` times. for _ in 0..repeat { - // Set up the externalities environment for the setup we want to benchmark. - let closure_to_benchmark = >::instance(&selected_benchmark, &c)?; + // Set up the externalities environment for the setup we want to + // benchmark. + let closure_to_benchmark = < + SelectedBenchmark as $crate::BenchmarkingSetup + >::instance(&selected_benchmark, &c)?; // Set the block number to at least 1 so events are deposited. if $crate::Zero::is_zero(&frame_system::Module::::block_number()) { @@ -776,12 +842,20 @@ macro_rules! impl_benchmark { $crate::benchmarking::commit_db(); // Time the extrinsic logic. - frame_support::debug::trace!(target: "benchmark", "Start Benchmark: {:?} {:?}", name, component_value); + frame_support::debug::trace!( + target: "benchmark", + "Start Benchmark: {:?} {:?}", name, component_value + ); + let start_extrinsic = $crate::benchmarking::current_time(); closure_to_benchmark()?; let finish_extrinsic = $crate::benchmarking::current_time(); let elapsed_extrinsic = finish_extrinsic - start_extrinsic; - frame_support::debug::trace!(target: "benchmark", "End Benchmark: {} ns", elapsed_extrinsic); + + frame_support::debug::trace!( + target: "benchmark", + "End Benchmark: {} ns", elapsed_extrinsic + ); // Time the storage root recalculation. let start_storage_root = $crate::benchmarking::current_time(); @@ -801,10 +875,12 @@ macro_rules! impl_benchmark { } }; ( + { $( $where_clause:tt )* } INSTANCE $( $name:ident ),* ) => { - impl, I: Instance> $crate::Benchmarking<$crate::BenchmarkResults> for Module - where T: frame_system::Trait + impl, I: Instance> $crate::Benchmarking<$crate::BenchmarkResults> + for Module + where T: frame_system::Trait, $( $where_clause )* { fn benchmarks() -> Vec<&'static [u8]> { vec![ $( stringify!($name).as_ref() ),* ] @@ -829,7 +905,9 @@ macro_rules! impl_benchmark { $crate::benchmarking::commit_db(); $crate::benchmarking::wipe_db(); - let components = >::components(&selected_benchmark); + let components = < + SelectedBenchmark as $crate::BenchmarkingSetupInstance + >::components(&selected_benchmark); let mut results: Vec<$crate::BenchmarkResults> = Vec::new(); // Default number of steps for a component. @@ -872,7 +950,9 @@ macro_rules! impl_benchmark { // Run the benchmark `repeat` times. for _ in 0..repeat { // Set up the externalities environment for the setup we want to benchmark. 
- let closure_to_benchmark = >::instance(&selected_benchmark, &c)?; + let closure_to_benchmark = < + SelectedBenchmark as $crate::BenchmarkingSetupInstance + >::instance(&selected_benchmark, &c)?; // Set the block number to at least 1 so events are deposited. if $crate::Zero::is_zero(&frame_system::Module::::block_number()) { @@ -884,12 +964,20 @@ macro_rules! impl_benchmark { $crate::benchmarking::commit_db(); // Time the extrinsic logic. - frame_support::debug::trace!(target: "benchmark", "Start Benchmark: {:?} {:?}", name, component_value); + frame_support::debug::trace!( + target: "benchmark", + "Start Benchmark: {:?} {:?}", name, component_value + ); + let start_extrinsic = $crate::benchmarking::current_time(); closure_to_benchmark()?; let finish_extrinsic = $crate::benchmarking::current_time(); let elapsed_extrinsic = finish_extrinsic - start_extrinsic; - frame_support::debug::trace!(target: "benchmark", "End Benchmark: {} ns", elapsed_extrinsic); + + frame_support::debug::trace!( + target: "benchmark", + "End Benchmark: {} ns", elapsed_extrinsic + ); // Time the storage root recalculation. let start_storage_root = $crate::benchmarking::current_time(); @@ -910,108 +998,115 @@ macro_rules! impl_benchmark { } } -// This creates unit tests from the main benchmark macro. -// They run the benchmark using the `high` and `low` value for each component +// This creates a unit test for one benchmark of the main benchmark macro. +// It runs the benchmark using the `high` and `low` value for each component // and ensure that everything completes successfully. #[macro_export] -macro_rules! impl_benchmark_tests { +#[doc(hidden)] +macro_rules! impl_benchmark_test { ( + { $( $where_clause:tt )* } NO_INSTANCE - $( $name:ident ),* + $name:ident ) => { - $( - $crate::paste::item! { - fn [] () -> Result<(), &'static str> - where T: frame_system::Trait - { - let selected_benchmark = SelectedBenchmark::$name; - let components = >::components(&selected_benchmark); - - assert!( - components.len() != 0, - "You need to add components to your benchmark!", - ); - for (_, (name, low, high)) in components.iter().enumerate() { - // Test only the low and high value, assuming values in the middle won't break - for component_value in vec![low, high] { - // Select the max value for all the other components. - let c: Vec<($crate::BenchmarkParameter, u32)> = components.iter() - .enumerate() - .map(|(_, (n, _, h))| - if n == name { - (*n, *component_value) - } else { - (*n, *h) - } - ) - .collect(); - - // Set up the verification state - let closure_to_verify = >::verify(&selected_benchmark, &c)?; - - // Set the block number to at least 1 so events are deposited. - if $crate::Zero::is_zero(&frame_system::Module::::block_number()) { - frame_system::Module::::set_block_number(1.into()); - } + $crate::paste::item! { + fn [] () -> Result<(), &'static str> + where T: frame_system::Trait, $( $where_clause )* + { + let selected_benchmark = SelectedBenchmark::$name; + let components = < + SelectedBenchmark as $crate::BenchmarkingSetup + >::components(&selected_benchmark); + + assert!( + components.len() != 0, + "You need to add components to your benchmark!", + ); + for (_, (name, low, high)) in components.iter().enumerate() { + // Test only the low and high value, assuming values in the middle won't break + for component_value in vec![low, high] { + // Select the max value for all the other components. 
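For context on how the per-benchmark test functions generated by `impl_benchmark_test!` are consumed: a pallet's own test module typically calls them by name. A rough sketch, where `Test`, `new_test_ext()` and `assert_ok!` are assumed to come from the pallet's usual mock setup and are not defined here:

```rust
// Sketch: how the `test_benchmark_*` functions generated by
// `impl_benchmark_test!` are usually invoked from a pallet's test module.
// `Test`, `new_test_ext()` and `assert_ok!` are assumed to come from the
// pallet's mock runtime and frame_support; names follow the usual convention.
#[test]
fn generated_benchmark_tests_pass() {
    new_test_ext().execute_with(|| {
        assert_ok!(test_benchmark_set_value::<Test>());
        assert_ok!(test_benchmark_sort_vector::<Test>());
    });
}
```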
+ let c: Vec<($crate::BenchmarkParameter, u32)> = components.iter() + .enumerate() + .map(|(_, (n, _, h))| + if n == name { + (*n, *component_value) + } else { + (*n, *h) + } + ) + .collect(); - // Run verification - closure_to_verify()?; + // Set up the verification state + let closure_to_verify = < + SelectedBenchmark as $crate::BenchmarkingSetup + >::verify(&selected_benchmark, &c)?; - // Reset the state - $crate::benchmarking::wipe_db(); + // Set the block number to at least 1 so events are deposited. + if $crate::Zero::is_zero(&frame_system::Module::::block_number()) { + frame_system::Module::::set_block_number(1.into()); } + + // Run verification + closure_to_verify()?; + + // Reset the state + $crate::benchmarking::wipe_db(); } - Ok(()) } + Ok(()) } - )* + } }; ( + { $( $where_clause:tt )* } INSTANCE - $( $name:ident ),* + $name:ident ) => { - $( - $crate::paste::item! { - fn [] () -> Result<(), &'static str> - where T: frame_system::Trait - { - let selected_benchmark = SelectedBenchmark::$name; - let components = >::components(&selected_benchmark); - - for (_, (name, low, high)) in components.iter().enumerate() { - // Test only the low and high value, assuming values in the middle won't break - for component_value in vec![low, high] { - // Select the max value for all the other components. - let c: Vec<($crate::BenchmarkParameter, u32)> = components.iter() - .enumerate() - .map(|(_, (n, _, h))| - if n == name { - (*n, *component_value) - } else { - (*n, *h) - } - ) - .collect(); - - // Set up the verification state - let closure_to_verify = >::verify(&selected_benchmark, &c)?; - - // Set the block number to at least 1 so events are deposited. - if $crate::Zero::is_zero(&frame_system::Module::::block_number()) { - frame_system::Module::::set_block_number(1.into()); - } + $crate::paste::item! { + fn [] () -> Result<(), &'static str> + where T: frame_system::Trait, $( $where_clause )* + { + let selected_benchmark = SelectedBenchmark::$name; + let components = < + SelectedBenchmark as $crate::BenchmarkingSetupInstance + >::components(&selected_benchmark); + + for (_, (name, low, high)) in components.iter().enumerate() { + // Test only the low and high value, assuming values in the middle won't break + for component_value in vec![low, high] { + // Select the max value for all the other components. + let c: Vec<($crate::BenchmarkParameter, u32)> = components.iter() + .enumerate() + .map(|(_, (n, _, h))| + if n == name { + (*n, *component_value) + } else { + (*n, *h) + } + ) + .collect(); - // Run verification - closure_to_verify()?; + // Set up the verification state + let closure_to_verify = < + SelectedBenchmark as $crate::BenchmarkingSetupInstance + >::verify(&selected_benchmark, &c)?; - // Reset the state - $crate::benchmarking::wipe_db(); + // Set the block number to at least 1 so events are deposited. + if $crate::Zero::is_zero(&frame_system::Module::::block_number()) { + frame_system::Module::::set_block_number(1.into()); } + + // Run verification + closure_to_verify()?; + + // Reset the state + $crate::benchmarking::wipe_db(); } - Ok(()) } + Ok(()) } - )* + } }; } diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index 85e8bf5a5c..674d92eb85 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ -30,13 +30,17 @@ use frame_support::{ use frame_system::{RawOrigin, ensure_signed, ensure_none}; decl_storage! 
{ - trait Store for Module as Test { + trait Store for Module as Test where + ::OtherEvent: Into<::Event> + { Value get(fn value): Option; } } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where + origin: T::Origin, ::OtherEvent: Into<::Event> + { #[weight = 0] fn set_value(origin, n: u32) -> DispatchResult { let _sender = ensure_signed(origin)?; @@ -56,11 +60,16 @@ impl_outer_origin! { pub enum Origin for Test where system = frame_system {} } -pub trait Trait { +pub trait OtherTrait { + type OtherEvent; +} + +pub trait Trait: OtherTrait where Self::OtherEvent: Into { type Event; type BlockNumber; type AccountId: 'static + Default + Decode; - type Origin: From> + Into, Self::Origin>>; + type Origin: From> + + Into, Self::Origin>>; } #[derive(Clone, Eq, PartialEq)] @@ -100,6 +109,10 @@ impl Trait for Test { type AccountId = u64; } +impl OtherTrait for Test { + type OtherEvent = (); +} + // This function basically just builds a genesis storage key/value store according to // our desired mockup. fn new_test_ext() -> sp_io::TestExternalities { @@ -107,6 +120,8 @@ fn new_test_ext() -> sp_io::TestExternalities { } benchmarks!{ + where_clause { where ::OtherEvent: Into<::Event> } + _ { // Define a common range for `b`. let b in 1 .. 1000 => (); @@ -156,13 +171,13 @@ benchmarks!{ #[test] fn benchmarks_macro_works() { // Check benchmark creation for `set_value`. - let selected_benchmark = SelectedBenchmark::set_value; + let selected = SelectedBenchmark::set_value; - let components = >::components(&selected_benchmark); + let components = >::components(&selected); assert_eq!(components, vec![(BenchmarkParameter::b, 1, 1000)]); let closure = >::instance( - &selected_benchmark, + &selected, &[(BenchmarkParameter::b, 1)], ).expect("failed to create closure"); @@ -174,12 +189,12 @@ fn benchmarks_macro_works() { #[test] fn benchmarks_macro_rename_works() { // Check benchmark creation for `other_dummy`. - let selected_benchmark = SelectedBenchmark::other_name; - let components = >::components(&selected_benchmark); + let selected = SelectedBenchmark::other_name; + let components = >::components(&selected); assert_eq!(components, vec![(BenchmarkParameter::b, 1, 1000)]); let closure = >::instance( - &selected_benchmark, + &selected, &[(BenchmarkParameter::b, 1)], ).expect("failed to create closure"); @@ -190,13 +205,13 @@ fn benchmarks_macro_rename_works() { #[test] fn benchmarks_macro_works_for_non_dispatchable() { - let selected_benchmark = SelectedBenchmark::sort_vector; + let selected = SelectedBenchmark::sort_vector; - let components = >::components(&selected_benchmark); + let components = >::components(&selected); assert_eq!(components, vec![(BenchmarkParameter::x, 1, 10000)]); let closure = >::instance( - &selected_benchmark, + &selected, &[(BenchmarkParameter::x, 1)], ).expect("failed to create closure"); @@ -206,10 +221,10 @@ fn benchmarks_macro_works_for_non_dispatchable() { #[test] fn benchmarks_macro_verify_works() { // Check postcondition for benchmark `set_value` is valid. 
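For readers skimming the macro changes: the new `where_clause { where ... }` line simply repeats the given bound on every impl the macro generates. A rough hand-written equivalent of its effect for the test pallet above, with simplified item names (this is not the real generated code):

```rust
// Rough hand-written illustration of what `where_clause { where ... }` does:
// the bound is simply repeated on every impl the macro generates.
// Item names are simplified; this is not the real generated code.
trait OtherTrait {
    type OtherEvent;
}

trait Trait: OtherTrait {
    type Event;
}

#[allow(non_camel_case_types)]
struct set_value;

trait BenchmarkingSetup<T> {
    fn components(&self) -> Vec<(&'static str, u32, u32)>;
}

// The extra bound from `where_clause` ends up here on each generated impl.
impl<T: Trait> BenchmarkingSetup<T> for set_value
where
    T::OtherEvent: Into<T::Event>,
{
    fn components(&self) -> Vec<(&'static str, u32, u32)> {
        vec![("b", 1, 1000)]
    }
}

fn main() {}
```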
- let selected_benchmark = SelectedBenchmark::set_value; + let selected = SelectedBenchmark::set_value; let closure = >::verify( - &selected_benchmark, + &selected, &[(BenchmarkParameter::b, 1)], ).expect("failed to create closure"); -- GitLab From 5a85a43104f1c1b934ec43835492b2d36e84b18b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Wed, 24 Jun 2020 16:42:27 +0100 Subject: [PATCH 069/144] client: fix print of slot duration on startup (#6495) --- client/consensus/slots/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index 950f83fbce..7687d3114b 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -471,7 +471,7 @@ impl SlotDuration { info!( "⏱ Loaded block-time = {:?} milliseconds from genesis on first-launch", - genesis_slot_duration + genesis_slot_duration.slot_duration() ); genesis_slot_duration -- GitLab From 7f5dd736f42a408b62885669f7d76ef5baa13572 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Wed, 24 Jun 2020 21:03:55 +0200 Subject: [PATCH 070/144] Add DB Read/Write Tracking to Benchmarking Pipeline (#6386) * initial mockup * add and wipe * track writes * start to add to pipeline * return all reads/writes * Log reads and writes from bench db * causes panic * Allow multiple commits * commit before ending benchmark * doesn't work??? * fix * Update lib.rs * switch to struct for `BenchmarkResults` * add to output * fix test * line width * @kianenigma review * Add Whitelist to DB Tracking in Benchmarks Pipeline (#6405) * hardcoded whitelist * Add whitelist to pipeline * Remove whitelist pipeline from CLI, add to runtime * clean-up unused db initialized whitelist * Add regression analysis to DB Tracking (#6475) * Add selector * add tests * debug formatter for easy formula * Update client/db/src/bench.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: arkpar Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- Cargo.lock | 1 + bin/node/runtime/Cargo.toml | 1 + bin/node/runtime/src/lib.rs | 20 ++- client/db/src/bench.rs | 169 +++++++++++++++++++- frame/benchmarking/src/analysis.rs | 140 ++++++++++++---- frame/benchmarking/src/lib.rs | 56 ++++++- frame/benchmarking/src/utils.rs | 26 ++- primitives/externalities/src/lib.rs | 21 +++ primitives/runtime-interface/src/impls.rs | 4 + primitives/state-machine/src/backend.rs | 17 +- primitives/state-machine/src/basic.rs | 12 ++ primitives/state-machine/src/ext.rs | 13 ++ primitives/state-machine/src/read_only.rs | 12 ++ utils/frame/benchmarking-cli/src/command.rs | 39 +++-- 14 files changed, 471 insertions(+), 60 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c1ea4a479c..1520373790 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3518,6 +3518,7 @@ dependencies = [ "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", + "hex-literal", "integer-sqrt", "node-primitives", "pallet-authority-discovery", diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index b26b53cd6c..3614e4ca0d 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -18,6 +18,7 @@ codec = { package = "parity-scale-codec", version = "1.3.1", default-features = integer-sqrt = { version = "0.1.2" } serde = { version = "1.0.102", optional = true } static_assertions = "1.1.0" +hex-literal = "0.2.1" # primitives sp-authority-discovery = { version = 
"2.0.0-rc3", default-features = false, path = "../../../primitives/authority-discovery" } diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 90bb63874e..8b6831b41e 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1075,8 +1075,26 @@ impl_runtime_apis! { impl pallet_offences_benchmarking::Trait for Runtime {} impl frame_system_benchmarking::Trait for Runtime {} + let whitelist: Vec> = vec![ + // Block Number + // frame_system::Number::::hashed_key().to_vec(), + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec(), + // Total Issuance + hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec(), + // Execution Phase + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec(), + // Event Count + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec(), + // System Events + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec(), + // Caller 0 Account + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da946c154ffd9992e395af90b5b13cc6f295c77033fce8a9045824a6690bbf99c6db269502f0a8d1d2a008542d5690a0749").to_vec(), + // Treasury Account + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da95ecffd7b6c0f78751baa9d281e0bfa3a6d6f646c70792f74727372790000000000000000000000000000000000000000").to_vec(), + ]; + let mut batches = Vec::::new(); - let params = (&pallet, &benchmark, &lowest_range_values, &highest_range_values, &steps, repeat); + let params = (&pallet, &benchmark, &lowest_range_values, &highest_range_values, &steps, repeat, &whitelist); add_benchmark!(params, batches, b"balances", Balances); add_benchmark!(params, batches, b"collective", Council); diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 99ce1edae0..c3bed3e24f 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -24,10 +24,10 @@ use std::collections::HashMap; use hash_db::{Prefix, Hasher}; use sp_trie::{MemoryDB, prefixed_key}; -use sp_core::storage::ChildInfo; +use sp_core::{storage::ChildInfo, hexdisplay::HexDisplay}; use sp_runtime::traits::{Block as BlockT, HashFor}; use sp_runtime::Storage; -use sp_state_machine::{DBValue, backend::Backend as StateBackend}; +use sp_state_machine::{DBValue, backend::Backend as StateBackend, StorageCollection}; use kvdb::{KeyValueDB, DBTransaction}; use crate::storage_cache::{CachingState, SharedCache, new_shared_cache}; @@ -50,6 +50,40 @@ impl sp_state_machine::Storage> for StorageDb { root: Cell, @@ -59,6 +93,9 @@ pub struct BenchmarkingState { genesis: HashMap, (Vec, i32)>, record: Cell>>, shared_cache: SharedCache, // shared cache is always empty + key_tracker: RefCell, KeyTracker>>, + read_write_tracker: RefCell, + whitelist: RefCell>>, } impl BenchmarkingState { @@ -76,8 +113,13 @@ impl BenchmarkingState { genesis_root: Default::default(), record: Default::default(), shared_cache: new_shared_cache(0, (1, 10)), + key_tracker: Default::default(), + read_write_tracker: Default::default(), + whitelist: Default::default(), }; + state.add_whitelist_to_tracker(); + state.reopen()?; let child_delta = genesis.children_default.iter().map(|(_storage_key, child_content)| ( &child_content.child_info, @@ -89,7 +131,7 @@ impl BenchmarkingState { ); state.genesis = transaction.clone().drain(); state.genesis_root = root.clone(); - state.commit(root, transaction)?; + 
state.commit(root, transaction, Vec::new())?; state.record.take(); Ok(state) } @@ -109,6 +151,86 @@ impl BenchmarkingState { )); Ok(()) } + + fn add_whitelist_to_tracker(&self) { + let mut key_tracker = self.key_tracker.borrow_mut(); + + let whitelisted = KeyTracker { + has_been_read: true, + has_been_written: true, + }; + + let whitelist = self.whitelist.borrow(); + + whitelist.iter().for_each(|key| { + key_tracker.insert(key.to_vec(), whitelisted); + }); + } + + fn wipe_tracker(&self) { + *self.key_tracker.borrow_mut() = HashMap::new(); + self.add_whitelist_to_tracker(); + *self.read_write_tracker.borrow_mut() = Default::default(); + } + + fn add_read_key(&self, key: &[u8]) { + log::trace!(target: "benchmark", "Read: {}", HexDisplay::from(&key)); + + let mut key_tracker = self.key_tracker.borrow_mut(); + let mut read_write_tracker = self.read_write_tracker.borrow_mut(); + + let maybe_tracker = key_tracker.get(key); + + let has_been_read = KeyTracker { + has_been_read: true, + has_been_written: false, + }; + + match maybe_tracker { + None => { + key_tracker.insert(key.to_vec(), has_been_read); + read_write_tracker.add_read(); + }, + Some(tracker) => { + if !tracker.has_been_read { + key_tracker.insert(key.to_vec(), has_been_read); + read_write_tracker.add_read(); + } else { + read_write_tracker.add_repeat_read(); + } + } + } + } + + fn add_write_key(&self, key: &[u8]) { + log::trace!(target: "benchmark", "Write: {}", HexDisplay::from(&key)); + + let mut key_tracker = self.key_tracker.borrow_mut(); + let mut read_write_tracker = self.read_write_tracker.borrow_mut(); + + let maybe_tracker = key_tracker.get(key); + + // If we have written to the key, we also consider that we have read from it. + let has_been_written = KeyTracker { + has_been_read: true, + has_been_written: true, + }; + + match maybe_tracker { + None => { + key_tracker.insert(key.to_vec(), has_been_written); + read_write_tracker.add_write(); + }, + Some(tracker) => { + if !tracker.has_been_written { + key_tracker.insert(key.to_vec(), has_been_written); + read_write_tracker.add_write(); + } else { + read_write_tracker.add_repeat_write(); + } + } + } + } } fn state_err() -> String { @@ -121,10 +243,12 @@ impl StateBackend> for BenchmarkingState { type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { + self.add_read_key(key); self.state.borrow().as_ref().ok_or_else(state_err)?.storage(key) } fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { + self.add_read_key(key); self.state.borrow().as_ref().ok_or_else(state_err)?.storage_hash(key) } @@ -133,10 +257,12 @@ impl StateBackend> for BenchmarkingState { child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { + self.add_read_key(key); self.state.borrow().as_ref().ok_or_else(state_err)?.child_storage(child_info, key) } fn exists_storage(&self, key: &[u8]) -> Result { + self.add_read_key(key); self.state.borrow().as_ref().ok_or_else(state_err)?.exists_storage(key) } @@ -145,10 +271,12 @@ impl StateBackend> for BenchmarkingState { child_info: &ChildInfo, key: &[u8], ) -> Result { + self.add_read_key(key); self.state.borrow().as_ref().ok_or_else(state_err)?.exists_child_storage(child_info, key) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { + self.add_read_key(key); self.state.borrow().as_ref().ok_or_else(state_err)?.next_storage_key(key) } @@ -157,6 +285,7 @@ impl StateBackend> for BenchmarkingState { child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { + 
self.add_read_key(key); self.state.borrow().as_ref().ok_or_else(state_err)?.next_child_storage_key(child_info, key) } @@ -230,8 +359,11 @@ impl StateBackend> for BenchmarkingState { None } - fn commit(&self, storage_root: as Hasher>::Out, mut transaction: Self::Transaction) - -> Result<(), Self::Error> + fn commit(&self, + storage_root: as Hasher>::Out, + mut transaction: Self::Transaction, + storage_changes: StorageCollection, + ) -> Result<(), Self::Error> { if let Some(db) = self.db.take() { let mut db_transaction = DBTransaction::new(); @@ -245,10 +377,17 @@ impl StateBackend> for BenchmarkingState { } keys.push(key); } - self.record.set(keys); + let mut record = self.record.take(); + record.extend(keys); + self.record.set(record); db.write(db_transaction).map_err(|_| String::from("Error committing transaction"))?; self.root.set(storage_root); - self.db.set(Some(db)) + self.db.set(Some(db)); + + // Track DB Writes + storage_changes.iter().for_each(|(key, _)| { + self.add_write_key(key); + }); } else { return Err("Trying to commit to a closed db".into()) } @@ -272,9 +411,25 @@ impl StateBackend> for BenchmarkingState { self.root.set(self.genesis_root.clone()); self.reopen()?; + self.wipe_tracker(); Ok(()) } + /// Get the key tracking information for the state db. + fn read_write_count(&self) -> (u32, u32, u32, u32) { + let count = *self.read_write_tracker.borrow_mut(); + (count.reads, count.repeat_reads, count.writes, count.repeat_writes) + } + + /// Reset the key tracking information for the state db. + fn reset_read_write_count(&self) { + self.wipe_tracker() + } + + fn set_whitelist(&self, new: Vec>) { + *self.whitelist.borrow_mut() = new; + } + fn register_overlay_stats(&mut self, stats: &sp_state_machine::StateMachineStats) { self.state.borrow_mut().as_mut().map(|s| s.register_overlay_stats(stats)); } diff --git a/frame/benchmarking/src/analysis.rs b/frame/benchmarking/src/analysis.rs index 0446430975..621f3a2941 100644 --- a/frame/benchmarking/src/analysis.rs +++ b/frame/benchmarking/src/analysis.rs @@ -29,24 +29,40 @@ pub struct Analysis { model: Option, } +pub enum BenchmarkSelector { + ExtrinsicTime, + StorageRootTime, + Reads, + Writes, +} + impl Analysis { - pub fn median_slopes(r: &Vec) -> Option { - let results = r[0].0.iter().enumerate().map(|(i, &(param, _))| { + pub fn median_slopes(r: &Vec, selector: BenchmarkSelector) -> Option { + let results = r[0].components.iter().enumerate().map(|(i, &(param, _))| { let mut counted = BTreeMap::, usize>::new(); - for (params, _, _) in r.iter() { - let mut p = params.iter().map(|x| x.1).collect::>(); + for result in r.iter() { + let mut p = result.components.iter().map(|x| x.1).collect::>(); p[i] = 0; *counted.entry(p).or_default() += 1; } let others: Vec = counted.iter().max_by_key(|i| i.1).expect("r is not empty; qed").0.clone(); let values = r.iter() .filter(|v| - v.0.iter() + v.components.iter() .map(|x| x.1) .zip(others.iter()) .enumerate() .all(|(j, (v1, v2))| j == i || v1 == *v2) - ).map(|(ps, v, _)| (ps[i].1, *v)) + ).map(|result| { + // Extract the data we are interested in analyzing + let data = match selector { + BenchmarkSelector::ExtrinsicTime => result.extrinsic_time, + BenchmarkSelector::StorageRootTime => result.storage_root_time, + BenchmarkSelector::Reads => result.reads.into(), + BenchmarkSelector::Writes => result.writes.into(), + }; + (result.components[i].1, data) + }) .collect::>(); (format!("{:?}", param), i, others, values) }).collect::>(); @@ -97,12 +113,18 @@ impl Analysis { }) } - pub fn min_squares_iqr(r: 
&Vec) -> Option { + pub fn min_squares_iqr(r: &Vec, selector: BenchmarkSelector) -> Option { let mut results = BTreeMap::, Vec>::new(); - for &(ref params, t, _) in r.iter() { - let p = params.iter().map(|x| x.1).collect::>(); - results.entry(p).or_default().push(t); + for result in r.iter() { + let p = result.components.iter().map(|x| x.1).collect::>(); + results.entry(p).or_default().push(match selector { + BenchmarkSelector::ExtrinsicTime => result.extrinsic_time, + BenchmarkSelector::StorageRootTime => result.storage_root_time, + BenchmarkSelector::Reads => result.reads.into(), + BenchmarkSelector::Writes => result.writes.into(), + }) } + for (_, rs) in results.iter_mut() { rs.sort(); let ql = rs.len() / 4; @@ -111,7 +133,7 @@ impl Analysis { let mut data = vec![("Y", results.iter().flat_map(|x| x.1.iter().map(|v| *v as f64)).collect())]; - let names = r[0].0.iter().map(|x| format!("{:?}", x.0)).collect::>(); + let names = r[0].components.iter().map(|x| format!("{:?}", x.0)).collect::>(); data.extend(names.iter() .enumerate() .map(|(i, p)| ( @@ -217,40 +239,88 @@ impl std::fmt::Display for Analysis { } } +impl std::fmt::Debug for Analysis { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.base)?; + for (&m, n) in self.slopes.iter().zip(self.names.iter()) { + write!(f, " + ({} * {})", m, n)?; + } + write!(f,"") + } +} + #[cfg(test)] mod tests { use super::*; use crate::BenchmarkParameter; + fn benchmark_result( + components: Vec<(BenchmarkParameter, u32)>, + extrinsic_time: u128, + storage_root_time: u128, + reads: u32, + writes: u32, + ) -> BenchmarkResults { + BenchmarkResults { + components, + extrinsic_time, + storage_root_time, + reads, + repeat_reads: 0, + writes, + repeat_writes: 0, + } + } + #[test] fn analysis_median_slopes_should_work() { - let a = Analysis::median_slopes(&vec![ - (vec![(BenchmarkParameter::n, 1), (BenchmarkParameter::m, 5)], 11_500_000, 0), - (vec![(BenchmarkParameter::n, 2), (BenchmarkParameter::m, 5)], 12_500_000, 0), - (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 5)], 13_500_000, 0), - (vec![(BenchmarkParameter::n, 4), (BenchmarkParameter::m, 5)], 14_500_000, 0), - (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 1)], 13_100_000, 0), - (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 3)], 13_300_000, 0), - (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 7)], 13_700_000, 0), - (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 10)], 14_000_000, 0), - ]).unwrap(); - assert_eq!(a.base, 10_000_000); - assert_eq!(a.slopes, vec![1_000_000, 100_000]); + let data = vec![ + benchmark_result(vec![(BenchmarkParameter::n, 1), (BenchmarkParameter::m, 5)], 11_500_000, 0, 3, 10), + benchmark_result(vec![(BenchmarkParameter::n, 2), (BenchmarkParameter::m, 5)], 12_500_000, 0, 4, 10), + benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 5)], 13_500_000, 0, 5, 10), + benchmark_result(vec![(BenchmarkParameter::n, 4), (BenchmarkParameter::m, 5)], 14_500_000, 0, 6, 10), + benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 1)], 13_100_000, 0, 5, 2), + benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 3)], 13_300_000, 0, 5, 6), + benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 7)], 13_700_000, 0, 5, 14), + benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 10)], 14_000_000, 0, 5, 20), + ]; + + let extrinsic_time = Analysis::median_slopes(&data, 
BenchmarkSelector::ExtrinsicTime).unwrap(); + assert_eq!(extrinsic_time.base, 10_000_000); + assert_eq!(extrinsic_time.slopes, vec![1_000_000, 100_000]); + + let reads = Analysis::median_slopes(&data, BenchmarkSelector::Reads).unwrap(); + assert_eq!(reads.base, 2); + assert_eq!(reads.slopes, vec![1, 0]); + + let writes = Analysis::median_slopes(&data, BenchmarkSelector::Writes).unwrap(); + assert_eq!(writes.base, 0); + assert_eq!(writes.slopes, vec![0, 2]); } #[test] fn analysis_median_min_squares_should_work() { - let a = Analysis::min_squares_iqr(&vec![ - (vec![(BenchmarkParameter::n, 1), (BenchmarkParameter::m, 5)], 11_500_000, 0), - (vec![(BenchmarkParameter::n, 2), (BenchmarkParameter::m, 5)], 12_500_000, 0), - (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 5)], 13_500_000, 0), - (vec![(BenchmarkParameter::n, 4), (BenchmarkParameter::m, 5)], 14_500_000, 0), - (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 1)], 13_100_000, 0), - (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 3)], 13_300_000, 0), - (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 7)], 13_700_000, 0), - (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 10)], 14_000_000, 0), - ]).unwrap(); - assert_eq!(a.base, 10_000_000); - assert_eq!(a.slopes, vec![1_000_000, 100_000]); + let data = vec![ + benchmark_result(vec![(BenchmarkParameter::n, 1), (BenchmarkParameter::m, 5)], 11_500_000, 0, 3, 10), + benchmark_result(vec![(BenchmarkParameter::n, 2), (BenchmarkParameter::m, 5)], 12_500_000, 0, 4, 10), + benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 5)], 13_500_000, 0, 5, 10), + benchmark_result(vec![(BenchmarkParameter::n, 4), (BenchmarkParameter::m, 5)], 14_500_000, 0, 6, 10), + benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 1)], 13_100_000, 0, 5, 2), + benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 3)], 13_300_000, 0, 5, 6), + benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 7)], 13_700_000, 0, 5, 14), + benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 10)], 14_000_000, 0, 5, 20), + ]; + + let extrinsic_time = Analysis::min_squares_iqr(&data, BenchmarkSelector::ExtrinsicTime).unwrap(); + assert_eq!(extrinsic_time.base, 10_000_000); + assert_eq!(extrinsic_time.slopes, vec![1_000_000, 100_000]); + + let reads = Analysis::min_squares_iqr(&data, BenchmarkSelector::Reads).unwrap(); + assert_eq!(reads.base, 2); + assert_eq!(reads.slopes, vec![1, 0]); + + let writes = Analysis::min_squares_iqr(&data, BenchmarkSelector::Writes).unwrap(); + assert_eq!(writes.base, 0); + assert_eq!(writes.slopes, vec![0, 2]); } } diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index 7cbac3397a..7a7848305a 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -26,7 +26,7 @@ mod analysis; pub use utils::*; #[cfg(feature = "std")] -pub use analysis::Analysis; +pub use analysis::{Analysis, BenchmarkSelector}; #[doc(hidden)] pub use sp_io::storage::root as storage_root; pub use sp_runtime::traits::Zero; @@ -771,6 +771,7 @@ macro_rules! impl_benchmark { highest_range_values: &[u32], steps: &[u32], repeat: u32, + whitelist: &[Vec] ) -> Result, &'static str> { // Map the input to the selected benchmark. let extrinsic = sp_std::str::from_utf8(extrinsic) @@ -780,6 +781,9 @@ macro_rules! 
impl_benchmark { _ => return Err("Could not find extrinsic."), }; + // Add whitelist to DB + $crate::benchmarking::set_whitelist(whitelist.to_vec()); + // Warm up the DB $crate::benchmarking::commit_db(); $crate::benchmarking::wipe_db(); @@ -841,6 +845,9 @@ macro_rules! impl_benchmark { // This will enable worst case scenario for reading from the database. $crate::benchmarking::commit_db(); + // Reset the read/write counter so we don't count operations in the setup process. + $crate::benchmarking::reset_read_write_count(); + // Time the extrinsic logic. frame_support::debug::trace!( target: "benchmark", @@ -851,11 +858,17 @@ macro_rules! impl_benchmark { closure_to_benchmark()?; let finish_extrinsic = $crate::benchmarking::current_time(); let elapsed_extrinsic = finish_extrinsic - start_extrinsic; - + // Commit the changes to get proper write count + $crate::benchmarking::commit_db(); frame_support::debug::trace!( target: "benchmark", "End Benchmark: {} ns", elapsed_extrinsic ); + let read_write_count = $crate::benchmarking::read_write_count(); + frame_support::debug::trace!( + target: "benchmark", + "Read/Write Count {:?}", read_write_count + ); // Time the storage root recalculation. let start_storage_root = $crate::benchmarking::current_time(); @@ -863,7 +876,15 @@ macro_rules! impl_benchmark { let finish_storage_root = $crate::benchmarking::current_time(); let elapsed_storage_root = finish_storage_root - start_storage_root; - results.push((c.clone(), elapsed_extrinsic, elapsed_storage_root)); + results.push($crate::BenchmarkResults { + components: c.clone(), + extrinsic_time: elapsed_extrinsic, + storage_root_time: elapsed_storage_root, + reads: read_write_count.0, + repeat_reads: read_write_count.1, + writes: read_write_count.2, + repeat_writes: read_write_count.3, + }); // Wipe the DB back to the genesis state. $crate::benchmarking::wipe_db(); @@ -892,6 +913,7 @@ macro_rules! impl_benchmark { highest_range_values: &[u32], steps: &[u32], repeat: u32, + whitelist: &[Vec] ) -> Result, &'static str> { // Map the input to the selected benchmark. let extrinsic = sp_std::str::from_utf8(extrinsic) @@ -901,6 +923,9 @@ macro_rules! impl_benchmark { _ => return Err("Could not find extrinsic."), }; + // Add whitelist to DB + $crate::benchmarking::set_whitelist(whitelist.to_vec()); + // Warm up the DB $crate::benchmarking::commit_db(); $crate::benchmarking::wipe_db(); @@ -963,6 +988,9 @@ macro_rules! impl_benchmark { // This will enable worst case scenario for reading from the database. $crate::benchmarking::commit_db(); + // Reset the read/write counter so we don't count operations in the setup process. + $crate::benchmarking::reset_read_write_count(); + // Time the extrinsic logic. frame_support::debug::trace!( target: "benchmark", @@ -973,11 +1001,17 @@ macro_rules! impl_benchmark { closure_to_benchmark()?; let finish_extrinsic = $crate::benchmarking::current_time(); let elapsed_extrinsic = finish_extrinsic - start_extrinsic; - + // Commit the changes to get proper write count + $crate::benchmarking::commit_db(); frame_support::debug::trace!( target: "benchmark", "End Benchmark: {} ns", elapsed_extrinsic ); + let read_write_count = $crate::benchmarking::read_write_count(); + frame_support::debug::trace!( + target: "benchmark", + "Read/Write Count {:?}", read_write_count + ); // Time the storage root recalculation. let start_storage_root = $crate::benchmarking::current_time(); @@ -985,7 +1019,15 @@ macro_rules! 
impl_benchmark { let finish_storage_root = $crate::benchmarking::current_time(); let elapsed_storage_root = finish_storage_root - start_storage_root; - results.push((c.clone(), elapsed_extrinsic, elapsed_storage_root)); + results.push($crate::BenchmarkResults { + components: c.clone(), + extrinsic_time: elapsed_extrinsic, + storage_root_time: elapsed_storage_root, + reads: read_write_count.0, + repeat_reads: read_write_count.1, + writes: read_write_count.2, + repeat_writes: read_write_count.3, + }); // Wipe the DB back to the genesis state. $crate::benchmarking::wipe_db(); @@ -1139,7 +1181,7 @@ macro_rules! impl_benchmark_test { #[macro_export] macro_rules! add_benchmark { ( $params:ident, $batches:ident, $name:literal, $( $location:tt )* ) => ( - let (pallet, benchmark, lowest_range_values, highest_range_values, steps, repeat) = $params; + let (pallet, benchmark, lowest_range_values, highest_range_values, steps, repeat, whitelist) = $params; if &pallet[..] == &$name[..] || &pallet[..] == &b"*"[..] { if &pallet[..] == &b"*"[..] || &benchmark[..] == &b"*"[..] { for benchmark in $( $location )*::benchmarks().into_iter() { @@ -1150,6 +1192,7 @@ macro_rules! add_benchmark { &highest_range_values[..], &steps[..], repeat, + whitelist, )?, pallet: $name.to_vec(), benchmark: benchmark.to_vec(), @@ -1163,6 +1206,7 @@ macro_rules! add_benchmark { &highest_range_values[..], &steps[..], repeat, + whitelist, )?, pallet: $name.to_vec(), benchmark: benchmark.clone(), diff --git a/frame/benchmarking/src/utils.rs b/frame/benchmarking/src/utils.rs index 31ec3783cc..7f9d912110 100644 --- a/frame/benchmarking/src/utils.rs +++ b/frame/benchmarking/src/utils.rs @@ -44,7 +44,16 @@ pub struct BenchmarkBatch { /// Results from running benchmarks on a FRAME pallet. /// Contains duration of the function call in nanoseconds along with the benchmark parameters /// used for that benchmark result. -pub type BenchmarkResults = (Vec<(BenchmarkParameter, u32)>, u128, u128); +#[derive(Encode, Decode, Default, Clone, PartialEq, Debug)] +pub struct BenchmarkResults { + pub components: Vec<(BenchmarkParameter, u32)>, + pub extrinsic_time: u128, + pub storage_root_time: u128, + pub reads: u32, + pub repeat_reads: u32, + pub writes: u32, + pub repeat_writes: u32, +} sp_api::decl_runtime_apis! { /// Runtime api for benchmarking a FRAME runtime. @@ -83,6 +92,20 @@ pub trait Benchmarking { fn commit_db(&mut self) { self.commit() } + + /// Get the read/write count + fn read_write_count(&self) -> (u32, u32, u32, u32) { + self.read_write_count() + } + + /// Reset the read/write count + fn reset_read_write_count(&mut self) { + self.reset_read_write_count() + } + + fn set_whitelist(&mut self, new: Vec>) { + self.set_whitelist(new) + } } /// The pallet benchmarking trait. @@ -106,6 +129,7 @@ pub trait Benchmarking { highest_range_values: &[u32], steps: &[u32], repeat: u32, + whitelist: &[Vec] ) -> Result, &'static str>; } diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 210fe5b4ef..8e14186719 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -233,6 +233,27 @@ pub trait Externalities: ExtensionStore { /// /// Commits all changes to the database and clears all caches. fn commit(&mut self); + + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + /// Benchmarking related functionality and shouldn't be used anywhere else! + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
+ /// + /// Gets the current read/write count for the benchmarking process. + fn read_write_count(&self) -> (u32, u32, u32, u32); + + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + /// Benchmarking related functionality and shouldn't be used anywhere else! + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + /// + /// Resets read/write count for the benchmarking process. + fn reset_read_write_count(&mut self); + + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + /// Benchmarking related functionality and shouldn't be used anywhere else! + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + /// + /// Adds new storage keys to the DB tracking whitelist. + fn set_whitelist(&mut self, new: Vec>); } /// Extension for the [`Externalities`] trait. diff --git a/primitives/runtime-interface/src/impls.rs b/primitives/runtime-interface/src/impls.rs index 217316c3dd..259d3517f0 100644 --- a/primitives/runtime-interface/src/impls.rs +++ b/primitives/runtime-interface/src/impls.rs @@ -365,6 +365,10 @@ impl PassBy for Option { type PassBy = Codec; } +impl PassBy for (u32, u32, u32, u32) { + type PassBy = Codec; +} + /// Implement `PassBy` with `Inner` for the given fixed sized hash types. macro_rules! for_primitive_types { { $( $hash:ident $n:expr ),* $(,)? } => { diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 20a3ab7500..9ec03c4d1e 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -212,7 +212,22 @@ pub trait Backend: std::fmt::Debug { } /// Commit given transaction to storage. - fn commit(&self, _: H::Out, _: Self::Transaction) -> Result<(), Self::Error> { + fn commit(&self, _: H::Out, _: Self::Transaction, _: StorageCollection) -> Result<(), Self::Error> { + unimplemented!() + } + + /// Get the read/write count of the db + fn read_write_count(&self) -> (u32, u32, u32, u32) { + unimplemented!() + } + + /// Get the read/write count of the db + fn reset_read_write_count(&self) { + unimplemented!() + } + + /// Update the whitelist for tracking db reads/writes + fn set_whitelist(&self, _: Vec>) { unimplemented!() } } diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index dbb4c6c2b8..6f1d2a4b5a 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -322,6 +322,18 @@ impl Externalities for BasicExternalities { fn wipe(&mut self) {} fn commit(&mut self) {} + + fn read_write_count(&self) -> (u32, u32, u32, u32) { + unimplemented!("read_write_count is not supported in Basic") + } + + fn reset_read_write_count(&mut self) { + unimplemented!("reset_read_write_count is not supported in Basic") + } + + fn set_whitelist(&mut self, _: Vec>) { + unimplemented!("set_whitelist is not supported in Basic") + } } impl sp_externalities::ExtensionStore for BasicExternalities { diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 2cd63cde97..e25a08adb0 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -590,9 +590,22 @@ where self.backend.commit( changes.transaction_storage_root, changes.transaction, + changes.main_storage_changes, ).expect(EXT_NOT_ALLOWED_TO_FAIL); self.mark_dirty(); } + + fn read_write_count(&self) -> (u32, u32, u32, u32) { + self.backend.read_write_count() + } + + fn reset_read_write_count(&mut self) { + 
self.backend.reset_read_write_count() + } + + fn set_whitelist(&mut self, new: Vec>) { + self.backend.set_whitelist(new) + } } diff --git a/primitives/state-machine/src/read_only.rs b/primitives/state-machine/src/read_only.rs index 2a5d7fda36..b8a35ced1e 100644 --- a/primitives/state-machine/src/read_only.rs +++ b/primitives/state-machine/src/read_only.rs @@ -185,6 +185,18 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< fn wipe(&mut self) {} fn commit(&mut self) {} + + fn read_write_count(&self) -> (u32, u32, u32, u32) { + unimplemented!("read_write_count is not supported in ReadOnlyExternalities") + } + + fn reset_read_write_count(&mut self) { + unimplemented!("reset_read_write_count is not supported in ReadOnlyExternalities") + } + + fn set_whitelist(&mut self, _: Vec>) { + unimplemented!("set_whitelist is not supported in ReadOnlyExternalities") + } } impl<'a, H: Hasher, B: 'a + Backend> sp_externalities::ExtensionStore for ReadOnlyExternalities<'a, H, B> { diff --git a/utils/frame/benchmarking-cli/src/command.rs b/utils/frame/benchmarking-cli/src/command.rs index f867d75d2a..7f55672885 100644 --- a/utils/frame/benchmarking-cli/src/command.rs +++ b/utils/frame/benchmarking-cli/src/command.rs @@ -17,7 +17,7 @@ use crate::BenchmarkCmd; use codec::{Decode, Encode}; -use frame_benchmarking::{Analysis, BenchmarkBatch}; +use frame_benchmarking::{Analysis, BenchmarkBatch, BenchmarkSelector}; use sc_cli::{SharedParams, CliConfiguration, ExecutionStrategy, Result}; use sc_client_db::BenchmarkingState; use sc_executor::NativeExecutor; @@ -107,15 +107,22 @@ impl BenchmarkCmd { if self.raw_data { // Print the table header - batch.results[0].0.iter().for_each(|param| print!("{:?},", param.0)); + batch.results[0].components.iter().for_each(|param| print!("{:?},", param.0)); - print!("extrinsic_time,storage_root_time\n"); + print!("extrinsic_time,storage_root_time,reads,repeat_reads,writes,repeat_writes\n"); // Print the values batch.results.iter().for_each(|result| { - let parameters = &result.0; + let parameters = &result.components; parameters.iter().for_each(|param| print!("{:?},", param.1)); // Print extrinsic time and storage root time - print!("{:?},{:?}\n", result.1, result.2); + print!("{:?},{:?},{:?},{:?},{:?},{:?}\n", + result.extrinsic_time, + result.storage_root_time, + result.reads, + result.repeat_reads, + result.writes, + result.repeat_writes, + ); }); println!(); @@ -123,13 +130,27 @@ impl BenchmarkCmd { // Conduct analysis. 
if !self.no_median_slopes { - if let Some(analysis) = Analysis::median_slopes(&batch.results) { - println!("Median Slopes Analysis\n========\n{}", analysis); + println!("Median Slopes Analysis\n========"); + if let Some(analysis) = Analysis::median_slopes(&batch.results, BenchmarkSelector::ExtrinsicTime) { + println!("-- Extrinsic Time --\n{}", analysis); + } + if let Some(analysis) = Analysis::median_slopes(&batch.results, BenchmarkSelector::Reads) { + println!("Reads = {:?}", analysis); + } + if let Some(analysis) = Analysis::median_slopes(&batch.results, BenchmarkSelector::Writes) { + println!("Writes = {:?}", analysis); } } if !self.no_min_squares { - if let Some(analysis) = Analysis::min_squares_iqr(&batch.results) { - println!("Min Squares Analysis\n========\n{}", analysis); + println!("Min Squares Analysis\n========"); + if let Some(analysis) = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::ExtrinsicTime) { + println!("-- Extrinsic Time --\n{}", analysis); + } + if let Some(analysis) = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Reads) { + println!("Reads = {:?}", analysis); + } + if let Some(analysis) = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Writes) { + println!("Writes = {:?}", analysis); } } }, -- GitLab From efab94e606293b329726c834456cd7f00e54bc41 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Thu, 25 Jun 2020 10:19:05 +0200 Subject: [PATCH 071/144] Update CODEOWNERS (#6489) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update CODEOWNERS * Cleanup CODEOWNERS * Remove myself as a code owner I don’t work on consensus anymore. * Update CODEOWNERS Co-authored-by: Sergei Shulepov Co-authored-by: Demi M. Obenour Co-authored-by: Nikolay Volf --- docs/CODEOWNERS | 25 +++++-------------------- 1 file changed, 5 insertions(+), 20 deletions(-) diff --git a/docs/CODEOWNERS b/docs/CODEOWNERS index 2e1557b4ea..2fb85a4ba1 100644 --- a/docs/CODEOWNERS +++ b/docs/CODEOWNERS @@ -18,15 +18,7 @@ # are more recognizable on GitHub, you can use them for mentioning unlike an email. # - The latest matching rule, if multiple, takes precedence. -# Wasm execution and the wasm side of Substrate Runtime Interface -/client/executor/ @pepyakin -/primitives/io/ @pepyakin @NikVolf - -# Crypto, execution extensions, etc. 
-/primitives/core/ @NikVolf - # Block production -/primitives/authorship/ @NikVolf /client/basic-authorship/ @NikVolf # Sandboxing capability of Substrate Runtime @@ -48,22 +40,21 @@ /primitives/rpc/ @tomusdrw # GRANDPA, BABE, consensus stuff -/frame/babe/ @andresilva @DemiMarie-parity +/frame/babe/ @andresilva /frame/grandpa/ @andresilva /client/finality-grandpa/ @andresilva -/client/consensus/babe/ @andresilva @DemiMarie-parity -/client/consensus/slots/ @andresilva @DemiMarie-parity +/client/consensus/babe/ @andresilva +/client/consensus/slots/ @andresilva /client/consensus/pow/ @sorpaas /primitives/consensus/pow/ @sorpaas # Contracts /frame/contracts/ @pepyakin -/frame/contracts/src/wasm/runtime.rs @Robbepop # EVM /frame/evm/ @sorpaas -# NPoS and Governance and Phragmén +# NPoS and election /frame/staking/ @kianenigma /frame/elections/ @kianenigma /frame/elections-phragmen/ @kianenigma @@ -72,14 +63,8 @@ # Fixed point arithmetic /primitives/sp-arithmetic/ @kianenigma -# End to end testing of substrate node -/bin/node/executor/ @kianenigma - # Transaction weight stuff -/frame/support/src/weights.rs @kianenigma - -# Support crates -/frame/support/ @kianenigma +/frame/support/src/weights.rs @shawntabrizi # Authority discovery /client/authority-discovery/ @mxinden -- GitLab From 9f51ec74b2114c09ca86c4417644be39c5c53895 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Thu, 25 Jun 2020 10:22:30 +0200 Subject: [PATCH 072/144] Panic on invalid unsigned election solution. (#6485) * Panic on invalid * Fix return * Fix refund --- frame/staking/src/lib.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index de61b25483..15bdbfc9d2 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -2194,18 +2194,20 @@ decl_module! { size: ElectionSize, ) -> DispatchResultWithPostInfo { ensure_none(origin)?; - Self::check_and_replace_solution( + let adjustments = Self::check_and_replace_solution( winners, compact, ElectionCompute::Unsigned, score, era, size, - ) - // TODO: instead of returning an error, panic. This makes the entire produced block - // invalid. - // This ensures that block authors will not ever try and submit a solution which is not - // an improvement, since they will lose their authoring points/rewards. + ).expect( + "An unsigned solution can only be submitted by validators; A validator should \ + always produce correct solutions, else this block should not be imported, thus \ + effectively depriving the validators from their authoring reward. Hence, this panic + is expected." + ); + Ok(adjustments) } } } -- GitLab From 73318f426dc392352060c59e4b896eaae2af58ac Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Thu, 25 Jun 2020 10:48:30 +0200 Subject: [PATCH 073/144] Import hex_literal into runtime only for benchmarks. 
(#6502) --- bin/node/runtime/Cargo.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 3614e4ca0d..6db4057e8c 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -18,7 +18,7 @@ codec = { package = "parity-scale-codec", version = "1.3.1", default-features = integer-sqrt = { version = "0.1.2" } serde = { version = "1.0.102", optional = true } static_assertions = "1.1.0" -hex-literal = "0.2.1" +hex-literal = { version = "0.2.1", optional = true } # primitives sp-authority-discovery = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/authority-discovery" } @@ -168,4 +168,5 @@ runtime-benchmarks = [ "pallet-offences-benchmarking", "pallet-session-benchmarking", "frame-system-benchmarking", + "hex-literal", ] -- GitLab From 41014434bb40482d62fbbbef066438983d55183c Mon Sep 17 00:00:00 2001 From: Max Inden Date: Thu, 25 Jun 2020 11:25:28 +0200 Subject: [PATCH 074/144] .maintain/sentry-node: Remove UI and update Prometheus target (#6473) Remove burden on user to build polkadot-js apps Docker image locally in order to get started. Update Prometheus config fixing target name. --- .maintain/sentry-node/docker-compose.yml | 5 ----- .maintain/sentry-node/prometheus/prometheus.yml | 2 +- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/.maintain/sentry-node/docker-compose.yml b/.maintain/sentry-node/docker-compose.yml index 235f2c4963..2af9449853 100644 --- a/.maintain/sentry-node/docker-compose.yml +++ b/.maintain/sentry-node/docker-compose.yml @@ -131,11 +131,6 @@ services: - "sub-authority-discovery=trace" - "--prometheus-external" - ui: - image: polkadot-js/apps - ports: - - "3000:80" - prometheus: image: prom/prometheus networks: diff --git a/.maintain/sentry-node/prometheus/prometheus.yml b/.maintain/sentry-node/prometheus/prometheus.yml index 831b84ba0b..547d4bea57 100644 --- a/.maintain/sentry-node/prometheus/prometheus.yml +++ b/.maintain/sentry-node/prometheus/prometheus.yml @@ -2,7 +2,7 @@ global: scrape_interval: 15s scrape_configs: - - job_name: 'substrate_validator-a' + - job_name: 'substrate-nodes' static_configs: - targets: ['validator-a:9615'] labels: -- GitLab From a2a2776b46001c6ec154262f008136379af70818 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Thu, 25 Jun 2020 11:27:37 +0200 Subject: [PATCH 075/144] Staking Payout Creates Controller (#6496) * payout creates controller * update benchmarks * oops * fix session benchmarks * Update weights * fix line width --- frame/session/benchmarking/src/lib.rs | 4 ++-- frame/staking/src/benchmarking.rs | 32 ++++++++++++++++++++++++--- frame/staking/src/lib.rs | 8 ++++--- frame/staking/src/testing_utils.rs | 15 +++++++++++++ frame/staking/src/tests.rs | 30 +++++++++++++++++++++++++ 5 files changed, 81 insertions(+), 8 deletions(-) diff --git a/frame/session/benchmarking/src/lib.rs b/frame/session/benchmarking/src/lib.rs index 04b7d55602..0df4dcfbd9 100644 --- a/frame/session/benchmarking/src/lib.rs +++ b/frame/session/benchmarking/src/lib.rs @@ -45,7 +45,7 @@ benchmarks! { set_keys { let n in 1 .. MAX_NOMINATIONS as u32; - let v_stash = create_validator_with_nominators::(n, MAX_NOMINATIONS as u32)?; + let v_stash = create_validator_with_nominators::(n, MAX_NOMINATIONS as u32, false)?; let v_controller = pallet_staking::Module::::bonded(&v_stash).ok_or("not stash")?; let keys = T::Keys::default(); let proof: Vec = vec![0,1,2,3]; @@ -53,7 +53,7 @@ benchmarks! { purge_keys { let n in 1 .. 
MAX_NOMINATIONS as u32; - let v_stash = create_validator_with_nominators::(n, MAX_NOMINATIONS as u32)?; + let v_stash = create_validator_with_nominators::(n, MAX_NOMINATIONS as u32, false)?; let v_controller = pallet_staking::Module::::bonded(&v_stash).ok_or("not stash")?; let keys = T::Keys::default(); let proof: Vec = vec![0,1,2,3]; diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index 1dfa621033..b2035c22b6 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -47,7 +47,11 @@ fn add_slashing_spans(who: &T::AccountId, spans: u32) { // This function generates one validator being nominated by n nominators, and returns the validator // stash account. It also starts an era and creates pending payouts. -pub fn create_validator_with_nominators(n: u32, upper_bound: u32) -> Result { +pub fn create_validator_with_nominators( + n: u32, + upper_bound: u32, + dead: bool, +) -> Result { let mut points_total = 0; let mut points_individual = Vec::new(); @@ -65,7 +69,11 @@ pub fn create_validator_with_nominators(n: u32, upper_bound: u32) -> R // Give the validator n nominators, but keep total users in the system the same. for i in 0 .. upper_bound { - let (_n_stash, n_controller) = create_stash_controller::(u32::max_value() - i, 100)?; + let (_n_stash, n_controller) = if !dead { + create_stash_controller::(u32::max_value() - i, 100)? + } else { + create_stash_and_dead_controller::(u32::max_value() - i, 100)? + }; if i < n { Staking::::nominate(RawOrigin::Signed(n_controller.clone()).into(), vec![stash_lookup.clone()])?; } @@ -271,7 +279,8 @@ benchmarks! { payout_stakers { let n in 1 .. T::MaxNominatorRewardedPerValidator::get() as u32; - let validator = create_validator_with_nominators::(n, T::MaxNominatorRewardedPerValidator::get() as u32)?; + let validator = create_validator_with_nominators::(n, T::MaxNominatorRewardedPerValidator::get() as u32, true)?; + let current_era = CurrentEra::get().unwrap(); let caller = account("caller", 0, SEED); let balance_before = T::Currency::free_balance(&validator); @@ -282,6 +291,20 @@ benchmarks! { assert!(balance_before < balance_after); } + payout_stakers_alive_controller { + let n in 1 .. T::MaxNominatorRewardedPerValidator::get() as u32; + let validator = create_validator_with_nominators::(n, T::MaxNominatorRewardedPerValidator::get() as u32, false)?; + + let current_era = CurrentEra::get().unwrap(); + let caller = account("caller", 0, SEED); + let balance_before = T::Currency::free_balance(&validator); + }: payout_stakers(RawOrigin::Signed(caller), validator.clone(), current_era) + verify { + // Validator has been paid! + let balance_after = T::Currency::free_balance(&validator); + assert!(balance_before < balance_after); + } + rebond { let l in 1 .. 
MAX_UNLOCKING_CHUNKS as u32; let (_, controller) = create_stash_controller::(u, 100)?; @@ -630,6 +653,7 @@ mod tests { let validator_stash = create_validator_with_nominators::( n, ::MaxNominatorRewardedPerValidator::get() as u32, + false, ).unwrap(); let current_era = CurrentEra::get().unwrap(); @@ -650,6 +674,7 @@ mod tests { let validator_stash = create_validator_with_nominators::( n, ::MaxNominatorRewardedPerValidator::get() as u32, + false, ).unwrap(); // Add 20 slashing spans @@ -710,6 +735,7 @@ mod tests { assert_ok!(test_benchmark_force_unstake::()); assert_ok!(test_benchmark_cancel_deferred_slash::()); assert_ok!(test_benchmark_payout_stakers::()); + assert_ok!(test_benchmark_payout_stakers_alive_controller::()); assert_ok!(test_benchmark_rebond::()); assert_ok!(test_benchmark_set_history_depth::()); assert_ok!(test_benchmark_reap_stash::()); diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 15bdbfc9d2..029934d982 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -1978,7 +1978,9 @@ decl_module! { /// - Contains a limited number of reads and writes. /// ----------- /// N is the Number of payouts for the validator (including the validator) - /// Base Weight: 110 + 54.2 * N µs (Median Slopes) + /// Base Weight: + /// - Reward Destination Staked: 110 + 54.2 * N µs (Median Slopes) + /// - Reward Destination Controller (Creating): 120 + 41.95 * N µs (Median Slopes) /// DB Weight: /// - Read: EraElectionStatus, CurrentEra, HistoryDepth, ErasValidatorReward, /// ErasStakersClipped, ErasRewardPoints, ErasValidatorPrefs (8 items) @@ -1986,7 +1988,7 @@ decl_module! { /// - Write Each: System Account, Locks, Ledger (3 items) /// # #[weight = - 110 * WEIGHT_PER_MICROS + 120 * WEIGHT_PER_MICROS + 54 * WEIGHT_PER_MICROS * Weight::from(T::MaxNominatorRewardedPerValidator::get()) + T::DbWeight::get().reads(7) + T::DbWeight::get().reads(5) * Weight::from(T::MaxNominatorRewardedPerValidator::get() + 1) @@ -2395,7 +2397,7 @@ impl Module { match dest { RewardDestination::Controller => Self::bonded(stash) .and_then(|controller| - T::Currency::deposit_into_existing(&controller, amount).ok() + Some(T::Currency::deposit_creating(&controller, amount)) ), RewardDestination::Stash => T::Currency::deposit_into_existing(stash, amount).ok(), diff --git a/frame/staking/src/testing_utils.rs b/frame/staking/src/testing_utils.rs index a73073bb1f..27a2575eb0 100644 --- a/frame/staking/src/testing_utils.rs +++ b/frame/staking/src/testing_utils.rs @@ -51,6 +51,21 @@ pub fn create_stash_controller(n: u32, balance_factor: u32) return Ok((stash, controller)) } +/// Create a stash and controller pair, where the controller is dead, and payouts go to controller. +/// This is used to test worst case payout scenarios. +pub fn create_stash_and_dead_controller(n: u32, balance_factor: u32) + -> Result<(T::AccountId, T::AccountId), &'static str> +{ + let stash = create_funded_user::("stash", n, balance_factor); + // controller has no funds + let controller = create_funded_user::("controller", n, 0); + let controller_lookup: ::Source = T::Lookup::unlookup(controller.clone()); + let reward_destination = RewardDestination::Controller; + let amount = T::Currency::minimum_balance() * (balance_factor / 10).max(1).into(); + Staking::::bond(RawOrigin::Signed(stash.clone()).into(), controller_lookup, amount, reward_destination)?; + return Ok((stash, controller)) +} + /// create `max` validators. 
pub fn create_validators( max: u32, diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index eeac2c5c90..a3cfed9e2f 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -4501,3 +4501,33 @@ fn on_initialize_weight_is_correct() { assert_eq!(final_weight, Staking::on_initialize(System::block_number())); }); } + + +#[test] +fn payout_creates_controller() { + // Here we will test validator can set `max_nominators_payout` and it works. + // We also test that `payout_extra_nominators` works. + ExtBuilder::default().has_stakers(false).build_and_execute(|| { + let balance = 1000; + // Create three validators: + bond_validator(11, 10, balance); // Default(64) + + // Create a stash/controller pair + bond_nominator(1234, 1337, 100, vec![11]); + + // kill controller + assert_ok!(Balances::transfer(Origin::signed(1337), 1234, 100)); + assert_eq!(Balances::free_balance(1337), 0); + + mock::start_era(1); + Staking::reward_by_ids(vec![(11, 1)]); + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_0 > 100); // Test is meaningful if reward something + mock::start_era(2); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 1)); + + // Controller is created + assert!(Balances::free_balance(1337) > 0); + }) +} -- GitLab From 482c1bc741b775e87504d68f3ce4bdde55a7aca3 Mon Sep 17 00:00:00 2001 From: Maciej Hirsz <1096222+maciejhirsz@users.noreply.github.com> Date: Thu, 25 Jun 2020 11:30:01 +0200 Subject: [PATCH 076/144] Include genesis hash in system.connected (#6498) * feat: Include genesis hash in system.connected message for telemetry * chore: Spread call arguments into multiline * chore: Removed commented code --- client/service/src/builder.rs | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index eebc825b21..1fbf301f5b 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -46,7 +46,7 @@ use sc_network::NetworkService; use parking_lot::{Mutex, RwLock}; use sp_runtime::generic::BlockId; use sp_runtime::traits::{ - Block as BlockT, NumberFor, SaturatedConversion, HashFor, + Block as BlockT, NumberFor, SaturatedConversion, HashFor, Zero, }; use sp_api::ProvideRuntimeApi; use sc_executor::{NativeExecutor, NativeExecutionDispatch, RuntimeInfo}; @@ -1070,8 +1070,17 @@ ServiceBuilder< // Telemetry let telemetry = config.telemetry_endpoints.clone().map(|endpoints| { + let genesis_hash = match client.block_hash(Zero::zero()) { + Ok(Some(hash)) => hash, + _ => Default::default(), + }; + let (telemetry, future) = build_telemetry( - &mut config, endpoints, telemetry_connection_sinks.clone(), network.clone() + &mut config, + endpoints, + telemetry_connection_sinks.clone(), + network.clone(), + genesis_hash, ); spawn_handle.spawn( @@ -1270,7 +1279,8 @@ fn build_telemetry( config: &mut Configuration, endpoints: sc_telemetry::TelemetryEndpoints, telemetry_connection_sinks: Arc>>>, - network: Arc::Hash>> + network: Arc::Hash>>, + genesis_hash: ::Hash, ) -> (sc_telemetry::Telemetry, Pin + Send>>) { let is_authority = config.role.is_authority(); let network_id = network.local_peer_id().to_base58(); @@ -1296,6 +1306,7 @@ fn build_telemetry( "version" => version, "config" => "", "chain" => chain_name.clone(), + "genesis_hash" => ?genesis_hash, "authority" => is_authority, "startup_time" => startup_time, "network_id" => network_id.clone() 
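[Editor's note, not part of the patch above] The genesis hash now sent with the `system.connected` telemetry message is simply the chain's hash at block zero, falling back to the default (all-zero) hash if the backend cannot answer yet. Below is a minimal, self-contained sketch of that fallback; the `lookup` closure stands in for the client's block-hash query and is an assumption of the sketch, not an API taken from the patch.

/// Return the hash at height zero, or the default hash if it cannot be
/// resolved. `lookup` models a fallible "hash by block number" query.
fn genesis_hash_or_default<H, E>(lookup: impl Fn(u32) -> Result<Option<H>, E>) -> H
where
    H: Default,
{
    match lookup(0) {
        Ok(Some(hash)) => hash,
        _ => H::default(),
    }
}

fn main() {
    // A toy client that knows its genesis hash.
    let known = |n: u32| -> Result<Option<[u8; 4]>, ()> {
        Ok(if n == 0 { Some([0xde, 0xad, 0xbe, 0xef]) } else { None })
    };
    // A toy client whose backend is not ready yet.
    let not_ready = |_n: u32| -> Result<Option<[u8; 4]>, ()> { Err(()) };

    assert_eq!(genesis_hash_or_default(known), [0xde, 0xad, 0xbe, 0xef]);
    assert_eq!(genesis_hash_or_default(not_ready), [0u8; 4]);
}
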
-- GitLab From 4be954a8463e6f33e0ec854c4b8bcd940af260fc Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Thu, 25 Jun 2020 11:30:20 +0200 Subject: [PATCH 077/144] Bound Unsigned when signed is not supported. (#6367) * bound unsigned when necessary * convert more type to AtLeast32BitUnsigned * Update primitives/arithmetic/src/traits.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * doc refactor * line reorganize Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- frame/assets/src/lib.rs | 4 +- frame/balances/src/lib.rs | 6 +-- frame/generic-asset/src/lib.rs | 25 +++------- frame/im-online/src/lib.rs | 4 +- frame/staking/src/inflation.rs | 4 +- frame/staking/src/lib.rs | 8 +-- frame/support/src/traits.rs | 7 +-- frame/support/src/weights.rs | 6 +-- frame/system/src/lib.rs | 7 +-- frame/vesting/src/lib.rs | 6 +-- primitives/arithmetic/src/per_things.rs | 50 ++++++++++++------- primitives/arithmetic/src/traits.rs | 5 ++ primitives/runtime/src/curve.rs | 6 +-- primitives/runtime/src/generic/header.rs | 7 +-- .../runtime/src/offchain/storage_lock.rs | 4 +- primitives/runtime/src/traits.rs | 9 ++-- 16 files changed, 84 insertions(+), 74 deletions(-) diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index d428f435b6..159546ccb3 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -134,7 +134,7 @@ #![cfg_attr(not(feature = "std"), no_std)] use frame_support::{Parameter, decl_module, decl_event, decl_storage, decl_error, ensure}; -use sp_runtime::traits::{Member, AtLeast32Bit, Zero, StaticLookup}; +use sp_runtime::traits::{Member, AtLeast32Bit, AtLeast32BitUnsigned, Zero, StaticLookup}; use frame_system::{self as system, ensure_signed}; use sp_runtime::traits::One; @@ -144,7 +144,7 @@ pub trait Trait: frame_system::Trait { type Event: From> + Into<::Event>; /// The units in which we record balances. - type Balance: Member + Parameter + AtLeast32Bit + Default + Copy; + type Balance: Member + Parameter + AtLeast32BitUnsigned + Default + Copy; /// The arithmetic type of asset identifier. type AssetId: Parameter + AtLeast32Bit + Default + Copy; diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index f7ccb86e60..62402c7863 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -170,7 +170,7 @@ use frame_support::{ use sp_runtime::{ RuntimeDebug, DispatchResult, DispatchError, traits::{ - Zero, AtLeast32Bit, StaticLookup, Member, CheckedAdd, CheckedSub, + Zero, AtLeast32BitUnsigned, StaticLookup, Member, CheckedAdd, CheckedSub, MaybeSerializeDeserialize, Saturating, Bounded, }, }; @@ -180,7 +180,7 @@ pub use self::imbalances::{PositiveImbalance, NegativeImbalance}; pub trait Subtrait: frame_system::Trait { /// The balance of an account. - type Balance: Parameter + Member + AtLeast32Bit + Codec + Default + Copy + + type Balance: Parameter + Member + AtLeast32BitUnsigned + Codec + Default + Copy + MaybeSerializeDeserialize + Debug; /// The minimum amount required to keep an account open. @@ -192,7 +192,7 @@ pub trait Subtrait: frame_system::Trait { pub trait Trait: frame_system::Trait { /// The balance of an account. - type Balance: Parameter + Member + AtLeast32Bit + Codec + Default + Copy + + type Balance: Parameter + Member + AtLeast32BitUnsigned + Codec + Default + Copy + MaybeSerializeDeserialize + Debug; /// Handler for the unbalanced reduction when removing a dust account. 
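[Editor's aside, illustrative, not part of the diff] The point of replacing `AtLeast32Bit` with `AtLeast32BitUnsigned` in the `Balance` bounds above is to state at the type level that a balance is an unsigned integer of at least 32 bits, so signed types are ruled out by the compiler rather than by convention. A small sketch, assuming the `sp-arithmetic` crate at this revision is available as a dependency:

use sp_arithmetic::traits::AtLeast32BitUnsigned;

/// Doubles a balance. The bound only admits unsigned integers that are at
/// least 32 bits wide (u32, u64, u128, ...), so the arithmetic can never
/// involve a negative value.
fn double_balance<Balance: AtLeast32BitUnsigned + Copy>(b: Balance) -> Balance {
    b + b
}

fn main() {
    assert_eq!(double_balance(21u32), 42u32);
    assert_eq!(double_balance(1_000u128), 2_000u128);
    // double_balance(-1i64); // rejected at compile time: `i64` is not `Unsigned`
}

The same reasoning applies to the other bounds tightened in this patch, for example `frame_system::Trait::BlockNumber` and `Currency::Balance`.
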
diff --git a/frame/generic-asset/src/lib.rs b/frame/generic-asset/src/lib.rs index 403d9f8444..7d24f89d70 100644 --- a/frame/generic-asset/src/lib.rs +++ b/frame/generic-asset/src/lib.rs @@ -157,7 +157,7 @@ use codec::{Decode, Encode, HasCompact, Input, Output, Error as CodecError}; use sp_runtime::{RuntimeDebug, DispatchResult, DispatchError}; use sp_runtime::traits::{ CheckedAdd, CheckedSub, MaybeSerializeDeserialize, Member, One, Saturating, AtLeast32Bit, - Zero, Bounded, + Zero, Bounded, AtLeast32BitUnsigned }; use sp_std::prelude::*; @@ -165,8 +165,9 @@ use sp_std::{cmp, result, fmt::Debug}; use frame_support::{ decl_event, decl_module, decl_storage, ensure, decl_error, traits::{ - Currency, ExistenceRequirement, Imbalance, LockIdentifier, LockableCurrency, ReservableCurrency, - SignedImbalance, WithdrawReason, WithdrawReasons, TryDrop, BalanceStatus, + Currency, ExistenceRequirement, Imbalance, LockIdentifier, LockableCurrency, + ReservableCurrency, SignedImbalance, WithdrawReason, WithdrawReasons, TryDrop, + BalanceStatus, }, Parameter, StorageMap, }; @@ -178,25 +179,15 @@ mod tests; pub use self::imbalances::{NegativeImbalance, PositiveImbalance}; pub trait Trait: frame_system::Trait { - type Balance: Parameter - + Member - + AtLeast32Bit - + Default - + Copy - + MaybeSerializeDeserialize - + Debug; + type Balance: Parameter + Member + AtLeast32BitUnsigned + Default + Copy + Debug + + MaybeSerializeDeserialize; type AssetId: Parameter + Member + AtLeast32Bit + Default + Copy; type Event: From> + Into<::Event>; } pub trait Subtrait: frame_system::Trait { - type Balance: Parameter - + Member - + AtLeast32Bit - + Default - + Copy - + MaybeSerializeDeserialize - + Debug; + type Balance: Parameter + Member + AtLeast32BitUnsigned + Default + Copy + Debug + + MaybeSerializeDeserialize; type AssetId: Parameter + Member + AtLeast32Bit + Default + Copy; } diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index c1c93910ec..ddbbb52bd2 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -82,7 +82,7 @@ use pallet_session::historical::IdentificationTuple; use sp_runtime::{ offchain::storage::StorageValueRef, RuntimeDebug, - traits::{Convert, Member, Saturating, AtLeast32Bit}, Perbill, + traits::{Convert, Member, Saturating, AtLeast32BitUnsigned}, Perbill, transaction_validity::{ TransactionValidity, ValidTransaction, InvalidTransaction, TransactionSource, TransactionPriority, @@ -160,7 +160,7 @@ struct HeartbeatStatus { pub sent_at: BlockNumber, } -impl HeartbeatStatus { +impl HeartbeatStatus { /// Returns true if heartbeat has been recently sent. /// /// Parameters: diff --git a/frame/staking/src/inflation.rs b/frame/staking/src/inflation.rs index 04bfc98357..2161fe20af 100644 --- a/frame/staking/src/inflation.rs +++ b/frame/staking/src/inflation.rs @@ -20,7 +20,7 @@ //! The staking rate in NPoS is the total amount of tokens staked by nominators and validators, //! divided by the total token supply. -use sp_runtime::{Perbill, traits::AtLeast32Bit, curve::PiecewiseLinear}; +use sp_runtime::{Perbill, traits::AtLeast32BitUnsigned, curve::PiecewiseLinear}; /// The total payout to all validators (and their nominators) per era and maximum payout. /// @@ -34,7 +34,7 @@ pub fn compute_total_payout( npos_token_staked: N, total_tokens: N, era_duration: u64 -) -> (N, N) where N: AtLeast32Bit + Clone { +) -> (N, N) where N: AtLeast32BitUnsigned + Clone { // Milliseconds per year for the Julian year (365.25 days). 
const MILLISECONDS_PER_YEAR: u64 = 1000 * 3600 * 24 * 36525 / 100; diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 029934d982..fdf3460433 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -306,8 +306,8 @@ use sp_runtime::{ Percent, Perbill, PerU16, PerThing, RuntimeDebug, DispatchError, curve::PiecewiseLinear, traits::{ - Convert, Zero, StaticLookup, CheckedSub, Saturating, SaturatedConversion, AtLeast32Bit, - Dispatchable, + Convert, Zero, StaticLookup, CheckedSub, Saturating, SaturatedConversion, + AtLeast32BitUnsigned, Dispatchable, }, transaction_validity::{ TransactionValidityError, TransactionValidity, ValidTransaction, InvalidTransaction, @@ -493,7 +493,7 @@ pub struct StakingLedger { impl< AccountId, - Balance: HasCompact + Copy + Saturating + AtLeast32Bit, + Balance: HasCompact + Copy + Saturating + AtLeast32BitUnsigned, > StakingLedger { /// Remove entries from `unlocking` that are sufficiently old and reduce the /// total by the sum of their balances. @@ -544,7 +544,7 @@ impl< } impl StakingLedger where - Balance: AtLeast32Bit + Saturating + Copy, + Balance: AtLeast32BitUnsigned + Saturating + Copy, { /// Slash the validator for a given amount of balance. This can grow the value /// of the slash in the case that the validator has less than `minimum_balance` diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 625f216b1b..f25ff67efb 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -25,7 +25,7 @@ use sp_core::u32_trait::Value as U32; use sp_runtime::{ RuntimeDebug, ConsensusEngineId, DispatchResult, DispatchError, traits::{ MaybeSerializeDeserialize, AtLeast32Bit, Saturating, TrailingZeroInput, Bounded, Zero, - BadOrigin + BadOrigin, AtLeast32BitUnsigned }, }; use crate::dispatch::Parameter; @@ -788,7 +788,7 @@ pub enum SignedImbalance>{ impl< P: Imbalance, N: Imbalance, - B: AtLeast32Bit + FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default, + B: AtLeast32BitUnsigned + FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default, > SignedImbalance { pub fn zero() -> Self { SignedImbalance::Positive(P::zero()) @@ -851,7 +851,8 @@ impl< /// Abstraction over a fungible assets system. pub trait Currency { /// The balance of an account. - type Balance: AtLeast32Bit + FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default; + type Balance: AtLeast32BitUnsigned + FullCodec + Copy + MaybeSerializeDeserialize + Debug + + Default; /// The opaque token type for an imbalance. This is returned by unbalanced operations /// and must be dealt with. It may be dropped but cannot be cloned. diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index 810bd2fcb6..f614bc4706 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -136,7 +136,7 @@ use sp_runtime::{ }; use crate::dispatch::{DispatchErrorWithPostInfo, DispatchResultWithPostInfo, DispatchError}; use sp_runtime::traits::SaturatedConversion; -use sp_arithmetic::{Perbill, traits::{BaseArithmetic, Saturating}}; +use sp_arithmetic::{Perbill, traits::{BaseArithmetic, Saturating, Unsigned}}; use smallvec::{smallvec, SmallVec}; /// Re-export priority as type @@ -571,7 +571,7 @@ pub type WeightToFeeCoefficients = SmallVec<[WeightToFeeCoefficient; 4]>; /// An implementor should only implement the `polynomial` function. pub trait WeightToFeePolynomial { /// The type that is returned as result from polynomial evaluation. 
- type Balance: BaseArithmetic + From + Copy; + type Balance: BaseArithmetic + From + Copy + Unsigned; /// Returns a polynomial that describes the weight to fee conversion. /// @@ -611,7 +611,7 @@ pub trait WeightToFeePolynomial { pub struct IdentityFee(sp_std::marker::PhantomData); impl WeightToFeePolynomial for IdentityFee where - T: BaseArithmetic + From + Copy + T: BaseArithmetic + From + Copy + Unsigned { type Balance = T; diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 18723fff29..dc103b204d 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -107,7 +107,7 @@ use sp_runtime::{ self, CheckEqual, AtLeast32Bit, Zero, Lookup, LookupError, SimpleBitOps, Hash, Member, MaybeDisplay, BadOrigin, MaybeSerialize, MaybeSerializeDeserialize, MaybeMallocSizeOf, StaticLookup, One, Bounded, - Dispatchable, + Dispatchable, AtLeast32BitUnsigned }, offchain::storage_lock::BlockNumberProvider, }; @@ -181,8 +181,9 @@ pub trait Trait: 'static + Eq + Clone { /// The block number type used by the runtime. type BlockNumber: - Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + AtLeast32Bit - + Default + Bounded + Copy + sp_std::hash::Hash + sp_std::str::FromStr + MaybeMallocSizeOf; + Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + + AtLeast32BitUnsigned + Default + Bounded + Copy + sp_std::hash::Hash + + sp_std::str::FromStr + MaybeMallocSizeOf; /// The output of the `Hashing` function. type Hash: diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index 8308c84f91..5e11c8af95 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -51,7 +51,7 @@ use sp_std::prelude::*; use sp_std::fmt::Debug; use codec::{Encode, Decode}; use sp_runtime::{DispatchResult, RuntimeDebug, traits::{ - StaticLookup, Zero, AtLeast32Bit, MaybeSerializeDeserialize, Convert + StaticLookup, Zero, AtLeast32BitUnsigned, MaybeSerializeDeserialize, Convert }}; use frame_support::{decl_module, decl_event, decl_storage, decl_error, ensure}; use frame_support::traits::{ @@ -92,8 +92,8 @@ pub struct VestingInfo { } impl< - Balance: AtLeast32Bit + Copy, - BlockNumber: AtLeast32Bit + Copy, + Balance: AtLeast32BitUnsigned + Copy, + BlockNumber: AtLeast32BitUnsigned + Copy, > VestingInfo { /// Amount locked at block `n`. 
pub fn locked_at< diff --git a/primitives/arithmetic/src/per_things.rs b/primitives/arithmetic/src/per_things.rs index 521f4d1074..f809358446 100644 --- a/primitives/arithmetic/src/per_things.rs +++ b/primitives/arithmetic/src/per_things.rs @@ -94,7 +94,7 @@ pub trait PerThing: /// ``` fn mul_floor(self, b: N) -> N where N: Clone + From + UniqueSaturatedInto + ops::Rem + - ops::Div + ops::Mul + ops::Add + ops::Div + ops::Mul + ops::Add + Unsigned { overflow_prune_mul::(b, self.deconstruct(), Rounding::Down) } @@ -116,7 +116,7 @@ pub trait PerThing: /// ``` fn mul_ceil(self, b: N) -> N where N: Clone + From + UniqueSaturatedInto + ops::Rem + - ops::Div + ops::Mul + ops::Add + ops::Div + ops::Mul + ops::Add + Unsigned { overflow_prune_mul::(b, self.deconstruct(), Rounding::Up) } @@ -132,7 +132,8 @@ pub trait PerThing: /// ``` fn saturating_reciprocal_mul(self, b: N) -> N where N: Clone + From + UniqueSaturatedInto + ops::Rem + - ops::Div + ops::Mul + ops::Add + Saturating + ops::Div + ops::Mul + ops::Add + Saturating + + Unsigned { saturating_reciprocal_mul::(b, self.deconstruct(), Rounding::Nearest) } @@ -151,7 +152,8 @@ pub trait PerThing: /// ``` fn saturating_reciprocal_mul_floor(self, b: N) -> N where N: Clone + From + UniqueSaturatedInto + ops::Rem + - ops::Div + ops::Mul + ops::Add + Saturating + ops::Div + ops::Mul + ops::Add + Saturating + + Unsigned { saturating_reciprocal_mul::(b, self.deconstruct(), Rounding::Down) } @@ -170,7 +172,8 @@ pub trait PerThing: /// ``` fn saturating_reciprocal_mul_ceil(self, b: N) -> N where N: Clone + From + UniqueSaturatedInto + ops::Rem + - ops::Div + ops::Mul + ops::Add + Saturating + ops::Div + ops::Mul + ops::Add + Saturating + + Unsigned { saturating_reciprocal_mul::(b, self.deconstruct(), Rounding::Up) } @@ -198,14 +201,14 @@ pub trait PerThing: /// # fn main () { /// // 989/100 is technically closer to 99%. /// assert_eq!( - /// Percent::from_rational_approximation(989, 1000), + /// Percent::from_rational_approximation(989u64, 1000), /// Percent::from_parts(98), /// ); /// # } /// ``` fn from_rational_approximation(p: N, q: N) -> Self where N: Clone + Ord + From + TryInto + TryInto + - ops::Div + ops::Rem + ops::Add; + ops::Div + ops::Rem + ops::Add + Unsigned; } /// The rounding method to use. @@ -227,7 +230,7 @@ fn saturating_reciprocal_mul( ) -> N where N: Clone + From + UniqueSaturatedInto + ops::Div + ops::Mul + ops::Add + ops::Rem + Saturating, + Output=N> + ops::Add + ops::Rem + Saturating + Unsigned, P: PerThing, { let maximum: N = P::ACCURACY.into(); @@ -248,7 +251,7 @@ fn overflow_prune_mul( ) -> N where N: Clone + From + UniqueSaturatedInto + ops::Div + ops::Mul + ops::Add + ops::Rem, + Output=N> + ops::Add + ops::Rem + Unsigned, P: PerThing, { let maximum: N = P::ACCURACY.into(); @@ -274,7 +277,7 @@ fn rational_mul_correction( ) -> N where N: From + UniqueSaturatedInto + ops::Div + ops::Mul + ops::Add + ops::Rem, + Output=N> + ops::Add + ops::Rem + Unsigned, P: PerThing, { let numer_upper = P::Upper::from(numer); @@ -335,14 +338,15 @@ macro_rules! implement_per_thing { /// Build this type from a number of parts per thing. fn from_parts(parts: Self::Inner) -> Self { Self(parts.min($max)) } + /// NOTE: saturate to 0 or 1 if x is beyond `[0, 1]` #[cfg(feature = "std")] fn from_fraction(x: f64) -> Self { - Self::from_parts((x * $max as f64) as Self::Inner) + Self::from_parts((x.max(0.).min(1.) 
* $max as f64) as Self::Inner) } fn from_rational_approximation(p: N, q: N) -> Self where N: Clone + Ord + From + TryInto + TryInto - + ops::Div + ops::Rem + ops::Add + + ops::Div + ops::Rem + ops::Add + Unsigned { let div_ceil = |x: N, f: N| -> N { let mut o = x.clone() / f.clone(); @@ -445,7 +449,8 @@ macro_rules! implement_per_thing { pub fn from_rational_approximation(p: N, q: N) -> Self where N: Clone + Ord + From<$type> + TryInto<$type> + TryInto<$upper_type> + ops::Div + ops::Rem + - ops::Add { + ops::Add + Unsigned + { ::from_rational_approximation(p, q) } @@ -453,7 +458,8 @@ macro_rules! implement_per_thing { pub fn mul_floor(self, b: N) -> N where N: Clone + From<$type> + UniqueSaturatedInto<$type> + ops::Rem + ops::Div + ops::Mul + - ops::Add { + ops::Add + Unsigned + { PerThing::mul_floor(self, b) } @@ -461,7 +467,8 @@ macro_rules! implement_per_thing { pub fn mul_ceil(self, b: N) -> N where N: Clone + From<$type> + UniqueSaturatedInto<$type> + ops::Rem + ops::Div + ops::Mul + - ops::Add { + ops::Add + Unsigned + { PerThing::mul_ceil(self, b) } @@ -469,7 +476,8 @@ macro_rules! implement_per_thing { pub fn saturating_reciprocal_mul(self, b: N) -> N where N: Clone + From<$type> + UniqueSaturatedInto<$type> + ops::Rem + ops::Div + ops::Mul + ops::Add + - Saturating { + Saturating + Unsigned + { PerThing::saturating_reciprocal_mul(self, b) } @@ -477,7 +485,8 @@ macro_rules! implement_per_thing { pub fn saturating_reciprocal_mul_floor(self, b: N) -> N where N: Clone + From<$type> + UniqueSaturatedInto<$type> + ops::Rem + ops::Div + ops::Mul + ops::Add + - Saturating { + Saturating + Unsigned + { PerThing::saturating_reciprocal_mul_floor(self, b) } @@ -485,7 +494,8 @@ macro_rules! implement_per_thing { pub fn saturating_reciprocal_mul_ceil(self, b: N) -> N where N: Clone + From<$type> + UniqueSaturatedInto<$type> + ops::Rem + ops::Div + ops::Mul + ops::Add + - Saturating { + Saturating + Unsigned + { PerThing::saturating_reciprocal_mul_ceil(self, b) } } @@ -585,7 +595,7 @@ macro_rules! implement_per_thing { impl ops::Mul for $name where N: Clone + From<$type> + UniqueSaturatedInto<$type> + ops::Rem - + ops::Div + ops::Mul + ops::Add, + + ops::Div + ops::Mul + ops::Add + Unsigned, { type Output = N; fn mul(self, b: N) -> Self::Output { @@ -684,6 +694,8 @@ macro_rules! implement_per_thing { assert_eq!($name::from_fraction(0.0), $name::from_parts(Zero::zero())); assert_eq!($name::from_fraction(0.1), $name::from_parts($max / 10)); assert_eq!($name::from_fraction(1.0), $name::from_parts($max)); + assert_eq!($name::from_fraction(2.0), $name::from_parts($max)); + assert_eq!($name::from_fraction(-1.0), $name::from_parts(Zero::zero())); } macro_rules! u256ify { diff --git a/primitives/arithmetic/src/traits.rs b/primitives/arithmetic/src/traits.rs index 29b8e419ef..ce645cfe65 100644 --- a/primitives/arithmetic/src/traits.rs +++ b/primitives/arithmetic/src/traits.rs @@ -79,6 +79,11 @@ pub trait AtLeast32Bit: BaseArithmetic + From + From {} impl + From> AtLeast32Bit for T {} +/// A meta trait for arithmetic. Same as [`AtLeast32Bit `], but also bounded to be unsigned. +pub trait AtLeast32BitUnsigned: AtLeast32Bit + Unsigned {} + +impl AtLeast32BitUnsigned for T {} + /// Just like `From` except that if the source value is too big to fit into the destination type /// then it'll saturate the destination. 
pub trait UniqueSaturatedFrom: Sized { diff --git a/primitives/runtime/src/curve.rs b/primitives/runtime/src/curve.rs index be47b566e9..27eb89a769 100644 --- a/primitives/runtime/src/curve.rs +++ b/primitives/runtime/src/curve.rs @@ -17,7 +17,7 @@ //! Provides some utilities to define a piecewise linear function. -use crate::{Perbill, traits::{AtLeast32Bit, SaturatedConversion}}; +use crate::{Perbill, traits::{AtLeast32BitUnsigned, SaturatedConversion}}; use core::ops::Sub; /// Piecewise Linear function in [0, 1] -> [0, 1]. @@ -36,7 +36,7 @@ fn abs_sub + Clone>(a: N, b: N) -> N where { impl<'a> PiecewiseLinear<'a> { /// Compute `f(n/d)*d` with `n <= d`. This is useful to avoid loss of precision. pub fn calculate_for_fraction_times_denominator(&self, n: N, d: N) -> N where - N: AtLeast32Bit + Clone + N: AtLeast32BitUnsigned + Clone { let n = n.min(d.clone()); @@ -80,7 +80,7 @@ impl<'a> PiecewiseLinear<'a> { // This is guaranteed not to overflow on whatever values nor lose precision. // `q` must be superior to zero. fn multiply_by_rational_saturating(value: N, p: u32, q: u32) -> N - where N: AtLeast32Bit + Clone + where N: AtLeast32BitUnsigned + Clone { let q = q.max(1); diff --git a/primitives/runtime/src/generic/header.rs b/primitives/runtime/src/generic/header.rs index 24cceef2cd..e6c800e578 100644 --- a/primitives/runtime/src/generic/header.rs +++ b/primitives/runtime/src/generic/header.rs @@ -21,7 +21,7 @@ use serde::{Deserialize, Serialize}; use crate::codec::{Decode, Encode, Codec, Input, Output, HasCompact, EncodeAsRef, Error}; use crate::traits::{ - self, Member, AtLeast32Bit, SimpleBitOps, Hash as HashT, + self, Member, AtLeast32BitUnsigned, SimpleBitOps, Hash as HashT, MaybeSerializeDeserialize, MaybeSerialize, MaybeDisplay, MaybeMallocSizeOf, }; @@ -123,7 +123,7 @@ impl codec::EncodeLike for Header where impl traits::Header for Header where Number: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash + MaybeDisplay + - AtLeast32Bit + Codec + Copy + Into + TryFrom + sp_std::str::FromStr + + AtLeast32BitUnsigned + Codec + Copy + Into + TryFrom + sp_std::str::FromStr + MaybeMallocSizeOf, Hash: HashT, Hash::Output: Default + sp_std::hash::Hash + Copy + Member + Ord + @@ -171,7 +171,8 @@ impl traits::Header for Header where } impl Header where - Number: Member + sp_std::hash::Hash + Copy + MaybeDisplay + AtLeast32Bit + Codec + Into + TryFrom, + Number: Member + sp_std::hash::Hash + Copy + MaybeDisplay + AtLeast32BitUnsigned + Codec + + Into + TryFrom, Hash: HashT, Hash::Output: Default + sp_std::hash::Hash + Copy + Member + MaybeDisplay + SimpleBitOps + Codec, { diff --git a/primitives/runtime/src/offchain/storage_lock.rs b/primitives/runtime/src/offchain/storage_lock.rs index 4718d2e3dd..9d4e671db6 100644 --- a/primitives/runtime/src/offchain/storage_lock.rs +++ b/primitives/runtime/src/offchain/storage_lock.rs @@ -61,7 +61,7 @@ //! ``` use crate::offchain::storage::StorageValueRef; -use crate::traits::AtLeast32Bit; +use crate::traits::AtLeast32BitUnsigned; use codec::{Codec, Decode, Encode}; use sp_core::offchain::{Duration, Timestamp}; use sp_io::offchain; @@ -430,7 +430,7 @@ where /// used with [`BlockAndTime`](BlockAndTime). pub trait BlockNumberProvider { /// Type of `BlockNumber` to provide. - type BlockNumber: Codec + Clone + Ord + Eq + AtLeast32Bit; + type BlockNumber: Codec + Clone + Ord + Eq + AtLeast32BitUnsigned; /// Returns the current block number. 
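// Editor's note: a minimal sketch (not part of this patch) of how the new
// `AtLeast32BitUnsigned` bound introduced above is meant to be used. The trait is
// just `AtLeast32Bit + Unsigned`, so generic arithmetic code that previously took
// any `AtLeast32Bit` integer can now rule out signed types at compile time, which
// is what the added `Unsigned` bounds on the `PerThing` multiplications rely on.
// The function name and the use of `Perbill` below are illustrative assumptions.

use sp_arithmetic::traits::AtLeast32BitUnsigned;
use sp_arithmetic::Perbill;

/// Scale `amount` by 25%, for any unsigned balance-like type.
fn quarter_of<Balance: AtLeast32BitUnsigned + Clone>(amount: Balance) -> Balance {
    // `Perbill: Mul<N>` now requires `N: Unsigned`, which the bound above provides.
    Perbill::from_percent(25) * amount
}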
/// /// Provides an abstraction over an arbitrary way of providing the diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index b1739269e6..4d2b1f062f 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -34,8 +34,8 @@ use crate::transaction_validity::{ }; use crate::generic::{Digest, DigestItem}; pub use sp_arithmetic::traits::{ - AtLeast32Bit, UniqueSaturatedInto, UniqueSaturatedFrom, Saturating, SaturatedConversion, - Zero, One, Bounded, CheckedAdd, CheckedSub, CheckedMul, CheckedDiv, + AtLeast32Bit, AtLeast32BitUnsigned, UniqueSaturatedInto, UniqueSaturatedFrom, Saturating, + SaturatedConversion, Zero, One, Bounded, CheckedAdd, CheckedSub, CheckedMul, CheckedDiv, CheckedShl, CheckedShr, IntegerSquareRoot }; use sp_application_crypto::AppKey; @@ -490,9 +490,8 @@ pub trait Header: MaybeMallocSizeOf + 'static { /// Header number. - type Number: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash - + Copy + MaybeDisplay + AtLeast32Bit + Codec + sp_std::str::FromStr - + MaybeMallocSizeOf; + type Number: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash + Copy + + MaybeDisplay + AtLeast32BitUnsigned + Codec + sp_std::str::FromStr + MaybeMallocSizeOf; /// Header hash type type Hash: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash + Ord + Copy + MaybeDisplay + Default + SimpleBitOps + Codec + AsRef<[u8]> -- GitLab From a5f38dc479c0f0eaa84f2c44b56eac924b761343 Mon Sep 17 00:00:00 2001 From: Shaopeng Wang Date: Thu, 25 Jun 2020 21:33:47 +1200 Subject: [PATCH 078/144] Update stale docstring with 'EnsureOneOf' introduced. (#6501) * Update stale docstring with 'EnsureOneOf' introduced. * Apply review suggestions. --- frame/identity/src/lib.rs | 4 ++-- frame/membership/src/lib.rs | 12 ++++++++---- frame/nicks/src/lib.rs | 4 ++-- frame/scored-pool/src/lib.rs | 4 ++-- frame/staking/src/lib.rs | 2 +- frame/treasury/src/lib.rs | 6 +++++- 6 files changed, 20 insertions(+), 12 deletions(-) diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index d657e3d793..2768340403 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -622,7 +622,7 @@ decl_module! { /// Add a registrar to the system. /// - /// The dispatch origin for this call must be `RegistrarOrigin` or `Root`. + /// The dispatch origin for this call must be `T::RegistrarOrigin`. /// /// - `account`: the account of the registrar. /// @@ -1087,7 +1087,7 @@ decl_module! { /// `Slash`. Verification request deposits are not returned; they should be cancelled /// manually using `cancel_request`. /// - /// The dispatch origin for this call must be _Root_ or match `T::ForceOrigin`. + /// The dispatch origin for this call must match `T::ForceOrigin`. /// /// - `target`: the account whose identity the judgement is upon. This must be an account /// with a registered identity. diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index 62b1217c83..71b0902838 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -117,7 +117,7 @@ decl_module! { /// Add a member `who` to the set. /// - /// May only be called from `AddOrigin` or root. + /// May only be called from `T::AddOrigin`. #[weight = 50_000_000] pub fn add_member(origin, who: T::AccountId) { T::AddOrigin::ensure_origin(origin)?; @@ -134,7 +134,7 @@ decl_module! { /// Remove a member `who` from the set. /// - /// May only be called from `RemoveOrigin` or root. + /// May only be called from `T::RemoveOrigin`. 
#[weight = 50_000_000] pub fn remove_member(origin, who: T::AccountId) { T::RemoveOrigin::ensure_origin(origin)?; @@ -152,7 +152,7 @@ decl_module! { /// Swap out one member `remove` for another `add`. /// - /// May only be called from `SwapOrigin` or root. + /// May only be called from `T::SwapOrigin`. /// /// Prime membership is *not* passed from `remove` to `add`, if extant. #[weight = 50_000_000] @@ -181,7 +181,7 @@ decl_module! { /// Change the membership to a new set, disregarding the existing membership. Be nice and /// pass `members` pre-sorted. /// - /// May only be called from `ResetOrigin` or root. + /// May only be called from `T::ResetOrigin`. #[weight = 50_000_000] pub fn reset_members(origin, members: Vec) { T::ResetOrigin::ensure_origin(origin)?; @@ -231,6 +231,8 @@ decl_module! { } /// Set the prime member. Must be a current member. + /// + /// May only be called from `T::PrimeOrigin`. #[weight = 50_000_000] pub fn set_prime(origin, who: T::AccountId) { T::PrimeOrigin::ensure_origin(origin)?; @@ -240,6 +242,8 @@ decl_module! { } /// Remove the prime member if it exists. + /// + /// May only be called from `T::PrimeOrigin`. #[weight = 50_000_000] pub fn clear_prime(origin) { T::PrimeOrigin::ensure_origin(origin)?; diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index 8a130da2ae..93c6081941 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -187,7 +187,7 @@ decl_module! { /// Fails if `who` has not been named. The deposit is dealt with through `T::Slashed` /// imbalance handler. /// - /// The dispatch origin for this call must be _Root_ or match `T::ForceOrigin`. + /// The dispatch origin for this call must match `T::ForceOrigin`. /// /// # /// - O(1). @@ -213,7 +213,7 @@ decl_module! { /// /// No length checking is done on the name. /// - /// The dispatch origin for this call must be _Root_ or match `T::ForceOrigin`. + /// The dispatch origin for this call must match `T::ForceOrigin`. /// /// # /// - O(1). diff --git a/frame/scored-pool/src/lib.rs b/frame/scored-pool/src/lib.rs index 5131a663e0..81ee92aeb4 100644 --- a/frame/scored-pool/src/lib.rs +++ b/frame/scored-pool/src/lib.rs @@ -308,7 +308,7 @@ decl_module! { /// Kick a member `who` from the set. /// - /// May only be called from `KickOrigin` or root. + /// May only be called from `T::KickOrigin`. /// /// The `index` parameter of this function must be set to /// the index of `dest` in the `Pool`. @@ -331,7 +331,7 @@ decl_module! { /// Score a member `who` with `score`. /// - /// May only be called from `ScoreOrigin` or root. + /// May only be called from `T::ScoreOrigin`. /// /// The `index` parameter of this function must be set to /// the index of the `dest` in the `Pool`. diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index fdf3460433..2a6e5b1a2d 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -1928,7 +1928,7 @@ decl_module! { /// Cancel enactment of a deferred slash. /// - /// Can be called by either the root origin or the `T::SlashCancelOrigin`. + /// Can be called by the `T::SlashCancelOrigin`. /// /// Parameters: era and indices of the slashes for that era to kill. /// diff --git a/frame/treasury/src/lib.rs b/frame/treasury/src/lib.rs index 861a652e52..e67ace5475 100644 --- a/frame/treasury/src/lib.rs +++ b/frame/treasury/src/lib.rs @@ -316,7 +316,7 @@ decl_module! { /// The amount held on deposit per byte within the tip report reason. 
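// Editor's note: a hedged sketch of why the docstrings in this patch drop the
// "or Root" wording. With `frame_system::EnsureOneOf` a runtime can fold the root
// check into the configured origin itself, so the pallets no longer need to imply
// it. The alias below follows the style of the node runtime of this era;
// `AccountId` and `CouncilCollective` are assumed to be defined by the surrounding
// runtime, and the exact proportion is illustrative.

use sp_core::u32_trait::{_1, _2};
use frame_system::{EnsureOneOf, EnsureRoot};

// Accept either root or more than half of the council, e.g. as `T::RejectOrigin`.
type EnsureRootOrHalfCouncil = EnsureOneOf<
    AccountId,
    EnsureRoot<AccountId>,
    pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>,
>;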
const TipReportDepositPerByte: BalanceOf = T::TipReportDepositPerByte::get(); - + /// The treasury's module id, used for deriving its sovereign account ID. const ModuleId: ModuleId = T::ModuleId::get(); @@ -355,6 +355,8 @@ decl_module! { /// Reject a proposed spend. The original deposit will be slashed. /// + /// May only be called from `T::RejectOrigin`. + /// /// # /// - Complexity: O(1) /// - DbReads: `Proposals`, `rejected proposer account` @@ -375,6 +377,8 @@ decl_module! { /// Approve a proposal. At a later time, the proposal will be allocated to the beneficiary /// and the original deposit will be returned. /// + /// May only be called from `T::ApproveOrigin`. + /// /// # /// - Complexity: O(1). /// - DbReads: `Proposals`, `Approvals` -- GitLab From 95747db3f8e145d5ff644f81b732cb852417926a Mon Sep 17 00:00:00 2001 From: s3krit Date: Thu, 25 Jun 2020 11:56:45 +0200 Subject: [PATCH 079/144] Add auth-label-issues.yml (#6488) --- .github/workflows/auto-label-issues.yml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 .github/workflows/auto-label-issues.yml diff --git a/.github/workflows/auto-label-issues.yml b/.github/workflows/auto-label-issues.yml new file mode 100644 index 0000000000..ce0bad59d1 --- /dev/null +++ b/.github/workflows/auto-label-issues.yml @@ -0,0 +1,17 @@ +# If the author of the issues is not a contributor to the project, label +# the issue with 'Z0-unconfirmed' + +name: Label New Issues +on: + issues: + types: [opened] + +jobs: + label-new-issues: + runs-on: ubuntu-latest + steps: + - name: Label drafts + uses: andymckay/labeler@master + if: github.event.issue.author_association == "NONE" + with: + add-labels: 'Z0-unconfirmed' -- GitLab From b3fac7b265c2ccfc28afab68ee6b72f564aa6d99 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 25 Jun 2020 15:03:29 +0200 Subject: [PATCH 080/144] Remove /self from mandatory rpc reviews. (#6507) --- docs/CODEOWNERS | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/docs/CODEOWNERS b/docs/CODEOWNERS index 2fb85a4ba1..b86846aefe 100644 --- a/docs/CODEOWNERS +++ b/docs/CODEOWNERS @@ -33,12 +33,6 @@ /client/offchain/ @tomusdrw /primitives/offchain/ @tomusdrw -# Everything that has RPC in it -/bin/node/rpc/ @tomusdrw -/bin/node/rpc-client/ @tomusdrw -/client/rpc/ @tomusdrw -/primitives/rpc/ @tomusdrw - # GRANDPA, BABE, consensus stuff /frame/babe/ @andresilva /frame/grandpa/ @andresilva @@ -54,7 +48,7 @@ # EVM /frame/evm/ @sorpaas -# NPoS and election +# NPoS and election /frame/staking/ @kianenigma /frame/elections/ @kianenigma /frame/elections-phragmen/ @kianenigma -- GitLab From f5a5937b1413e6d8a59003e2d0fe6f6efb0f1d46 Mon Sep 17 00:00:00 2001 From: Ricardo Rius <9488369+riusricardo@users.noreply.github.com> Date: Thu, 25 Jun 2020 17:46:18 +0200 Subject: [PATCH 081/144] Change contract fees to MILLICENTS (#6509) --- bin/node/runtime/src/lib.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 8b6831b41e..e3c9c2b95f 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -572,10 +572,10 @@ impl pallet_treasury::Trait for Runtime { } parameter_types! 
{ - pub const TombstoneDeposit: Balance = 1 * DOLLARS; - pub const RentByteFee: Balance = 1 * DOLLARS; - pub const RentDepositOffset: Balance = 1000 * DOLLARS; - pub const SurchargeReward: Balance = 150 * DOLLARS; + pub const TombstoneDeposit: Balance = 16 * MILLICENTS; + pub const RentByteFee: Balance = 4 * MILLICENTS; + pub const RentDepositOffset: Balance = 1000 * MILLICENTS; + pub const SurchargeReward: Balance = 150 * MILLICENTS; } impl pallet_contracts::Trait for Runtime { -- GitLab From a1877dcc13ccc695ed16fc4ff36b45113d6ff048 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Thu, 25 Jun 2020 22:52:05 +0200 Subject: [PATCH 082/144] Re-enter runtime after resetting overlay from runtime (#6513) This still assumes that the client did not start any transactions before calling into runtime. This is the case for benchmarking as long as either NativeWhenPossible or AlwaysWasm exection strategy is chosen. Using any other will result in a panic. --- primitives/state-machine/src/ext.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index e25a08adb0..cd4f83661b 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -575,6 +575,9 @@ where ).expect(EXT_NOT_ALLOWED_TO_FAIL); self.backend.wipe().expect(EXT_NOT_ALLOWED_TO_FAIL); self.mark_dirty(); + self.overlay + .enter_runtime() + .expect("We have reset the overlay above, so we can not be in the runtime; qed"); } fn commit(&mut self) { @@ -593,6 +596,9 @@ where changes.main_storage_changes, ).expect(EXT_NOT_ALLOWED_TO_FAIL); self.mark_dirty(); + self.overlay + .enter_runtime() + .expect("We have reset the overlay above, so we can not be in the runtime; qed"); } fn read_write_count(&self) -> (u32, u32, u32, u32) { -- GitLab From 00768a1f21a579c478fe5d4f51e1fa71f7db9fd4 Mon Sep 17 00:00:00 2001 From: Benjamin Kampmann Date: Thu, 25 Jun 2020 23:18:43 +0200 Subject: [PATCH 083/144] =?UTF-8?q?Releasing=20rc4=20=E2=80=93=20Rhinocero?= =?UTF-8?q?s=20(#6515)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Martin Pugh --- .maintain/gitlab/generate_changelog.sh | 11 +- .maintain/gitlab/lib.sh | 2 +- Cargo.lock | 364 +++++++++--------- bin/node-template/node/Cargo.toml | 40 +- bin/node-template/pallets/template/Cargo.toml | 12 +- bin/node-template/runtime/Cargo.toml | 48 +-- bin/node/bench/Cargo.toml | 20 +- bin/node/browser-testing/Cargo.toml | 6 +- bin/node/cli/Cargo.toml | 118 +++--- bin/node/executor/Cargo.toml | 50 +-- bin/node/inspect/Cargo.toml | 14 +- bin/node/primitives/Cargo.toml | 12 +- bin/node/rpc-client/Cargo.toml | 6 +- bin/node/rpc/Cargo.toml | 42 +- bin/node/runtime/Cargo.toml | 116 +++--- bin/node/testing/Cargo.toml | 68 ++-- bin/utils/chain-spec-builder/Cargo.toml | 10 +- bin/utils/subkey/Cargo.toml | 20 +- client/api/Cargo.toml | 44 +-- client/authority-discovery/Cargo.toml | 24 +- client/basic-authorship/Cargo.toml | 30 +- client/block-builder/Cargo.toml | 20 +- client/chain-spec/Cargo.toml | 14 +- client/chain-spec/derive/Cargo.toml | 2 +- client/cli/Cargo.toml | 32 +- client/consensus/aura/Cargo.toml | 50 +-- client/consensus/babe/Cargo.toml | 58 +-- client/consensus/babe/rpc/Cargo.toml | 30 +- client/consensus/common/Cargo.toml | 10 +- client/consensus/epochs/Cargo.toml | 10 +- client/consensus/manual-seal/Cargo.toml | 26 +- client/consensus/pow/Cargo.toml | 24 +- client/consensus/slots/Cargo.toml | 24 +- 
client/consensus/uncles/Cargo.toml | 14 +- client/db/Cargo.toml | 28 +- client/executor/Cargo.toml | 40 +- client/executor/common/Cargo.toml | 12 +- client/executor/runtime-test/Cargo.toml | 14 +- client/executor/wasmi/Cargo.toml | 12 +- client/executor/wasmtime/Cargo.toml | 12 +- client/finality-grandpa/Cargo.toml | 56 +-- client/finality-grandpa/rpc/Cargo.toml | 6 +- client/informant/Cargo.toml | 10 +- client/keystore/Cargo.toml | 6 +- client/light/Cargo.toml | 2 +- client/network-gossip/Cargo.toml | 8 +- client/network/Cargo.toml | 32 +- client/network/test/Cargo.toml | 26 +- client/offchain/Cargo.toml | 26 +- client/peerset/Cargo.toml | 4 +- client/proposer-metrics/Cargo.toml | 4 +- client/rpc-api/Cargo.toml | 14 +- client/rpc-servers/Cargo.toml | 4 +- client/rpc/Cargo.toml | 44 +-- client/service/Cargo.toml | 70 ++-- client/service/test/Cargo.toml | 42 +- client/state-db/Cargo.toml | 6 +- client/telemetry/Cargo.toml | 2 +- client/tracing/Cargo.toml | 4 +- client/transaction-pool/Cargo.toml | 32 +- client/transaction-pool/graph/Cargo.toml | 14 +- docs/CHANGELOG.md | 54 +++ frame/assets/Cargo.toml | 14 +- frame/atomic-swap/Cargo.toml | 16 +- frame/aura/Cargo.toml | 26 +- frame/authority-discovery/Cargo.toml | 22 +- frame/authorship/Cargo.toml | 18 +- frame/babe/Cargo.toml | 30 +- frame/balances/Cargo.toml | 18 +- frame/benchmark/Cargo.toml | 14 +- frame/benchmarking/Cargo.toml | 16 +- frame/collective/Cargo.toml | 18 +- frame/contracts/Cargo.toml | 24 +- frame/contracts/common/Cargo.toml | 6 +- frame/contracts/rpc/Cargo.toml | 16 +- frame/contracts/rpc/runtime-api/Cargo.toml | 10 +- frame/democracy/Cargo.toml | 24 +- frame/elections-phragmen/Cargo.toml | 22 +- frame/elections/Cargo.toml | 16 +- frame/evm/Cargo.toml | 18 +- frame/example-offchain-worker/Cargo.toml | 14 +- frame/example/Cargo.toml | 18 +- frame/executive/Cargo.toml | 26 +- frame/finality-tracker/Cargo.toml | 18 +- frame/generic-asset/Cargo.toml | 14 +- frame/grandpa/Cargo.toml | 38 +- frame/identity/Cargo.toml | 18 +- frame/im-online/Cargo.toml | 24 +- frame/indices/Cargo.toml | 20 +- frame/membership/Cargo.toml | 14 +- frame/metadata/Cargo.toml | 6 +- frame/multisig/Cargo.toml | 20 +- frame/nicks/Cargo.toml | 16 +- frame/offences/Cargo.toml | 18 +- frame/offences/benchmarking/Cargo.toml | 36 +- frame/proxy/Cargo.toml | 22 +- frame/randomness-collective-flip/Cargo.toml | 14 +- frame/recovery/Cargo.toml | 16 +- frame/scheduler/Cargo.toml | 16 +- frame/scored-pool/Cargo.toml | 16 +- frame/session/Cargo.toml | 24 +- frame/session/benchmarking/Cargo.toml | 26 +- frame/society/Cargo.toml | 16 +- frame/staking/Cargo.toml | 38 +- frame/staking/fuzzer/Cargo.toml | 26 +- frame/staking/reward-curve/Cargo.toml | 4 +- frame/sudo/Cargo.toml | 14 +- frame/support/Cargo.toml | 24 +- frame/support/procedural/Cargo.toml | 4 +- frame/support/procedural/tools/Cargo.toml | 4 +- .../procedural/tools/derive/Cargo.toml | 2 +- frame/support/test/Cargo.toml | 16 +- frame/system/Cargo.toml | 18 +- frame/system/benchmarking/Cargo.toml | 16 +- frame/system/rpc/runtime-api/Cargo.toml | 4 +- frame/timestamp/Cargo.toml | 22 +- frame/transaction-payment/Cargo.toml | 20 +- frame/transaction-payment/rpc/Cargo.toml | 14 +- .../rpc/runtime-api/Cargo.toml | 10 +- frame/treasury/Cargo.toml | 18 +- frame/utility/Cargo.toml | 20 +- frame/vesting/Cargo.toml | 20 +- primitives/allocator/Cargo.toml | 8 +- primitives/api/Cargo.toml | 16 +- primitives/api/proc-macro/Cargo.toml | 2 +- primitives/api/test/Cargo.toml | 22 +- primitives/application-crypto/Cargo.toml | 8 
+- primitives/application-crypto/test/Cargo.toml | 12 +- primitives/arithmetic/Cargo.toml | 6 +- primitives/arithmetic/fuzzer/Cargo.toml | 4 +- primitives/authority-discovery/Cargo.toml | 10 +- primitives/authorship/Cargo.toml | 8 +- primitives/block-builder/Cargo.toml | 10 +- primitives/blockchain/Cargo.toml | 10 +- primitives/chain-spec/Cargo.toml | 2 +- primitives/consensus/aura/Cargo.toml | 14 +- primitives/consensus/babe/Cargo.toml | 20 +- primitives/consensus/common/Cargo.toml | 20 +- primitives/consensus/pow/Cargo.toml | 10 +- primitives/consensus/vrf/Cargo.toml | 8 +- primitives/core/Cargo.toml | 14 +- primitives/database/Cargo.toml | 2 +- primitives/debug-derive/Cargo.toml | 2 +- primitives/externalities/Cargo.toml | 6 +- primitives/finality-grandpa/Cargo.toml | 12 +- primitives/finality-tracker/Cargo.toml | 6 +- primitives/inherents/Cargo.toml | 6 +- primitives/io/Cargo.toml | 18 +- primitives/keyring/Cargo.toml | 6 +- primitives/npos-elections/Cargo.toml | 12 +- primitives/npos-elections/compact/Cargo.toml | 2 +- primitives/npos-elections/fuzzer/Cargo.toml | 6 +- primitives/offchain/Cargo.toml | 10 +- primitives/panic-handler/Cargo.toml | 2 +- primitives/rpc/Cargo.toml | 4 +- primitives/runtime-interface/Cargo.toml | 20 +- .../runtime-interface/proc-macro/Cargo.toml | 2 +- .../test-wasm-deprecated/Cargo.toml | 10 +- .../runtime-interface/test-wasm/Cargo.toml | 10 +- primitives/runtime-interface/test/Cargo.toml | 18 +- primitives/runtime/Cargo.toml | 16 +- primitives/sandbox/Cargo.toml | 10 +- primitives/serializer/Cargo.toml | 2 +- primitives/session/Cargo.toml | 12 +- primitives/staking/Cargo.toml | 6 +- primitives/state-machine/Cargo.toml | 12 +- primitives/std/Cargo.toml | 2 +- primitives/storage/Cargo.toml | 6 +- primitives/test-primitives/Cargo.toml | 8 +- primitives/timestamp/Cargo.toml | 10 +- primitives/tracing/Cargo.toml | 2 +- primitives/transaction-pool/Cargo.toml | 10 +- primitives/trie/Cargo.toml | 8 +- primitives/utils/Cargo.toml | 2 +- primitives/version/Cargo.toml | 6 +- primitives/wasm-interface/Cargo.toml | 4 +- test-utils/Cargo.toml | 2 +- test-utils/client/Cargo.toml | 26 +- test-utils/runtime/Cargo.toml | 60 +-- test-utils/runtime/client/Cargo.toml | 26 +- .../runtime/transaction-pool/Cargo.toml | 12 +- utils/browser/Cargo.toml | 12 +- utils/build-script-utils/Cargo.toml | 2 +- utils/fork-tree/Cargo.toml | 2 +- utils/frame/benchmarking-cli/Cargo.toml | 20 +- utils/frame/rpc/support/Cargo.toml | 10 +- utils/frame/rpc/system/Cargo.toml | 24 +- utils/prometheus/Cargo.toml | 2 +- 188 files changed, 1876 insertions(+), 1823 deletions(-) diff --git a/.maintain/gitlab/generate_changelog.sh b/.maintain/gitlab/generate_changelog.sh index ba2a507e4c..b872d32443 100755 --- a/.maintain/gitlab/generate_changelog.sh +++ b/.maintain/gitlab/generate_changelog.sh @@ -19,18 +19,17 @@ while IFS= read -r line; do if has_label 'paritytech/substrate' "$pr_id" 'B0-silent'; then continue fi - if has_label 'paritytech/substrate' "$pr_id" 'B1-runtimenoteworthy'; then - runtime_changes="$runtime_changes + if has_label 'paritytech/substrate' "$pr_id" 'B3-apinoteworthy' ; then + api_changes="$api_changes $line" fi - if has_label 'paritytech/substrate' "$pr_id" 'B1-clientnoteworthy'; then + if has_label 'paritytech/substrate' "$pr_id" 'B5-clientnoteworthy'; then client_changes="$client_changes $line" fi - if has_label 'paritytech/substrate' "$pr_id" 'B1-apinoteworthy' ; then - api_changes="$api_changes + if has_label 'paritytech/substrate' "$pr_id" 'B7-runtimenoteworthy'; then + 
runtime_changes="$runtime_changes $line" - continue fi done <<< "$all_changes" diff --git a/.maintain/gitlab/lib.sh b/.maintain/gitlab/lib.sh index a7a83baaea..33477b52f5 100755 --- a/.maintain/gitlab/lib.sh +++ b/.maintain/gitlab/lib.sh @@ -5,7 +5,7 @@ api_base="https://api.github.com/repos" # Function to take 2 git tags/commits and get any lines from commit messages # that contain something that looks like a PR reference: e.g., (#1234) sanitised_git_logs(){ - git --no-pager log --pretty=format:"%s" "$1..$2" | + git --no-pager log --pretty=format:"%s" "$1...$2" | # Only find messages referencing a PR grep -E '\(#[0-9]+\)' | # Strip any asterisks diff --git a/Cargo.lock b/Cargo.lock index 1520373790..89b24d0826 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -568,7 +568,7 @@ dependencies = [ [[package]] name = "chain-spec-builder" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "ansi_term 0.12.1", "node-cli", @@ -1377,14 +1377,14 @@ checksum = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" [[package]] name = "fork-tree" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "parity-scale-codec", ] [[package]] name = "frame-benchmarking" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support", "frame-system", @@ -1400,7 +1400,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-benchmarking", "parity-scale-codec", @@ -1417,7 +1417,7 @@ dependencies = [ [[package]] name = "frame-executive" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support", "frame-system", @@ -1437,7 +1437,7 @@ dependencies = [ [[package]] name = "frame-metadata" -version = "11.0.0-rc3" +version = "11.0.0-rc4" dependencies = [ "parity-scale-codec", "serde", @@ -1447,7 +1447,7 @@ dependencies = [ [[package]] name = "frame-support" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "bitmask", "frame-metadata", @@ -1473,7 +1473,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support-procedural-tools", "proc-macro2", @@ -1483,7 +1483,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", @@ -1494,7 +1494,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "proc-macro2", "quote 1.0.6", @@ -1503,7 +1503,7 @@ dependencies = [ [[package]] name = "frame-support-test" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support", "parity-scale-codec", @@ -1521,7 +1521,7 @@ dependencies = [ [[package]] name = "frame-system" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "criterion 0.2.11", "frame-support", @@ -1539,7 +1539,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-benchmarking", "frame-support", @@ -1554,7 +1554,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "parity-scale-codec", "sp-api", @@ -3286,7 +3286,7 @@ dependencies = [ [[package]] name = "node-bench" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "derive_more", "fs_extra", @@ -3316,7 +3316,7 @@ dependencies = [ [[package]] name = 
"node-browser-testing" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "futures 0.3.4", "futures-timer 3.0.2", @@ -3333,7 +3333,7 @@ dependencies = [ [[package]] name = "node-cli" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "assert_cmd", "frame-benchmarking-cli", @@ -3407,7 +3407,7 @@ dependencies = [ [[package]] name = "node-executor" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "criterion 0.3.1", "frame-benchmarking", @@ -3441,7 +3441,7 @@ dependencies = [ [[package]] name = "node-inspect" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "derive_more", "log", @@ -3457,7 +3457,7 @@ dependencies = [ [[package]] name = "node-primitives" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-system", "parity-scale-codec", @@ -3470,7 +3470,7 @@ dependencies = [ [[package]] name = "node-rpc" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "jsonrpc-core", "node-primitives", @@ -3497,7 +3497,7 @@ dependencies = [ [[package]] name = "node-rpc-client" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "env_logger 0.7.1", "futures 0.1.29", @@ -3510,7 +3510,7 @@ dependencies = [ [[package]] name = "node-runtime" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-benchmarking", "frame-executive", @@ -3579,7 +3579,7 @@ dependencies = [ [[package]] name = "node-template" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "futures 0.3.4", "log", @@ -3608,7 +3608,7 @@ dependencies = [ [[package]] name = "node-template-runtime" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-executive", "frame-support", @@ -3640,7 +3640,7 @@ dependencies = [ [[package]] name = "node-testing" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "criterion 0.3.1", "frame-support", @@ -3843,7 +3843,7 @@ dependencies = [ [[package]] name = "pallet-assets" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support", "frame-system", @@ -3857,7 +3857,7 @@ dependencies = [ [[package]] name = "pallet-atomic-swap" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support", "frame-system", @@ -3872,7 +3872,7 @@ dependencies = [ [[package]] name = "pallet-aura" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support", "frame-system", @@ -3894,7 +3894,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support", "frame-system", @@ -3912,7 +3912,7 @@ dependencies = [ [[package]] name = "pallet-authorship" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support", "frame-system", @@ -3928,7 +3928,7 @@ dependencies = [ [[package]] name = "pallet-babe" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support", "frame-system", @@ -3950,7 +3950,7 @@ dependencies = [ [[package]] name = "pallet-balances" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-benchmarking", "frame-support", @@ -3966,7 +3966,7 @@ dependencies = [ [[package]] name = "pallet-benchmark" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-benchmarking", "frame-support", @@ -3980,7 +3980,7 @@ dependencies = [ [[package]] name = "pallet-collective" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-benchmarking", "frame-support", @@ -3997,7 +3997,7 @@ dependencies = [ [[package]] name = "pallet-contracts" -version = "2.0.0-rc3" +version = "2.0.0-rc4" 
dependencies = [ "assert_matches", "frame-support", @@ -4023,7 +4023,7 @@ dependencies = [ [[package]] name = "pallet-contracts-primitives" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "parity-scale-codec", "sp-runtime", @@ -4032,7 +4032,7 @@ dependencies = [ [[package]] name = "pallet-contracts-rpc" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", @@ -4051,7 +4051,7 @@ dependencies = [ [[package]] name = "pallet-contracts-rpc-runtime-api" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "pallet-contracts-primitives", "parity-scale-codec", @@ -4062,7 +4062,7 @@ dependencies = [ [[package]] name = "pallet-democracy" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-benchmarking", "frame-support", @@ -4082,7 +4082,7 @@ dependencies = [ [[package]] name = "pallet-elections" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support", "frame-system", @@ -4098,7 +4098,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-benchmarking", "frame-support", @@ -4117,7 +4117,7 @@ dependencies = [ [[package]] name = "pallet-evm" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "evm", "frame-support", @@ -4137,7 +4137,7 @@ dependencies = [ [[package]] name = "pallet-example" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-benchmarking", "frame-support", @@ -4153,7 +4153,7 @@ dependencies = [ [[package]] name = "pallet-example-offchain-worker" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support", "frame-system", @@ -4168,7 +4168,7 @@ dependencies = [ [[package]] name = "pallet-finality-tracker" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support", "frame-system", @@ -4185,7 +4185,7 @@ dependencies = [ [[package]] name = "pallet-generic-asset" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support", "frame-system", @@ -4199,7 +4199,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "finality-grandpa", "frame-support", @@ -4226,7 +4226,7 @@ dependencies = [ [[package]] name = "pallet-identity" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "enumflags2", "frame-benchmarking", @@ -4243,7 +4243,7 @@ dependencies = [ [[package]] name = "pallet-im-online" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-benchmarking", "frame-support", @@ -4262,7 +4262,7 @@ dependencies = [ [[package]] name = "pallet-indices" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-benchmarking", "frame-support", @@ -4279,7 +4279,7 @@ dependencies = [ [[package]] name = "pallet-membership" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support", "frame-system", @@ -4293,7 +4293,7 @@ dependencies = [ [[package]] name = "pallet-multisig" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-benchmarking", "frame-support", @@ -4309,7 +4309,7 @@ dependencies = [ [[package]] name = "pallet-nicks" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support", "frame-system", @@ -4324,7 +4324,7 @@ dependencies = [ [[package]] name = "pallet-offences" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support", "frame-system", @@ -4340,7 +4340,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" -version = 
"2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-benchmarking", "frame-support", @@ -4365,7 +4365,7 @@ dependencies = [ [[package]] name = "pallet-proxy" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-benchmarking", "frame-support", @@ -4382,7 +4382,7 @@ dependencies = [ [[package]] name = "pallet-randomness-collective-flip" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support", "frame-system", @@ -4396,7 +4396,7 @@ dependencies = [ [[package]] name = "pallet-recovery" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "enumflags2", "frame-support", @@ -4412,7 +4412,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-benchmarking", "frame-support", @@ -4427,7 +4427,7 @@ dependencies = [ [[package]] name = "pallet-scored-pool" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support", "frame-system", @@ -4442,7 +4442,7 @@ dependencies = [ [[package]] name = "pallet-session" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support", "frame-system", @@ -4463,7 +4463,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-benchmarking", "frame-support", @@ -4483,7 +4483,7 @@ dependencies = [ [[package]] name = "pallet-society" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support", "frame-system", @@ -4499,7 +4499,7 @@ dependencies = [ [[package]] name = "pallet-staking" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "env_logger 0.7.1", "frame-benchmarking", @@ -4550,7 +4550,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -4561,7 +4561,7 @@ dependencies = [ [[package]] name = "pallet-sudo" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support", "frame-system", @@ -4575,7 +4575,7 @@ dependencies = [ [[package]] name = "pallet-template" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support", "frame-system", @@ -4587,7 +4587,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-benchmarking", "frame-support", @@ -4605,7 +4605,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support", "frame-system", @@ -4623,7 +4623,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", @@ -4640,7 +4640,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support", "parity-scale-codec", @@ -4653,7 +4653,7 @@ dependencies = [ [[package]] name = "pallet-treasury" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-benchmarking", "frame-support", @@ -4669,7 +4669,7 @@ dependencies = [ [[package]] name = "pallet-utility" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-benchmarking", "frame-support", @@ -4685,7 +4685,7 @@ dependencies = [ [[package]] name = "pallet-vesting" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "enumflags2", "frame-benchmarking", @@ -5829,7 +5829,7 @@ dependencies = [ 
[[package]] name = "sc-authority-discovery" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "bytes 0.5.4", "derive_more", @@ -5859,7 +5859,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "futures 0.3.4", "futures-timer 3.0.2", @@ -5885,7 +5885,7 @@ dependencies = [ [[package]] name = "sc-block-builder" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -5902,7 +5902,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "impl-trait-for-tuples", "sc-chain-spec-derive", @@ -5917,7 +5917,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -5927,7 +5927,7 @@ dependencies = [ [[package]] name = "sc-cli" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "ansi_term 0.12.1", "atty", @@ -5967,7 +5967,7 @@ dependencies = [ [[package]] name = "sc-client-api" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "derive_more", "fnv", @@ -6005,7 +6005,7 @@ dependencies = [ [[package]] name = "sc-client-db" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "blake2-rfc", "env_logger 0.7.1", @@ -6038,7 +6038,7 @@ dependencies = [ [[package]] name = "sc-consensus" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "sc-client-api", "sp-blockchain", @@ -6048,7 +6048,7 @@ dependencies = [ [[package]] name = "sc-consensus-aura" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "derive_more", "env_logger 0.7.1", @@ -6086,7 +6086,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "derive_more", "env_logger 0.7.1", @@ -6137,7 +6137,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "derive_more", "futures 0.3.4", @@ -6165,7 +6165,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "fork-tree", "parity-scale-codec", @@ -6177,7 +6177,7 @@ dependencies = [ [[package]] name = "sc-consensus-manual-seal" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "assert_matches", "derive_more", @@ -6207,7 +6207,7 @@ dependencies = [ [[package]] name = "sc-consensus-pow" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "derive_more", "futures 0.3.4", @@ -6228,7 +6228,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "futures 0.3.4", "futures-timer 3.0.2", @@ -6250,7 +6250,7 @@ dependencies = [ [[package]] name = "sc-consensus-uncles" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "log", "sc-client-api", @@ -6263,7 +6263,7 @@ dependencies = [ [[package]] name = "sc-executor" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "assert_matches", "derive_more", @@ -6301,7 +6301,7 @@ dependencies = [ [[package]] name = "sc-executor-common" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "derive_more", "log", @@ -6317,7 +6317,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmi" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "log", "parity-scale-codec", @@ -6331,7 +6331,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" -version = "0.8.0-rc3" +version = 
"0.8.0-rc4" dependencies = [ "assert_matches", "cranelift-codegen", @@ -6352,7 +6352,7 @@ dependencies = [ [[package]] name = "sc-finality-grandpa" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "assert_matches", "derive_more", @@ -6397,7 +6397,7 @@ dependencies = [ [[package]] name = "sc-finality-grandpa-rpc" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "derive_more", "finality-grandpa", @@ -6414,7 +6414,7 @@ dependencies = [ [[package]] name = "sc-informant" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "ansi_term 0.12.1", "futures 0.3.4", @@ -6432,7 +6432,7 @@ dependencies = [ [[package]] name = "sc-keystore" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "derive_more", "hex", @@ -6448,7 +6448,7 @@ dependencies = [ [[package]] name = "sc-light" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "hash-db", "lazy_static", @@ -6466,7 +6466,7 @@ dependencies = [ [[package]] name = "sc-network" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "assert_matches", "async-std", @@ -6526,7 +6526,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "async-std", "futures 0.3.4", @@ -6544,7 +6544,7 @@ dependencies = [ [[package]] name = "sc-network-test" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "env_logger 0.7.1", "futures 0.3.4", @@ -6570,7 +6570,7 @@ dependencies = [ [[package]] name = "sc-offchain" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "bytes 0.5.4", "env_logger 0.7.1", @@ -6603,7 +6603,7 @@ dependencies = [ [[package]] name = "sc-peerset" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "futures 0.3.4", "libp2p", @@ -6616,7 +6616,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -6624,7 +6624,7 @@ dependencies = [ [[package]] name = "sc-rpc" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "assert_matches", "futures 0.1.29", @@ -6663,7 +6663,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "derive_more", "futures 0.3.4", @@ -6686,7 +6686,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "jsonrpc-core", "jsonrpc-http-server", @@ -6701,7 +6701,7 @@ dependencies = [ [[package]] name = "sc-runtime-test" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "sp-allocator", "sp-core", @@ -6714,7 +6714,7 @@ dependencies = [ [[package]] name = "sc-service" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "derive_more", "directories", @@ -6780,7 +6780,7 @@ dependencies = [ [[package]] name = "sc-service-test" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "env_logger 0.7.1", "fdlimit", @@ -6816,7 +6816,7 @@ dependencies = [ [[package]] name = "sc-state-db" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "env_logger 0.7.1", "log", @@ -6830,7 +6830,7 @@ dependencies = [ [[package]] name = "sc-telemetry" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "bytes 0.5.4", "futures 0.3.4", @@ -6851,7 +6851,7 @@ dependencies = [ [[package]] name = "sc-tracing" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "erased-serde", "log", @@ -6868,7 +6868,7 @@ dependencies = [ [[package]] name = "sc-transaction-graph" -version = "2.0.0-rc3" +version = 
"2.0.0-rc4" dependencies = [ "assert_matches", "criterion 0.3.1", @@ -6891,7 +6891,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "assert_matches", "derive_more", @@ -7264,7 +7264,7 @@ dependencies = [ [[package]] name = "sp-allocator" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "derive_more", "log", @@ -7275,7 +7275,7 @@ dependencies = [ [[package]] name = "sp-api" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "hash-db", "parity-scale-codec", @@ -7290,7 +7290,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "blake2-rfc", "proc-macro-crate", @@ -7301,7 +7301,7 @@ dependencies = [ [[package]] name = "sp-api-test" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "criterion 0.3.1", "parity-scale-codec", @@ -7320,7 +7320,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "parity-scale-codec", "serde", @@ -7331,7 +7331,7 @@ dependencies = [ [[package]] name = "sp-application-crypto-test" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "sp-api", "sp-application-crypto", @@ -7342,7 +7342,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "criterion 0.3.1", "integer-sqrt", @@ -7358,7 +7358,7 @@ dependencies = [ [[package]] name = "sp-arithmetic-fuzzer" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "honggfuzz", "num-bigint", @@ -7369,7 +7369,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "parity-scale-codec", "sp-api", @@ -7380,7 +7380,7 @@ dependencies = [ [[package]] name = "sp-authorship" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "parity-scale-codec", "sp-inherents", @@ -7390,7 +7390,7 @@ dependencies = [ [[package]] name = "sp-block-builder" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "parity-scale-codec", "sp-api", @@ -7401,7 +7401,7 @@ dependencies = [ [[package]] name = "sp-blockchain" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "derive_more", "log", @@ -7416,7 +7416,7 @@ dependencies = [ [[package]] name = "sp-chain-spec" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "serde", "serde_json", @@ -7424,7 +7424,7 @@ dependencies = [ [[package]] name = "sp-consensus" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "derive_more", "futures 0.3.4", @@ -7448,7 +7448,7 @@ dependencies = [ [[package]] name = "sp-consensus-aura" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "parity-scale-codec", "sp-api", @@ -7461,7 +7461,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "merlin", "parity-scale-codec", @@ -7478,7 +7478,7 @@ dependencies = [ [[package]] name = "sp-consensus-pow" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "parity-scale-codec", "sp-api", @@ -7489,7 +7489,7 @@ dependencies = [ [[package]] name = "sp-consensus-vrf" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "parity-scale-codec", "schnorrkel", @@ -7500,7 +7500,7 @@ dependencies = [ [[package]] name = "sp-core" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "base58", "blake2-rfc", @@ -7547,7 +7547,7 @@ dependencies = [ [[package]] name = "sp-database" 
-version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "kvdb", "parking_lot 0.10.2", @@ -7555,7 +7555,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "proc-macro2", "quote 1.0.6", @@ -7564,7 +7564,7 @@ dependencies = [ [[package]] name = "sp-externalities" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "environmental", "parity-scale-codec", @@ -7574,7 +7574,7 @@ dependencies = [ [[package]] name = "sp-finality-grandpa" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "finality-grandpa", "log", @@ -7589,7 +7589,7 @@ dependencies = [ [[package]] name = "sp-finality-tracker" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "parity-scale-codec", "sp-inherents", @@ -7598,7 +7598,7 @@ dependencies = [ [[package]] name = "sp-inherents" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "derive_more", "parity-scale-codec", @@ -7609,7 +7609,7 @@ dependencies = [ [[package]] name = "sp-io" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "futures 0.3.4", "hash-db", @@ -7629,7 +7629,7 @@ dependencies = [ [[package]] name = "sp-keyring" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "lazy_static", "sp-core", @@ -7639,7 +7639,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "parity-scale-codec", "rand 0.7.3", @@ -7653,7 +7653,7 @@ dependencies = [ [[package]] name = "sp-npos-elections-compact" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -7674,7 +7674,7 @@ dependencies = [ [[package]] name = "sp-offchain" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "sp-api", "sp-core", @@ -7684,7 +7684,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "backtrace", "log", @@ -7692,7 +7692,7 @@ dependencies = [ [[package]] name = "sp-rpc" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "serde", "serde_json", @@ -7701,7 +7701,7 @@ dependencies = [ [[package]] name = "sp-runtime" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "either", "hash256-std-hasher", @@ -7724,7 +7724,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "parity-scale-codec", "primitive-types", @@ -7744,7 +7744,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "Inflector", "proc-macro-crate", @@ -7755,7 +7755,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-test" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "sc-executor", "sp-core", @@ -7770,7 +7770,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-test-wasm" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "sp-core", "sp-io", @@ -7781,7 +7781,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-test-wasm-deprecated" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "sp-core", "sp-io", @@ -7792,7 +7792,7 @@ dependencies = [ [[package]] name = "sp-sandbox" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "assert_matches", "parity-scale-codec", @@ -7806,7 +7806,7 @@ dependencies = [ [[package]] name = "sp-serializer" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "serde", "serde_json", @@ -7814,7 +7814,7 @@ dependencies 
= [ [[package]] name = "sp-session" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "parity-scale-codec", "sp-api", @@ -7826,7 +7826,7 @@ dependencies = [ [[package]] name = "sp-staking" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "parity-scale-codec", "sp-runtime", @@ -7835,7 +7835,7 @@ dependencies = [ [[package]] name = "sp-state-machine" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "hash-db", "hex-literal", @@ -7858,11 +7858,11 @@ dependencies = [ [[package]] name = "sp-std" -version = "2.0.0-rc3" +version = "2.0.0-rc4" [[package]] name = "sp-storage" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "impl-serde 0.2.3", "ref-cast", @@ -7873,7 +7873,7 @@ dependencies = [ [[package]] name = "sp-test-primitives" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "parity-scale-codec", "parity-util-mem", @@ -7885,7 +7885,7 @@ dependencies = [ [[package]] name = "sp-timestamp" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -7898,7 +7898,7 @@ dependencies = [ [[package]] name = "sp-tracing" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "log", "rental", @@ -7907,7 +7907,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "derive_more", "futures 0.3.4", @@ -7922,7 +7922,7 @@ dependencies = [ [[package]] name = "sp-trie" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "criterion 0.2.11", "hash-db", @@ -7940,7 +7940,7 @@ dependencies = [ [[package]] name = "sp-utils" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "futures 0.3.4", "futures-core", @@ -7951,7 +7951,7 @@ dependencies = [ [[package]] name = "sp-version" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "impl-serde 0.2.3", "parity-scale-codec", @@ -7962,7 +7962,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -8077,7 +8077,7 @@ dependencies = [ [[package]] name = "subkey" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "clap", "derive_more", @@ -8119,7 +8119,7 @@ dependencies = [ [[package]] name = "substrate-browser-utils" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "chrono", "clear_on_drop", @@ -8145,14 +8145,14 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "platforms", ] [[package]] name = "substrate-frame-rpc-support" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "frame-support", "frame-system", @@ -8168,7 +8168,7 @@ dependencies = [ [[package]] name = "substrate-frame-rpc-system" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "env_logger 0.7.1", "frame-system-rpc-runtime-api", @@ -8193,7 +8193,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" -version = "0.8.0-rc3" +version = "0.8.0-rc4" dependencies = [ "async-std", "derive_more", @@ -8206,7 +8206,7 @@ dependencies = [ [[package]] name = "substrate-test-client" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "futures 0.3.4", "hash-db", @@ -8227,7 +8227,7 @@ dependencies = [ [[package]] name = "substrate-test-runtime" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "cfg-if", "frame-executive", @@ -8270,7 +8270,7 @@ dependencies = [ [[package]] name = "substrate-test-runtime-client" 
-version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "futures 0.3.4", "parity-scale-codec", @@ -8290,7 +8290,7 @@ dependencies = [ [[package]] name = "substrate-test-runtime-transaction-pool" -version = "2.0.0-rc3" +version = "2.0.0-rc4" dependencies = [ "derive_more", "futures 0.3.4", @@ -8305,7 +8305,7 @@ dependencies = [ [[package]] name = "substrate-test-utils" -version = "2.0.0-rc3" +version = "2.0.0-rc4" [[package]] name = "substrate-wasm-builder" diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index 52fc1b4f8d..6689062390 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-template" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Anonymous"] description = "Substrate Node template" edition = "2018" @@ -21,25 +21,25 @@ log = "0.4.8" structopt = "0.3.8" parking_lot = "0.10.0" -sc-cli = { version = "0.8.0-rc3", path = "../../../client/cli", features = ["wasmtime"] } -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } -sc-executor = { version = "0.8.0-rc3", path = "../../../client/executor", features = ["wasmtime"] } -sc-service = { version = "0.8.0-rc3", path = "../../../client/service", features = ["wasmtime"] } -sp-inherents = { version = "2.0.0-rc3", path = "../../../primitives/inherents" } -sc-transaction-pool = { version = "2.0.0-rc3", path = "../../../client/transaction-pool" } -sp-transaction-pool = { version = "2.0.0-rc3", path = "../../../primitives/transaction-pool" } -sc-network = { version = "0.8.0-rc3", path = "../../../client/network" } -sc-consensus-aura = { version = "0.8.0-rc3", path = "../../../client/consensus/aura" } -sp-consensus-aura = { version = "0.8.0-rc3", path = "../../../primitives/consensus/aura" } -sp-consensus = { version = "0.8.0-rc3", path = "../../../primitives/consensus/common" } -sc-consensus = { version = "0.8.0-rc3", path = "../../../client/consensus/common" } -sc-finality-grandpa = { version = "0.8.0-rc3", path = "../../../client/finality-grandpa" } -sp-finality-grandpa = { version = "2.0.0-rc3", path = "../../../primitives/finality-grandpa" } -sc-client-api = { version = "2.0.0-rc3", path = "../../../client/api" } -sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } -sc-basic-authorship = { path = "../../../client/basic-authorship", version = "0.8.0-rc3"} +sc-cli = { version = "0.8.0-rc4", path = "../../../client/cli", features = ["wasmtime"] } +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } +sc-executor = { version = "0.8.0-rc4", path = "../../../client/executor", features = ["wasmtime"] } +sc-service = { version = "0.8.0-rc4", path = "../../../client/service", features = ["wasmtime"] } +sp-inherents = { version = "2.0.0-rc4", path = "../../../primitives/inherents" } +sc-transaction-pool = { version = "2.0.0-rc4", path = "../../../client/transaction-pool" } +sp-transaction-pool = { version = "2.0.0-rc4", path = "../../../primitives/transaction-pool" } +sc-network = { version = "0.8.0-rc4", path = "../../../client/network" } +sc-consensus-aura = { version = "0.8.0-rc4", path = "../../../client/consensus/aura" } +sp-consensus-aura = { version = "0.8.0-rc4", path = "../../../primitives/consensus/aura" } +sp-consensus = { version = "0.8.0-rc4", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.8.0-rc4", path = "../../../client/consensus/common" } +sc-finality-grandpa = { version = "0.8.0-rc4", path = 
"../../../client/finality-grandpa" } +sp-finality-grandpa = { version = "2.0.0-rc4", path = "../../../primitives/finality-grandpa" } +sc-client-api = { version = "2.0.0-rc4", path = "../../../client/api" } +sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } +sc-basic-authorship = { path = "../../../client/basic-authorship", version = "0.8.0-rc4"} -node-template-runtime = { version = "2.0.0-rc3", path = "../runtime" } +node-template-runtime = { version = "2.0.0-rc4", path = "../runtime" } [build-dependencies] -substrate-build-script-utils = { version = "2.0.0-rc3", path = "../../../utils/build-script-utils" } +substrate-build-script-utils = { version = "2.0.0-rc4", path = "../../../utils/build-script-utils" } diff --git a/bin/node-template/pallets/template/Cargo.toml b/bin/node-template/pallets/template/Cargo.toml index 714c9d93a9..442fb72030 100644 --- a/bin/node-template/pallets/template/Cargo.toml +++ b/bin/node-template/pallets/template/Cargo.toml @@ -2,7 +2,7 @@ authors = ['Anonymous'] edition = '2018' name = 'pallet-template' -version = "2.0.0-rc3" +version = "2.0.0-rc4" license = "Unlicense" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" @@ -16,27 +16,27 @@ codec = { package = "parity-scale-codec", version = "1.3.1", default-features = [dependencies.frame-support] default-features = false -version = "2.0.0-rc3" +version = "2.0.0-rc4" path = "../../../../frame/support" [dependencies.frame-system] default-features = false -version = "2.0.0-rc3" +version = "2.0.0-rc4" path = "../../../../frame/system" [dev-dependencies.sp-core] default-features = false -version = "2.0.0-rc3" +version = "2.0.0-rc4" path = "../../../../primitives/core" [dev-dependencies.sp-io] default-features = false -version = "2.0.0-rc3" +version = "2.0.0-rc4" path = "../../../../primitives/io" [dev-dependencies.sp-runtime] default-features = false -version = "2.0.0-rc3" +version = "2.0.0-rc4" path = "../../../../primitives/runtime" diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index 16bb0fe0cb..ea44c805d0 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-template-runtime" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Anonymous"] edition = "2018" license = "Unlicense" @@ -13,31 +13,31 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -aura = { version = "2.0.0-rc3", default-features = false, package = "pallet-aura", path = "../../../frame/aura" } -balances = { version = "2.0.0-rc3", default-features = false, package = "pallet-balances", path = "../../../frame/balances" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/support" } -grandpa = { version = "2.0.0-rc3", default-features = false, package = "pallet-grandpa", path = "../../../frame/grandpa" } -randomness-collective-flip = { version = "2.0.0-rc3", default-features = false, package = "pallet-randomness-collective-flip", path = "../../../frame/randomness-collective-flip" } -sudo = { version = "2.0.0-rc3", default-features = false, package = "pallet-sudo", path = "../../../frame/sudo" } -system = { version = "2.0.0-rc3", default-features = false, package = "frame-system", path = "../../../frame/system" } -timestamp = { version = "2.0.0-rc3", default-features = false, package = "pallet-timestamp", path = 
"../../../frame/timestamp" } -transaction-payment = { version = "2.0.0-rc3", default-features = false, package = "pallet-transaction-payment", path = "../../../frame/transaction-payment" } -frame-executive = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/executive" } +aura = { version = "2.0.0-rc4", default-features = false, package = "pallet-aura", path = "../../../frame/aura" } +balances = { version = "2.0.0-rc4", default-features = false, package = "pallet-balances", path = "../../../frame/balances" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/support" } +grandpa = { version = "2.0.0-rc4", default-features = false, package = "pallet-grandpa", path = "../../../frame/grandpa" } +randomness-collective-flip = { version = "2.0.0-rc4", default-features = false, package = "pallet-randomness-collective-flip", path = "../../../frame/randomness-collective-flip" } +sudo = { version = "2.0.0-rc4", default-features = false, package = "pallet-sudo", path = "../../../frame/sudo" } +system = { version = "2.0.0-rc4", default-features = false, package = "frame-system", path = "../../../frame/system" } +timestamp = { version = "2.0.0-rc4", default-features = false, package = "pallet-timestamp", path = "../../../frame/timestamp" } +transaction-payment = { version = "2.0.0-rc4", default-features = false, package = "pallet-transaction-payment", path = "../../../frame/transaction-payment" } +frame-executive = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/executive" } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-api = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/api" } -sp-block-builder = { path = "../../../primitives/block-builder", default-features = false, version = "2.0.0-rc3"} -sp-consensus-aura = { version = "0.8.0-rc3", default-features = false, path = "../../../primitives/consensus/aura" } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/core" } -sp-inherents = { path = "../../../primitives/inherents", default-features = false, version = "2.0.0-rc3"} -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/io" } -sp-offchain = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/offchain" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/runtime" } -sp-session = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/session" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/std" } -sp-transaction-pool = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/transaction-pool" } -sp-version = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/version" } +sp-api = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/api" } +sp-block-builder = { path = "../../../primitives/block-builder", default-features = false, version = "2.0.0-rc4"} +sp-consensus-aura = { version = "0.8.0-rc4", default-features = false, path = "../../../primitives/consensus/aura" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/core" } +sp-inherents = { path = "../../../primitives/inherents", default-features = false, version = "2.0.0-rc4"} +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/io" } +sp-offchain = 
{ version = "2.0.0-rc4", default-features = false, path = "../../../primitives/offchain" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/runtime" } +sp-session = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/session" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/std" } +sp-transaction-pool = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/transaction-pool" } +sp-version = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/version" } -template = { version = "2.0.0-rc3", default-features = false, path = "../pallets/template", package = "pallet-template" } +template = { version = "2.0.0-rc4", default-features = false, path = "../pallets/template", package = "pallet-template" } [build-dependencies] wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } diff --git a/bin/node/bench/Cargo.toml b/bin/node/bench/Cargo.toml index 80b02f1bc9..ab156635ec 100644 --- a/bin/node/bench/Cargo.toml +++ b/bin/node/bench/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-bench" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] description = "Substrate node integration benchmarks." edition = "2018" @@ -10,21 +10,21 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] log = "0.4.8" -node-primitives = { version = "2.0.0-rc3", path = "../primitives" } -node-testing = { version = "2.0.0-rc3", path = "../testing" } -node-runtime = { version = "2.0.0-rc3", path = "../runtime" } -sc-cli = { version = "0.8.0-rc3", path = "../../../client/cli" } -sc-client-api = { version = "2.0.0-rc3", path = "../../../client/api/" } -sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } -sp-state-machine = { version = "0.8.0-rc3", path = "../../../primitives/state-machine" } +node-primitives = { version = "2.0.0-rc4", path = "../primitives" } +node-testing = { version = "2.0.0-rc4", path = "../testing" } +node-runtime = { version = "2.0.0-rc4", path = "../runtime" } +sc-cli = { version = "0.8.0-rc4", path = "../../../client/cli" } +sc-client-api = { version = "2.0.0-rc4", path = "../../../client/api/" } +sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } +sp-state-machine = { version = "0.8.0-rc4", path = "../../../primitives/state-machine" } serde = "1.0.101" serde_json = "1.0.41" structopt = "0.3" derive_more = "0.99.2" kvdb = "0.6" kvdb-rocksdb = "0.8" -sp-trie = { version = "2.0.0-rc3", path = "../../../primitives/trie" } -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } +sp-trie = { version = "2.0.0-rc4", path = "../../../primitives/trie" } +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } hash-db = "0.15.2" tempfile = "3.1.0" fs_extra = "1" diff --git a/bin/node/browser-testing/Cargo.toml b/bin/node/browser-testing/Cargo.toml index 9e31d734c3..d8710b0b4b 100644 --- a/bin/node/browser-testing/Cargo.toml +++ b/bin/node/browser-testing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-browser-testing" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] description = "Tests for the in-browser light client." 
edition = "2018" @@ -17,5 +17,5 @@ wasm-bindgen-futures = "0.4.10" wasm-bindgen-test = "0.3.10" futures = "0.3.4" -node-cli = { path = "../cli", default-features = false, features = ["browser"] , version = "2.0.0-rc3"} -sc-rpc-api = { path = "../../../client/rpc-api" , version = "0.8.0-rc3"} +node-cli = { path = "../cli", default-features = false, features = ["browser"] , version = "2.0.0-rc4"} +sc-rpc-api = { path = "../../../client/rpc-api" , version = "0.8.0-rc4"} diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 4e2c0151b7..6202c1af69 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-cli" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] description = "Generic Substrate node implementation in Rust." build = "build.rs" @@ -46,76 +46,76 @@ tracing = "0.1.10" parking_lot = "0.10.0" # primitives -sp-authority-discovery = { version = "2.0.0-rc3", path = "../../../primitives/authority-discovery" } -sp-consensus-babe = { version = "0.8.0-rc3", path = "../../../primitives/consensus/babe" } -grandpa-primitives = { version = "2.0.0-rc3", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } -sp-timestamp = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/timestamp" } -sp-finality-tracker = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/finality-tracker" } -sp-inherents = { version = "2.0.0-rc3", path = "../../../primitives/inherents" } -sp-keyring = { version = "2.0.0-rc3", path = "../../../primitives/keyring" } -sp-io = { version = "2.0.0-rc3", path = "../../../primitives/io" } -sp-consensus = { version = "0.8.0-rc3", path = "../../../primitives/consensus/common" } -sp-transaction-pool = { version = "2.0.0-rc3", path = "../../../primitives/transaction-pool" } +sp-authority-discovery = { version = "2.0.0-rc4", path = "../../../primitives/authority-discovery" } +sp-consensus-babe = { version = "0.8.0-rc4", path = "../../../primitives/consensus/babe" } +grandpa-primitives = { version = "2.0.0-rc4", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } +sp-timestamp = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/timestamp" } +sp-finality-tracker = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/finality-tracker" } +sp-inherents = { version = "2.0.0-rc4", path = "../../../primitives/inherents" } +sp-keyring = { version = "2.0.0-rc4", path = "../../../primitives/keyring" } +sp-io = { version = "2.0.0-rc4", path = "../../../primitives/io" } +sp-consensus = { version = "0.8.0-rc4", path = "../../../primitives/consensus/common" } +sp-transaction-pool = { version = "2.0.0-rc4", path = "../../../primitives/transaction-pool" } # client dependencies -sc-client-api = { version = "2.0.0-rc3", path = "../../../client/api" } -sc-chain-spec = { version = "2.0.0-rc3", path = "../../../client/chain-spec" } -sc-consensus = { version = "0.8.0-rc3", path = "../../../client/consensus/common" } -sc-transaction-pool = { version = "2.0.0-rc3", path = "../../../client/transaction-pool" } -sc-network = { version = "0.8.0-rc3", path = 
"../../../client/network" } -sc-consensus-babe = { version = "0.8.0-rc3", path = "../../../client/consensus/babe" } -grandpa = { version = "0.8.0-rc3", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } -sc-client-db = { version = "0.8.0-rc3", default-features = false, path = "../../../client/db" } -sc-offchain = { version = "2.0.0-rc3", path = "../../../client/offchain" } -sc-rpc = { version = "2.0.0-rc3", path = "../../../client/rpc" } -sc-basic-authorship = { version = "0.8.0-rc3", path = "../../../client/basic-authorship" } -sc-service = { version = "0.8.0-rc3", default-features = false, path = "../../../client/service" } -sc-tracing = { version = "2.0.0-rc3", path = "../../../client/tracing" } -sc-telemetry = { version = "2.0.0-rc3", path = "../../../client/telemetry" } -sc-authority-discovery = { version = "0.8.0-rc3", path = "../../../client/authority-discovery" } +sc-client-api = { version = "2.0.0-rc4", path = "../../../client/api" } +sc-chain-spec = { version = "2.0.0-rc4", path = "../../../client/chain-spec" } +sc-consensus = { version = "0.8.0-rc4", path = "../../../client/consensus/common" } +sc-transaction-pool = { version = "2.0.0-rc4", path = "../../../client/transaction-pool" } +sc-network = { version = "0.8.0-rc4", path = "../../../client/network" } +sc-consensus-babe = { version = "0.8.0-rc4", path = "../../../client/consensus/babe" } +grandpa = { version = "0.8.0-rc4", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } +sc-client-db = { version = "0.8.0-rc4", default-features = false, path = "../../../client/db" } +sc-offchain = { version = "2.0.0-rc4", path = "../../../client/offchain" } +sc-rpc = { version = "2.0.0-rc4", path = "../../../client/rpc" } +sc-basic-authorship = { version = "0.8.0-rc4", path = "../../../client/basic-authorship" } +sc-service = { version = "0.8.0-rc4", default-features = false, path = "../../../client/service" } +sc-tracing = { version = "2.0.0-rc4", path = "../../../client/tracing" } +sc-telemetry = { version = "2.0.0-rc4", path = "../../../client/telemetry" } +sc-authority-discovery = { version = "0.8.0-rc4", path = "../../../client/authority-discovery" } # frame dependencies -pallet-indices = { version = "2.0.0-rc3", path = "../../../frame/indices" } -pallet-timestamp = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/timestamp" } -pallet-contracts = { version = "2.0.0-rc3", path = "../../../frame/contracts" } -frame-system = { version = "2.0.0-rc3", path = "../../../frame/system" } -pallet-balances = { version = "2.0.0-rc3", path = "../../../frame/balances" } -pallet-transaction-payment = { version = "2.0.0-rc3", path = "../../../frame/transaction-payment" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/support" } -pallet-im-online = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/im-online" } -pallet-authority-discovery = { version = "2.0.0-rc3", path = "../../../frame/authority-discovery" } -pallet-staking = { version = "2.0.0-rc3", path = "../../../frame/staking" } -pallet-grandpa = { version = "2.0.0-rc3", path = "../../../frame/grandpa" } +pallet-indices = { version = "2.0.0-rc4", path = "../../../frame/indices" } +pallet-timestamp = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/timestamp" } +pallet-contracts = { version = "2.0.0-rc4", path = "../../../frame/contracts" } +frame-system = { version = "2.0.0-rc4", path = "../../../frame/system" } 
+pallet-balances = { version = "2.0.0-rc4", path = "../../../frame/balances" } +pallet-transaction-payment = { version = "2.0.0-rc4", path = "../../../frame/transaction-payment" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/support" } +pallet-im-online = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/im-online" } +pallet-authority-discovery = { version = "2.0.0-rc4", path = "../../../frame/authority-discovery" } +pallet-staking = { version = "2.0.0-rc4", path = "../../../frame/staking" } +pallet-grandpa = { version = "2.0.0-rc4", path = "../../../frame/grandpa" } # node-specific dependencies -node-runtime = { version = "2.0.0-rc3", path = "../runtime" } -node-rpc = { version = "2.0.0-rc3", path = "../rpc" } -node-primitives = { version = "2.0.0-rc3", path = "../primitives" } -node-executor = { version = "2.0.0-rc3", path = "../executor" } +node-runtime = { version = "2.0.0-rc4", path = "../runtime" } +node-rpc = { version = "2.0.0-rc4", path = "../rpc" } +node-primitives = { version = "2.0.0-rc4", path = "../primitives" } +node-executor = { version = "2.0.0-rc4", path = "../executor" } # CLI-specific dependencies -sc-cli = { version = "0.8.0-rc3", optional = true, path = "../../../client/cli" } -frame-benchmarking-cli = { version = "2.0.0-rc3", optional = true, path = "../../../utils/frame/benchmarking-cli" } -node-inspect = { version = "0.8.0-rc3", optional = true, path = "../inspect" } +sc-cli = { version = "0.8.0-rc4", optional = true, path = "../../../client/cli" } +frame-benchmarking-cli = { version = "2.0.0-rc4", optional = true, path = "../../../utils/frame/benchmarking-cli" } +node-inspect = { version = "0.8.0-rc4", optional = true, path = "../inspect" } # WASM-specific dependencies wasm-bindgen = { version = "0.2.57", optional = true } wasm-bindgen-futures = { version = "0.4.7", optional = true } -browser-utils = { package = "substrate-browser-utils", path = "../../../utils/browser", optional = true, version = "0.8.0-rc3"} +browser-utils = { package = "substrate-browser-utils", path = "../../../utils/browser", optional = true, version = "0.8.0-rc4"} [target.'cfg(target_arch="x86_64")'.dependencies] -node-executor = { version = "2.0.0-rc3", path = "../executor", features = [ "wasmtime" ] } -sc-cli = { version = "0.8.0-rc3", optional = true, path = "../../../client/cli", features = [ "wasmtime" ] } -sc-service = { version = "0.8.0-rc3", default-features = false, path = "../../../client/service", features = [ "wasmtime" ] } +node-executor = { version = "2.0.0-rc4", path = "../executor", features = [ "wasmtime" ] } +sc-cli = { version = "0.8.0-rc4", optional = true, path = "../../../client/cli", features = [ "wasmtime" ] } +sc-service = { version = "0.8.0-rc4", default-features = false, path = "../../../client/service", features = [ "wasmtime" ] } [dev-dependencies] -sc-keystore = { version = "2.0.0-rc3", path = "../../../client/keystore" } -sc-consensus = { version = "0.8.0-rc3", path = "../../../client/consensus/common" } -sc-consensus-babe = { version = "0.8.0-rc3", features = ["test-helpers"], path = "../../../client/consensus/babe" } -sc-consensus-epochs = { version = "0.8.0-rc3", path = "../../../client/consensus/epochs" } -sc-service-test = { version = "2.0.0-rc3", path = "../../../client/service/test" } +sc-keystore = { version = "2.0.0-rc4", path = "../../../client/keystore" } +sc-consensus = { version = "0.8.0-rc4", path = "../../../client/consensus/common" } +sc-consensus-babe = { version = 
"0.8.0-rc4", features = ["test-helpers"], path = "../../../client/consensus/babe" } +sc-consensus-epochs = { version = "0.8.0-rc4", path = "../../../client/consensus/epochs" } +sc-service-test = { version = "2.0.0-rc4", path = "../../../client/service/test" } futures = "0.3.4" tempfile = "3.1.0" assert_cmd = "1.0" @@ -126,12 +126,12 @@ platforms = "0.2.1" [build-dependencies] structopt = { version = "0.3.8", optional = true } -node-inspect = { version = "0.8.0-rc3", optional = true, path = "../inspect" } -frame-benchmarking-cli = { version = "2.0.0-rc3", optional = true, path = "../../../utils/frame/benchmarking-cli" } -substrate-build-script-utils = { version = "2.0.0-rc3", optional = true, path = "../../../utils/build-script-utils" } +node-inspect = { version = "0.8.0-rc4", optional = true, path = "../inspect" } +frame-benchmarking-cli = { version = "2.0.0-rc4", optional = true, path = "../../../utils/frame/benchmarking-cli" } +substrate-build-script-utils = { version = "2.0.0-rc4", optional = true, path = "../../../utils/build-script-utils" } [build-dependencies.sc-cli] -version = "0.8.0-rc3" +version = "0.8.0-rc4" package = "sc-cli" path = "../../../client/cli" optional = true diff --git a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml index 2c5a5db281..900f0cad43 100644 --- a/bin/node/executor/Cargo.toml +++ b/bin/node/executor/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-executor" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] description = "Substrate node implementation in Rust." edition = "2018" @@ -13,34 +13,34 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1" } -node-primitives = { version = "2.0.0-rc3", path = "../primitives" } -node-runtime = { version = "2.0.0-rc3", path = "../runtime" } -sc-executor = { version = "0.8.0-rc3", path = "../../../client/executor" } -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } -sp-io = { version = "2.0.0-rc3", path = "../../../primitives/io" } -sp-state-machine = { version = "0.8.0-rc3", path = "../../../primitives/state-machine" } -sp-trie = { version = "2.0.0-rc3", path = "../../../primitives/trie" } +node-primitives = { version = "2.0.0-rc4", path = "../primitives" } +node-runtime = { version = "2.0.0-rc4", path = "../runtime" } +sc-executor = { version = "0.8.0-rc4", path = "../../../client/executor" } +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } +sp-io = { version = "2.0.0-rc4", path = "../../../primitives/io" } +sp-state-machine = { version = "0.8.0-rc4", path = "../../../primitives/state-machine" } +sp-trie = { version = "2.0.0-rc4", path = "../../../primitives/trie" } trie-root = "0.16.0" -frame-benchmarking = { version = "2.0.0-rc3", path = "../../../frame/benchmarking" } +frame-benchmarking = { version = "2.0.0-rc4", path = "../../../frame/benchmarking" } [dev-dependencies] criterion = "0.3.0" -frame-support = { version = "2.0.0-rc3", path = "../../../frame/support" } -frame-system = { version = "2.0.0-rc3", path = "../../../frame/system" } -node-testing = { version = "2.0.0-rc3", path = "../testing" } -pallet-balances = { version = "2.0.0-rc3", path = "../../../frame/balances" } -pallet-contracts = { version = "2.0.0-rc3", path = "../../../frame/contracts" } -pallet-grandpa = { version = "2.0.0-rc3", path = "../../../frame/grandpa" } -pallet-im-online = { version = "2.0.0-rc3", path = "../../../frame/im-online" } -pallet-indices = { version = "2.0.0-rc3", path = 
"../../../frame/indices" } -pallet-session = { version = "2.0.0-rc3", path = "../../../frame/session" } -pallet-timestamp = { version = "2.0.0-rc3", path = "../../../frame/timestamp" } -pallet-transaction-payment = { version = "2.0.0-rc3", path = "../../../frame/transaction-payment" } -pallet-treasury = { version = "2.0.0-rc3", path = "../../../frame/treasury" } -sp-application-crypto = { version = "2.0.0-rc3", path = "../../../primitives/application-crypto" } -sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } -sp-externalities = { version = "0.8.0-rc3", path = "../../../primitives/externalities" } -substrate-test-client = { version = "2.0.0-rc3", path = "../../../test-utils/client" } +frame-support = { version = "2.0.0-rc4", path = "../../../frame/support" } +frame-system = { version = "2.0.0-rc4", path = "../../../frame/system" } +node-testing = { version = "2.0.0-rc4", path = "../testing" } +pallet-balances = { version = "2.0.0-rc4", path = "../../../frame/balances" } +pallet-contracts = { version = "2.0.0-rc4", path = "../../../frame/contracts" } +pallet-grandpa = { version = "2.0.0-rc4", path = "../../../frame/grandpa" } +pallet-im-online = { version = "2.0.0-rc4", path = "../../../frame/im-online" } +pallet-indices = { version = "2.0.0-rc4", path = "../../../frame/indices" } +pallet-session = { version = "2.0.0-rc4", path = "../../../frame/session" } +pallet-timestamp = { version = "2.0.0-rc4", path = "../../../frame/timestamp" } +pallet-transaction-payment = { version = "2.0.0-rc4", path = "../../../frame/transaction-payment" } +pallet-treasury = { version = "2.0.0-rc4", path = "../../../frame/treasury" } +sp-application-crypto = { version = "2.0.0-rc4", path = "../../../primitives/application-crypto" } +sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } +sp-externalities = { version = "0.8.0-rc4", path = "../../../primitives/externalities" } +substrate-test-client = { version = "2.0.0-rc4", path = "../../../test-utils/client" } wabt = "0.9.2" [features] diff --git a/bin/node/inspect/Cargo.toml b/bin/node/inspect/Cargo.toml index 91202191f1..e76f215a99 100644 --- a/bin/node/inspect/Cargo.toml +++ b/bin/node/inspect/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-inspect" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -14,10 +14,10 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "1.3.1" } derive_more = "0.99" log = "0.4.8" -sc-cli = { version = "0.8.0-rc3", path = "../../../client/cli" } -sc-client-api = { version = "2.0.0-rc3", path = "../../../client/api" } -sc-service = { version = "0.8.0-rc3", default-features = false, path = "../../../client/service" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../../primitives/blockchain" } -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } +sc-cli = { version = "0.8.0-rc4", path = "../../../client/cli" } +sc-client-api = { version = "2.0.0-rc4", path = "../../../client/api" } +sc-service = { version = "0.8.0-rc4", default-features = false, path = "../../../client/service" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../../primitives/blockchain" } +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } structopt = "0.3.8" diff 
--git a/bin/node/primitives/Cargo.toml b/bin/node/primitives/Cargo.toml index 75a8cbb332..0a66336046 100644 --- a/bin/node/primitives/Cargo.toml +++ b/bin/node/primitives/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-primitives" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,13 +12,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/system" } -sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/application-crypto" } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/runtime" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/system" } +sp-application-crypto = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/application-crypto" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/runtime" } [dev-dependencies] -sp-serializer = { version = "2.0.0-rc3", path = "../../../primitives/serializer" } +sp-serializer = { version = "2.0.0-rc4", path = "../../../primitives/serializer" } pretty_assertions = "0.6.1" [features] diff --git a/bin/node/rpc-client/Cargo.toml b/bin/node/rpc-client/Cargo.toml index ab4bc7a02d..2d21746f2a 100644 --- a/bin/node/rpc-client/Cargo.toml +++ b/bin/node/rpc-client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-rpc-client" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -16,5 +16,5 @@ futures = "0.1.29" hyper = "0.12.35" jsonrpc-core-client = { version = "14.2.0", default-features = false, features = ["http"] } log = "0.4.8" -node-primitives = { version = "2.0.0-rc3", path = "../primitives" } -sc-rpc = { version = "2.0.0-rc3", path = "../../../client/rpc" } +node-primitives = { version = "2.0.0-rc4", path = "../primitives" } +sc-rpc = { version = "2.0.0-rc4", path = "../../../client/rpc" } diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index 2bac8b6740..95d55fab64 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-rpc" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -11,24 +11,24 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-client-api = { version = "2.0.0-rc3", path = "../../../client/api" } +sc-client-api = { version = "2.0.0-rc4", path = "../../../client/api" } jsonrpc-core = "14.2.0" -node-primitives = { version = "2.0.0-rc3", path = "../primitives" } -node-runtime = { version = "2.0.0-rc3", path = "../runtime" } -sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0-rc3", path = "../../../primitives/api" } -pallet-contracts-rpc = { version = "0.8.0-rc3", path = "../../../frame/contracts/rpc/" } -pallet-transaction-payment-rpc = { version = "2.0.0-rc3", path = "../../../frame/transaction-payment/rpc/" } -substrate-frame-rpc-system = { version 
= "2.0.0-rc3", path = "../../../utils/frame/rpc/system" } -sp-transaction-pool = { version = "2.0.0-rc3", path = "../../../primitives/transaction-pool" } -sc-consensus-babe = { version = "0.8.0-rc3", path = "../../../client/consensus/babe" } -sc-consensus-babe-rpc = { version = "0.8.0-rc3", path = "../../../client/consensus/babe/rpc" } -sp-consensus-babe = { version = "0.8.0-rc3", path = "../../../primitives/consensus/babe" } -sc-keystore = { version = "2.0.0-rc3", path = "../../../client/keystore" } -sc-consensus-epochs = { version = "0.8.0-rc3", path = "../../../client/consensus/epochs" } -sp-consensus = { version = "0.8.0-rc3", path = "../../../primitives/consensus/common" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../../primitives/blockchain" } -sc-finality-grandpa = { version = "0.8.0-rc3", path = "../../../client/finality-grandpa" } -sc-finality-grandpa-rpc = { version = "0.8.0-rc3", path = "../../../client/finality-grandpa/rpc" } -sc-rpc-api = { version = "0.8.0-rc3", path = "../../../client/rpc-api" } -sp-block-builder = { version = "2.0.0-rc3", path = "../../../primitives/block-builder" } +node-primitives = { version = "2.0.0-rc4", path = "../primitives" } +node-runtime = { version = "2.0.0-rc4", path = "../runtime" } +sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } +sp-api = { version = "2.0.0-rc4", path = "../../../primitives/api" } +pallet-contracts-rpc = { version = "0.8.0-rc4", path = "../../../frame/contracts/rpc/" } +pallet-transaction-payment-rpc = { version = "2.0.0-rc4", path = "../../../frame/transaction-payment/rpc/" } +substrate-frame-rpc-system = { version = "2.0.0-rc4", path = "../../../utils/frame/rpc/system" } +sp-transaction-pool = { version = "2.0.0-rc4", path = "../../../primitives/transaction-pool" } +sc-consensus-babe = { version = "0.8.0-rc4", path = "../../../client/consensus/babe" } +sc-consensus-babe-rpc = { version = "0.8.0-rc4", path = "../../../client/consensus/babe/rpc" } +sp-consensus-babe = { version = "0.8.0-rc4", path = "../../../primitives/consensus/babe" } +sc-keystore = { version = "2.0.0-rc4", path = "../../../client/keystore" } +sc-consensus-epochs = { version = "0.8.0-rc4", path = "../../../client/consensus/epochs" } +sp-consensus = { version = "0.8.0-rc4", path = "../../../primitives/consensus/common" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../../primitives/blockchain" } +sc-finality-grandpa = { version = "0.8.0-rc4", path = "../../../client/finality-grandpa" } +sc-finality-grandpa-rpc = { version = "0.8.0-rc4", path = "../../../client/finality-grandpa/rpc" } +sc-rpc-api = { version = "0.8.0-rc4", path = "../../../client/rpc-api" } +sp-block-builder = { version = "2.0.0-rc4", path = "../../../primitives/block-builder" } diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 6db4057e8c..568b1afb5e 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-runtime" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -21,70 +21,70 @@ static_assertions = "1.1.0" hex-literal = { version = "0.2.1", optional = true } # primitives -sp-authority-discovery = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/authority-discovery" } -sp-consensus-babe = { version = "0.8.0-rc3", default-features = false, path = "../../../primitives/consensus/babe" } -sp-block-builder = { path = "../../../primitives/block-builder", 
default-features = false, version = "2.0.0-rc3"} -sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/inherents" } -node-primitives = { version = "2.0.0-rc3", default-features = false, path = "../primitives" } -sp-offchain = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/offchain" } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/core" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/std" } -sp-api = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/api" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/runtime" } -sp-staking = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/staking" } -sp-keyring = { version = "2.0.0-rc3", optional = true, path = "../../../primitives/keyring" } -sp-session = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/session" } -sp-transaction-pool = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/transaction-pool" } -sp-version = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/version" } +sp-authority-discovery = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/authority-discovery" } +sp-consensus-babe = { version = "0.8.0-rc4", default-features = false, path = "../../../primitives/consensus/babe" } +sp-block-builder = { path = "../../../primitives/block-builder", default-features = false, version = "2.0.0-rc4"} +sp-inherents = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/inherents" } +node-primitives = { version = "2.0.0-rc4", default-features = false, path = "../primitives" } +sp-offchain = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/offchain" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/core" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/std" } +sp-api = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/api" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/runtime" } +sp-staking = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/staking" } +sp-keyring = { version = "2.0.0-rc4", optional = true, path = "../../../primitives/keyring" } +sp-session = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/session" } +sp-transaction-pool = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/transaction-pool" } +sp-version = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/version" } # frame dependencies -frame-executive = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/executive" } -frame-benchmarking = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/benchmarking", optional = true } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/system" } -frame-system-benchmarking = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/system/benchmarking", optional = true } -frame-system-rpc-runtime-api = { version = "2.0.0-rc3", default-features = 
false, path = "../../../frame/system/rpc/runtime-api/" } -pallet-authority-discovery = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/authority-discovery" } -pallet-authorship = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/authorship" } -pallet-babe = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/babe" } -pallet-balances = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/balances" } -pallet-collective = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/collective" } -pallet-contracts = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/contracts" } -pallet-contracts-primitives = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/contracts/common/" } -pallet-contracts-rpc-runtime-api = { version = "0.8.0-rc3", default-features = false, path = "../../../frame/contracts/rpc/runtime-api/" } -pallet-democracy = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/democracy" } -pallet-elections-phragmen = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/elections-phragmen" } -pallet-finality-tracker = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/finality-tracker" } -pallet-grandpa = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/grandpa" } -pallet-im-online = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/im-online" } -pallet-indices = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/indices" } -pallet-identity = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/identity" } -pallet-membership = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/membership" } -pallet-multisig = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/multisig" } -pallet-offences = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/offences" } -pallet-offences-benchmarking = { version = "2.0.0-rc3", path = "../../../frame/offences/benchmarking", default-features = false, optional = true } -pallet-proxy = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/proxy" } -pallet-randomness-collective-flip = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/randomness-collective-flip" } -pallet-recovery = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/recovery" } -pallet-session = { version = "2.0.0-rc3", features = ["historical"], path = "../../../frame/session", default-features = false } -pallet-session-benchmarking = { version = "2.0.0-rc3", path = "../../../frame/session/benchmarking", default-features = false, optional = true } -pallet-staking = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/staking" } -pallet-staking-reward-curve = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/staking/reward-curve" } -pallet-scheduler = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/scheduler" } -pallet-society = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/society" } -pallet-sudo = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/sudo" } -pallet-timestamp = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/timestamp" } -pallet-treasury = { version = "2.0.0-rc3", default-features = false, 
path = "../../../frame/treasury" } -pallet-utility = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/utility" } -pallet-transaction-payment = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/transaction-payment" } -pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } -pallet-vesting = { version = "2.0.0-rc3", default-features = false, path = "../../../frame/vesting" } +frame-executive = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/executive" } +frame-benchmarking = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/benchmarking", optional = true } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/system" } +frame-system-benchmarking = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/system/benchmarking", optional = true } +frame-system-rpc-runtime-api = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } +pallet-authority-discovery = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/authority-discovery" } +pallet-authorship = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/authorship" } +pallet-babe = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/babe" } +pallet-balances = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/balances" } +pallet-collective = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/collective" } +pallet-contracts = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/contracts" } +pallet-contracts-primitives = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/contracts/common/" } +pallet-contracts-rpc-runtime-api = { version = "0.8.0-rc4", default-features = false, path = "../../../frame/contracts/rpc/runtime-api/" } +pallet-democracy = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/democracy" } +pallet-elections-phragmen = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/elections-phragmen" } +pallet-finality-tracker = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/finality-tracker" } +pallet-grandpa = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/grandpa" } +pallet-im-online = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/im-online" } +pallet-indices = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/indices" } +pallet-identity = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/identity" } +pallet-membership = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/membership" } +pallet-multisig = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/multisig" } +pallet-offences = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/offences" } +pallet-offences-benchmarking = { version = "2.0.0-rc4", path = "../../../frame/offences/benchmarking", default-features = false, optional = true } +pallet-proxy = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/proxy" } +pallet-randomness-collective-flip = { version = 
"2.0.0-rc4", default-features = false, path = "../../../frame/randomness-collective-flip" } +pallet-recovery = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/recovery" } +pallet-session = { version = "2.0.0-rc4", features = ["historical"], path = "../../../frame/session", default-features = false } +pallet-session-benchmarking = { version = "2.0.0-rc4", path = "../../../frame/session/benchmarking", default-features = false, optional = true } +pallet-staking = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/staking" } +pallet-staking-reward-curve = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/staking/reward-curve" } +pallet-scheduler = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/scheduler" } +pallet-society = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/society" } +pallet-sudo = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/sudo" } +pallet-timestamp = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/timestamp" } +pallet-treasury = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/treasury" } +pallet-utility = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/utility" } +pallet-transaction-payment = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/transaction-payment" } +pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } +pallet-vesting = { version = "2.0.0-rc4", default-features = false, path = "../../../frame/vesting" } [build-dependencies] wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } [dev-dependencies] -sp-io = { version = "2.0.0-rc3", path = "../../../primitives/io" } +sp-io = { version = "2.0.0-rc4", path = "../../../primitives/io" } [features] default = ["std"] diff --git a/bin/node/testing/Cargo.toml b/bin/node/testing/Cargo.toml index 6bf4abc03d..fbf369cc3b 100644 --- a/bin/node/testing/Cargo.toml +++ b/bin/node/testing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-testing" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] description = "Test utilities for Substrate node." 
edition = "2018" @@ -13,40 +13,40 @@ publish = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -pallet-balances = { version = "2.0.0-rc3", path = "../../../frame/balances" } -sc-service = { version = "0.8.0-rc3", features = ["test-helpers", "db"], path = "../../../client/service" } -sc-client-db = { version = "0.8.0-rc3", path = "../../../client/db/", features = ["kvdb-rocksdb", "parity-db"] } -sc-client-api = { version = "2.0.0-rc3", path = "../../../client/api/" } +pallet-balances = { version = "2.0.0-rc4", path = "../../../frame/balances" } +sc-service = { version = "0.8.0-rc4", features = ["test-helpers", "db"], path = "../../../client/service" } +sc-client-db = { version = "0.8.0-rc4", path = "../../../client/db/", features = ["kvdb-rocksdb", "parity-db"] } +sc-client-api = { version = "2.0.0-rc4", path = "../../../client/api/" } codec = { package = "parity-scale-codec", version = "1.3.1" } -pallet-contracts = { version = "2.0.0-rc3", path = "../../../frame/contracts" } -pallet-grandpa = { version = "2.0.0-rc3", path = "../../../frame/grandpa" } -pallet-indices = { version = "2.0.0-rc3", path = "../../../frame/indices" } -sp-keyring = { version = "2.0.0-rc3", path = "../../../primitives/keyring" } -node-executor = { version = "2.0.0-rc3", path = "../executor" } -node-primitives = { version = "2.0.0-rc3", path = "../primitives" } -node-runtime = { version = "2.0.0-rc3", path = "../runtime" } -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } -sp-io = { version = "2.0.0-rc3", path = "../../../primitives/io" } -frame-support = { version = "2.0.0-rc3", path = "../../../frame/support" } -pallet-session = { version = "2.0.0-rc3", path = "../../../frame/session" } -pallet-society = { version = "2.0.0-rc3", path = "../../../frame/society" } -sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } -pallet-staking = { version = "2.0.0-rc3", path = "../../../frame/staking" } -sc-executor = { version = "0.8.0-rc3", path = "../../../client/executor", features = ["wasmtime"] } -sp-consensus = { version = "0.8.0-rc3", path = "../../../primitives/consensus/common" } -frame-system = { version = "2.0.0-rc3", path = "../../../frame/system" } -substrate-test-client = { version = "2.0.0-rc3", path = "../../../test-utils/client" } -pallet-timestamp = { version = "2.0.0-rc3", path = "../../../frame/timestamp" } -pallet-transaction-payment = { version = "2.0.0-rc3", path = "../../../frame/transaction-payment" } -pallet-treasury = { version = "2.0.0-rc3", path = "../../../frame/treasury" } +pallet-contracts = { version = "2.0.0-rc4", path = "../../../frame/contracts" } +pallet-grandpa = { version = "2.0.0-rc4", path = "../../../frame/grandpa" } +pallet-indices = { version = "2.0.0-rc4", path = "../../../frame/indices" } +sp-keyring = { version = "2.0.0-rc4", path = "../../../primitives/keyring" } +node-executor = { version = "2.0.0-rc4", path = "../executor" } +node-primitives = { version = "2.0.0-rc4", path = "../primitives" } +node-runtime = { version = "2.0.0-rc4", path = "../runtime" } +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } +sp-io = { version = "2.0.0-rc4", path = "../../../primitives/io" } +frame-support = { version = "2.0.0-rc4", path = "../../../frame/support" } +pallet-session = { version = "2.0.0-rc4", path = "../../../frame/session" } +pallet-society = { version = "2.0.0-rc4", path = "../../../frame/society" } +sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } +pallet-staking = { version 
= "2.0.0-rc4", path = "../../../frame/staking" } +sc-executor = { version = "0.8.0-rc4", path = "../../../client/executor", features = ["wasmtime"] } +sp-consensus = { version = "0.8.0-rc4", path = "../../../primitives/consensus/common" } +frame-system = { version = "2.0.0-rc4", path = "../../../frame/system" } +substrate-test-client = { version = "2.0.0-rc4", path = "../../../test-utils/client" } +pallet-timestamp = { version = "2.0.0-rc4", path = "../../../frame/timestamp" } +pallet-transaction-payment = { version = "2.0.0-rc4", path = "../../../frame/transaction-payment" } +pallet-treasury = { version = "2.0.0-rc4", path = "../../../frame/treasury" } wabt = "0.9.2" -sp-api = { version = "2.0.0-rc3", path = "../../../primitives/api" } -sp-finality-tracker = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/finality-tracker" } -sp-timestamp = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/timestamp" } -sp-block-builder = { version = "2.0.0-rc3", path = "../../../primitives/block-builder" } -sc-block-builder = { version = "0.8.0-rc3", path = "../../../client/block-builder" } -sp-inherents = { version = "2.0.0-rc3", path = "../../../primitives/inherents" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../../primitives/blockchain" } +sp-api = { version = "2.0.0-rc4", path = "../../../primitives/api" } +sp-finality-tracker = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/finality-tracker" } +sp-timestamp = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/timestamp" } +sp-block-builder = { version = "2.0.0-rc4", path = "../../../primitives/block-builder" } +sc-block-builder = { version = "0.8.0-rc4", path = "../../../client/block-builder" } +sp-inherents = { version = "2.0.0-rc4", path = "../../../primitives/inherents" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../../primitives/blockchain" } log = "0.4.8" tempfile = "3.1.0" fs_extra = "1" @@ -54,4 +54,4 @@ futures = "0.3.1" [dev-dependencies] criterion = "0.3.0" -sc-cli = { version = "0.8.0-rc3", path = "../../../client/cli" } +sc-cli = { version = "0.8.0-rc4", path = "../../../client/cli" } diff --git a/bin/utils/chain-spec-builder/Cargo.toml b/bin/utils/chain-spec-builder/Cargo.toml index 743a5f25c0..b633ffa966 100644 --- a/bin/utils/chain-spec-builder/Cargo.toml +++ b/bin/utils/chain-spec-builder/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "chain-spec-builder" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -13,9 +13,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] ansi_term = "0.12.1" -sc-keystore = { version = "2.0.0-rc3", path = "../../../client/keystore" } -sc-chain-spec = { version = "2.0.0-rc3", path = "../../../client/chain-spec" } -node-cli = { version = "2.0.0-rc3", path = "../../node/cli" } -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } +sc-keystore = { version = "2.0.0-rc4", path = "../../../client/keystore" } +sc-chain-spec = { version = "2.0.0-rc4", path = "../../../client/chain-spec" } +node-cli = { version = "2.0.0-rc4", path = "../../node/cli" } +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } rand = "0.7.2" structopt = "0.3.8" diff --git a/bin/utils/subkey/Cargo.toml b/bin/utils/subkey/Cargo.toml index fa570f5759..92fffe898f 100644 --- a/bin/utils/subkey/Cargo.toml +++ b/bin/utils/subkey/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "subkey" -version = 
"2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -12,10 +12,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.1.29" -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } -node-runtime = { version = "2.0.0-rc3", path = "../../node/runtime" } -node-primitives = { version = "2.0.0-rc3", path = "../../node/primitives" } -sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } +node-runtime = { version = "2.0.0-rc4", path = "../../node/runtime" } +node-primitives = { version = "2.0.0-rc4", path = "../../node/primitives" } +sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } rand = "0.7.2" clap = "2.33.0" tiny-bip39 = "0.7" @@ -23,14 +23,14 @@ substrate-bip39 = "0.4.1" hex = "0.4.0" hex-literal = "0.2.1" codec = { package = "parity-scale-codec", version = "1.3.1" } -frame-system = { version = "2.0.0-rc3", path = "../../../frame/system" } -pallet-balances = { version = "2.0.0-rc3", path = "../../../frame/balances" } -pallet-transaction-payment = { version = "2.0.0-rc3", path = "../../../frame/transaction-payment" } -pallet-grandpa = { version = "2.0.0-rc3", path = "../../../frame/grandpa" } +frame-system = { version = "2.0.0-rc4", path = "../../../frame/system" } +pallet-balances = { version = "2.0.0-rc4", path = "../../../frame/balances" } +pallet-transaction-payment = { version = "2.0.0-rc4", path = "../../../frame/transaction-payment" } +pallet-grandpa = { version = "2.0.0-rc4", path = "../../../frame/grandpa" } rpassword = "4.0.1" itertools = "0.8.2" derive_more = { version = "0.99.2" } -sc-rpc = { version = "2.0.0-rc3", path = "../../../client/rpc" } +sc-rpc = { version = "2.0.0-rc4", path = "../../../client/rpc" } jsonrpc-core-client = { version = "14.2.0", features = ["http"] } hyper = "0.12.35" libp2p = { version = "0.19.1", default-features = false } diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml index 606c1c4813..a32623ffdb 100644 --- a/client/api/Cargo.toml +++ b/client/api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-client-api" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -14,36 +14,36 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-consensus = { version = "0.8.0-rc3", path = "../../primitives/consensus/common" } +sp-consensus = { version = "0.8.0-rc4", path = "../../primitives/consensus/common" } derive_more = { version = "0.99.2" } -sc-executor = { version = "0.8.0-rc3", path = "../executor" } -sp-externalities = { version = "0.8.0-rc3", path = "../../primitives/externalities" } +sc-executor = { version = "0.8.0-rc4", path = "../executor" } +sp-externalities = { version = "0.8.0-rc4", path = "../../primitives/externalities" } fnv = { version = "1.0.6" } futures = { version = "0.3.1" } hash-db = { version = "0.15.2", default-features = false } -sp-blockchain = { version = "2.0.0-rc3", path = "../../primitives/blockchain" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../primitives/blockchain" } hex-literal = { version = "0.2.1" } -sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/inherents" } -sp-keyring = { version = "2.0.0-rc3", 
path = "../../primitives/keyring" } +sp-inherents = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/inherents" } +sp-keyring = { version = "2.0.0-rc4", path = "../../primitives/keyring" } kvdb = "0.6.0" log = { version = "0.4.8" } parking_lot = "0.10.0" lazy_static = "1.4.0" -sp-database = { version = "2.0.0-rc3", path = "../../primitives/database" } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-version = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/version" } -sp-api = { version = "2.0.0-rc3", path = "../../primitives/api" } -sp-utils = { version = "2.0.0-rc3", path = "../../primitives/utils" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -sp-state-machine = { version = "0.8.0-rc3", path = "../../primitives/state-machine" } -sc-telemetry = { version = "2.0.0-rc3", path = "../telemetry" } -sp-trie = { version = "2.0.0-rc3", path = "../../primitives/trie" } -sp-storage = { version = "2.0.0-rc3", path = "../../primitives/storage" } -sp-transaction-pool = { version = "2.0.0-rc3", path = "../../primitives/transaction-pool" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-rc3", path = "../../utils/prometheus" } +sp-database = { version = "2.0.0-rc4", path = "../../primitives/database" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-version = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/version" } +sp-api = { version = "2.0.0-rc4", path = "../../primitives/api" } +sp-utils = { version = "2.0.0-rc4", path = "../../primitives/utils" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-state-machine = { version = "0.8.0-rc4", path = "../../primitives/state-machine" } +sc-telemetry = { version = "2.0.0-rc4", path = "../telemetry" } +sp-trie = { version = "2.0.0-rc4", path = "../../primitives/trie" } +sp-storage = { version = "2.0.0-rc4", path = "../../primitives/storage" } +sp-transaction-pool = { version = "2.0.0-rc4", path = "../../primitives/transaction-pool" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-rc4", path = "../../utils/prometheus" } [dev-dependencies] kvdb-memorydb = "0.6.0" -sp-test-primitives = { version = "2.0.0-rc3", path = "../../primitives/test-primitives" } -substrate-test-runtime = { version = "2.0.0-rc3", path = "../../test-utils/runtime" } +sp-test-primitives = { version = "2.0.0-rc4", path = "../../primitives/test-primitives" } +substrate-test-runtime = { version = "2.0.0-rc4", path = "../../test-utils/runtime" } diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index 114092ab31..84a37bd16c 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-authority-discovery" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -23,21 +23,21 @@ futures = "0.3.4" futures-timer = "3.0.1" libp2p = { version = "0.19.1", default-features = false, features = ["kad"] } log = "0.4.8" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", 
path = "../../utils/prometheus", version = "0.8.0-rc3"} +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc4"} prost = "0.6.1" rand = "0.7.2" -sc-client-api = { version = "2.0.0-rc3", path = "../api" } -sc-keystore = { version = "2.0.0-rc3", path = "../keystore" } -sc-network = { version = "0.8.0-rc3", path = "../network" } +sc-client-api = { version = "2.0.0-rc4", path = "../api" } +sc-keystore = { version = "2.0.0-rc4", path = "../keystore" } +sc-network = { version = "0.8.0-rc4", path = "../network" } serde_json = "1.0.41" -sp-authority-discovery = { version = "2.0.0-rc3", path = "../../primitives/authority-discovery" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../primitives/blockchain" } -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-rc3", path = "../../primitives/runtime" } -sp-api = { version = "2.0.0-rc3", path = "../../primitives/api" } +sp-authority-discovery = { version = "2.0.0-rc4", path = "../../primitives/authority-discovery" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../primitives/blockchain" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-rc4", path = "../../primitives/runtime" } +sp-api = { version = "2.0.0-rc4", path = "../../primitives/api" } [dev-dependencies] env_logger = "0.7.0" quickcheck = "0.9.0" -sc-peerset = { version = "2.0.0-rc3", path = "../peerset" } -substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../../test-utils/runtime/client"} +sc-peerset = { version = "2.0.0-rc4", path = "../peerset" } +substrate-test-runtime-client = { version = "2.0.0-rc4", path = "../../test-utils/runtime/client"} diff --git a/client/basic-authorship/Cargo.toml b/client/basic-authorship/Cargo.toml index 6e3ec49ea7..b6a853a1a1 100644 --- a/client/basic-authorship/Cargo.toml +++ b/client/basic-authorship/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-basic-authorship" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -16,21 +16,21 @@ codec = { package = "parity-scale-codec", version = "1.3.1" } futures = "0.3.4" futures-timer = "3.0.1" log = "0.4.8" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc3"} -sp-api = { version = "2.0.0-rc3", path = "../../primitives/api" } -sp-runtime = { version = "2.0.0-rc3", path = "../../primitives/runtime" } -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../primitives/blockchain" } -sc-client-api = { version = "2.0.0-rc3", path = "../api" } -sp-consensus = { version = "0.8.0-rc3", path = "../../primitives/consensus/common" } -sp-inherents = { version = "2.0.0-rc3", path = "../../primitives/inherents" } -sc-telemetry = { version = "2.0.0-rc3", path = "../telemetry" } -sp-transaction-pool = { version = "2.0.0-rc3", path = "../../primitives/transaction-pool" } -sc-block-builder = { version = "0.8.0-rc3", path = "../block-builder" } -sc-proposer-metrics = { version = "0.8.0-rc3", path = "../proposer-metrics" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc4"} +sp-api = { version = "2.0.0-rc4", path = "../../primitives/api" } +sp-runtime = { version = "2.0.0-rc4", path = "../../primitives/runtime" } +sp-core = { 
version = "2.0.0-rc4", path = "../../primitives/core" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../primitives/blockchain" } +sc-client-api = { version = "2.0.0-rc4", path = "../api" } +sp-consensus = { version = "0.8.0-rc4", path = "../../primitives/consensus/common" } +sp-inherents = { version = "2.0.0-rc4", path = "../../primitives/inherents" } +sc-telemetry = { version = "2.0.0-rc4", path = "../telemetry" } +sp-transaction-pool = { version = "2.0.0-rc4", path = "../../primitives/transaction-pool" } +sc-block-builder = { version = "0.8.0-rc4", path = "../block-builder" } +sc-proposer-metrics = { version = "0.8.0-rc4", path = "../proposer-metrics" } tokio-executor = { version = "0.2.0-alpha.6", features = ["blocking"] } [dev-dependencies] -sc-transaction-pool = { version = "2.0.0-rc3", path = "../../client/transaction-pool" } -substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../../test-utils/runtime/client" } +sc-transaction-pool = { version = "2.0.0-rc4", path = "../../client/transaction-pool" } +substrate-test-runtime-client = { version = "2.0.0-rc4", path = "../../test-utils/runtime/client" } parking_lot = "0.10.0" diff --git a/client/block-builder/Cargo.toml b/client/block-builder/Cargo.toml index ce94526e0c..1e733355f7 100644 --- a/client/block-builder/Cargo.toml +++ b/client/block-builder/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-block-builder" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -13,16 +13,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-state-machine = { version = "0.8.0-rc3", path = "../../primitives/state-machine" } -sp-runtime = { version = "2.0.0-rc3", path = "../../primitives/runtime" } -sp-api = { version = "2.0.0-rc3", path = "../../primitives/api" } -sp-consensus = { version = "0.8.0-rc3", path = "../../primitives/consensus/common" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../primitives/blockchain" } -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -sp-block-builder = { version = "2.0.0-rc3", path = "../../primitives/block-builder" } -sc-client-api = { version = "2.0.0-rc3", path = "../api" } +sp-state-machine = { version = "0.8.0-rc4", path = "../../primitives/state-machine" } +sp-runtime = { version = "2.0.0-rc4", path = "../../primitives/runtime" } +sp-api = { version = "2.0.0-rc4", path = "../../primitives/api" } +sp-consensus = { version = "0.8.0-rc4", path = "../../primitives/consensus/common" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../primitives/blockchain" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +sp-block-builder = { version = "2.0.0-rc4", path = "../../primitives/block-builder" } +sc-client-api = { version = "2.0.0-rc4", path = "../api" } codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } [dev-dependencies] substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } -sp-trie = { version = "2.0.0-rc3", path = "../../primitives/trie" } +sp-trie = { version = "2.0.0-rc4", path = "../../primitives/trie" } diff --git a/client/chain-spec/Cargo.toml b/client/chain-spec/Cargo.toml index 669e7535dc..a3176deee5 100644 --- a/client/chain-spec/Cargo.toml +++ b/client/chain-spec/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-chain-spec" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH 
Classpath-exception-2.0" @@ -12,12 +12,12 @@ description = "Substrate chain configurations." targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-chain-spec-derive = { version = "2.0.0-rc3", path = "./derive" } +sc-chain-spec-derive = { version = "2.0.0-rc4", path = "./derive" } impl-trait-for-tuples = "0.1.3" -sc-network = { version = "0.8.0-rc3", path = "../network" } -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } +sc-network = { version = "0.8.0-rc4", path = "../network" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } serde = { version = "1.0.101", features = ["derive"] } serde_json = "1.0.41" -sp-runtime = { version = "2.0.0-rc3", path = "../../primitives/runtime" } -sp-chain-spec = { version = "2.0.0-rc3", path = "../../primitives/chain-spec" } -sc-telemetry = { version = "2.0.0-rc3", path = "../telemetry" } +sp-runtime = { version = "2.0.0-rc4", path = "../../primitives/runtime" } +sp-chain-spec = { version = "2.0.0-rc4", path = "../../primitives/chain-spec" } +sc-telemetry = { version = "2.0.0-rc4", path = "../telemetry" } diff --git a/client/chain-spec/derive/Cargo.toml b/client/chain-spec/derive/Cargo.toml index 6c1153941f..75a290dc98 100644 --- a/client/chain-spec/derive/Cargo.toml +++ b/client/chain-spec/derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-chain-spec-derive" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 7ffc27749b..616b4f3481 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-cli" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] description = "Substrate CLI interface." 
edition = "2018" @@ -24,23 +24,23 @@ tokio = { version = "0.2.9", features = [ "signal", "rt-core", "rt-threaded" ] } futures = "0.3.4" fdlimit = "0.1.4" serde_json = "1.0.41" -sc-informant = { version = "0.8.0-rc3", path = "../informant" } -sp-panic-handler = { version = "2.0.0-rc3", path = "../../primitives/panic-handler" } -sc-client-api = { version = "2.0.0-rc3", path = "../api" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../primitives/blockchain" } -sc-network = { version = "0.8.0-rc3", path = "../network" } -sp-runtime = { version = "2.0.0-rc3", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0-rc3", path = "../../primitives/utils" } -sp-version = { version = "2.0.0-rc3", path = "../../primitives/version" } -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -sc-service = { version = "0.8.0-rc3", default-features = false, path = "../service" } -sp-state-machine = { version = "0.8.0-rc3", path = "../../primitives/state-machine" } -sc-telemetry = { version = "2.0.0-rc3", path = "../telemetry" } -substrate-prometheus-endpoint = { path = "../../utils/prometheus" , version = "0.8.0-rc3"} -sp-keyring = { version = "2.0.0-rc3", path = "../../primitives/keyring" } +sc-informant = { version = "0.8.0-rc4", path = "../informant" } +sp-panic-handler = { version = "2.0.0-rc4", path = "../../primitives/panic-handler" } +sc-client-api = { version = "2.0.0-rc4", path = "../api" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../primitives/blockchain" } +sc-network = { version = "0.8.0-rc4", path = "../network" } +sp-runtime = { version = "2.0.0-rc4", path = "../../primitives/runtime" } +sp-utils = { version = "2.0.0-rc4", path = "../../primitives/utils" } +sp-version = { version = "2.0.0-rc4", path = "../../primitives/version" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +sc-service = { version = "0.8.0-rc4", default-features = false, path = "../service" } +sp-state-machine = { version = "0.8.0-rc4", path = "../../primitives/state-machine" } +sc-telemetry = { version = "2.0.0-rc4", path = "../telemetry" } +substrate-prometheus-endpoint = { path = "../../utils/prometheus" , version = "0.8.0-rc4"} +sp-keyring = { version = "2.0.0-rc4", path = "../../primitives/keyring" } names = "0.11.0" structopt = "0.3.8" -sc-tracing = { version = "2.0.0-rc3", path = "../tracing" } +sc-tracing = { version = "2.0.0-rc4", path = "../tracing" } chrono = "0.4.10" parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index 04bdc19fe4..d080fd39d0 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-aura" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] description = "Aura consensus algorithm for substrate" edition = "2018" @@ -12,37 +12,37 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc3", path = "../../../primitives/application-crypto" } -sp-consensus-aura = { version = "0.8.0-rc3", path = "../../../primitives/consensus/aura" } -sp-block-builder = { version = "2.0.0-rc3", path = "../../../primitives/block-builder" } -sc-block-builder = { version = "0.8.0-rc3", path = "../../../client/block-builder" } -sc-client-api = { version = "2.0.0-rc3", path = "../../api" } +sp-application-crypto = { version = 
"2.0.0-rc4", path = "../../../primitives/application-crypto" } +sp-consensus-aura = { version = "0.8.0-rc4", path = "../../../primitives/consensus/aura" } +sp-block-builder = { version = "2.0.0-rc4", path = "../../../primitives/block-builder" } +sc-block-builder = { version = "0.8.0-rc4", path = "../../../client/block-builder" } +sc-client-api = { version = "2.0.0-rc4", path = "../../api" } codec = { package = "parity-scale-codec", version = "1.3.1" } -sp-consensus = { version = "0.8.0-rc3", path = "../../../primitives/consensus/common" } +sp-consensus = { version = "0.8.0-rc4", path = "../../../primitives/consensus/common" } derive_more = "0.99.2" futures = "0.3.4" futures-timer = "3.0.1" -sp-inherents = { version = "2.0.0-rc3", path = "../../../primitives/inherents" } -sc-keystore = { version = "2.0.0-rc3", path = "../../keystore" } +sp-inherents = { version = "2.0.0-rc4", path = "../../../primitives/inherents" } +sc-keystore = { version = "2.0.0-rc4", path = "../../keystore" } log = "0.4.8" parking_lot = "0.10.0" -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../../primitives/blockchain" } -sp-io = { version = "2.0.0-rc3", path = "../../../primitives/io" } -sp-version = { version = "2.0.0-rc3", path = "../../../primitives/version" } -sc-consensus-slots = { version = "0.8.0-rc3", path = "../slots" } -sp-api = { version = "2.0.0-rc3", path = "../../../primitives/api" } -sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } -sp-timestamp = { version = "2.0.0-rc3", path = "../../../primitives/timestamp" } -sc-telemetry = { version = "2.0.0-rc3", path = "../../telemetry" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc3"} +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../../primitives/blockchain" } +sp-io = { version = "2.0.0-rc4", path = "../../../primitives/io" } +sp-version = { version = "2.0.0-rc4", path = "../../../primitives/version" } +sc-consensus-slots = { version = "0.8.0-rc4", path = "../slots" } +sp-api = { version = "2.0.0-rc4", path = "../../../primitives/api" } +sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } +sp-timestamp = { version = "2.0.0-rc4", path = "../../../primitives/timestamp" } +sc-telemetry = { version = "2.0.0-rc4", path = "../../telemetry" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc4"} [dev-dependencies] -sp-keyring = { version = "2.0.0-rc3", path = "../../../primitives/keyring" } -sc-executor = { version = "0.8.0-rc3", path = "../../executor" } -sc-network = { version = "0.8.0-rc3", path = "../../network" } -sc-network-test = { version = "0.8.0-rc3", path = "../../network/test" } -sc-service = { version = "0.8.0-rc3", default-features = false, path = "../../service" } -substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../../../test-utils/runtime/client" } +sp-keyring = { version = "2.0.0-rc4", path = "../../../primitives/keyring" } +sc-executor = { version = "0.8.0-rc4", path = "../../executor" } +sc-network = { version = "0.8.0-rc4", path = "../../network" } +sc-network-test = { version = "0.8.0-rc4", path = "../../network/test" } +sc-service = { version = "0.8.0-rc4", default-features = false, path = "../../service" } +substrate-test-runtime-client = { version = "2.0.0-rc4", path = 
"../../../test-utils/runtime/client" } env_logger = "0.7.0" tempfile = "3.1.0" diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index 4f8f4db264..46c67e8917 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-babe" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] description = "BABE consensus algorithm for substrate" edition = "2018" @@ -14,31 +14,31 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } -sp-consensus-babe = { version = "0.8.0-rc3", path = "../../../primitives/consensus/babe" } -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } -sp-application-crypto = { version = "2.0.0-rc3", path = "../../../primitives/application-crypto" } +sp-consensus-babe = { version = "0.8.0-rc4", path = "../../../primitives/consensus/babe" } +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } +sp-application-crypto = { version = "2.0.0-rc4", path = "../../../primitives/application-crypto" } num-bigint = "0.2.3" num-rational = "0.2.2" num-traits = "0.2.8" serde = { version = "1.0.104", features = ["derive"] } -sp-version = { version = "2.0.0-rc3", path = "../../../primitives/version" } -sp-io = { version = "2.0.0-rc3", path = "../../../primitives/io" } -sp-inherents = { version = "2.0.0-rc3", path = "../../../primitives/inherents" } -sp-timestamp = { version = "2.0.0-rc3", path = "../../../primitives/timestamp" } -sc-telemetry = { version = "2.0.0-rc3", path = "../../telemetry" } -sc-keystore = { version = "2.0.0-rc3", path = "../../keystore" } -sc-client-api = { version = "2.0.0-rc3", path = "../../api" } -sc-consensus-epochs = { version = "0.8.0-rc3", path = "../epochs" } -sp-api = { version = "2.0.0-rc3", path = "../../../primitives/api" } -sp-block-builder = { version = "2.0.0-rc3", path = "../../../primitives/block-builder" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../../primitives/blockchain" } -sp-consensus = { version = "0.8.0-rc3", path = "../../../primitives/consensus/common" } -sp-consensus-vrf = { version = "0.8.0-rc3", path = "../../../primitives/consensus/vrf" } -sc-consensus-uncles = { version = "0.8.0-rc3", path = "../uncles" } -sc-consensus-slots = { version = "0.8.0-rc3", path = "../slots" } -sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } -fork-tree = { version = "2.0.0-rc3", path = "../../../utils/fork-tree" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc3"} +sp-version = { version = "2.0.0-rc4", path = "../../../primitives/version" } +sp-io = { version = "2.0.0-rc4", path = "../../../primitives/io" } +sp-inherents = { version = "2.0.0-rc4", path = "../../../primitives/inherents" } +sp-timestamp = { version = "2.0.0-rc4", path = "../../../primitives/timestamp" } +sc-telemetry = { version = "2.0.0-rc4", path = "../../telemetry" } +sc-keystore = { version = "2.0.0-rc4", path = "../../keystore" } +sc-client-api = { version = "2.0.0-rc4", path = "../../api" } +sc-consensus-epochs = { version = "0.8.0-rc4", path = "../epochs" } +sp-api = { version = "2.0.0-rc4", path = "../../../primitives/api" } +sp-block-builder = { version = "2.0.0-rc4", path = "../../../primitives/block-builder" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../../primitives/blockchain" } +sp-consensus = { version = 
"0.8.0-rc4", path = "../../../primitives/consensus/common" } +sp-consensus-vrf = { version = "0.8.0-rc4", path = "../../../primitives/consensus/vrf" } +sc-consensus-uncles = { version = "0.8.0-rc4", path = "../uncles" } +sc-consensus-slots = { version = "0.8.0-rc4", path = "../slots" } +sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } +fork-tree = { version = "2.0.0-rc4", path = "../../../utils/fork-tree" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc4"} futures = "0.3.4" futures-timer = "3.0.1" parking_lot = "0.10.0" @@ -50,13 +50,13 @@ pdqselect = "0.1.0" derive_more = "0.99.2" [dev-dependencies] -sp-keyring = { version = "2.0.0-rc3", path = "../../../primitives/keyring" } -sc-executor = { version = "0.8.0-rc3", path = "../../executor" } -sc-network = { version = "0.8.0-rc3", path = "../../network" } -sc-network-test = { version = "0.8.0-rc3", path = "../../network/test" } -sc-service = { version = "0.8.0-rc3", default-features = false, path = "../../service" } -substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../../../test-utils/runtime/client" } -sc-block-builder = { version = "0.8.0-rc3", path = "../../block-builder" } +sp-keyring = { version = "2.0.0-rc4", path = "../../../primitives/keyring" } +sc-executor = { version = "0.8.0-rc4", path = "../../executor" } +sc-network = { version = "0.8.0-rc4", path = "../../network" } +sc-network-test = { version = "0.8.0-rc4", path = "../../network/test" } +sc-service = { version = "0.8.0-rc4", default-features = false, path = "../../service" } +substrate-test-runtime-client = { version = "2.0.0-rc4", path = "../../../test-utils/runtime/client" } +sc-block-builder = { version = "0.8.0-rc4", path = "../../block-builder" } env_logger = "0.7.0" rand_chacha = "0.2.2" tempfile = "3.1.0" diff --git a/client/consensus/babe/rpc/Cargo.toml b/client/consensus/babe/rpc/Cargo.toml index 401434cadb..03da64ff30 100644 --- a/client/consensus/babe/rpc/Cargo.toml +++ b/client/consensus/babe/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-babe-rpc" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] description = "RPC extensions for the BABE consensus algorithm" edition = "2018" @@ -12,27 +12,27 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-consensus-babe = { version = "0.8.0-rc3", path = "../" } -sc-rpc-api = { version = "0.8.0-rc3", path = "../../../rpc-api" } +sc-consensus-babe = { version = "0.8.0-rc4", path = "../" } +sc-rpc-api = { version = "0.8.0-rc4", path = "../../../rpc-api" } jsonrpc-core = "14.2.0" jsonrpc-core-client = "14.2.0" jsonrpc-derive = "14.2.1" -sp-consensus-babe = { version = "0.8.0-rc3", path = "../../../../primitives/consensus/babe" } +sp-consensus-babe = { version = "0.8.0-rc4", path = "../../../../primitives/consensus/babe" } serde = { version = "1.0.104", features=["derive"] } -sp-blockchain = { version = "2.0.0-rc3", path = "../../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0-rc3", path = "../../../../primitives/runtime" } -sc-consensus-epochs = { version = "0.8.0-rc3", path = "../../epochs" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0-rc4", path = "../../../../primitives/runtime" } +sc-consensus-epochs = { version = "0.8.0-rc4", path = "../../epochs" } futures = { version = "0.3.4", features = ["compat"] } 
derive_more = "0.99.2" -sp-api = { version = "2.0.0-rc3", path = "../../../../primitives/api" } -sp-consensus = { version = "0.8.0-rc3", path = "../../../../primitives/consensus/common" } -sp-core = { version = "2.0.0-rc3", path = "../../../../primitives/core" } -sp-application-crypto = { version = "2.0.0-rc3", path = "../../../../primitives/application-crypto" } -sc-keystore = { version = "2.0.0-rc3", path = "../../../keystore" } +sp-api = { version = "2.0.0-rc4", path = "../../../../primitives/api" } +sp-consensus = { version = "0.8.0-rc4", path = "../../../../primitives/consensus/common" } +sp-core = { version = "2.0.0-rc4", path = "../../../../primitives/core" } +sp-application-crypto = { version = "2.0.0-rc4", path = "../../../../primitives/application-crypto" } +sc-keystore = { version = "2.0.0-rc4", path = "../../../keystore" } [dev-dependencies] -sc-consensus = { version = "0.8.0-rc3", path = "../../../consensus/common" } +sc-consensus = { version = "0.8.0-rc4", path = "../../../consensus/common" } serde_json = "1.0.50" -sp-keyring = { version = "2.0.0-rc3", path = "../../../../primitives/keyring" } -substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../../../../test-utils/runtime/client" } +sp-keyring = { version = "2.0.0-rc4", path = "../../../../primitives/keyring" } +substrate-test-runtime-client = { version = "2.0.0-rc4", path = "../../../../test-utils/runtime/client" } tempfile = "3.1.0" diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml index bb1f88a8ce..72bb051a0d 100644 --- a/client/consensus/common/Cargo.toml +++ b/client/consensus/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -12,7 +12,7 @@ description = "Collection of common consensus specific imlementations for Substr targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-client-api = { version = "2.0.0-rc3", path = "../../api" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } -sp-consensus = { version = "0.8.0-rc3", path = "../../../primitives/consensus/common" } +sc-client-api = { version = "2.0.0-rc4", path = "../../api" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } +sp-consensus = { version = "0.8.0-rc4", path = "../../../primitives/consensus/common" } diff --git a/client/consensus/epochs/Cargo.toml b/client/consensus/epochs/Cargo.toml index 3911a59b72..22f8794974 100644 --- a/client/consensus/epochs/Cargo.toml +++ b/client/consensus/epochs/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-epochs" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] description = "Generic epochs-based utilities for consensus" edition = "2018" @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } parking_lot = "0.10.0" -fork-tree = { version = "2.0.0-rc3", path = "../../../utils/fork-tree" } -sp-runtime = { path = "../../../primitives/runtime" , version = "2.0.0-rc3"} -sp-blockchain = { version = "2.0.0-rc3", path = "../../../primitives/blockchain" } -sc-client-api = { path = "../../api" , version = "2.0.0-rc3"} +fork-tree = { version = 
"2.0.0-rc4", path = "../../../utils/fork-tree" } +sp-runtime = { path = "../../../primitives/runtime" , version = "2.0.0-rc4"} +sp-blockchain = { version = "2.0.0-rc4", path = "../../../primitives/blockchain" } +sc-client-api = { path = "../../api" , version = "2.0.0-rc4"} diff --git a/client/consensus/manual-seal/Cargo.toml b/client/consensus/manual-seal/Cargo.toml index 0503fed54a..2da28b9ab9 100644 --- a/client/consensus/manual-seal/Cargo.toml +++ b/client/consensus/manual-seal/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-manual-seal" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] description = "Manual sealing engine for Substrate" edition = "2018" @@ -22,20 +22,20 @@ parking_lot = "0.10.0" serde = { version = "1.0", features=["derive"] } assert_matches = "1.3.0" -sc-client-api = { path = "../../../client/api", version = "2.0.0-rc3" } -sc-transaction-pool = { path = "../../transaction-pool", version = "2.0.0-rc3" } -sp-blockchain = { path = "../../../primitives/blockchain", version = "2.0.0-rc3" } -sp-consensus = { package = "sp-consensus", path = "../../../primitives/consensus/common", version = "0.8.0-rc3" } -sp-inherents = { path = "../../../primitives/inherents", version = "2.0.0-rc3" } -sp-runtime = { path = "../../../primitives/runtime", version = "2.0.0-rc3" } -sp-core = { path = "../../../primitives/core", version = "2.0.0-rc3" } -sp-transaction-pool = { path = "../../../primitives/transaction-pool", version = "2.0.0-rc3" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc3" } +sc-client-api = { path = "../../../client/api", version = "2.0.0-rc4" } +sc-transaction-pool = { path = "../../transaction-pool", version = "2.0.0-rc4" } +sp-blockchain = { path = "../../../primitives/blockchain", version = "2.0.0-rc4" } +sp-consensus = { package = "sp-consensus", path = "../../../primitives/consensus/common", version = "0.8.0-rc4" } +sp-inherents = { path = "../../../primitives/inherents", version = "2.0.0-rc4" } +sp-runtime = { path = "../../../primitives/runtime", version = "2.0.0-rc4" } +sp-core = { path = "../../../primitives/core", version = "2.0.0-rc4" } +sp-transaction-pool = { path = "../../../primitives/transaction-pool", version = "2.0.0-rc4" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc4" } [dev-dependencies] -sc-basic-authorship = { path = "../../basic-authorship", version = "0.8.0-rc3" } -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client", version = "2.0.0-rc3" } -substrate-test-runtime-transaction-pool = { path = "../../../test-utils/runtime/transaction-pool", version = "2.0.0-rc3" } +sc-basic-authorship = { path = "../../basic-authorship", version = "0.8.0-rc4" } +substrate-test-runtime-client = { path = "../../../test-utils/runtime/client", version = "2.0.0-rc4" } +substrate-test-runtime-transaction-pool = { path = "../../../test-utils/runtime/transaction-pool", version = "2.0.0-rc4" } tokio = { version = "0.2", features = ["rt-core", "macros"] } env_logger = "0.7.0" tempfile = "3.1.0" diff --git a/client/consensus/pow/Cargo.toml b/client/consensus/pow/Cargo.toml index cd8d4cab42..b0b142fd84 100644 --- a/client/consensus/pow/Cargo.toml +++ b/client/consensus/pow/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-pow" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] description = "PoW consensus algorithm for 
substrate" edition = "2018" @@ -13,17 +13,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0-rc3", path = "../../../primitives/api" } -sc-client-api = { version = "2.0.0-rc3", path = "../../api" } -sp-block-builder = { version = "2.0.0-rc3", path = "../../../primitives/block-builder" } -sp-inherents = { version = "2.0.0-rc3", path = "../../../primitives/inherents" } -sp-consensus-pow = { version = "0.8.0-rc3", path = "../../../primitives/consensus/pow" } -sp-consensus = { version = "0.8.0-rc3", path = "../../../primitives/consensus/common" } +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } +sp-api = { version = "2.0.0-rc4", path = "../../../primitives/api" } +sc-client-api = { version = "2.0.0-rc4", path = "../../api" } +sp-block-builder = { version = "2.0.0-rc4", path = "../../../primitives/block-builder" } +sp-inherents = { version = "2.0.0-rc4", path = "../../../primitives/inherents" } +sp-consensus-pow = { version = "0.8.0-rc4", path = "../../../primitives/consensus/pow" } +sp-consensus = { version = "0.8.0-rc4", path = "../../../primitives/consensus/common" } log = "0.4.8" futures = { version = "0.3.1", features = ["compat"] } -sp-timestamp = { version = "2.0.0-rc3", path = "../../../primitives/timestamp" } +sp-timestamp = { version = "2.0.0-rc4", path = "../../../primitives/timestamp" } derive_more = "0.99.2" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc3"} +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc4"} diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index 25a137d214..80eb83cca5 100644 --- a/client/consensus/slots/Cargo.toml +++ b/client/consensus/slots/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-slots" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] description = "Generic slots-based utilities for consensus" edition = "2018" @@ -14,20 +14,20 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1" } -sc-client-api = { version = "2.0.0-rc3", path = "../../api" } -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } -sp-application-crypto = { version = "2.0.0-rc3", path = "../../../primitives/application-crypto" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } -sp-state-machine = { version = "0.8.0-rc3", path = "../../../primitives/state-machine" } -sp-api = { version = "2.0.0-rc3", path = "../../../primitives/api" } -sc-telemetry = { version = "2.0.0-rc3", path = "../../telemetry" } -sp-consensus = { version = "0.8.0-rc3", path = "../../../primitives/consensus/common" } -sp-inherents = { version = "2.0.0-rc3", path = "../../../primitives/inherents" } +sc-client-api = { version = "2.0.0-rc4", path = "../../api" } +sp-core = { 
version = "2.0.0-rc4", path = "../../../primitives/core" } +sp-application-crypto = { version = "2.0.0-rc4", path = "../../../primitives/application-crypto" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } +sp-state-machine = { version = "0.8.0-rc4", path = "../../../primitives/state-machine" } +sp-api = { version = "2.0.0-rc4", path = "../../../primitives/api" } +sc-telemetry = { version = "2.0.0-rc4", path = "../../telemetry" } +sp-consensus = { version = "0.8.0-rc4", path = "../../../primitives/consensus/common" } +sp-inherents = { version = "2.0.0-rc4", path = "../../../primitives/inherents" } futures = "0.3.4" futures-timer = "3.0.1" parking_lot = "0.10.0" log = "0.4.8" [dev-dependencies] -substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../../../test-utils/runtime/client" } +substrate-test-runtime-client = { version = "2.0.0-rc4", path = "../../../test-utils/runtime/client" } diff --git a/client/consensus/uncles/Cargo.toml b/client/consensus/uncles/Cargo.toml index 0110b3a746..957e8e3c0b 100644 --- a/client/consensus/uncles/Cargo.toml +++ b/client/consensus/uncles/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-uncles" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] description = "Generic uncle inclusion utilities for consensus" edition = "2018" @@ -12,10 +12,10 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-client-api = { version = "2.0.0-rc3", path = "../../api" } -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } -sp-authorship = { version = "2.0.0-rc3", path = "../../../primitives/authorship" } -sp-consensus = { version = "0.8.0-rc3", path = "../../../primitives/consensus/common" } -sp-inherents = { version = "2.0.0-rc3", path = "../../../primitives/inherents" } +sc-client-api = { version = "2.0.0-rc4", path = "../../api" } +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } +sp-authorship = { version = "2.0.0-rc4", path = "../../../primitives/authorship" } +sp-consensus = { version = "0.8.0-rc4", path = "../../../primitives/consensus/common" } +sp-inherents = { version = "2.0.0-rc4", path = "../../../primitives/inherents" } log = "0.4.8" diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 22ca6e64aa..42cc60617a 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-client-db" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -23,22 +23,22 @@ parity-util-mem = { version = "0.6.1", default-features = false, features = ["st codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } blake2-rfc = "0.2.18" -sc-client-api = { version = "2.0.0-rc3", path = "../api" } -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-rc3", path = "../../primitives/runtime" } -sp-state-machine = { version = "0.8.0-rc3", path = "../../primitives/state-machine" } -sc-executor = { version = "0.8.0-rc3", path = "../executor" } -sc-state-db = { version = "0.8.0-rc3", path = "../state-db" } -sp-trie = { version = "2.0.0-rc3", path = 
"../../primitives/trie" } -sp-consensus = { version = "0.8.0-rc3", path = "../../primitives/consensus/common" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../primitives/blockchain" } -sp-database = { version = "2.0.0-rc3", path = "../../primitives/database" } +sc-client-api = { version = "2.0.0-rc4", path = "../api" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-rc4", path = "../../primitives/runtime" } +sp-state-machine = { version = "0.8.0-rc4", path = "../../primitives/state-machine" } +sc-executor = { version = "0.8.0-rc4", path = "../executor" } +sc-state-db = { version = "0.8.0-rc4", path = "../state-db" } +sp-trie = { version = "2.0.0-rc4", path = "../../primitives/trie" } +sp-consensus = { version = "0.8.0-rc4", path = "../../primitives/consensus/common" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../primitives/blockchain" } +sp-database = { version = "2.0.0-rc4", path = "../../primitives/database" } parity-db = { version = "0.1.2", optional = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-rc3", path = "../../utils/prometheus" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-rc4", path = "../../utils/prometheus" } [dev-dependencies] -sp-keyring = { version = "2.0.0-rc3", path = "../../primitives/keyring" } -substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../../test-utils/runtime/client" } +sp-keyring = { version = "2.0.0-rc4", path = "../../primitives/keyring" } +substrate-test-runtime-client = { version = "2.0.0-rc4", path = "../../test-utils/runtime/client" } env_logger = "0.7.0" quickcheck = "0.9" kvdb-rocksdb = "0.8" diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index f1499693f3..b12156aeb1 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-executor" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -15,22 +15,22 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" codec = { package = "parity-scale-codec", version = "1.3.1" } -sp-io = { version = "2.0.0-rc3", path = "../../primitives/io" } -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -sp-trie = { version = "2.0.0-rc3", path = "../../primitives/trie" } -sp-serializer = { version = "2.0.0-rc3", path = "../../primitives/serializer" } -sp-version = { version = "2.0.0-rc3", path = "../../primitives/version" } -sp-panic-handler = { version = "2.0.0-rc3", path = "../../primitives/panic-handler" } +sp-io = { version = "2.0.0-rc4", path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +sp-trie = { version = "2.0.0-rc4", path = "../../primitives/trie" } +sp-serializer = { version = "2.0.0-rc4", path = "../../primitives/serializer" } +sp-version = { version = "2.0.0-rc4", path = "../../primitives/version" } +sp-panic-handler = { version = "2.0.0-rc4", path = "../../primitives/panic-handler" } wasmi = "0.6.2" parity-wasm = "0.41.0" lazy_static = "1.4.0" -sp-api = { version = "2.0.0-rc3", path = "../../primitives/api" } -sp-wasm-interface = { version = "2.0.0-rc3", path = "../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0-rc3", path = "../../primitives/runtime-interface" } -sp-externalities = { version = "0.8.0-rc3", path = "../../primitives/externalities" 
} -sc-executor-common = { version = "0.8.0-rc3", path = "common" } -sc-executor-wasmi = { version = "0.8.0-rc3", path = "wasmi" } -sc-executor-wasmtime = { version = "0.8.0-rc3", path = "wasmtime", optional = true } +sp-api = { version = "2.0.0-rc4", path = "../../primitives/api" } +sp-wasm-interface = { version = "2.0.0-rc4", path = "../../primitives/wasm-interface" } +sp-runtime-interface = { version = "2.0.0-rc4", path = "../../primitives/runtime-interface" } +sp-externalities = { version = "0.8.0-rc4", path = "../../primitives/externalities" } +sc-executor-common = { version = "0.8.0-rc4", path = "common" } +sc-executor-wasmi = { version = "0.8.0-rc4", path = "wasmi" } +sc-executor-wasmtime = { version = "0.8.0-rc4", path = "wasmtime", optional = true } parking_lot = "0.10.0" log = "0.4.8" libsecp256k1 = "0.3.4" @@ -39,13 +39,13 @@ libsecp256k1 = "0.3.4" assert_matches = "1.3.0" wabt = "0.9.2" hex-literal = "0.2.1" -sc-runtime-test = { version = "2.0.0-rc3", path = "runtime-test" } -substrate-test-runtime = { version = "2.0.0-rc3", path = "../../test-utils/runtime" } -sp-state-machine = { version = "0.8.0-rc3", path = "../../primitives/state-machine" } +sc-runtime-test = { version = "2.0.0-rc4", path = "runtime-test" } +substrate-test-runtime = { version = "2.0.0-rc4", path = "../../test-utils/runtime" } +sp-state-machine = { version = "0.8.0-rc4", path = "../../primitives/state-machine" } test-case = "0.3.3" -sp-runtime = { version = "2.0.0-rc3", path = "../../primitives/runtime" } -sp-tracing = { version = "2.0.0-rc3", path = "../../primitives/tracing" } -sc-tracing = { version = "2.0.0-rc3", path = "../tracing" } +sp-runtime = { version = "2.0.0-rc4", path = "../../primitives/runtime" } +sp-tracing = { version = "2.0.0-rc4", path = "../../primitives/tracing" } +sc-tracing = { version = "2.0.0-rc4", path = "../tracing" } tracing = "0.1.14" [features] diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml index a6ff79a067..970fc2ded3 100644 --- a/client/executor/common/Cargo.toml +++ b/client/executor/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-executor-common" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -18,11 +18,11 @@ derive_more = "0.99.2" parity-wasm = "0.41.0" codec = { package = "parity-scale-codec", version = "1.3.1" } wasmi = "0.6.2" -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } -sp-allocator = { version = "2.0.0-rc3", path = "../../../primitives/allocator" } -sp-wasm-interface = { version = "2.0.0-rc3", path = "../../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0-rc3", path = "../../../primitives/runtime-interface" } -sp-serializer = { version = "2.0.0-rc3", path = "../../../primitives/serializer" } +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } +sp-allocator = { version = "2.0.0-rc4", path = "../../../primitives/allocator" } +sp-wasm-interface = { version = "2.0.0-rc4", path = "../../../primitives/wasm-interface" } +sp-runtime-interface = { version = "2.0.0-rc4", path = "../../../primitives/runtime-interface" } +sp-serializer = { version = "2.0.0-rc4", path = "../../../primitives/serializer" } [features] default = [] diff --git a/client/executor/runtime-test/Cargo.toml b/client/executor/runtime-test/Cargo.toml index 917df5d573..c01a9428f4 100644 --- a/client/executor/runtime-test/Cargo.toml +++ b/client/executor/runtime-test/Cargo.toml @@ -1,6 
+1,6 @@ [package] name = "sc-runtime-test" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -13,12 +13,12 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/std" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/io" } -sp-sandbox = { version = "0.8.0-rc3", default-features = false, path = "../../../primitives/sandbox" } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/runtime" } -sp-allocator = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/allocator" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/io" } +sp-sandbox = { version = "0.8.0-rc4", default-features = false, path = "../../../primitives/sandbox" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/runtime" } +sp-allocator = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/allocator" } [build-dependencies] wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } diff --git a/client/executor/wasmi/Cargo.toml b/client/executor/wasmi/Cargo.toml index f3c2ee2c67..6f5486a578 100644 --- a/client/executor/wasmi/Cargo.toml +++ b/client/executor/wasmi/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-executor-wasmi" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -16,8 +16,8 @@ targets = ["x86_64-unknown-linux-gnu"] log = "0.4.8" wasmi = "0.6.2" codec = { package = "parity-scale-codec", version = "1.3.1" } -sc-executor-common = { version = "0.8.0-rc3", path = "../common" } -sp-wasm-interface = { version = "2.0.0-rc3", path = "../../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0-rc3", path = "../../../primitives/runtime-interface" } -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } -sp-allocator = { version = "2.0.0-rc3", path = "../../../primitives/allocator" } +sc-executor-common = { version = "0.8.0-rc4", path = "../common" } +sp-wasm-interface = { version = "2.0.0-rc4", path = "../../../primitives/wasm-interface" } +sp-runtime-interface = { version = "2.0.0-rc4", path = "../../../primitives/runtime-interface" } +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } +sp-allocator = { version = "2.0.0-rc4", path = "../../../primitives/allocator" } diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index 6d008bcee6..26eddd1da6 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-executor-wasmtime" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -16,11 +16,11 @@ log = "0.4.8" scoped-tls = "1.0" parity-wasm = "0.41.0" codec = { package = 
"parity-scale-codec", version = "1.3.1" } -sc-executor-common = { version = "0.8.0-rc3", path = "../common" } -sp-wasm-interface = { version = "2.0.0-rc3", path = "../../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0-rc3", path = "../../../primitives/runtime-interface" } -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } -sp-allocator = { version = "2.0.0-rc3", path = "../../../primitives/allocator" } +sc-executor-common = { version = "0.8.0-rc4", path = "../common" } +sp-wasm-interface = { version = "2.0.0-rc4", path = "../../../primitives/wasm-interface" } +sp-runtime-interface = { version = "2.0.0-rc4", path = "../../../primitives/runtime-interface" } +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } +sp-allocator = { version = "2.0.0-rc4", path = "../../../primitives/allocator" } wasmtime = { package = "substrate-wasmtime", version = "0.16.0-threadsafe.4" } wasmtime-runtime = { package = "substrate-wasmtime-runtime", version = "0.16.0-threadsafe.4" } wasmtime-environ = "0.16" diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index 29b9cdaeba..36b1d59b0c 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-finality-grandpa" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" -fork-tree = { version = "2.0.0-rc3", path = "../../utils/fork-tree" } +fork-tree = { version = "2.0.0-rc4", path = "../../utils/fork-tree" } futures = "0.3.4" futures-timer = "3.0.1" log = "0.4.8" @@ -23,38 +23,38 @@ parking_lot = "0.10.0" rand = "0.7.2" assert_matches = "1.3.0" parity-scale-codec = { version = "1.3.1", features = ["derive"] } -sp-application-crypto = { version = "2.0.0-rc3", path = "../../primitives/application-crypto" } -sp-arithmetic = { version = "2.0.0-rc3", path = "../../primitives/arithmetic" } -sp-runtime = { version = "2.0.0-rc3", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0-rc3", path = "../../primitives/utils" } -sp-consensus = { version = "0.8.0-rc3", path = "../../primitives/consensus/common" } -sc-consensus = { version = "0.8.0-rc3", path = "../../client/consensus/common" } -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -sp-api = { version = "2.0.0-rc3", path = "../../primitives/api" } -sc-telemetry = { version = "2.0.0-rc3", path = "../telemetry" } -sc-keystore = { version = "2.0.0-rc3", path = "../keystore" } +sp-application-crypto = { version = "2.0.0-rc4", path = "../../primitives/application-crypto" } +sp-arithmetic = { version = "2.0.0-rc4", path = "../../primitives/arithmetic" } +sp-runtime = { version = "2.0.0-rc4", path = "../../primitives/runtime" } +sp-utils = { version = "2.0.0-rc4", path = "../../primitives/utils" } +sp-consensus = { version = "0.8.0-rc4", path = "../../primitives/consensus/common" } +sc-consensus = { version = "0.8.0-rc4", path = "../../client/consensus/common" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +sp-api = { version = "2.0.0-rc4", path = "../../primitives/api" } +sc-telemetry = { version = "2.0.0-rc4", path = "../telemetry" } +sc-keystore = { version = "2.0.0-rc4", path = "../keystore" } serde_json = "1.0.41" -sc-client-api = { version = "2.0.0-rc3", path = "../api" } -sp-inherents = { version = 
"2.0.0-rc3", path = "../../primitives/inherents" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../primitives/blockchain" } -sc-network = { version = "0.8.0-rc3", path = "../network" } -sc-network-gossip = { version = "0.8.0-rc3", path = "../network-gossip" } -sp-finality-tracker = { version = "2.0.0-rc3", path = "../../primitives/finality-tracker" } -sp-finality-grandpa = { version = "2.0.0-rc3", path = "../../primitives/finality-grandpa" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc3"} -sc-block-builder = { version = "0.8.0-rc3", path = "../block-builder" } +sc-client-api = { version = "2.0.0-rc4", path = "../api" } +sp-inherents = { version = "2.0.0-rc4", path = "../../primitives/inherents" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../primitives/blockchain" } +sc-network = { version = "0.8.0-rc4", path = "../network" } +sc-network-gossip = { version = "0.8.0-rc4", path = "../network-gossip" } +sp-finality-tracker = { version = "2.0.0-rc4", path = "../../primitives/finality-tracker" } +sp-finality-grandpa = { version = "2.0.0-rc4", path = "../../primitives/finality-grandpa" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc4"} +sc-block-builder = { version = "0.8.0-rc4", path = "../block-builder" } finality-grandpa = { version = "0.12.3", features = ["derive-codec"] } pin-project = "0.4.6" [dev-dependencies] finality-grandpa = { version = "0.12.3", features = ["derive-codec", "test-helpers"] } -sc-network = { version = "0.8.0-rc3", path = "../network" } -sc-network-test = { version = "0.8.0-rc3", path = "../network/test" } -sp-keyring = { version = "2.0.0-rc3", path = "../../primitives/keyring" } -substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../../test-utils/runtime/client" } -sp-consensus-babe = { version = "0.8.0-rc3", path = "../../primitives/consensus/babe" } -sp-state-machine = { version = "0.8.0-rc3", path = "../../primitives/state-machine" } +sc-network = { version = "0.8.0-rc4", path = "../network" } +sc-network-test = { version = "0.8.0-rc4", path = "../network/test" } +sp-keyring = { version = "2.0.0-rc4", path = "../../primitives/keyring" } +substrate-test-runtime-client = { version = "2.0.0-rc4", path = "../../test-utils/runtime/client" } +sp-consensus-babe = { version = "0.8.0-rc4", path = "../../primitives/consensus/babe" } +sp-state-machine = { version = "0.8.0-rc4", path = "../../primitives/state-machine" } env_logger = "0.7.0" tokio = { version = "0.2", features = ["rt-core"] } tempfile = "3.1.0" -sp-api = { version = "2.0.0-rc3", path = "../../primitives/api" } +sp-api = { version = "2.0.0-rc4", path = "../../primitives/api" } diff --git a/client/finality-grandpa/rpc/Cargo.toml b/client/finality-grandpa/rpc/Cargo.toml index d364e47b84..a7d8e64087 100644 --- a/client/finality-grandpa/rpc/Cargo.toml +++ b/client/finality-grandpa/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-finality-grandpa-rpc" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] description = "RPC extensions for the GRANDPA finality gadget" repository = "https://github.com/paritytech/substrate/" @@ -8,7 +8,7 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -sc-finality-grandpa = { version = "0.8.0-rc3", path = "../" } +sc-finality-grandpa = { version = "0.8.0-rc4", path = "../" } finality-grandpa = { version = "0.12.3", features = 
["derive-codec"] } jsonrpc-core = "14.2.0" jsonrpc-core-client = "14.2.0" @@ -20,4 +20,4 @@ log = "0.4.8" derive_more = "0.99.2" [dev-dependencies] -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } diff --git a/client/informant/Cargo.toml b/client/informant/Cargo.toml index 671535933b..d2df78537d 100644 --- a/client/informant/Cargo.toml +++ b/client/informant/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-informant" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] description = "Substrate informant." edition = "2018" @@ -17,10 +17,10 @@ futures = "0.3.4" log = "0.4.8" parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } wasm-timer = "0.2" -sc-client-api = { version = "2.0.0-rc3", path = "../api" } -sc-network = { version = "0.8.0-rc3", path = "../network" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../primitives/blockchain" } -sp-runtime = { version = "2.0.0-rc3", path = "../../primitives/runtime" } +sc-client-api = { version = "2.0.0-rc4", path = "../api" } +sc-network = { version = "0.8.0-rc4", path = "../network" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../primitives/blockchain" } +sp-runtime = { version = "2.0.0-rc4", path = "../../primitives/runtime" } sp-utils = { version = "2.0.0-rc2", path = "../../primitives/utils" } sp-transaction-pool = { version = "2.0.0-rc2", path = "../../primitives/transaction-pool" } parking_lot = "0.10.2" diff --git a/client/keystore/Cargo.toml b/client/keystore/Cargo.toml index 47308dd692..585d3af521 100644 --- a/client/keystore/Cargo.toml +++ b/client/keystore/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-keystore" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -15,8 +15,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -sp-application-crypto = { version = "2.0.0-rc3", path = "../../primitives/application-crypto" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +sp-application-crypto = { version = "2.0.0-rc4", path = "../../primitives/application-crypto" } hex = "0.4.0" merlin = { version = "2.0", default-features = false } parking_lot = "0.10.0" diff --git a/client/light/Cargo.toml b/client/light/Cargo.toml index 490da15364..ced9989c9e 100644 --- a/client/light/Cargo.toml +++ b/client/light/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "components for a light client" name = "sc-light" -version = "2.0.0-rc3" +version = "2.0.0-rc4" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index 334a85035f..51e15e24ce 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Gossiping for the Substrate network protocol" name = "sc-network-gossip" -version = "0.8.0-rc3" +version = "0.8.0-rc4" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -19,12 +19,12 @@ futures-timer = "3.0.1" libp2p = { version = "0.19.1", default-features = false } log = "0.4.8" lru = "0.4.3" -sc-network = { version = "0.8.0-rc3", path = "../network" } -sp-runtime = { version = "2.0.0-rc3", 
path = "../../primitives/runtime" } +sc-network = { version = "0.8.0-rc4", path = "../network" } +sp-runtime = { version = "2.0.0-rc4", path = "../../primitives/runtime" } wasm-timer = "0.2" [dev-dependencies] async-std = "1.5" quickcheck = "0.9.0" rand = "0.7.2" -substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../../test-utils/runtime/client" } +substrate-test-runtime-client = { version = "2.0.0-rc4", path = "../../test-utils/runtime/client" } diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 8467aa1154..f0ba362e48 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Substrate network protocol" name = "sc-network" -version = "0.8.0-rc3" +version = "0.8.0-rc4" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -26,7 +26,7 @@ derive_more = "0.99.2" either = "1.5.3" erased-serde = "0.3.9" fnv = "1.0.6" -fork-tree = { version = "2.0.0-rc3", path = "../../utils/fork-tree" } +fork-tree = { version = "2.0.0-rc4", path = "../../utils/fork-tree" } futures = "0.3.4" futures-timer = "3.0.1" futures_codec = "0.3.3" @@ -39,23 +39,23 @@ lru = "0.4.0" nohash-hasher = "0.2.0" parking_lot = "0.10.0" pin-project = "0.4.6" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-rc3", path = "../../utils/prometheus" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-rc4", path = "../../utils/prometheus" } prost = "0.6.1" rand = "0.7.2" -sc-block-builder = { version = "0.8.0-rc3", path = "../block-builder" } -sc-client-api = { version = "2.0.0-rc3", path = "../api" } -sc-peerset = { version = "2.0.0-rc3", path = "../peerset" } +sc-block-builder = { version = "0.8.0-rc4", path = "../block-builder" } +sc-client-api = { version = "2.0.0-rc4", path = "../api" } +sc-peerset = { version = "2.0.0-rc4", path = "../peerset" } serde = { version = "1.0.101", features = ["derive"] } serde_json = "1.0.41" slog = { version = "2.5.2", features = ["nested-values"] } slog_derive = "0.2.0" smallvec = "0.6.10" -sp-arithmetic = { version = "2.0.0-rc3", path = "../../primitives/arithmetic" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../primitives/blockchain" } -sp-consensus = { version = "0.8.0-rc3", path = "../../primitives/consensus/common" } -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-rc3", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0-rc3", path = "../../primitives/utils" } +sp-arithmetic = { version = "2.0.0-rc4", path = "../../primitives/arithmetic" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../primitives/blockchain" } +sp-consensus = { version = "0.8.0-rc4", path = "../../primitives/consensus/common" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-rc4", path = "../../primitives/runtime" } +sp-utils = { version = "2.0.0-rc4", path = "../../primitives/utils" } thiserror = "1" unsigned-varint = { version = "0.3.1", features = ["futures", "futures-codec"] } void = "1.0.2" @@ -74,10 +74,10 @@ env_logger = "0.7.0" libp2p = { version = "0.19.1", default-features = false, features = ["secio"] } quickcheck = "0.9.0" rand = "0.7.2" -sp-keyring = { version = "2.0.0-rc3", path = "../../primitives/keyring" } -sp-test-primitives = { version = "2.0.0-rc3", path = "../../primitives/test-primitives" } -substrate-test-runtime = { version = "2.0.0-rc3", path = 
"../../test-utils/runtime" } -substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../../test-utils/runtime/client" } +sp-keyring = { version = "2.0.0-rc4", path = "../../primitives/keyring" } +sp-test-primitives = { version = "2.0.0-rc4", path = "../../primitives/test-primitives" } +substrate-test-runtime = { version = "2.0.0-rc4", path = "../../test-utils/runtime" } +substrate-test-runtime-client = { version = "2.0.0-rc4", path = "../../test-utils/runtime/client" } tempfile = "3.1.0" [features] diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index 27acabbb22..393887572c 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Integration tests for Substrate network protocol" name = "sc-network-test" -version = "0.8.0-rc3" +version = "0.8.0-rc4" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -13,23 +13,23 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-network = { version = "0.8.0-rc3", path = "../" } +sc-network = { version = "0.8.0-rc4", path = "../" } log = "0.4.8" parking_lot = "0.10.0" futures = "0.3.4" futures-timer = "3.0.1" rand = "0.7.2" libp2p = { version = "0.19.1", default-features = false } -sp-consensus = { version = "0.8.0-rc3", path = "../../../primitives/consensus/common" } -sc-consensus = { version = "0.8.0-rc3", path = "../../../client/consensus/common" } -sc-client-api = { version = "2.0.0-rc3", path = "../../api" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } -sc-block-builder = { version = "0.8.0-rc3", path = "../../block-builder" } -sp-consensus-babe = { version = "0.8.0-rc3", path = "../../../primitives/consensus/babe" } +sp-consensus = { version = "0.8.0-rc4", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.8.0-rc4", path = "../../../client/consensus/common" } +sc-client-api = { version = "2.0.0-rc4", path = "../../api" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } +sc-block-builder = { version = "0.8.0-rc4", path = "../../block-builder" } +sp-consensus-babe = { version = "0.8.0-rc4", path = "../../../primitives/consensus/babe" } env_logger = "0.7.0" -substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../../../test-utils/runtime/client" } -substrate-test-runtime = { version = "2.0.0-rc3", path = "../../../test-utils/runtime" } +substrate-test-runtime-client = { version = "2.0.0-rc4", path = "../../../test-utils/runtime/client" } +substrate-test-runtime = { version = "2.0.0-rc4", path = "../../../test-utils/runtime" } tempfile = "3.1.0" -sc-service = { version = "0.8.0-rc3", default-features = false, features = ["test-helpers"], path = "../../service" } +sc-service = { version = "0.8.0-rc4", default-features = false, features = ["test-helpers"], path = "../../service" } diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index 819d6ac3a5..cd5a63a75c 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Substrate offchain workers" name = 
"sc-offchain" -version = "2.0.0-rc3" +version = "2.0.0-rc4" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -13,23 +13,23 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bytes = "0.5" -sc-client-api = { version = "2.0.0-rc3", path = "../api" } -sp-api = { version = "2.0.0-rc3", path = "../../primitives/api" } +sc-client-api = { version = "2.0.0-rc4", path = "../api" } +sp-api = { version = "2.0.0-rc4", path = "../../primitives/api" } fnv = "1.0.6" futures = "0.3.4" futures-timer = "3.0.1" log = "0.4.8" threadpool = "1.7" num_cpus = "1.10" -sp-offchain = { version = "2.0.0-rc3", path = "../../primitives/offchain" } +sp-offchain = { version = "2.0.0-rc4", path = "../../primitives/offchain" } codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } parking_lot = "0.10.0" -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } rand = "0.7.2" -sp-runtime = { version = "2.0.0-rc3", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0-rc3", path = "../../primitives/utils" } -sc-network = { version = "0.8.0-rc3", path = "../network" } -sc-keystore = { version = "2.0.0-rc3", path = "../keystore" } +sp-runtime = { version = "2.0.0-rc4", path = "../../primitives/runtime" } +sp-utils = { version = "2.0.0-rc4", path = "../../primitives/utils" } +sc-network = { version = "0.8.0-rc4", path = "../network" } +sc-keystore = { version = "2.0.0-rc4", path = "../keystore" } [target.'cfg(not(target_os = "unknown"))'.dependencies] hyper = "0.13.2" @@ -37,10 +37,10 @@ hyper-rustls = "0.20" [dev-dependencies] env_logger = "0.7.0" -sc-client-db = { version = "0.8.0-rc3", default-features = true, path = "../db/" } -sc-transaction-pool = { version = "2.0.0-rc3", path = "../../client/transaction-pool" } -sp-transaction-pool = { version = "2.0.0-rc3", path = "../../primitives/transaction-pool" } -substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../../test-utils/runtime/client" } +sc-client-db = { version = "0.8.0-rc4", default-features = true, path = "../db/" } +sc-transaction-pool = { version = "2.0.0-rc4", path = "../../client/transaction-pool" } +sp-transaction-pool = { version = "2.0.0-rc4", path = "../../primitives/transaction-pool" } +substrate-test-runtime-client = { version = "2.0.0-rc4", path = "../../test-utils/runtime/client" } tokio = "0.2" lazy_static = "1.4.0" diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index 205260ad72..eb7f237548 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -3,7 +3,7 @@ description = "Connectivity manager based on reputation" homepage = "http://parity.io" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" name = "sc-peerset" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" repository = "https://github.com/paritytech/substrate/" @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.4" libp2p = { version = "0.19.1", default-features = false } -sp-utils = { version = "2.0.0-rc3", path = "../../primitives/utils"} +sp-utils = { version = "2.0.0-rc4", path = "../../primitives/utils"} log = "0.4.8" serde_json = "1.0.41" wasm-timer = "0.2" diff --git a/client/proposer-metrics/Cargo.toml b/client/proposer-metrics/Cargo.toml index 5c960d1d78..b10336f340 100644 --- a/client/proposer-metrics/Cargo.toml +++ b/client/proposer-metrics/Cargo.toml @@ -1,6 
+1,6 @@ [package] name = "sc-proposer-metrics" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -13,4 +13,4 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.8" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc3"} +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc4"} diff --git a/client/rpc-api/Cargo.toml b/client/rpc-api/Cargo.toml index 3e3195b914..a991cf9afa 100644 --- a/client/rpc-api/Cargo.toml +++ b/client/rpc-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-rpc-api" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -21,11 +21,11 @@ jsonrpc-derive = "14.2.1" jsonrpc-pubsub = "14.2.0" log = "0.4.8" parking_lot = "0.10.0" -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -sp-version = { version = "2.0.0-rc3", path = "../../primitives/version" } -sp-runtime = { path = "../../primitives/runtime" , version = "2.0.0-rc3"} -sp-chain-spec = { path = "../../primitives/chain-spec" , version = "2.0.0-rc3"} +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +sp-version = { version = "2.0.0-rc4", path = "../../primitives/version" } +sp-runtime = { path = "../../primitives/runtime" , version = "2.0.0-rc4"} +sp-chain-spec = { path = "../../primitives/chain-spec" , version = "2.0.0-rc4"} serde = { version = "1.0.101", features = ["derive"] } serde_json = "1.0.41" -sp-transaction-pool = { version = "2.0.0-rc3", path = "../../primitives/transaction-pool" } -sp-rpc = { version = "2.0.0-rc3", path = "../../primitives/rpc" } +sp-transaction-pool = { version = "2.0.0-rc4", path = "../../primitives/transaction-pool" } +sp-rpc = { version = "2.0.0-rc4", path = "../../primitives/rpc" } diff --git a/client/rpc-servers/Cargo.toml b/client/rpc-servers/Cargo.toml index b1ec04f5e4..155729817d 100644 --- a/client/rpc-servers/Cargo.toml +++ b/client/rpc-servers/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-rpc-server" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -17,7 +17,7 @@ pubsub = { package = "jsonrpc-pubsub", version = "14.2.0" } log = "0.4.8" serde = "1.0.101" serde_json = "1.0.41" -sp-runtime = { version = "2.0.0-rc3", path = "../../primitives/runtime" } +sp-runtime = { version = "2.0.0-rc4", path = "../../primitives/runtime" } [target.'cfg(not(target_os = "unknown"))'.dependencies] http = { package = "jsonrpc-http-server", version = "14.2.0" } diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index 9cda4451c1..9568a4d44f 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-rpc" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -12,38 +12,38 @@ description = "Substrate Client RPC" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-rpc-api = { version = "0.8.0-rc3", path = "../rpc-api" } -sc-client-api = { version = "2.0.0-rc3", path = "../api" } -sp-api = { version = "2.0.0-rc3", path = "../../primitives/api" } +sc-rpc-api = { version = "0.8.0-rc4", path = "../rpc-api" } +sc-client-api = { version = "2.0.0-rc4", 
path = "../api" } +sp-api = { version = "2.0.0-rc4", path = "../../primitives/api" } codec = { package = "parity-scale-codec", version = "1.3.1" } futures = { version = "0.3.1", features = ["compat"] } jsonrpc-pubsub = "14.2.0" log = "0.4.8" -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } rpc = { package = "jsonrpc-core", version = "14.2.0" } -sp-version = { version = "2.0.0-rc3", path = "../../primitives/version" } +sp-version = { version = "2.0.0-rc4", path = "../../primitives/version" } serde_json = "1.0.41" -sp-session = { version = "2.0.0-rc3", path = "../../primitives/session" } -sp-offchain = { version = "2.0.0-rc3", path = "../../primitives/offchain" } -sp-runtime = { version = "2.0.0-rc3", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0-rc3", path = "../../primitives/utils" } -sp-rpc = { version = "2.0.0-rc3", path = "../../primitives/rpc" } -sp-state-machine = { version = "0.8.0-rc3", path = "../../primitives/state-machine" } -sp-chain-spec = { version = "2.0.0-rc3", path = "../../primitives/chain-spec" } -sc-executor = { version = "0.8.0-rc3", path = "../executor" } -sc-block-builder = { version = "0.8.0-rc3", path = "../../client/block-builder" } -sc-keystore = { version = "2.0.0-rc3", path = "../keystore" } -sp-transaction-pool = { version = "2.0.0-rc3", path = "../../primitives/transaction-pool" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../primitives/blockchain" } +sp-session = { version = "2.0.0-rc4", path = "../../primitives/session" } +sp-offchain = { version = "2.0.0-rc4", path = "../../primitives/offchain" } +sp-runtime = { version = "2.0.0-rc4", path = "../../primitives/runtime" } +sp-utils = { version = "2.0.0-rc4", path = "../../primitives/utils" } +sp-rpc = { version = "2.0.0-rc4", path = "../../primitives/rpc" } +sp-state-machine = { version = "0.8.0-rc4", path = "../../primitives/state-machine" } +sp-chain-spec = { version = "2.0.0-rc4", path = "../../primitives/chain-spec" } +sc-executor = { version = "0.8.0-rc4", path = "../executor" } +sc-block-builder = { version = "0.8.0-rc4", path = "../../client/block-builder" } +sc-keystore = { version = "2.0.0-rc4", path = "../keystore" } +sp-transaction-pool = { version = "2.0.0-rc4", path = "../../primitives/transaction-pool" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../primitives/blockchain" } hash-db = { version = "0.15.2", default-features = false } parking_lot = "0.10.0" [dev-dependencies] assert_matches = "1.3.0" futures01 = { package = "futures", version = "0.1.29" } -sc-network = { version = "0.8.0-rc3", path = "../network" } -sp-io = { version = "2.0.0-rc3", path = "../../primitives/io" } -substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../../test-utils/runtime/client" } +sc-network = { version = "0.8.0-rc4", path = "../network" } +sp-io = { version = "2.0.0-rc4", path = "../../primitives/io" } +substrate-test-runtime-client = { version = "2.0.0-rc4", path = "../../test-utils/runtime/client" } tokio = "0.1.22" -sc-transaction-pool = { version = "2.0.0-rc3", path = "../transaction-pool" } +sc-transaction-pool = { version = "2.0.0-rc4", path = "../transaction-pool" } lazy_static = "1.4.0" diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 1740e6fad4..f63d3f183d 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-service" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity 
Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -40,39 +40,39 @@ hash-db = "0.15.2" serde = "1.0.101" serde_json = "1.0.41" sysinfo = "0.13.3" -sc-keystore = { version = "2.0.0-rc3", path = "../keystore" } -sp-io = { version = "2.0.0-rc3", path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc3", path = "../../primitives/runtime" } -sp-trie = { version = "2.0.0-rc3", path = "../../primitives/trie" } -sp-externalities = { version = "0.8.0-rc3", path = "../../primitives/externalities" } -sp-utils = { version = "2.0.0-rc3", path = "../../primitives/utils" } -sp-version = { version = "2.0.0-rc3", path = "../../primitives/version" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../primitives/blockchain" } -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -sp-session = { version = "2.0.0-rc3", path = "../../primitives/session" } -sp-state-machine = { version = "0.8.0-rc3", path = "../../primitives/state-machine" } -sp-application-crypto = { version = "2.0.0-rc3", path = "../../primitives/application-crypto" } -sp-consensus = { version = "0.8.0-rc3", path = "../../primitives/consensus/common" } -sc-network = { version = "0.8.0-rc3", path = "../network" } -sc-chain-spec = { version = "2.0.0-rc3", path = "../chain-spec" } -sc-light = { version = "2.0.0-rc3", path = "../light" } -sc-client-api = { version = "2.0.0-rc3", path = "../api" } -sp-api = { version = "2.0.0-rc3", path = "../../primitives/api" } -sc-client-db = { version = "0.8.0-rc3", default-features = false, path = "../db" } +sc-keystore = { version = "2.0.0-rc4", path = "../keystore" } +sp-io = { version = "2.0.0-rc4", path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc4", path = "../../primitives/runtime" } +sp-trie = { version = "2.0.0-rc4", path = "../../primitives/trie" } +sp-externalities = { version = "0.8.0-rc4", path = "../../primitives/externalities" } +sp-utils = { version = "2.0.0-rc4", path = "../../primitives/utils" } +sp-version = { version = "2.0.0-rc4", path = "../../primitives/version" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../primitives/blockchain" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +sp-session = { version = "2.0.0-rc4", path = "../../primitives/session" } +sp-state-machine = { version = "0.8.0-rc4", path = "../../primitives/state-machine" } +sp-application-crypto = { version = "2.0.0-rc4", path = "../../primitives/application-crypto" } +sp-consensus = { version = "0.8.0-rc4", path = "../../primitives/consensus/common" } +sc-network = { version = "0.8.0-rc4", path = "../network" } +sc-chain-spec = { version = "2.0.0-rc4", path = "../chain-spec" } +sc-light = { version = "2.0.0-rc4", path = "../light" } +sc-client-api = { version = "2.0.0-rc4", path = "../api" } +sp-api = { version = "2.0.0-rc4", path = "../../primitives/api" } +sc-client-db = { version = "0.8.0-rc4", default-features = false, path = "../db" } codec = { package = "parity-scale-codec", version = "1.3.1" } -sc-executor = { version = "0.8.0-rc3", path = "../executor" } -sc-transaction-pool = { version = "2.0.0-rc3", path = "../transaction-pool" } -sp-transaction-pool = { version = "2.0.0-rc3", path = "../../primitives/transaction-pool" } -sc-rpc-server = { version = "2.0.0-rc3", path = "../rpc-servers" } -sc-rpc = { version = "2.0.0-rc3", path = "../rpc" } -sc-block-builder = { version = "0.8.0-rc3", path = "../block-builder" } -sp-block-builder = { version = "2.0.0-rc3", path = 
"../../primitives/block-builder" } +sc-executor = { version = "0.8.0-rc4", path = "../executor" } +sc-transaction-pool = { version = "2.0.0-rc4", path = "../transaction-pool" } +sp-transaction-pool = { version = "2.0.0-rc4", path = "../../primitives/transaction-pool" } +sc-rpc-server = { version = "2.0.0-rc4", path = "../rpc-servers" } +sc-rpc = { version = "2.0.0-rc4", path = "../rpc" } +sc-block-builder = { version = "0.8.0-rc4", path = "../block-builder" } +sp-block-builder = { version = "2.0.0-rc4", path = "../../primitives/block-builder" } sc-informant = { version = "0.8.0-rc2", path = "../informant" } -sc-telemetry = { version = "2.0.0-rc3", path = "../telemetry" } -sc-offchain = { version = "2.0.0-rc3", path = "../offchain" } +sc-telemetry = { version = "2.0.0-rc4", path = "../telemetry" } +sc-offchain = { version = "2.0.0-rc4", path = "../offchain" } parity-multiaddr = { package = "parity-multiaddr", version = "0.7.3" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" , version = "0.8.0-rc3"} -sc-tracing = { version = "2.0.0-rc3", path = "../tracing" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" , version = "0.8.0-rc4"} +sc-tracing = { version = "2.0.0-rc4", path = "../tracing" } tracing = "0.1.10" parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } @@ -88,7 +88,7 @@ tempfile = "3.1.0" directories = "2.0.2" [dev-dependencies] -substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../../test-utils/runtime/client" } -sp-consensus-babe = { version = "0.8.0-rc3", path = "../../primitives/consensus/babe" } -grandpa = { version = "0.8.0-rc3", package = "sc-finality-grandpa", path = "../finality-grandpa" } -grandpa-primitives = { version = "2.0.0-rc3", package = "sp-finality-grandpa", path = "../../primitives/finality-grandpa" } +substrate-test-runtime-client = { version = "2.0.0-rc4", path = "../../test-utils/runtime/client" } +sp-consensus-babe = { version = "0.8.0-rc4", path = "../../primitives/consensus/babe" } +grandpa = { version = "0.8.0-rc4", package = "sc-finality-grandpa", path = "../finality-grandpa" } +grandpa-primitives = { version = "2.0.0-rc4", package = "sp-finality-grandpa", path = "../../primitives/finality-grandpa" } diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index 7d61e86708..6a886ebcba 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-service-test" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -20,25 +20,25 @@ log = "0.4.8" env_logger = "0.7.0" fdlimit = "0.1.4" parking_lot = "0.10.0" -sc-light = { version = "2.0.0-rc3", path = "../../light" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../../primitives/blockchain" } -sp-api = { version = "2.0.0-rc3", path = "../../../primitives/api" } -sp-state-machine = { version = "0.8.0-rc3", path = "../../../primitives/state-machine" } -sp-externalities = { version = "0.8.0-rc3", path = "../../../primitives/externalities" } -sp-trie = { version = "2.0.0-rc3", path = "../../../primitives/trie" } -sp-storage = { version = "2.0.0-rc3", path = "../../../primitives/storage" } -sc-client-db = { version = "0.8.0-rc3", default-features = false, path = "../../db" } +sc-light = { version = "2.0.0-rc4", path = "../../light" } +sp-blockchain = { version = 
"2.0.0-rc4", path = "../../../primitives/blockchain" } +sp-api = { version = "2.0.0-rc4", path = "../../../primitives/api" } +sp-state-machine = { version = "0.8.0-rc4", path = "../../../primitives/state-machine" } +sp-externalities = { version = "0.8.0-rc4", path = "../../../primitives/externalities" } +sp-trie = { version = "2.0.0-rc4", path = "../../../primitives/trie" } +sp-storage = { version = "2.0.0-rc4", path = "../../../primitives/storage" } +sc-client-db = { version = "0.8.0-rc4", default-features = false, path = "../../db" } futures = { version = "0.3.1", features = ["compat"] } -sc-service = { version = "0.8.0-rc3", default-features = false, features = ["test-helpers"], path = "../../service" } -sc-network = { version = "0.8.0-rc3", path = "../../network" } -sp-consensus = { version = "0.8.0-rc3", path = "../../../primitives/consensus/common" } -sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } -sp-transaction-pool = { version = "2.0.0-rc3", path = "../../../primitives/transaction-pool" } -substrate-test-runtime = { version = "2.0.0-rc3", path = "../../../test-utils/runtime" } -substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../../../test-utils/runtime/client" } -sc-client-api = { version = "2.0.0-rc3", path = "../../api" } -sc-block-builder = { version = "0.8.0-rc3", path = "../../block-builder" } -sc-executor = { version = "0.8.0-rc3", path = "../../executor" } -sp-panic-handler = { version = "2.0.0-rc3", path = "../../../primitives/panic-handler" } +sc-service = { version = "0.8.0-rc4", default-features = false, features = ["test-helpers"], path = "../../service" } +sc-network = { version = "0.8.0-rc4", path = "../../network" } +sp-consensus = { version = "0.8.0-rc4", path = "../../../primitives/consensus/common" } +sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } +sp-transaction-pool = { version = "2.0.0-rc4", path = "../../../primitives/transaction-pool" } +substrate-test-runtime = { version = "2.0.0-rc4", path = "../../../test-utils/runtime" } +substrate-test-runtime-client = { version = "2.0.0-rc4", path = "../../../test-utils/runtime/client" } +sc-client-api = { version = "2.0.0-rc4", path = "../../api" } +sc-block-builder = { version = "0.8.0-rc4", path = "../../block-builder" } +sc-executor = { version = "0.8.0-rc4", path = "../../executor" } +sp-panic-handler = { version = "2.0.0-rc4", path = "../../../primitives/panic-handler" } parity-scale-codec = "1.3.1" diff --git a/client/state-db/Cargo.toml b/client/state-db/Cargo.toml index ee9bbf7273..7cc8d41e76 100644 --- a/client/state-db/Cargo.toml +++ b/client/state-db/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-state-db" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -14,8 +14,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] parking_lot = "0.10.0" log = "0.4.8" -sc-client-api = { version = "2.0.0-rc3", path = "../api" } -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } +sc-client-api = { version = "2.0.0-rc4", path = "../api" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } parity-util-mem = { version = "0.6.1", default-features = false, features = 
["primitive-types"] } parity-util-mem-derive = "0.1.0" diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index 13a1c81d15..95c430dad7 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-telemetry" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] description = "Telemetry utils" edition = "2018" diff --git a/client/tracing/Cargo.toml b/client/tracing/Cargo.toml index c4345648ef..c4564e5fe5 100644 --- a/client/tracing/Cargo.toml +++ b/client/tracing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-tracing" -version = "2.0.0-rc3" +version = "2.0.0-rc4" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -22,7 +22,7 @@ slog = { version = "2.5.2", features = ["nested-values"] } tracing-core = "0.1.7" sp-tracing = { version = "2.0.0-rc2", path = "../../primitives/tracing" } -sc-telemetry = { version = "2.0.0-rc3", path = "../telemetry" } +sc-telemetry = { version = "2.0.0-rc4", path = "../telemetry" } [dev-dependencies] tracing = "0.1.10" diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml index e837f40a34..bd271d8ba1 100644 --- a/client/transaction-pool/Cargo.toml +++ b/client/transaction-pool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-transaction-pool" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -20,23 +20,23 @@ intervalier = "0.4.0" log = "0.4.8" parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } parking_lot = "0.10.0" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc3"} -sc-client-api = { version = "2.0.0-rc3", path = "../api" } -sc-transaction-graph = { version = "2.0.0-rc3", path = "./graph" } -sp-api = { version = "2.0.0-rc3", path = "../../primitives/api" } -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-rc3", path = "../../primitives/runtime" } -sp-tracing = { version = "2.0.0-rc3", path = "../../primitives/tracing" } -sp-transaction-pool = { version = "2.0.0-rc3", path = "../../primitives/transaction-pool" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../primitives/blockchain" } -sp-utils = { version = "2.0.0-rc3", path = "../../primitives/utils" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc4"} +sc-client-api = { version = "2.0.0-rc4", path = "../api" } +sc-transaction-graph = { version = "2.0.0-rc4", path = "./graph" } +sp-api = { version = "2.0.0-rc4", path = "../../primitives/api" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-rc4", path = "../../primitives/runtime" } +sp-tracing = { version = "2.0.0-rc4", path = "../../primitives/tracing" } +sp-transaction-pool = { version = "2.0.0-rc4", path = "../../primitives/transaction-pool" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../primitives/blockchain" } +sp-utils = { version = "2.0.0-rc4", path = "../../primitives/utils" } wasm-timer = "0.2" [dev-dependencies] assert_matches = "1.3.0" hex = "0.4" -sp-keyring = { version = "2.0.0-rc3", path = "../../primitives/keyring" } -sp-consensus = { version = "0.8.0-rc3", path = "../../primitives/consensus/common" } -substrate-test-runtime-transaction-pool = 
{ version = "2.0.0-rc3", path = "../../test-utils/runtime/transaction-pool" } -substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../../test-utils/runtime/client" } -sc-block-builder = { version = "0.8.0-rc3", path = "../block-builder" } +sp-keyring = { version = "2.0.0-rc4", path = "../../primitives/keyring" } +sp-consensus = { version = "0.8.0-rc4", path = "../../primitives/consensus/common" } +substrate-test-runtime-transaction-pool = { version = "2.0.0-rc4", path = "../../test-utils/runtime/transaction-pool" } +substrate-test-runtime-client = { version = "2.0.0-rc4", path = "../../test-utils/runtime/client" } +sc-block-builder = { version = "0.8.0-rc4", path = "../block-builder" } diff --git a/client/transaction-pool/graph/Cargo.toml b/client/transaction-pool/graph/Cargo.toml index cb16af0f53..0a30b3a4c9 100644 --- a/client/transaction-pool/graph/Cargo.toml +++ b/client/transaction-pool/graph/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-transaction-graph" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -18,18 +18,18 @@ log = "0.4.8" parking_lot = "0.10.0" serde = { version = "1.0.101", features = ["derive"] } wasm-timer = "0.2" -sp-blockchain = { version = "2.0.0-rc3", path = "../../../primitives/blockchain" } -sp-utils = { version = "2.0.0-rc3", path = "../../../primitives/utils" } -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } -sp-transaction-pool = { version = "2.0.0-rc3", path = "../../../primitives/transaction-pool" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../../primitives/blockchain" } +sp-utils = { version = "2.0.0-rc4", path = "../../../primitives/utils" } +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } +sp-transaction-pool = { version = "2.0.0-rc4", path = "../../../primitives/transaction-pool" } parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } linked-hash-map = "0.5.2" [dev-dependencies] assert_matches = "1.3.0" codec = { package = "parity-scale-codec", version = "1.3.1" } -substrate-test-runtime = { version = "2.0.0-rc3", path = "../../../test-utils/runtime" } +substrate-test-runtime = { version = "2.0.0-rc4", path = "../../../test-utils/runtime" } criterion = "0.3" [[bench]] diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 6f55839881..78fc85acc6 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -6,6 +6,60 @@ The format is based on [Keep a Changelog]. ## Unreleased +## 2.0.0-rc3 -> 2.0.0-rc4 (Rhinoceros) + +Runtime +------- + +* Staking Payout Creates Controller (#6496) +* `pallet-scheduler`: Check that `when` is not in the past (#6480) +* Fix `sp-api` handling of multiple arguments (#6484) +* Fix issues with `Operational` transactions validity and prioritization. (#6435) +* pallet-atomic-swap: generialized swap action (#6421) +* Avoid multisig reentrancy (#6445) +* Root origin use no filter by default. 
Scheduler and Democracy dispatch without asserting BaseCallFilter (#6408) +* Scale and increase validator count (#6417) +* Pallet: Atomic Swap (#6349) +* Restrict remove_proxies (#6383) +* Stored call in multisig (#6319) +* Allow Sudo to do anything (#6375) +* vesting: Force Vested Transfer (#6368) +* Add events for balance reserve and unreserve functions (#6330) +* Introduce frozen indices. (#6307) + +Client +------ + +* client/network/service: Add primary dimension to connection metrics (#6472) +* Fix Babe secondary plain slots claiming (#6451) +* add network propagated metrics (#6438) +* client/authority-discovery: Compare PeerIds and not Multihashes (#6414) +* Update sync chain info on own block import (#6424) +* Remove --legacy-network-protocol CLI flag (#6411) +* Runtime interface to add support for tracing from wasm (#6381) +* Remove penalty on duplicate Status message (#6377) +* Fix the broken weight multiplier update function (#6334) +* client/authority-discovery: Don't add own address to priority group (#6370) +* Split the service initialisation up into seperate functions (#6332) +* Fix transaction pool event sending (#6341) +* Add a [prefix]_process_start_time_seconds metric (#6315) +* new crate sc-light (#6235) +* Allow adding a prefix to the informant (#6174) + +API +--- + +* seal: Remove ext_dispatch_call and ext_get_runtime_storage (#6464) +* seal: Refactor ext_gas_price (#6478) +* Implement nested storage transactions (#6269) +* Allow empty values in the storage (#6364) +* add system_dryRun (#6300) +* Introduce in-origin filtering (#6318) +* add extend_lock for StorageLock (#6323) +* Deprecate FunctionOf and remove its users (#6340) +* transaction-pool: expose blocking api for tx submission (#6325) + + ## 2.0.0-rc2 -> 2.0.0-rc3 Runtime diff --git a/frame/assets/Cargo.toml b/frame/assets/Cargo.toml index 33882671a4..4deb7b8a9b 100644 --- a/frame/assets/Cargo.toml +++ b/frame/assets/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-assets" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,16 +15,16 @@ targets = ["x86_64-unknown-linux-gnu"] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } # Needed for various traits. In our case, `OnFinalize`. -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } # Needed for type-safe access to storage DB. -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } # `system` module provides us with all sorts of useful stuff and macros depend on it being around. 
-frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -sp-std = { version = "2.0.0-rc3", path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc3", path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +sp-std = { version = "2.0.0-rc4", path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc4", path = "../../primitives/io" } [features] default = ["std"] diff --git a/frame/atomic-swap/Cargo.toml b/frame/atomic-swap/Cargo.toml index ce32d8b783..829c40b675 100644 --- a/frame/atomic-swap/Cargo.toml +++ b/frame/atomic-swap/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-atomic-swap" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,15 +14,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/core" } [dev-dependencies] -pallet-balances = { version = "2.0.0-rc3", path = "../balances" } +pallet-balances = { version = "2.0.0-rc4", path = "../balances" } [features] default = ["std"] diff --git a/frame/aura/Cargo.toml b/frame/aura/Cargo.toml index 5a60d23270..a648be5f10 100644 --- a/frame/aura/Cargo.toml +++ b/frame/aura/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-aura" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,23 +12,23 @@ description = "FRAME AURA consensus pallet" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/application-crypto" } +sp-application-crypto = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/application-crypto" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/inherents" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } +sp-inherents = { version = "2.0.0-rc4", default-features = false, path = 
"../../primitives/inherents" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } -pallet-session = { version = "2.0.0-rc3", default-features = false, path = "../session" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -sp-consensus-aura = { version = "0.8.0-rc3", path = "../../primitives/consensus/aura", default-features = false } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } -sp-timestamp = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/timestamp" } -pallet-timestamp = { version = "2.0.0-rc3", default-features = false, path = "../timestamp" } +pallet-session = { version = "2.0.0-rc4", default-features = false, path = "../session" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +sp-consensus-aura = { version = "0.8.0-rc4", path = "../../primitives/consensus/aura", default-features = false } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } +sp-timestamp = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/timestamp" } +pallet-timestamp = { version = "2.0.0-rc4", default-features = false, path = "../timestamp" } [dev-dependencies] -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } -sp-io ={ version = "2.0.0-rc3", path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/core" } +sp-io ={ version = "2.0.0-rc4", path = "../../primitives/io" } lazy_static = "1.4.0" parking_lot = "0.10.0" diff --git a/frame/authority-discovery/Cargo.toml b/frame/authority-discovery/Cargo.toml index e3c7a256a9..3270437ce8 100644 --- a/frame/authority-discovery/Cargo.toml +++ b/frame/authority-discovery/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-authority-discovery" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,20 +12,20 @@ description = "FRAME pallet for authority discovery" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-authority-discovery = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/authority-discovery" } -sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/application-crypto" } +sp-authority-discovery = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/authority-discovery" } +sp-application-crypto = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/application-crypto" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } -pallet-session = { version = "2.0.0-rc3", features = ["historical" ], path = "../session", default-features = false } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc3", 
default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } +pallet-session = { version = "2.0.0-rc4", features = ["historical" ], path = "../session", default-features = false } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -sp-io = { version = "2.0.0-rc3", path = "../../primitives/io" } -sp-staking = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/staking" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +sp-io = { version = "2.0.0-rc4", path = "../../primitives/io" } +sp-staking = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/staking" } [features] default = ["std"] diff --git a/frame/authorship/Cargo.toml b/frame/authorship/Cargo.toml index 9cc25b075d..08114eb401 100644 --- a/frame/authorship/Cargo.toml +++ b/frame/authorship/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-authorship" -version = "2.0.0-rc3" +version = "2.0.0-rc4" description = "Block and Uncle Author tracking for the FRAME" authors = ["Parity Technologies "] edition = "2018" @@ -13,17 +13,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/inherents" } -sp-authorship = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/authorship" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } +sp-inherents = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/inherents" } +sp-authorship = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/authorship" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } impl-trait-for-tuples = "0.1.3" [dev-dependencies] -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -sp-io ={ version = "2.0.0-rc3", path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +sp-io ={ version = "2.0.0-rc4", path = "../../primitives/io" } [features] default = ["std"] diff --git a/frame/babe/Cargo.toml b/frame/babe/Cargo.toml index 5e9dcf7fb5..845acce5f2 100644 --- a/frame/babe/Cargo.toml +++ b/frame/babe/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-babe" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,22 +14,22 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = 
"parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true } -sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/inherents" } -sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/application-crypto" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } -pallet-timestamp = { version = "2.0.0-rc3", default-features = false, path = "../timestamp" } -sp-timestamp = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/timestamp" } -pallet-session = { version = "2.0.0-rc3", default-features = false, path = "../session" } -sp-consensus-babe = { version = "0.8.0-rc3", default-features = false, path = "../../primitives/consensus/babe" } -sp-consensus-vrf = { version = "0.8.0-rc3", default-features = false, path = "../../primitives/consensus/vrf" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } +sp-inherents = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/inherents" } +sp-application-crypto = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/application-crypto" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } +pallet-timestamp = { version = "2.0.0-rc4", default-features = false, path = "../timestamp" } +sp-timestamp = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/timestamp" } +pallet-session = { version = "2.0.0-rc4", default-features = false, path = "../session" } +sp-consensus-babe = { version = "0.8.0-rc4", default-features = false, path = "../../primitives/consensus/babe" } +sp-consensus-vrf = { version = "0.8.0-rc4", default-features = false, path = "../../primitives/consensus/vrf" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } [dev-dependencies] -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/balances/Cargo.toml b/frame/balances/Cargo.toml index 02b5732e00..88c8657d47 100644 --- a/frame/balances/Cargo.toml +++ b/frame/balances/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-balances" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,16 +14,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = 
"2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0-rc3", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = "2.0.0-rc4", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } [dev-dependencies] -sp-io = { version = "2.0.0-rc3", path = "../../primitives/io" } -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -pallet-transaction-payment = { version = "2.0.0-rc3", path = "../transaction-payment" } +sp-io = { version = "2.0.0-rc4", path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +pallet-transaction-payment = { version = "2.0.0-rc4", path = "../transaction-payment" } [features] default = ["std"] diff --git a/frame/benchmark/Cargo.toml b/frame/benchmark/Cargo.toml index 2821d52f5b..79d9a8d771 100644 --- a/frame/benchmark/Cargo.toml +++ b/frame/benchmark/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-benchmark" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,12 +14,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } -frame-benchmarking = { version = "2.0.0-rc3", default-features = false, path = "../benchmarking", optional = true } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } +frame-benchmarking = { version = "2.0.0-rc4", default-features = false, path = "../benchmarking", optional = true } [features] default = ["std"] diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml index 5c6306ebbb..0823ec626c 100644 --- a/frame/benchmarking/Cargo.toml +++ b/frame/benchmarking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-benchmarking" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" 
license = "Apache-2.0" @@ -15,13 +15,13 @@ targets = ["x86_64-unknown-linux-gnu"] linregress = "0.1" paste = "0.1" codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-api = { version = "2.0.0-rc3", path = "../../primitives/api", default-features = false } -sp-runtime-interface = { version = "2.0.0-rc3", path = "../../primitives/runtime-interface", default-features = false } -sp-runtime = { version = "2.0.0-rc3", path = "../../primitives/runtime", default-features = false } -sp-std = { version = "2.0.0-rc3", path = "../../primitives/std", default-features = false } -sp-io = { version = "2.0.0-rc3", path = "../../primitives/io", default-features = false } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } +sp-api = { version = "2.0.0-rc4", path = "../../primitives/api", default-features = false } +sp-runtime-interface = { version = "2.0.0-rc4", path = "../../primitives/runtime-interface", default-features = false } +sp-runtime = { version = "2.0.0-rc4", path = "../../primitives/runtime", default-features = false } +sp-std = { version = "2.0.0-rc4", path = "../../primitives/std", default-features = false } +sp-io = { version = "2.0.0-rc4", path = "../../primitives/io", default-features = false } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } [features] default = [ "std" ] diff --git a/frame/collective/Cargo.toml b/frame/collective/Cargo.toml index 5517f3b03f..c1b2f01089 100644 --- a/frame/collective/Cargo.toml +++ b/frame/collective/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-collective" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,17 +14,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0-rc3", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = "2.0.0-rc4", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } [dev-dependencies] hex-literal = 
"0.2.1" -pallet-balances = { version = "2.0.0-rc3", path = "../balances" } +pallet-balances = { version = "2.0.0-rc4", path = "../balances" } [features] default = ["std"] diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index 2dee486fcf..348b8ff0e0 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-contracts" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -17,23 +17,23 @@ pwasm-utils = { version = "0.12.0", default-features = false } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } parity-wasm = { version = "0.41.0", default-features = false } wasmi-validation = { version = "0.3.0", default-features = false } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-sandbox = { version = "0.8.0-rc3", default-features = false, path = "../../primitives/sandbox" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } -pallet-contracts-primitives = { version = "2.0.0-rc3", default-features = false, path = "common" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-sandbox = { version = "0.8.0-rc4", default-features = false, path = "../../primitives/sandbox" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } +pallet-contracts-primitives = { version = "2.0.0-rc4", default-features = false, path = "common" } [dev-dependencies] wabt = "0.9.2" assert_matches = "1.3.0" hex-literal = "0.2.1" pretty_assertions = "0.6.1" -pallet-balances = { version = "2.0.0-rc3", path = "../balances" } -pallet-timestamp = { version = "2.0.0-rc3", path = "../timestamp" } -pallet-randomness-collective-flip = { version = "2.0.0-rc3", path = "../randomness-collective-flip" } +pallet-balances = { version = "2.0.0-rc4", path = "../balances" } +pallet-timestamp = { version = "2.0.0-rc4", path = "../timestamp" } +pallet-randomness-collective-flip = { version = "2.0.0-rc4", path = "../randomness-collective-flip" } [features] default = ["std"] diff --git a/frame/contracts/common/Cargo.toml b/frame/contracts/common/Cargo.toml index 520b723933..e6e2bc653a 100644 --- a/frame/contracts/common/Cargo.toml +++ b/frame/contracts/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-contracts-primitives" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,8 +14,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # This crate should not rely on any of the frame primitives. 
codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/std" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/runtime" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/std" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/runtime" } [features] default = ["std"] diff --git a/frame/contracts/rpc/Cargo.toml b/frame/contracts/rpc/Cargo.toml index 75dc1bf3fb..35989a3490 100644 --- a/frame/contracts/rpc/Cargo.toml +++ b/frame/contracts/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-contracts-rpc" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -16,14 +16,14 @@ codec = { package = "parity-scale-codec", version = "1.3.1" } jsonrpc-core = "14.2.0" jsonrpc-core-client = "14.2.0" jsonrpc-derive = "14.2.1" -sp-blockchain = { version = "2.0.0-rc3", path = "../../../primitives/blockchain" } -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } -sp-rpc = { version = "2.0.0-rc3", path = "../../../primitives/rpc" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../../primitives/blockchain" } +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } +sp-rpc = { version = "2.0.0-rc4", path = "../../../primitives/rpc" } serde = { version = "1.0.101", features = ["derive"] } -sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0-rc3", path = "../../../primitives/api" } -pallet-contracts-primitives = { version = "2.0.0-rc3", path = "../common" } -pallet-contracts-rpc-runtime-api = { version = "0.8.0-rc3", path = "./runtime-api" } +sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } +sp-api = { version = "2.0.0-rc4", path = "../../../primitives/api" } +pallet-contracts-primitives = { version = "2.0.0-rc4", path = "../common" } +pallet-contracts-rpc-runtime-api = { version = "0.8.0-rc4", path = "./runtime-api" } [dev-dependencies] serde_json = "1.0.41" diff --git a/frame/contracts/rpc/runtime-api/Cargo.toml b/frame/contracts/rpc/runtime-api/Cargo.toml index 3596677316..e97003c44d 100644 --- a/frame/contracts/rpc/runtime-api/Cargo.toml +++ b/frame/contracts/rpc/runtime-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-contracts-rpc-runtime-api" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,11 +12,11 @@ description = "Runtime API definition required by Contracts RPC extensions." 
targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { version = "2.0.0-rc3", default-features = false, path = "../../../../primitives/api" } +sp-api = { version = "2.0.0-rc4", default-features = false, path = "../../../../primitives/api" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../../../primitives/std" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../../../primitives/runtime" } -pallet-contracts-primitives = { version = "2.0.0-rc3", default-features = false, path = "../../common" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../../../primitives/std" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../../../primitives/runtime" } +pallet-contracts-primitives = { version = "2.0.0-rc4", default-features = false, path = "../../common" } [features] default = ["std"] diff --git a/frame/democracy/Cargo.toml b/frame/democracy/Cargo.toml index fea378caca..9532be0e8e 100644 --- a/frame/democracy/Cargo.toml +++ b/frame/democracy/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-democracy" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,19 +14,19 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0-rc3", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = "2.0.0-rc4", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-rc3", path = "../balances" } -pallet-scheduler = { version = "2.0.0-rc3", path = "../scheduler" } -sp-storage = { version = "2.0.0-rc3", path = "../../primitives/storage" } -substrate-test-utils = { version = "2.0.0-rc3", path = "../../test-utils" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-rc4", path = "../balances" } +pallet-scheduler = { version = "2.0.0-rc4", path = "../scheduler" } +sp-storage = { version = "2.0.0-rc4", path = "../../primitives/storage" } +substrate-test-utils = { version = "2.0.0-rc4", path = "../../test-utils" } hex-literal = "0.2.1" [features] diff --git 
a/frame/elections-phragmen/Cargo.toml b/frame/elections-phragmen/Cargo.toml index 08cdc5a98e..afbd53d3da 100644 --- a/frame/elections-phragmen/Cargo.toml +++ b/frame/elections-phragmen/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-elections-phragmen" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,19 +14,19 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -sp-npos-elections = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/npos-elections" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -frame-benchmarking = { version = "2.0.0-rc3", default-features = false, path = "../benchmarking", optional = true } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-npos-elections = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/npos-elections" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +frame-benchmarking = { version = "2.0.0-rc4", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-io = { version = "2.0.0-rc3", path = "../../primitives/io" } +sp-io = { version = "2.0.0-rc4", path = "../../primitives/io" } hex-literal = "0.2.1" -pallet-balances = { version = "2.0.0-rc3", path = "../balances" } -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -substrate-test-utils = { version = "2.0.0-rc3", path = "../../test-utils" } +pallet-balances = { version = "2.0.0-rc4", path = "../balances" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +substrate-test-utils = { version = "2.0.0-rc4", path = "../../test-utils" } [features] default = ["std"] diff --git a/frame/elections/Cargo.toml b/frame/elections/Cargo.toml index d03ad4f056..b7914d66fd 100644 --- a/frame/elections/Cargo.toml +++ b/frame/elections/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-elections" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,16 +14,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = 
false, path = "../system" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } [dev-dependencies] hex-literal = "0.2.1" -pallet-balances = { version = "2.0.0-rc3", path = "../balances" } +pallet-balances = { version = "2.0.0-rc4", path = "../balances" } [features] default = ["std"] diff --git a/frame/evm/Cargo.toml b/frame/evm/Cargo.toml index 1a6d691cde..8b030be4b7 100644 --- a/frame/evm/Cargo.toml +++ b/frame/evm/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-evm" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,14 +14,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } -pallet-timestamp = { version = "2.0.0-rc3", default-features = false, path = "../timestamp" } -pallet-balances = { version = "2.0.0-rc3", default-features = false, path = "../balances" } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } +pallet-timestamp = { version = "2.0.0-rc4", default-features = false, path = "../timestamp" } +pallet-balances = { version = "2.0.0-rc4", default-features = false, path = "../balances" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } primitive-types = { version = "0.7.0", default-features = false, features = ["rlp"] } rlp = { version = "0.4", default-features = false } evm = { version = "0.16", default-features = false } diff --git a/frame/example-offchain-worker/Cargo.toml b/frame/example-offchain-worker/Cargo.toml index f93ffcf9e4..50d398a512 100644 --- a/frame/example-offchain-worker/Cargo.toml +++ b/frame/example-offchain-worker/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-example-offchain-worker" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Unlicense" @@ -13,13 +13,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", 
version = "1.3.1", default-features = false } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } serde = { version = "1.0.101", optional = true } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/core" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } lite-json = { version = "0.1", default-features = false } [features] diff --git a/frame/example/Cargo.toml b/frame/example/Cargo.toml index 597f2266c3..cf09a3d4b2 100644 --- a/frame/example/Cargo.toml +++ b/frame/example/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-example" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Unlicense" @@ -14,17 +14,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } -pallet-balances = { version = "2.0.0-rc3", default-features = false, path = "../balances" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } +pallet-balances = { version = "2.0.0-rc4", default-features = false, path = "../balances" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } -frame-benchmarking = { version = "2.0.0-rc3", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0-rc4", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core", default-features = false } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core", default-features = false } [features] default = ["std"] diff --git a/frame/executive/Cargo.toml b/frame/executive/Cargo.toml index a922333eb9..52225d9824 100644 --- a/frame/executive/Cargo.toml +++ 
b/frame/executive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-executive" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,22 +13,22 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } serde = { version = "1.0.101", optional = true } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -sp-tracing = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/tracing" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-tracing = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/tracing" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } [dev-dependencies] hex-literal = "0.2.1" -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -sp-io ={ version = "2.0.0-rc3", path = "../../primitives/io" } -pallet-indices = { version = "2.0.0-rc3", path = "../indices" } -pallet-balances = { version = "2.0.0-rc3", path = "../balances" } -pallet-transaction-payment = { version = "2.0.0-rc3", path = "../transaction-payment" } -sp-version = { version = "2.0.0-rc3", path = "../../primitives/version" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +sp-io ={ version = "2.0.0-rc4", path = "../../primitives/io" } +pallet-indices = { version = "2.0.0-rc4", path = "../indices" } +pallet-balances = { version = "2.0.0-rc4", path = "../balances" } +pallet-transaction-payment = { version = "2.0.0-rc4", path = "../transaction-payment" } +sp-version = { version = "2.0.0-rc4", path = "../../primitives/version" } [features] default = ["std"] diff --git a/frame/finality-tracker/Cargo.toml b/frame/finality-tracker/Cargo.toml index 497f4fdec7..f9922af84e 100644 --- a/frame/finality-tracker/Cargo.toml +++ b/frame/finality-tracker/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-finality-tracker" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -16,17 +16,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", default-features = false, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/inherents" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -sp-finality-tracker = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/finality-tracker" } -frame-support = { 
version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } +sp-inherents = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/inherents" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-finality-tracker = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/finality-tracker" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } impl-trait-for-tuples = "0.1.3" [dev-dependencies] -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/core" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } [features] default = ["std"] diff --git a/frame/generic-asset/Cargo.toml b/frame/generic-asset/Cargo.toml index cdac7a6d6d..f39a458378 100644 --- a/frame/generic-asset/Cargo.toml +++ b/frame/generic-asset/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-generic-asset" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Centrality Developers "] edition = "2018" license = "Apache-2.0" @@ -14,14 +14,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } [dev-dependencies] -sp-io ={ version = "2.0.0-rc3", path = "../../primitives/io" } -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } +sp-io ={ version = "2.0.0-rc4", path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/grandpa/Cargo.toml b/frame/grandpa/Cargo.toml index 1ec939c9bd..0f2477d50e 100644 --- a/frame/grandpa/Cargo.toml +++ b/frame/grandpa/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-grandpa" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,27 +14,27 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = 
"../../primitives/application-crypto" } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } -sp-finality-grandpa = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/finality-grandpa" } -sp-session = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/session" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } -pallet-session = { version = "2.0.0-rc3", default-features = false, path = "../session" } -pallet-finality-tracker = { version = "2.0.0-rc3", default-features = false, path = "../finality-tracker" } +sp-application-crypto = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/application-crypto" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/core" } +sp-finality-grandpa = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/finality-grandpa" } +sp-session = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/session" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } +pallet-session = { version = "2.0.0-rc4", default-features = false, path = "../session" } +pallet-finality-tracker = { version = "2.0.0-rc4", default-features = false, path = "../finality-tracker" } [dev-dependencies] grandpa = { package = "finality-grandpa", version = "0.12.3", features = ["derive-codec"] } -sp-io = { version = "2.0.0-rc3", path = "../../primitives/io" } -sp-keyring = { version = "2.0.0-rc3", path = "../../primitives/keyring" } -pallet-balances = { version = "2.0.0-rc3", path = "../balances" } -pallet-offences = { version = "2.0.0-rc3", path = "../offences" } -pallet-staking = { version = "2.0.0-rc3", path = "../staking" } -pallet-staking-reward-curve = { version = "2.0.0-rc3", path = "../staking/reward-curve" } -pallet-timestamp = { version = "2.0.0-rc3", path = "../timestamp" } +sp-io = { version = "2.0.0-rc4", path = "../../primitives/io" } +sp-keyring = { version = "2.0.0-rc4", path = "../../primitives/keyring" } +pallet-balances = { version = "2.0.0-rc4", path = "../balances" } +pallet-offences = { version = "2.0.0-rc4", path = "../offences" } +pallet-staking = { version = "2.0.0-rc4", path = "../staking" } +pallet-staking-reward-curve = { version = "2.0.0-rc4", path = "../staking/reward-curve" } +pallet-timestamp = { version = "2.0.0-rc4", path = "../timestamp" } [features] default = ["std"] diff --git a/frame/identity/Cargo.toml b/frame/identity/Cargo.toml index 0435d8c086..8dcfd5bd2d 100644 --- a/frame/identity/Cargo.toml +++ b/frame/identity/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-identity" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = 
["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,16 +15,16 @@ targets = ["x86_64-unknown-linux-gnu"] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0-rc3", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = "2.0.0-rc4", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-rc3", path = "../balances" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-rc4", path = "../balances" } [features] default = ["std"] diff --git a/frame/im-online/Cargo.toml b/frame/im-online/Cargo.toml index 2f89ff2cb2..7324342ec8 100644 --- a/frame/im-online/Cargo.toml +++ b/frame/im-online/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-im-online" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,20 +12,20 @@ description = "FRAME's I'm online pallet" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/application-crypto" } -pallet-authorship = { version = "2.0.0-rc3", default-features = false, path = "../authorship" } +sp-application-crypto = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/application-crypto" } +pallet-authorship = { version = "2.0.0-rc4", default-features = false, path = "../authorship" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } -pallet-session = { version = "2.0.0-rc3", default-features = false, path = "../session" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = 
"2.0.0-rc3", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } +pallet-session = { version = "2.0.0-rc4", default-features = false, path = "../session" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } -frame-benchmarking = { version = "2.0.0-rc3", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0-rc4", default-features = false, path = "../benchmarking", optional = true } [features] default = ["std", "pallet-session/historical"] diff --git a/frame/indices/Cargo.toml b/frame/indices/Cargo.toml index 2c856064e7..3ec8ea363b 100644 --- a/frame/indices/Cargo.toml +++ b/frame/indices/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-indices" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,18 +14,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-keyring = { version = "2.0.0-rc3", optional = true, path = "../../primitives/keyring" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } +sp-keyring = { version = "2.0.0-rc4", optional = true, path = "../../primitives/keyring" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/core" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } -frame-benchmarking = { version = "2.0.0-rc3", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0-rc4", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -pallet-balances = { version = "2.0.0-rc3", path = "../balances" } +pallet-balances = { version = "2.0.0-rc4", path = "../balances" } [features] default = ["std"] diff --git a/frame/membership/Cargo.toml b/frame/membership/Cargo.toml index e0c94da308..5df5d4ad6e 100644 --- a/frame/membership/Cargo.toml +++ 
b/frame/membership/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-membership" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,14 +14,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } [dev-dependencies] -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/metadata/Cargo.toml b/frame/metadata/Cargo.toml index a8fb9eae5f..ae9cf736e9 100644 --- a/frame/metadata/Cargo.toml +++ b/frame/metadata/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-metadata" -version = "11.0.0-rc3" +version = "11.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,8 +14,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/multisig/Cargo.toml b/frame/multisig/Cargo.toml index 44ea4dc3e9..d0b79bf4e3 100644 --- a/frame/multisig/Cargo.toml +++ b/frame/multisig/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-multisig" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,18 +14,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-io = { 
version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } -frame-benchmarking = { version = "2.0.0-rc3", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0-rc4", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-rc3", path = "../balances" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-rc4", path = "../balances" } [features] default = ["std"] diff --git a/frame/nicks/Cargo.toml b/frame/nicks/Cargo.toml index 544a0dc734..143e5b198e 100644 --- a/frame/nicks/Cargo.toml +++ b/frame/nicks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-nicks" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,15 +14,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-rc3", path = "../balances" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-rc4", path = "../balances" } [features] default = ["std"] diff --git a/frame/offences/Cargo.toml b/frame/offences/Cargo.toml index 0b8b74c4a9..74487ba163 100644 --- a/frame/offences/Cargo.toml +++ b/frame/offences/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-offences" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,18 +12,18 @@ description = "FRAME offences pallet" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -pallet-balances = { version = "2.0.0-rc3", default-features = false, path = "../balances" } 
+pallet-balances = { version = "2.0.0-rc4", default-features = false, path = "../balances" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } [dev-dependencies] -sp-io = { version = "2.0.0-rc3", path = "../../primitives/io" } -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } +sp-io = { version = "2.0.0-rc4", path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/offences/benchmarking/Cargo.toml b/frame/offences/benchmarking/Cargo.toml index ad8520484e..b942a98baa 100644 --- a/frame/offences/benchmarking/Cargo.toml +++ b/frame/offences/benchmarking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-offences-benchmarking" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,27 +13,27 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -frame-benchmarking = { version = "2.0.0-rc3", default-features = false, path = "../../benchmarking" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../../system" } -pallet-babe = { version = "2.0.0-rc3", default-features = false, path = "../../babe" } -pallet-balances = { version = "2.0.0-rc3", default-features = false, path = "../../balances" } -pallet-grandpa = { version = "2.0.0-rc3", default-features = false, path = "../../grandpa" } -pallet-im-online = { version = "2.0.0-rc3", default-features = false, path = "../../im-online" } -pallet-offences = { version = "2.0.0-rc3", default-features = false, features = ["runtime-benchmarks"], path = "../../offences" } -pallet-session = { version = "2.0.0-rc3", default-features = false, path = "../../session" } -pallet-staking = { version = "2.0.0-rc3", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/runtime" } -sp-staking = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/staking" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/std" } +frame-benchmarking = { version = "2.0.0-rc4", default-features = false, path = "../../benchmarking" } +frame-support = { version = "2.0.0-rc4", 
default-features = false, path = "../../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../../system" } +pallet-babe = { version = "2.0.0-rc4", default-features = false, path = "../../babe" } +pallet-balances = { version = "2.0.0-rc4", default-features = false, path = "../../balances" } +pallet-grandpa = { version = "2.0.0-rc4", default-features = false, path = "../../grandpa" } +pallet-im-online = { version = "2.0.0-rc4", default-features = false, path = "../../im-online" } +pallet-offences = { version = "2.0.0-rc4", default-features = false, features = ["runtime-benchmarks"], path = "../../offences" } +pallet-session = { version = "2.0.0-rc4", default-features = false, path = "../../session" } +pallet-staking = { version = "2.0.0-rc4", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/runtime" } +sp-staking = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/staking" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/std" } [dev-dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } -pallet-staking-reward-curve = { version = "2.0.0-rc3", path = "../../staking/reward-curve" } -pallet-timestamp = { version = "2.0.0-rc3", path = "../../timestamp" } +pallet-staking-reward-curve = { version = "2.0.0-rc4", path = "../../staking/reward-curve" } +pallet-timestamp = { version = "2.0.0-rc4", path = "../../timestamp" } serde = { version = "1.0.101" } -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } -sp-io = { version = "2.0.0-rc3", path = "../../../primitives/io" } +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } +sp-io = { version = "2.0.0-rc4", path = "../../../primitives/io" } [features] default = ["std"] diff --git a/frame/proxy/Cargo.toml b/frame/proxy/Cargo.toml index 215f362cc8..07e2abac31 100644 --- a/frame/proxy/Cargo.toml +++ b/frame/proxy/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-proxy" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,19 +14,19 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/core" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-rc4", 
default-features = false, path = "../../primitives/std" } -frame-benchmarking = { version = "2.0.0-rc3", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0-rc4", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-rc3", path = "../balances" } -pallet-utility = { version = "2.0.0-rc3", path = "../utility" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-rc4", path = "../balances" } +pallet-utility = { version = "2.0.0-rc4", path = "../utility" } [features] default = ["std"] diff --git a/frame/randomness-collective-flip/Cargo.toml b/frame/randomness-collective-flip/Cargo.toml index 7e64539491..64324bc8c5 100644 --- a/frame/randomness-collective-flip/Cargo.toml +++ b/frame/randomness-collective-flip/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-randomness-collective-flip" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,14 +14,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] safe-mix = { version = "1.0", default-features = false } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } [dev-dependencies] -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -sp-io = { version = "2.0.0-rc3", path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +sp-io = { version = "2.0.0-rc4", path = "../../primitives/io" } [features] default = ["std"] diff --git a/frame/recovery/Cargo.toml b/frame/recovery/Cargo.toml index 33f7b5e521..63f4d4dcdd 100644 --- a/frame/recovery/Cargo.toml +++ b/frame/recovery/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-recovery" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,15 +15,15 @@ targets = ["x86_64-unknown-linux-gnu"] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } +sp-std = { version = 
"2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-rc3", path = "../balances" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-rc4", path = "../balances" } [features] default = ["std"] diff --git a/frame/scheduler/Cargo.toml b/frame/scheduler/Cargo.toml index 7db67bb3c1..43507bd364 100644 --- a/frame/scheduler/Cargo.toml +++ b/frame/scheduler/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-scheduler" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Unlicense" @@ -11,16 +11,16 @@ description = "FRAME example pallet" [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } -frame-benchmarking = { version = "2.0.0-rc3", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0-rc4", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core", default-features = false } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core", default-features = false } [features] default = ["std"] diff --git a/frame/scored-pool/Cargo.toml b/frame/scored-pool/Cargo.toml index d1e0a5d62e..05fc56fc65 100644 --- a/frame/scored-pool/Cargo.toml +++ b/frame/scored-pool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-scored-pool" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,15 +14,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc3", default-features = false, 
path = "../../primitives/std" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } [dev-dependencies] -pallet-balances = { version = "2.0.0-rc3", path = "../balances" } -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-rc4", path = "../balances" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/session/Cargo.toml b/frame/session/Cargo.toml index 38eef24bc6..c882df7115 100644 --- a/frame/session/Cargo.toml +++ b/frame/session/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-session" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,20 +14,20 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -sp-session = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/session" } -sp-staking = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } -pallet-timestamp = { version = "2.0.0-rc3", default-features = false, path = "../timestamp" } -sp-trie = { version = "2.0.0-rc3", optional = true, default-features = false, path = "../../primitives/trie" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-session = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/session" } +sp-staking = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } +pallet-timestamp = { version = "2.0.0-rc4", default-features = false, path = "../timestamp" } +sp-trie = { version = "2.0.0-rc4", optional = true, default-features = false, path = "../../primitives/trie" } impl-trait-for-tuples = "0.1.3" [dev-dependencies] 
-sp-application-crypto = { version = "2.0.0-rc3", path = "../../primitives/application-crypto" } +sp-application-crypto = { version = "2.0.0-rc4", path = "../../primitives/application-crypto" } lazy_static = "1.4.0" [features] diff --git a/frame/session/benchmarking/Cargo.toml b/frame/session/benchmarking/Cargo.toml index b2c70c28d1..391b80237e 100644 --- a/frame/session/benchmarking/Cargo.toml +++ b/frame/session/benchmarking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-session-benchmarking" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,22 +12,22 @@ description = "FRAME sessions pallet benchmarking" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/std" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/runtime" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../../system" } -frame-benchmarking = { version = "2.0.0-rc3", default-features = false, path = "../../benchmarking" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../../support" } -pallet-staking = { version = "2.0.0-rc3", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" } -pallet-session = { version = "2.0.0-rc3", default-features = false, path = "../../session" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/std" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/runtime" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../../system" } +frame-benchmarking = { version = "2.0.0-rc4", default-features = false, path = "../../benchmarking" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../../support" } +pallet-staking = { version = "2.0.0-rc4", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" } +pallet-session = { version = "2.0.0-rc4", default-features = false, path = "../../session" } [dev-dependencies] serde = { version = "1.0.101" } codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } -pallet-staking-reward-curve = { version = "2.0.0-rc3", path = "../../staking/reward-curve" } -sp-io ={ version = "2.0.0-rc3", path = "../../../primitives/io" } -pallet-timestamp = { version = "2.0.0-rc3", path = "../../timestamp" } -pallet-balances = { version = "2.0.0-rc3", path = "../../balances" } +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } +pallet-staking-reward-curve = { version = "2.0.0-rc4", path = "../../staking/reward-curve" } +sp-io ={ version = "2.0.0-rc4", path = "../../../primitives/io" } +pallet-timestamp = { version = "2.0.0-rc4", path = "../../timestamp" } +pallet-balances = { version = "2.0.0-rc4", path = "../../balances" } [features] default = ["std"] diff --git a/frame/society/Cargo.toml b/frame/society/Cargo.toml index eb28046d3f..67c4c32966 100644 --- a/frame/society/Cargo.toml +++ b/frame/society/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-society" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,16 +14,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { 
package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } rand_chacha = { version = "0.2", default-features = false } [dev-dependencies] -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -sp-io ={ version = "2.0.0-rc3", path = "../../primitives/io" } -pallet-balances = { version = "2.0.0-rc3", path = "../balances" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +sp-io ={ version = "2.0.0-rc4", path = "../../primitives/io" } +pallet-balances = { version = "2.0.0-rc4", path = "../balances" } [features] default = ["std"] diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml index 45b2b42d97..144095cfa9 100644 --- a/frame/staking/Cargo.toml +++ b/frame/staking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-staking" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,29 +15,29 @@ targets = ["x86_64-unknown-linux-gnu"] static_assertions = "1.1.0" serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-npos-elections = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/npos-elections" } -sp-io ={ version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } -pallet-session = { version = "2.0.0-rc3", default-features = false, features = ["historical"], path = "../session" } -pallet-authorship = { version = "2.0.0-rc3", default-features = false, path = "../authorship" } -sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/application-crypto" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-npos-elections = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/npos-elections" } +sp-io ={ version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system 
= { version = "2.0.0-rc4", default-features = false, path = "../system" } +pallet-session = { version = "2.0.0-rc4", default-features = false, features = ["historical"], path = "../session" } +pallet-authorship = { version = "2.0.0-rc4", default-features = false, path = "../authorship" } +sp-application-crypto = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/application-crypto" } # Optional imports for benchmarking -frame-benchmarking = { version = "2.0.0-rc3", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0-rc4", default-features = false, path = "../benchmarking", optional = true } rand_chacha = { version = "0.2", default-features = false, optional = true } [dev-dependencies] -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -sp-storage = { version = "2.0.0-rc3", path = "../../primitives/storage" } -pallet-balances = { version = "2.0.0-rc3", path = "../balances" } -pallet-timestamp = { version = "2.0.0-rc3", path = "../timestamp" } -pallet-staking-reward-curve = { version = "2.0.0-rc3", path = "../staking/reward-curve" } -substrate-test-utils = { version = "2.0.0-rc3", path = "../../test-utils" } -frame-benchmarking = { version = "2.0.0-rc3", path = "../benchmarking" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +sp-storage = { version = "2.0.0-rc4", path = "../../primitives/storage" } +pallet-balances = { version = "2.0.0-rc4", path = "../balances" } +pallet-timestamp = { version = "2.0.0-rc4", path = "../timestamp" } +pallet-staking-reward-curve = { version = "2.0.0-rc4", path = "../staking/reward-curve" } +substrate-test-utils = { version = "2.0.0-rc4", path = "../../test-utils" } +frame-benchmarking = { version = "2.0.0-rc4", path = "../benchmarking" } rand_chacha = { version = "0.2" } parking_lot = "0.10.2" env_logger = "0.7.1" diff --git a/frame/staking/fuzzer/Cargo.toml b/frame/staking/fuzzer/Cargo.toml index 97d79ecad5..5cd0ae1180 100644 --- a/frame/staking/fuzzer/Cargo.toml +++ b/frame/staking/fuzzer/Cargo.toml @@ -15,19 +15,19 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] honggfuzz = "0.5" codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -pallet-staking = { version = "2.0.0-rc3", path = "..", features = ["runtime-benchmarks"] } -pallet-staking-reward-curve = { version = "2.0.0-rc3", path = "../reward-curve" } -pallet-session = { version = "2.0.0-rc3", path = "../../session" } -pallet-indices = { version = "2.0.0-rc3", path = "../../indices" } -pallet-balances = { version = "2.0.0-rc3", path = "../../balances" } -pallet-timestamp = { version = "2.0.0-rc3", path = "../../timestamp" } -frame-system = { version = "2.0.0-rc3", path = "../../system" } -frame-support = { version = "2.0.0-rc3", path = "../../support" } -sp-std = { version = "2.0.0-rc3", path = "../../../primitives/std" } -sp-io ={ version = "2.0.0-rc3", path = "../../../primitives/io" } -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } -sp-npos-elections = { version = "2.0.0-rc3", path = "../../../primitives/npos-elections" } -sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } +pallet-staking = { version = "2.0.0-rc4", path = "..", features = ["runtime-benchmarks"] } +pallet-staking-reward-curve = { version = "2.0.0-rc4", path = "../reward-curve" } +pallet-session = { version = "2.0.0-rc4", path = "../../session" } +pallet-indices = { version = "2.0.0-rc4", path = 
"../../indices" } +pallet-balances = { version = "2.0.0-rc4", path = "../../balances" } +pallet-timestamp = { version = "2.0.0-rc4", path = "../../timestamp" } +frame-system = { version = "2.0.0-rc4", path = "../../system" } +frame-support = { version = "2.0.0-rc4", path = "../../support" } +sp-std = { version = "2.0.0-rc4", path = "../../../primitives/std" } +sp-io ={ version = "2.0.0-rc4", path = "../../../primitives/io" } +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } +sp-npos-elections = { version = "2.0.0-rc4", path = "../../../primitives/npos-elections" } +sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } [[bin]] name = "submit_solution" diff --git a/frame/staking/reward-curve/Cargo.toml b/frame/staking/reward-curve/Cargo.toml index db4241b182..3d677c7456 100644 --- a/frame/staking/reward-curve/Cargo.toml +++ b/frame/staking/reward-curve/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-staking-reward-curve" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -21,4 +21,4 @@ proc-macro2 = "1.0.6" proc-macro-crate = "0.1.4" [dev-dependencies] -sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } +sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } diff --git a/frame/sudo/Cargo.toml b/frame/sudo/Cargo.toml index 1bdd2aab69..8bb5499770 100644 --- a/frame/sudo/Cargo.toml +++ b/frame/sudo/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-sudo" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,14 +14,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index e648eaf32d..596faf2639 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,25 +15,25 @@ targets = ["x86_64-unknown-linux-gnu"] log = "0.4" serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.1", 
default-features = false, features = ["derive"] } -frame-metadata = { version = "11.0.0-rc3", default-features = false, path = "../metadata" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -sp-tracing = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/tracing" } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } -sp-arithmetic = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/arithmetic" } -sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/inherents" } -frame-support-procedural = { version = "2.0.0-rc3", path = "./procedural" } +frame-metadata = { version = "11.0.0-rc4", default-features = false, path = "../metadata" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-tracing = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/tracing" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/core" } +sp-arithmetic = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/arithmetic" } +sp-inherents = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/inherents" } +frame-support-procedural = { version = "2.0.0-rc4", path = "./procedural" } paste = "0.1.6" once_cell = { version = "1", default-features = false, optional = true } -sp-state-machine = { version = "0.8.0-rc3", optional = true, path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.8.0-rc4", optional = true, path = "../../primitives/state-machine" } bitmask = { version = "0.5.0", default-features = false } impl-trait-for-tuples = "0.1.3" smallvec = "1.4.0" [dev-dependencies] pretty_assertions = "0.6.1" -frame-system = { version = "2.0.0-rc3", path = "../system" } +frame-system = { version = "2.0.0-rc4", path = "../system" } [features] default = ["std"] diff --git a/frame/support/procedural/Cargo.toml b/frame/support/procedural/Cargo.toml index 4e09aec190..593b2a1635 100644 --- a/frame/support/procedural/Cargo.toml +++ b/frame/support/procedural/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support-procedural" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -frame-support-procedural-tools = { version = "2.0.0-rc3", path = "./tools" } +frame-support-procedural-tools = { version = "2.0.0-rc4", path = "./tools" } proc-macro2 = "1.0.6" quote = "1.0.3" syn = { version = "1.0.7", features = ["full"] } diff --git a/frame/support/procedural/tools/Cargo.toml b/frame/support/procedural/tools/Cargo.toml index 0f9faa899e..a00dd97a66 100644 --- a/frame/support/procedural/tools/Cargo.toml +++ b/frame/support/procedural/tools/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support-procedural-tools" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,7 +12,7 @@ description = 
"Proc macro helpers for procedural macros" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -frame-support-procedural-tools-derive = { version = "2.0.0-rc3", path = "./derive" } +frame-support-procedural-tools-derive = { version = "2.0.0-rc4", path = "./derive" } proc-macro2 = "1.0.6" quote = "1.0.3" syn = { version = "1.0.7", features = ["full", "visit"] } diff --git a/frame/support/procedural/tools/derive/Cargo.toml b/frame/support/procedural/tools/derive/Cargo.toml index 191c27796b..3da66cf692 100644 --- a/frame/support/procedural/tools/derive/Cargo.toml +++ b/frame/support/procedural/tools/derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support-procedural-tools-derive" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/frame/support/test/Cargo.toml b/frame/support/test/Cargo.toml index d6e7d7d633..682001564b 100644 --- a/frame/support/test/Cargo.toml +++ b/frame/support/test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support-test" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,13 +14,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", default-features = false, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-io ={ version = "2.0.0-rc3", path = "../../../primitives/io", default-features = false } -sp-state-machine = { version = "0.8.0-rc3", optional = true, path = "../../../primitives/state-machine" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../" } -sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/inherents" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/runtime" } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/core" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/std" } +sp-io ={ version = "2.0.0-rc4", path = "../../../primitives/io", default-features = false } +sp-state-machine = { version = "0.8.0-rc4", optional = true, path = "../../../primitives/state-machine" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../" } +sp-inherents = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/inherents" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/runtime" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/core" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/std" } trybuild = "1.0.17" pretty_assertions = "0.6.1" rustversion = "1.0.0" diff --git a/frame/system/Cargo.toml b/frame/system/Cargo.toml index af3288a907..2173ea8cee 100644 --- a/frame/system/Cargo.toml +++ b/frame/system/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-system" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,18 +14,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-rc3", default-features = false, path = 
"../../primitives/core" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc3", path = "../../primitives/io", default-features = false } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -sp-version = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/version" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc4", path = "../../primitives/io", default-features = false } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-version = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/version" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } impl-trait-for-tuples = "0.1.3" [dev-dependencies] criterion = "0.2.11" -sp-externalities = { version = "0.8.0-rc3", path = "../../primitives/externalities" } -substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../../test-utils/runtime/client" } +sp-externalities = { version = "0.8.0-rc4", path = "../../primitives/externalities" } +substrate-test-runtime-client = { version = "2.0.0-rc4", path = "../../test-utils/runtime/client" } [features] default = ["std"] diff --git a/frame/system/benchmarking/Cargo.toml b/frame/system/benchmarking/Cargo.toml index b1636c21e5..c278bad150 100644 --- a/frame/system/benchmarking/Cargo.toml +++ b/frame/system/benchmarking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-system-benchmarking" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,16 +13,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/std" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0-rc3", default-features = false, path = "../../benchmarking" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../../system" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../../support" } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../../primitives/core" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/std" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/runtime" } +frame-benchmarking = { version = "2.0.0-rc4", default-features = false, path = "../../benchmarking" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../../system" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../../support" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/core" } [dev-dependencies] serde = { version = "1.0.101" } -sp-io ={ version = "2.0.0-rc3", path = "../../../primitives/io" } +sp-io ={ version = "2.0.0-rc4", path = "../../../primitives/io" } [features] default = ["std"] diff --git a/frame/system/rpc/runtime-api/Cargo.toml 
b/frame/system/rpc/runtime-api/Cargo.toml index d919fd1b58..8d340ad7de 100644 --- a/frame/system/rpc/runtime-api/Cargo.toml +++ b/frame/system/rpc/runtime-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-system-rpc-runtime-api" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,7 +12,7 @@ description = "Runtime API definition required by System RPC extensions." targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { version = "2.0.0-rc3", default-features = false, path = "../../../../primitives/api" } +sp-api = { version = "2.0.0-rc4", default-features = false, path = "../../../../primitives/api" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } [features] diff --git a/frame/timestamp/Cargo.toml b/frame/timestamp/Cargo.toml index 7d08164bdd..2c2ad68b96 100644 --- a/frame/timestamp/Cargo.toml +++ b/frame/timestamp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-timestamp" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -16,19 +16,19 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io", optional = true } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/inherents" } -frame-benchmarking = { version = "2.0.0-rc3", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } -sp-timestamp = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/timestamp" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io", optional = true } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-inherents = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/inherents" } +frame-benchmarking = { version = "2.0.0-rc4", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } +sp-timestamp = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/timestamp" } impl-trait-for-tuples = "0.1.3" [dev-dependencies] -sp-io ={ version = "2.0.0-rc3", path = "../../primitives/io" } -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } +sp-io ={ version = "2.0.0-rc4", path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/transaction-payment/Cargo.toml b/frame/transaction-payment/Cargo.toml index f7a15d962b..c1409c2675 100644 --- a/frame/transaction-payment/Cargo.toml +++ b/frame/transaction-payment/Cargo.toml @@ -1,6 
+1,6 @@ [package] name = "pallet-transaction-payment" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,18 +14,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } -pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-rc3", default-features = false, path = "./rpc/runtime-api" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } +pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-rc4", default-features = false, path = "./rpc/runtime-api" } smallvec = "1.4.0" [dev-dependencies] -sp-io = { version = "2.0.0-rc3", path = "../../primitives/io" } -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-rc3", path = "../balances" } -sp-storage = { version = "2.0.0-rc3", path = "../../primitives/storage" } +sp-io = { version = "2.0.0-rc4", path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-rc4", path = "../balances" } +sp-storage = { version = "2.0.0-rc4", path = "../../primitives/storage" } [features] default = ["std"] diff --git a/frame/transaction-payment/rpc/Cargo.toml b/frame/transaction-payment/rpc/Cargo.toml index 22be6e700b..f26f604471 100644 --- a/frame/transaction-payment/rpc/Cargo.toml +++ b/frame/transaction-payment/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-transaction-payment-rpc" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -16,10 +16,10 @@ codec = { package = "parity-scale-codec", version = "1.3.1" } jsonrpc-core = "14.2.0" jsonrpc-core-client = "14.2.0" jsonrpc-derive = "14.2.1" -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } -sp-rpc = { version = "2.0.0-rc3", path = "../../../primitives/rpc" } +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } +sp-rpc = { version = "2.0.0-rc4", path = "../../../primitives/rpc" } serde = { version = "1.0.101", features = ["derive"] } -sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0-rc3", path = "../../../primitives/api" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../../primitives/blockchain" } -pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-rc3", path = "./runtime-api" } +sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } +sp-api = { version = "2.0.0-rc4", path = "../../../primitives/api" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../../primitives/blockchain" } +pallet-transaction-payment-rpc-runtime-api = { 
version = "2.0.0-rc4", path = "./runtime-api" } diff --git a/frame/transaction-payment/rpc/runtime-api/Cargo.toml b/frame/transaction-payment/rpc/runtime-api/Cargo.toml index e63b94cb4b..2cd9977704 100644 --- a/frame/transaction-payment/rpc/runtime-api/Cargo.toml +++ b/frame/transaction-payment/rpc/runtime-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-transaction-payment-rpc-runtime-api" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,11 +13,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-api = { version = "2.0.0-rc3", default-features = false, path = "../../../../primitives/api" } +sp-api = { version = "2.0.0-rc4", default-features = false, path = "../../../../primitives/api" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../../../primitives/std" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../../../primitives/runtime" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../../../support" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../../../primitives/std" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../../../primitives/runtime" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../../../support" } [dev-dependencies] serde_json = "1.0.41" diff --git a/frame/treasury/Cargo.toml b/frame/treasury/Cargo.toml index 338a6f1dec..28f972d458 100644 --- a/frame/treasury/Cargo.toml +++ b/frame/treasury/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-treasury" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,17 +14,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } -pallet-balances = { version = "2.0.0-rc3", default-features = false, path = "../balances" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } +pallet-balances = { version = "2.0.0-rc4", default-features = false, path = "../balances" } -frame-benchmarking = { version = "2.0.0-rc3", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0-rc4", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-io ={ version = "2.0.0-rc3", path = "../../primitives/io" } -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } +sp-io ={ version = 
"2.0.0-rc4", path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/utility/Cargo.toml b/frame/utility/Cargo.toml index f14274d709..e4dbfdfff7 100644 --- a/frame/utility/Cargo.toml +++ b/frame/utility/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-utility" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,18 +14,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } -frame-benchmarking = { version = "2.0.0-rc3", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0-rc4", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-rc3", path = "../balances" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-rc4", path = "../balances" } [features] default = ["std"] diff --git a/frame/vesting/Cargo.toml b/frame/vesting/Cargo.toml index a98a59acef..aa5f0731f2 100644 --- a/frame/vesting/Cargo.toml +++ b/frame/vesting/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-vesting" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,17 +15,17 @@ targets = ["x86_64-unknown-linux-gnu"] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../system" } -frame-benchmarking = { version = "2.0.0-rc3", default-features = false, path = "../benchmarking", optional = true } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = 
"2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } +frame-benchmarking = { version = "2.0.0-rc4", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-io = { version = "2.0.0-rc3", path = "../../primitives/io" } -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-rc3", path = "../balances" } -sp-storage = { version = "2.0.0-rc3", path = "../../primitives/storage" } +sp-io = { version = "2.0.0-rc4", path = "../../primitives/io" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-rc4", path = "../balances" } +sp-storage = { version = "2.0.0-rc4", path = "../../primitives/storage" } hex-literal = "0.2.1" [features] diff --git a/primitives/allocator/Cargo.toml b/primitives/allocator/Cargo.toml index 872695758a..ba0aed9387 100644 --- a/primitives/allocator/Cargo.toml +++ b/primitives/allocator/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-allocator" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,9 +13,9 @@ documentation = "https://docs.rs/sp-allocator" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-std = { version = "2.0.0-rc3", path = "../std", default-features = false } -sp-core = { version = "2.0.0-rc3", path = "../core", default-features = false } -sp-wasm-interface = { version = "2.0.0-rc3", path = "../wasm-interface", default-features = false } +sp-std = { version = "2.0.0-rc4", path = "../std", default-features = false } +sp-core = { version = "2.0.0-rc4", path = "../core", default-features = false } +sp-wasm-interface = { version = "2.0.0-rc4", path = "../wasm-interface", default-features = false } log = { version = "0.4.8", optional = true } derive_more = { version = "0.99.2", optional = true } diff --git a/primitives/api/Cargo.toml b/primitives/api/Cargo.toml index 46bd9164ac..8fe0a6d910 100644 --- a/primitives/api/Cargo.toml +++ b/primitives/api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-api" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,16 +13,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-api-proc-macro = { version = "2.0.0-rc3", path = "proc-macro" } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../core" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../runtime" } -sp-version = { version = "2.0.0-rc3", default-features = false, path = "../version" } -sp-state-machine = { version = "0.8.0-rc3", optional = true, path = "../../primitives/state-machine" } +sp-api-proc-macro = { version = "2.0.0-rc4", path = "proc-macro" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../core" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../std" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../runtime" } +sp-version = { version = "2.0.0-rc4", default-features = false, path = "../version" } +sp-state-machine = { version = "0.8.0-rc4", optional = true, path = 
"../../primitives/state-machine" } hash-db = { version = "0.15.2", optional = true } [dev-dependencies] -sp-test-primitives = { version = "2.0.0-rc3", path = "../test-primitives" } +sp-test-primitives = { version = "2.0.0-rc4", path = "../test-primitives" } [features] default = [ "std" ] diff --git a/primitives/api/proc-macro/Cargo.toml b/primitives/api/proc-macro/Cargo.toml index 8f5e851fa6..fb426fde88 100644 --- a/primitives/api/proc-macro/Cargo.toml +++ b/primitives/api/proc-macro/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-api-proc-macro" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/api/test/Cargo.toml b/primitives/api/test/Cargo.toml index 04181d93f0..cf8f0ce47e 100644 --- a/primitives/api/test/Cargo.toml +++ b/primitives/api/test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-api-test" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,22 +12,22 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { version = "2.0.0-rc3", path = "../" } -substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../../../test-utils/runtime/client" } -sp-version = { version = "2.0.0-rc3", path = "../../version" } -sp-runtime = { version = "2.0.0-rc3", path = "../../runtime" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../blockchain" } -sp-consensus = { version = "0.8.0-rc3", path = "../../../primitives/consensus/common" } -sc-block-builder = { version = "0.8.0-rc3", path = "../../../client/block-builder" } +sp-api = { version = "2.0.0-rc4", path = "../" } +substrate-test-runtime-client = { version = "2.0.0-rc4", path = "../../../test-utils/runtime/client" } +sp-version = { version = "2.0.0-rc4", path = "../../version" } +sp-runtime = { version = "2.0.0-rc4", path = "../../runtime" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../blockchain" } +sp-consensus = { version = "0.8.0-rc4", path = "../../../primitives/consensus/common" } +sc-block-builder = { version = "0.8.0-rc4", path = "../../../client/block-builder" } codec = { package = "parity-scale-codec", version = "1.3.1" } -sp-state-machine = { version = "0.8.0-rc3", path = "../../../primitives/state-machine" } +sp-state-machine = { version = "0.8.0-rc4", path = "../../../primitives/state-machine" } trybuild = "1.0.17" rustversion = "1.0.0" [dev-dependencies] criterion = "0.3.0" -substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../../../test-utils/runtime/client" } -sp-core = { version = "2.0.0-rc3", path = "../../core" } +substrate-test-runtime-client = { version = "2.0.0-rc4", path = "../../../test-utils/runtime/client" } +sp-core = { version = "2.0.0-rc4", path = "../../core" } [[bench]] name = "bench" diff --git a/primitives/application-crypto/Cargo.toml b/primitives/application-crypto/Cargo.toml index 29f385a54a..8e9c922509 100644 --- a/primitives/application-crypto/Cargo.toml +++ b/primitives/application-crypto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-application-crypto" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" description = "Provides facilities for generating application specific crypto wrapper types." 
@@ -14,11 +14,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../core" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../core" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } [features] default = [ "std" ] diff --git a/primitives/application-crypto/test/Cargo.toml b/primitives/application-crypto/test/Cargo.toml index 55148f7af2..d3b336d92a 100644 --- a/primitives/application-crypto/test/Cargo.toml +++ b/primitives/application-crypto/test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-application-crypto-test" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" description = "Integration tests for application-crypto" @@ -13,8 +13,8 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../core" } -substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../../../test-utils/runtime/client" } -sp-runtime = { version = "2.0.0-rc3", path = "../../runtime" } -sp-api = { version = "2.0.0-rc3", path = "../../api" } -sp-application-crypto = { version = "2.0.0-rc3", path = "../" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../core" } +substrate-test-runtime-client = { version = "2.0.0-rc4", path = "../../../test-utils/runtime/client" } +sp-runtime = { version = "2.0.0-rc4", path = "../../runtime" } +sp-api = { version = "2.0.0-rc4", path = "../../api" } +sp-application-crypto = { version = "2.0.0-rc4", path = "../" } diff --git a/primitives/arithmetic/Cargo.toml b/primitives/arithmetic/Cargo.toml index b4c655c968..c3bef60d1a 100644 --- a/primitives/arithmetic/Cargo.toml +++ b/primitives/arithmetic/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-arithmetic" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -17,9 +17,9 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } integer-sqrt = "0.1.2" num-traits = { version = "0.2.8", default-features = false } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../std" } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-debug-derive = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/debug-derive" } +sp-debug-derive = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/debug-derive" } [dev-dependencies] rand = "0.7.2" diff --git a/primitives/arithmetic/fuzzer/Cargo.toml b/primitives/arithmetic/fuzzer/Cargo.toml index b6bbe3d8a6..c7e5485a19 100644 --- a/primitives/arithmetic/fuzzer/Cargo.toml +++ b/primitives/arithmetic/fuzzer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-arithmetic-fuzzer" -version = "2.0.0-rc3" +version = 
"2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,7 +14,7 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-arithmetic = { version = "2.0.0-rc3", path = ".." } +sp-arithmetic = { version = "2.0.0-rc4", path = ".." } honggfuzz = "0.5.49" primitive-types = "0.7.0" num-bigint = "0.2" diff --git a/primitives/authority-discovery/Cargo.toml b/primitives/authority-discovery/Cargo.toml index 584aef986a..79b8a832fb 100644 --- a/primitives/authority-discovery/Cargo.toml +++ b/primitives/authority-discovery/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-authority-discovery" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] description = "Authority discovery primitives" edition = "2018" @@ -12,11 +12,11 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = "../application-crypto" } +sp-application-crypto = { version = "2.0.0-rc4", default-features = false, path = "../application-crypto" } codec = { package = "parity-scale-codec", default-features = false, version = "1.3.1" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } -sp-api = { version = "2.0.0-rc3", default-features = false, path = "../api" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../runtime" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../std" } +sp-api = { version = "2.0.0-rc4", default-features = false, path = "../api" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../runtime" } [features] default = ["std"] diff --git a/primitives/authorship/Cargo.toml b/primitives/authorship/Cargo.toml index eb52ca3e0c..1c44b9aad7 100644 --- a/primitives/authorship/Cargo.toml +++ b/primitives/authorship/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-authorship" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] description = "Authorship primitives" edition = "2018" @@ -12,9 +12,9 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../inherents" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../runtime" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } +sp-inherents = { version = "2.0.0-rc4", default-features = false, path = "../inherents" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../runtime" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../std" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } [features] diff --git a/primitives/block-builder/Cargo.toml b/primitives/block-builder/Cargo.toml index 8f8976949d..2b594640fd 100644 --- a/primitives/block-builder/Cargo.toml +++ b/primitives/block-builder/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-block-builder" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,11 +12,11 @@ description = "The block builder runtime api." 
targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../runtime" } -sp-api = { version = "2.0.0-rc3", default-features = false, path = "../api" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../runtime" } +sp-api = { version = "2.0.0-rc4", default-features = false, path = "../api" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../std" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../inherents" } +sp-inherents = { version = "2.0.0-rc4", default-features = false, path = "../inherents" } [features] default = [ "std" ] diff --git a/primitives/blockchain/Cargo.toml b/primitives/blockchain/Cargo.toml index b4c22a524a..956ae1a8fc 100644 --- a/primitives/blockchain/Cargo.toml +++ b/primitives/blockchain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-blockchain" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -18,7 +18,7 @@ lru = "0.4.0" parking_lot = "0.10.0" derive_more = "0.99.2" codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-consensus = { version = "0.8.0-rc3", path = "../consensus/common" } -sp-runtime = { version = "2.0.0-rc3", path = "../runtime" } -sp-block-builder = { version = "2.0.0-rc3", path = "../block-builder" } -sp-state-machine = { version = "0.8.0-rc3", path = "../state-machine" } +sp-consensus = { version = "0.8.0-rc4", path = "../consensus/common" } +sp-runtime = { version = "2.0.0-rc4", path = "../runtime" } +sp-block-builder = { version = "2.0.0-rc4", path = "../block-builder" } +sp-state-machine = { version = "0.8.0-rc4", path = "../state-machine" } diff --git a/primitives/chain-spec/Cargo.toml b/primitives/chain-spec/Cargo.toml index 2ad9199d86..e091a59245 100644 --- a/primitives/chain-spec/Cargo.toml +++ b/primitives/chain-spec/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-chain-spec" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/consensus/aura/Cargo.toml b/primitives/consensus/aura/Cargo.toml index 24b82f4642..10c7f5a2de 100644 --- a/primitives/consensus/aura/Cargo.toml +++ b/primitives/consensus/aura/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-aura" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] description = "Primitives for Aura consensus" edition = "2018" @@ -12,13 +12,13 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = "../../application-crypto" } +sp-application-crypto = { version = "2.0.0-rc4", default-features = false, path = "../../application-crypto" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../std" } -sp-api = { version = "2.0.0-rc3", default-features = false, path = "../../api" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../runtime" } -sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../../inherents" } -sp-timestamp = { version = 
"2.0.0-rc3", default-features = false, path = "../../timestamp" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../std" } +sp-api = { version = "2.0.0-rc4", default-features = false, path = "../../api" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../runtime" } +sp-inherents = { version = "2.0.0-rc4", default-features = false, path = "../../inherents" } +sp-timestamp = { version = "2.0.0-rc4", default-features = false, path = "../../timestamp" } [features] default = ["std"] diff --git a/primitives/consensus/babe/Cargo.toml b/primitives/consensus/babe/Cargo.toml index 978b415dc5..3649230468 100644 --- a/primitives/consensus/babe/Cargo.toml +++ b/primitives/consensus/babe/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-babe" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] description = "Primitives for BABE consensus" edition = "2018" @@ -12,17 +12,17 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = "../../application-crypto" } +sp-application-crypto = { version = "2.0.0-rc4", default-features = false, path = "../../application-crypto" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } merlin = { version = "2.0", default-features = false } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../std" } -sp-api = { version = "2.0.0-rc3", default-features = false, path = "../../api" } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../core" } -sp-consensus = { version = "0.8.0-rc3", optional = true, path = "../common" } -sp-consensus-vrf = { version = "0.8.0-rc3", path = "../vrf", default-features = false } -sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../../inherents" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../runtime" } -sp-timestamp = { version = "2.0.0-rc3", default-features = false, path = "../../timestamp" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../std" } +sp-api = { version = "2.0.0-rc4", default-features = false, path = "../../api" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../core" } +sp-consensus = { version = "0.8.0-rc4", optional = true, path = "../common" } +sp-consensus-vrf = { version = "0.8.0-rc4", path = "../vrf", default-features = false } +sp-inherents = { version = "2.0.0-rc4", default-features = false, path = "../../inherents" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../runtime" } +sp-timestamp = { version = "2.0.0-rc4", default-features = false, path = "../../timestamp" } [features] default = ["std"] diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index 26fea37045..eff425e440 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -17,23 +17,23 @@ targets = ["x86_64-unknown-linux-gnu"] derive_more = "0.99.2" libp2p = { version = "0.19.1", default-features = false } log = "0.4.8" -sp-core = { path= "../../core", version = "2.0.0-rc3"} -sp-inherents = { version = "2.0.0-rc3", path = "../../inherents" } -sp-state-machine = { version 
= "0.8.0-rc3", path = "../../../primitives/state-machine" } +sp-core = { path= "../../core", version = "2.0.0-rc4"} +sp-inherents = { version = "2.0.0-rc4", path = "../../inherents" } +sp-state-machine = { version = "0.8.0-rc4", path = "../../../primitives/state-machine" } futures = { version = "0.3.1", features = ["thread-pool"] } futures-timer = "3.0.1" -sp-std = { version = "2.0.0-rc3", path = "../../std" } -sp-version = { version = "2.0.0-rc3", path = "../../version" } -sp-runtime = { version = "2.0.0-rc3", path = "../../runtime" } -sp-utils = { version = "2.0.0-rc3", path = "../../utils" } +sp-std = { version = "2.0.0-rc4", path = "../../std" } +sp-version = { version = "2.0.0-rc4", path = "../../version" } +sp-runtime = { version = "2.0.0-rc4", path = "../../runtime" } +sp-utils = { version = "2.0.0-rc4", path = "../../utils" } codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } parking_lot = "0.10.0" serde = { version = "1.0", features = ["derive"] } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc3"} +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc4"} wasm-timer = "0.2.4" [dev-dependencies] -sp-test-primitives = { version = "2.0.0-rc3", path = "../../test-primitives" } +sp-test-primitives = { version = "2.0.0-rc4", path = "../../test-primitives" } [features] default = [] diff --git a/primitives/consensus/pow/Cargo.toml b/primitives/consensus/pow/Cargo.toml index 9f9fedb76c..5e031235dc 100644 --- a/primitives/consensus/pow/Cargo.toml +++ b/primitives/consensus/pow/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-pow" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] description = "Primitives for Aura consensus" edition = "2018" @@ -12,10 +12,10 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { version = "2.0.0-rc3", default-features = false, path = "../../api" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../std" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../runtime" } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../core" } +sp-api = { version = "2.0.0-rc4", default-features = false, path = "../../api" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../std" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../runtime" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../core" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } [features] diff --git a/primitives/consensus/vrf/Cargo.toml b/primitives/consensus/vrf/Cargo.toml index 96006fc14c..3c89c05bb1 100644 --- a/primitives/consensus/vrf/Cargo.toml +++ b/primitives/consensus/vrf/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-vrf" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] description = "Primitives for VRF based consensus" edition = "2018" @@ -14,9 +14,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { version = "1.0.0", package = "parity-scale-codec", default-features = false } schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated", "u64_backend"], default-features = false } -sp-std = { version = "2.0.0-rc3", path = "../../std", 
default-features = false } -sp-core = { version = "2.0.0-rc3", path = "../../core", default-features = false } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../runtime" } +sp-std = { version = "2.0.0-rc4", path = "../../std", default-features = false } +sp-core = { version = "2.0.0-rc4", path = "../../core", default-features = false } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../runtime" } [features] default = ["std"] diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 3c37f57e70..33b4a7bc82 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-core" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../std" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } log = { version = "0.4.8", default-features = false } serde = { version = "1.0.101", optional = true, features = ["derive"] } @@ -33,9 +33,9 @@ num-traits = { version = "0.2.8", default-features = false } zeroize = { version = "1.0.0", default-features = false } lazy_static = { version = "1.4.0", default-features = false, optional = true } parking_lot = { version = "0.10.0", optional = true } -sp-debug-derive = { version = "2.0.0-rc3", path = "../debug-derive" } -sp-externalities = { version = "0.8.0-rc3", optional = true, path = "../externalities" } -sp-storage = { version = "2.0.0-rc3", default-features = false, path = "../storage" } +sp-debug-derive = { version = "2.0.0-rc4", path = "../debug-derive" } +sp-externalities = { version = "0.8.0-rc4", optional = true, path = "../externalities" } +sp-storage = { version = "2.0.0-rc4", default-features = false, path = "../storage" } parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } futures = { version = "0.3.1", optional = true } @@ -50,10 +50,10 @@ twox-hash = { version = "1.5.0", default-features = false, optional = true } libsecp256k1 = { version = "0.3.2", default-features = false, features = ["hmac"], optional = true } merlin = { version = "2.0", default-features = false, optional = true } -sp-runtime-interface = { version = "2.0.0-rc3", default-features = false, path = "../runtime-interface" } +sp-runtime-interface = { version = "2.0.0-rc4", default-features = false, path = "../runtime-interface" } [dev-dependencies] -sp-serializer = { version = "2.0.0-rc3", path = "../serializer" } +sp-serializer = { version = "2.0.0-rc4", path = "../serializer" } pretty_assertions = "0.6.1" hex-literal = "0.2.1" rand = "0.7.2" diff --git a/primitives/database/Cargo.toml b/primitives/database/Cargo.toml index 0b85975fed..41ced29a57 100644 --- a/primitives/database/Cargo.toml +++ b/primitives/database/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-database" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/debug-derive/Cargo.toml b/primitives/debug-derive/Cargo.toml index bf58ddfd8f..fd63abcfa7 100644 --- a/primitives/debug-derive/Cargo.toml +++ b/primitives/debug-derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-debug-derive" -version = 
"2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/externalities/Cargo.toml b/primitives/externalities/Cargo.toml index 3af61bbeb0..65c59e41e4 100644 --- a/primitives/externalities/Cargo.toml +++ b/primitives/externalities/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-externalities" -version = "0.8.0-rc3" +version = "0.8.0-rc4" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -13,7 +13,7 @@ documentation = "https://docs.rs/sp-externalities" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-storage = { version = "2.0.0-rc3", path = "../storage" } -sp-std = { version = "2.0.0-rc3", path = "../std" } +sp-storage = { version = "2.0.0-rc4", path = "../storage" } +sp-std = { version = "2.0.0-rc4", path = "../std" } environmental = { version = "1.1.1" } codec = { package = "parity-scale-codec", version = "1.3.1" } diff --git a/primitives/finality-grandpa/Cargo.toml b/primitives/finality-grandpa/Cargo.toml index 27315b0ff9..7e77e1253c 100644 --- a/primitives/finality-grandpa/Cargo.toml +++ b/primitives/finality-grandpa/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-finality-grandpa" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,15 +14,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = "../application-crypto" } +sp-application-crypto = { version = "2.0.0-rc4", default-features = false, path = "../application-crypto" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } grandpa = { package = "finality-grandpa", version = "0.12.3", default-features = false, features = ["derive-codec"] } log = { version = "0.4.8", optional = true } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-api = { version = "2.0.0-rc3", default-features = false, path = "../api" } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../core" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../runtime" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } +sp-api = { version = "2.0.0-rc4", default-features = false, path = "../api" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../core" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../runtime" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../std" } [features] default = ["std"] diff --git a/primitives/finality-tracker/Cargo.toml b/primitives/finality-tracker/Cargo.toml index 60ed88c110..5cbd497bec 100644 --- a/primitives/finality-tracker/Cargo.toml +++ b/primitives/finality-tracker/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-finality-tracker" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,8 +13,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/inherents" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } +sp-inherents = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/inherents" } +sp-std = { version = "2.0.0-rc4", 
default-features = false, path = "../../primitives/std" } [features] default = ["std"] diff --git a/primitives/inherents/Cargo.toml b/primitives/inherents/Cargo.toml index 503aa29d29..3532e08da1 100644 --- a/primitives/inherents/Cargo.toml +++ b/primitives/inherents/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-inherents" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,8 +15,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] parking_lot = { version = "0.10.0", optional = true } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../core" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../std" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../core" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } derive_more = { version = "0.99.2", optional = true } diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml index 8bb113b1f1..06df2cc5ed 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-io" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -16,15 +16,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } hash-db = { version = "0.15.2", default-features = false } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../core" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../core" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../std" } libsecp256k1 = { version = "0.3.4", optional = true } -sp-state-machine = { version = "0.8.0-rc3", optional = true, path = "../../primitives/state-machine" } -sp-wasm-interface = { version = "2.0.0-rc3", path = "../../primitives/wasm-interface", default-features = false } -sp-runtime-interface = { version = "2.0.0-rc3", default-features = false, path = "../runtime-interface" } -sp-trie = { version = "2.0.0-rc3", optional = true, path = "../../primitives/trie" } -sp-externalities = { version = "0.8.0-rc3", optional = true, path = "../externalities" } -sp-tracing = { version = "2.0.0-rc3", default-features = false, path = "../tracing" } +sp-state-machine = { version = "0.8.0-rc4", optional = true, path = "../../primitives/state-machine" } +sp-wasm-interface = { version = "2.0.0-rc4", path = "../../primitives/wasm-interface", default-features = false } +sp-runtime-interface = { version = "2.0.0-rc4", default-features = false, path = "../runtime-interface" } +sp-trie = { version = "2.0.0-rc4", optional = true, path = "../../primitives/trie" } +sp-externalities = { version = "0.8.0-rc4", optional = true, path = "../externalities" } +sp-tracing = { version = "2.0.0-rc4", default-features = false, path = "../tracing" } log = { version = "0.4.8", optional = true } futures = { version = "0.3.1", features = ["thread-pool"], optional = true } parking_lot = { version = "0.10.0", optional = true } diff --git a/primitives/keyring/Cargo.toml b/primitives/keyring/Cargo.toml index f94d3b14d9..abd7f3d3d5 100644 --- a/primitives/keyring/Cargo.toml +++ b/primitives/keyring/Cargo.toml @@ -1,6 +1,6 @@ [package] name = 
"sp-keyring" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { version = "2.0.0-rc3", path = "../core" } -sp-runtime = { version = "2.0.0-rc3", path = "../runtime" } +sp-core = { version = "2.0.0-rc4", path = "../core" } +sp-runtime = { version = "2.0.0-rc4", path = "../runtime" } lazy_static = "1.4.0" strum = { version = "0.16.0", features = ["derive"] } diff --git a/primitives/npos-elections/Cargo.toml b/primitives/npos-elections/Cargo.toml index 7982c8ce4d..0a55a3e895 100644 --- a/primitives/npos-elections/Cargo.toml +++ b/primitives/npos-elections/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-npos-elections" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,14 +14,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } -sp-npos-elections-compact = { version = "2.0.0-rc3", path = "./compact" } -sp-arithmetic = { version = "2.0.0-rc3", default-features = false, path = "../arithmetic" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../std" } +sp-npos-elections-compact = { version = "2.0.0-rc4", path = "./compact" } +sp-arithmetic = { version = "2.0.0-rc4", default-features = false, path = "../arithmetic" } [dev-dependencies] -substrate-test-utils = { version = "2.0.0-rc3", path = "../../test-utils" } +substrate-test-utils = { version = "2.0.0-rc4", path = "../../test-utils" } rand = "0.7.3" -sp-runtime = { version = "2.0.0-rc3", path = "../../primitives/runtime" } +sp-runtime = { version = "2.0.0-rc4", path = "../../primitives/runtime" } [features] default = ["std"] diff --git a/primitives/npos-elections/compact/Cargo.toml b/primitives/npos-elections/compact/Cargo.toml index 9b4333e385..61d1990a3a 100644 --- a/primitives/npos-elections/compact/Cargo.toml +++ b/primitives/npos-elections/compact/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-npos-elections-compact" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/npos-elections/fuzzer/Cargo.toml b/primitives/npos-elections/fuzzer/Cargo.toml index 02be731592..b7c7dcab65 100644 --- a/primitives/npos-elections/fuzzer/Cargo.toml +++ b/primitives/npos-elections/fuzzer/Cargo.toml @@ -14,9 +14,9 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-npos-elections = { version = "2.0.0-rc3", path = ".." } -sp-std = { version = "2.0.0-rc3", path = "../../std" } -sp-runtime = { version = "2.0.0-rc3", path = "../../runtime" } +sp-npos-elections = { version = "2.0.0-rc4", path = ".." 
} +sp-std = { version = "2.0.0-rc4", path = "../../std" } +sp-runtime = { version = "2.0.0-rc4", path = "../../runtime" } honggfuzz = "0.5" rand = { version = "0.7.3", features = ["std", "small_rng"] } diff --git a/primitives/offchain/Cargo.toml b/primitives/offchain/Cargo.toml index e44a8e8551..44eb1bc0e1 100644 --- a/primitives/offchain/Cargo.toml +++ b/primitives/offchain/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Substrate offchain workers primitives" name = "sp-offchain" -version = "2.0.0-rc3" +version = "2.0.0-rc4" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -12,12 +12,12 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../core" } -sp-api = { version = "2.0.0-rc3", default-features = false, path = "../api" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../runtime" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../core" } +sp-api = { version = "2.0.0-rc4", default-features = false, path = "../api" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../runtime" } [dev-dependencies] -sp-state-machine = { version = "0.8.0-rc3", default-features = false, path = "../state-machine" } +sp-state-machine = { version = "0.8.0-rc4", default-features = false, path = "../state-machine" } [features] default = ["std"] diff --git a/primitives/panic-handler/Cargo.toml b/primitives/panic-handler/Cargo.toml index acdf7b7462..f350d317a0 100644 --- a/primitives/panic-handler/Cargo.toml +++ b/primitives/panic-handler/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-panic-handler" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/rpc/Cargo.toml b/primitives/rpc/Cargo.toml index 332649d266..86809803b4 100644 --- a/primitives/rpc/Cargo.toml +++ b/primitives/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-rpc" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", features = ["derive"] } -sp-core = { version = "2.0.0-rc3", path = "../core" } +sp-core = { version = "2.0.0-rc4", path = "../core" } [dev-dependencies] serde_json = "1.0.41" diff --git a/primitives/runtime-interface/Cargo.toml b/primitives/runtime-interface/Cargo.toml index 12d070b47c..dc37c18629 100644 --- a/primitives/runtime-interface/Cargo.toml +++ b/primitives/runtime-interface/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime-interface" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,20 +13,20 @@ documentation = "https://docs.rs/sp-runtime-interface/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-wasm-interface = { version = "2.0.0-rc3", path = "../wasm-interface", default-features = false } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } -sp-tracing = { version = "2.0.0-rc3", default-features = false, path = "../tracing" } -sp-runtime-interface-proc-macro = { version = "2.0.0-rc3", path = "proc-macro" } -sp-externalities = { version = "0.8.0-rc3", optional = true, path = "../externalities" } +sp-wasm-interface = { version = "2.0.0-rc4", path = "../wasm-interface", default-features = false } +sp-std = { 
version = "2.0.0-rc4", default-features = false, path = "../std" } +sp-tracing = { version = "2.0.0-rc4", default-features = false, path = "../tracing" } +sp-runtime-interface-proc-macro = { version = "2.0.0-rc4", path = "proc-macro" } +sp-externalities = { version = "0.8.0-rc4", optional = true, path = "../externalities" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } static_assertions = "1.0.0" primitive-types = { version = "0.7.0", default-features = false } [dev-dependencies] -sp-runtime-interface-test-wasm = { version = "2.0.0-rc3", path = "test-wasm" } -sp-state-machine = { version = "0.8.0-rc3", path = "../../primitives/state-machine" } -sp-core = { version = "2.0.0-rc3", path = "../core" } -sp-io = { version = "2.0.0-rc3", path = "../io" } +sp-runtime-interface-test-wasm = { version = "2.0.0-rc4", path = "test-wasm" } +sp-state-machine = { version = "0.8.0-rc4", path = "../../primitives/state-machine" } +sp-core = { version = "2.0.0-rc4", path = "../core" } +sp-io = { version = "2.0.0-rc4", path = "../io" } rustversion = "1.0.0" trybuild = "1.0.23" diff --git a/primitives/runtime-interface/proc-macro/Cargo.toml b/primitives/runtime-interface/proc-macro/Cargo.toml index 67809c1ba2..dfb3840a08 100644 --- a/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/primitives/runtime-interface/proc-macro/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime-interface-proc-macro" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml b/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml index 8668282943..9ad22599ad 100644 --- a/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml +++ b/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime-interface-test-wasm-deprecated" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -13,10 +13,10 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-runtime-interface = { version = "2.0.0-rc3", default-features = false, path = "../" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../std" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../io" } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../core" } +sp-runtime-interface = { version = "2.0.0-rc4", default-features = false, path = "../" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../io" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../core" } [build-dependencies] wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } diff --git a/primitives/runtime-interface/test-wasm/Cargo.toml b/primitives/runtime-interface/test-wasm/Cargo.toml index 304cc1e82e..7973f152bc 100644 --- a/primitives/runtime-interface/test-wasm/Cargo.toml +++ b/primitives/runtime-interface/test-wasm/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime-interface-test-wasm" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -13,10 +13,10 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-runtime-interface = { 
version = "2.0.0-rc3", default-features = false, path = "../" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../std" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../io" } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../core" } +sp-runtime-interface = { version = "2.0.0-rc4", default-features = false, path = "../" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../io" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../core" } [build-dependencies] wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } diff --git a/primitives/runtime-interface/test/Cargo.toml b/primitives/runtime-interface/test/Cargo.toml index a68a9b3c92..bdbe7ff902 100644 --- a/primitives/runtime-interface/test/Cargo.toml +++ b/primitives/runtime-interface/test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime-interface-test" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,12 +12,12 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-runtime-interface = { version = "2.0.0-rc3", path = "../" } -sc-executor = { version = "0.8.0-rc3", path = "../../../client/executor" } -sp-runtime-interface-test-wasm = { version = "2.0.0-rc3", path = "../test-wasm" } -sp-runtime-interface-test-wasm-deprecated = { version = "2.0.0-rc3", path = "../test-wasm-deprecated" } -sp-state-machine = { version = "0.8.0-rc3", path = "../../../primitives/state-machine" } -sp-runtime = { version = "2.0.0-rc3", path = "../../runtime" } -sp-core = { version = "2.0.0-rc3", path = "../../core" } -sp-io = { version = "2.0.0-rc3", path = "../../io" } +sp-runtime-interface = { version = "2.0.0-rc4", path = "../" } +sc-executor = { version = "0.8.0-rc4", path = "../../../client/executor" } +sp-runtime-interface-test-wasm = { version = "2.0.0-rc4", path = "../test-wasm" } +sp-runtime-interface-test-wasm-deprecated = { version = "2.0.0-rc4", path = "../test-wasm-deprecated" } +sp-state-machine = { version = "0.8.0-rc4", path = "../../../primitives/state-machine" } +sp-runtime = { version = "2.0.0-rc4", path = "../../runtime" } +sp-core = { version = "2.0.0-rc4", path = "../../core" } +sp-io = { version = "2.0.0-rc4", path = "../../io" } tracing = "0.1.13" diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index d3508c0e8b..9bc972646f 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -16,16 +16,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../core" } -sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = "../application-crypto" } -sp-arithmetic = { version = "2.0.0-rc3", default-features = false, path = "../arithmetic" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } -sp-io = { version = 
"2.0.0-rc3", default-features = false, path = "../io" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../core" } +sp-application-crypto = { version = "2.0.0-rc4", default-features = false, path = "../application-crypto" } +sp-arithmetic = { version = "2.0.0-rc4", default-features = false, path = "../arithmetic" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../io" } log = { version = "0.4.8", optional = true } paste = "0.1.6" rand = { version = "0.7.2", optional = true } impl-trait-for-tuples = "0.1.3" -sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../inherents" } +sp-inherents = { version = "2.0.0-rc4", default-features = false, path = "../inherents" } parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } hash256-std-hasher = { version = "0.15.2", default-features = false } either = { version = "1.5", default-features = false } @@ -33,7 +33,7 @@ either = { version = "1.5", default-features = false } [dev-dependencies] serde_json = "1.0.41" rand = "0.7.2" -sp-state-machine = { version = "0.8.0-rc3", path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.8.0-rc4", path = "../../primitives/state-machine" } [features] bench = [] diff --git a/primitives/sandbox/Cargo.toml b/primitives/sandbox/Cargo.toml index dfd3a44053..9361f59d1d 100755 --- a/primitives/sandbox/Cargo.toml +++ b/primitives/sandbox/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-sandbox" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,10 +13,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] wasmi = { version = "0.6.2", optional = true } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../core" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../io" } -sp-wasm-interface = { version = "2.0.0-rc3", default-features = false, path = "../wasm-interface" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../core" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../std" } +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../io" } +sp-wasm-interface = { version = "2.0.0-rc4", default-features = false, path = "../wasm-interface" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } [dev-dependencies] diff --git a/primitives/serializer/Cargo.toml b/primitives/serializer/Cargo.toml index d46de697fa..66f721602a 100644 --- a/primitives/serializer/Cargo.toml +++ b/primitives/serializer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-serializer" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/session/Cargo.toml b/primitives/session/Cargo.toml index 4abcb80d24..3fdfbe8984 100644 --- a/primitives/session/Cargo.toml +++ b/primitives/session/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-session" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,11 +13,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-api = { version = "2.0.0-rc3", 
default-features = false, path = "../api" } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../core" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } -sp-staking = { version = "2.0.0-rc3", default-features = false, path = "../staking" } -sp-runtime = { version = "2.0.0-rc3", optional = true, path = "../runtime" } +sp-api = { version = "2.0.0-rc4", default-features = false, path = "../api" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../core" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../std" } +sp-staking = { version = "2.0.0-rc4", default-features = false, path = "../staking" } +sp-runtime = { version = "2.0.0-rc4", optional = true, path = "../runtime" } [features] default = [ "std" ] diff --git a/primitives/staking/Cargo.toml b/primitives/staking/Cargo.toml index 7ec400d74a..ac14dde901 100644 --- a/primitives/staking/Cargo.toml +++ b/primitives/staking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-staking" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,8 +13,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../runtime" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../runtime" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../std" } [features] default = ["std"] diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index 29c8676f7e..2545f52760 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-state-machine" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] description = "Substrate State Machine" edition = "2018" @@ -18,19 +18,19 @@ parking_lot = "0.10.0" hash-db = "0.15.2" trie-db = "0.21.0" trie-root = "0.16.0" -sp-trie = { version = "2.0.0-rc3", path = "../trie" } -sp-core = { version = "2.0.0-rc3", path = "../core" } -sp-panic-handler = { version = "2.0.0-rc3", path = "../panic-handler" } +sp-trie = { version = "2.0.0-rc4", path = "../trie" } +sp-core = { version = "2.0.0-rc4", path = "../core" } +sp-panic-handler = { version = "2.0.0-rc4", path = "../panic-handler" } codec = { package = "parity-scale-codec", version = "1.3.1" } num-traits = "0.2.8" rand = "0.7.2" -sp-externalities = { version = "0.8.0-rc3", path = "../externalities" } +sp-externalities = { version = "0.8.0-rc4", path = "../externalities" } itertools = "0.9" smallvec = "1.4" [dev-dependencies] hex-literal = "0.2.1" -sp-runtime = { version = "2.0.0-rc3", path = "../runtime" } +sp-runtime = { version = "2.0.0-rc4", path = "../runtime" } pretty_assertions = "0.6.1" [features] diff --git a/primitives/std/Cargo.toml b/primitives/std/Cargo.toml index d3a242db4f..b184f7b8d9 100644 --- a/primitives/std/Cargo.toml +++ b/primitives/std/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-std" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index 9d61cb8a4a..63b53bd926 100644 --- a/primitives/storage/Cargo.toml +++ 
b/primitives/storage/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-storage" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" description = "Storage related primitives" @@ -13,11 +13,11 @@ documentation = "https://docs.rs/sp-storage/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../std" } serde = { version = "1.0.101", optional = true, features = ["derive"] } impl-serde = { version = "0.2.3", optional = true } ref-cast = "1.0.0" -sp-debug-derive = { version = "2.0.0-rc3", path = "../debug-derive" } +sp-debug-derive = { version = "2.0.0-rc4", path = "../debug-derive" } [features] default = [ "std" ] diff --git a/primitives/test-primitives/Cargo.toml b/primitives/test-primitives/Cargo.toml index abc47f6f9a..8e14aeeb83 100644 --- a/primitives/test-primitives/Cargo.toml +++ b/primitives/test-primitives/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-test-primitives" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,11 +12,11 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = "../application-crypto" } +sp-application-crypto = { version = "2.0.0-rc4", default-features = false, path = "../application-crypto" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../core" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../core" } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../runtime" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../runtime" } parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } [features] diff --git a/primitives/timestamp/Cargo.toml b/primitives/timestamp/Cargo.toml index 5b2217f0f3..117d79bdbe 100644 --- a/primitives/timestamp/Cargo.toml +++ b/primitives/timestamp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-timestamp" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,11 +12,11 @@ description = "Substrate core types and inherents for timestamps." 
targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { version = "2.0.0-rc3", default-features = false, path = "../api" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../runtime" } +sp-api = { version = "2.0.0-rc4", default-features = false, path = "../api" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../std" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../runtime" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-inherents = { version = "2.0.0-rc3", default-features = false, path = "../inherents" } +sp-inherents = { version = "2.0.0-rc4", default-features = false, path = "../inherents" } impl-trait-for-tuples = "0.1.3" wasm-timer = { version = "0.2", optional = true } diff --git a/primitives/tracing/Cargo.toml b/primitives/tracing/Cargo.toml index e47d9859c9..30808a6c0e 100644 --- a/primitives/tracing/Cargo.toml +++ b/primitives/tracing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-tracing" -version = "2.0.0-rc3" +version = "2.0.0-rc4" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2018" diff --git a/primitives/transaction-pool/Cargo.toml b/primitives/transaction-pool/Cargo.toml index 6417ae8c29..a217bdef4a 100644 --- a/primitives/transaction-pool/Cargo.toml +++ b/primitives/transaction-pool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-transaction-pool" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -19,10 +19,10 @@ derive_more = { version = "0.99.2", optional = true } futures = { version = "0.3.1", optional = true } log = { version = "0.4.8", optional = true } serde = { version = "1.0.101", features = ["derive"], optional = true} -sp-api = { version = "2.0.0-rc3", default-features = false, path = "../api" } -sp-blockchain = { version = "2.0.0-rc3", optional = true, path = "../blockchain" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../runtime" } -sp-utils = { version = "2.0.0-rc3", default-features = false, path = "../utils" } +sp-api = { version = "2.0.0-rc4", default-features = false, path = "../api" } +sp-blockchain = { version = "2.0.0-rc4", optional = true, path = "../blockchain" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../runtime" } +sp-utils = { version = "2.0.0-rc4", default-features = false, path = "../utils" } [features] default = [ "std" ] diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index d99a3d1ae7..1ebc974bfb 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-trie" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] description = "Patricia trie stuff using a parity-scale-codec node format" repository = "https://github.com/paritytech/substrate/" @@ -18,19 +18,19 @@ harness = false [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../std" } hash-db = { version = "0.15.2", default-features = false } trie-db = { version = "0.21.0", default-features = false } trie-root = { version = "0.16.0", default-features = false } memory-db = { version = "0.21.0", default-features = 
false } -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../core" } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../core" } [dev-dependencies] trie-bench = "0.22.0" trie-standardmap = "0.15.2" criterion = "0.2.11" hex-literal = "0.2.1" -sp-runtime = { version = "2.0.0-rc3", path = "../runtime" } +sp-runtime = { version = "2.0.0-rc4", path = "../runtime" } [features] default = ["std"] diff --git a/primitives/utils/Cargo.toml b/primitives/utils/Cargo.toml index 9ae7beb1ff..96c7825515 100644 --- a/primitives/utils/Cargo.toml +++ b/primitives/utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-utils" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/version/Cargo.toml b/primitives/version/Cargo.toml index 18357953d7..181b793bd5 100644 --- a/primitives/version/Cargo.toml +++ b/primitives/version/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-version" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -17,8 +17,8 @@ targets = ["x86_64-unknown-linux-gnu"] impl-serde = { version = "0.2.3", optional = true } serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../std" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../runtime" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../std" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../runtime" } [features] default = ["std"] diff --git a/primitives/wasm-interface/Cargo.toml b/primitives/wasm-interface/Cargo.toml index c2e70ce1e4..8b32cde969 100644 --- a/primitives/wasm-interface/Cargo.toml +++ b/primitives/wasm-interface/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-wasm-interface" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] wasmi = { version = "0.6.2", optional = true } impl-trait-for-tuples = "0.1.2" -sp-std = { version = "2.0.0-rc3", path = "../std", default-features = false } +sp-std = { version = "2.0.0-rc4", path = "../std", default-features = false } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } [features] diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index 3d6914540a..f67f1560c1 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-test-utils" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index f5604ceb23..a9d8590f02 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-test-client" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,18 +12,18 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-client-api = { version = "2.0.0-rc3", path = "../../client/api" } -sc-light = { version = "2.0.0-rc3", path = "../../client/light" } -sc-client-db = { version = "0.8.0-rc3", features 
= ["test-helpers"], path = "../../client/db" } -sp-consensus = { version = "0.8.0-rc3", path = "../../primitives/consensus/common" } -sc-executor = { version = "0.8.0-rc3", path = "../../client/executor" } -sc-consensus = { version = "0.8.0-rc3", path = "../../client/consensus/common" } -sc-service = { version = "0.8.0-rc3", default-features = false, features = ["test-helpers"], path = "../../client/service" } +sc-client-api = { version = "2.0.0-rc4", path = "../../client/api" } +sc-light = { version = "2.0.0-rc4", path = "../../client/light" } +sc-client-db = { version = "0.8.0-rc4", features = ["test-helpers"], path = "../../client/db" } +sp-consensus = { version = "0.8.0-rc4", path = "../../primitives/consensus/common" } +sc-executor = { version = "0.8.0-rc4", path = "../../client/executor" } +sc-consensus = { version = "0.8.0-rc4", path = "../../client/consensus/common" } +sc-service = { version = "0.8.0-rc4", default-features = false, features = ["test-helpers"], path = "../../client/service" } futures = "0.3.4" hash-db = "0.15.2" -sp-keyring = { version = "2.0.0-rc3", path = "../../primitives/keyring" } +sp-keyring = { version = "2.0.0-rc4", path = "../../primitives/keyring" } codec = { package = "parity-scale-codec", version = "1.3.1" } -sp-core = { version = "2.0.0-rc3", path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-rc3", path = "../../primitives/runtime" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../primitives/blockchain" } -sp-state-machine = { version = "0.8.0-rc3", path = "../../primitives/state-machine" } +sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-rc4", path = "../../primitives/runtime" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../primitives/blockchain" } +sp-state-machine = { version = "0.8.0-rc4", path = "../../primitives/state-machine" } diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index e307522ead..71987da150 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-test-runtime" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -13,35 +13,35 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/application-crypto" } -sp-consensus-aura = { version = "0.8.0-rc3", default-features = false, path = "../../primitives/consensus/aura" } -sp-consensus-babe = { version = "0.8.0-rc3", default-features = false, path = "../../primitives/consensus/babe" } -sp-block-builder = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/block-builder" } +sp-application-crypto = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/application-crypto" } +sp-consensus-aura = { version = "0.8.0-rc4", default-features = false, path = "../../primitives/consensus/aura" } +sp-consensus-babe = { version = "0.8.0-rc4", default-features = false, path = "../../primitives/consensus/babe" } +sp-block-builder = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/block-builder" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -frame-executive = { version = "2.0.0-rc3", default-features = false, path = "../../frame/executive" } -sp-inherents = { version = "2.0.0-rc3", default-features = false, path 
= "../../primitives/inherents" } -sp-keyring = { version = "2.0.0-rc3", optional = true, path = "../../primitives/keyring" } +frame-executive = { version = "2.0.0-rc4", default-features = false, path = "../../frame/executive" } +sp-inherents = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/inherents" } +sp-keyring = { version = "2.0.0-rc4", optional = true, path = "../../primitives/keyring" } memory-db = { version = "0.21.0", default-features = false } -sp-offchain = { path = "../../primitives/offchain", default-features = false, version = "2.0.0-rc3"} -sp-core = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/std" } -sp-runtime-interface = { path = "../../primitives/runtime-interface", default-features = false, version = "2.0.0-rc3"} -sp-io = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/io" } -frame-support = { version = "2.0.0-rc3", default-features = false, path = "../../frame/support" } -sp-version = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/version" } -sp-session = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/session" } -sp-api = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/api" } -sp-runtime = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/runtime" } -pallet-babe = { version = "2.0.0-rc3", default-features = false, path = "../../frame/babe" } -frame-system = { version = "2.0.0-rc3", default-features = false, path = "../../frame/system" } -frame-system-rpc-runtime-api = { version = "2.0.0-rc3", default-features = false, path = "../../frame/system/rpc/runtime-api" } -pallet-timestamp = { version = "2.0.0-rc3", default-features = false, path = "../../frame/timestamp" } -sp-finality-grandpa = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/finality-grandpa" } -sp-trie = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/trie" } -sp-transaction-pool = { version = "2.0.0-rc3", default-features = false, path = "../../primitives/transaction-pool" } +sp-offchain = { path = "../../primitives/offchain", default-features = false, version = "2.0.0-rc4"} +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-runtime-interface = { path = "../../primitives/runtime-interface", default-features = false, version = "2.0.0-rc4"} +sp-io = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/io" } +frame-support = { version = "2.0.0-rc4", default-features = false, path = "../../frame/support" } +sp-version = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/version" } +sp-session = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/session" } +sp-api = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/api" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +pallet-babe = { version = "2.0.0-rc4", default-features = false, path = "../../frame/babe" } +frame-system = { version = "2.0.0-rc4", default-features = false, path = "../../frame/system" } +frame-system-rpc-runtime-api = { version = "2.0.0-rc4", default-features = false, path = "../../frame/system/rpc/runtime-api" } 
+pallet-timestamp = { version = "2.0.0-rc4", default-features = false, path = "../../frame/timestamp" } +sp-finality-grandpa = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/finality-grandpa" } +sp-trie = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/trie" } +sp-transaction-pool = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/transaction-pool" } trie-db = { version = "0.21.0", default-features = false } parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } -sc-service = { version = "0.8.0-rc3", default-features = false, optional = true, features = ["test-helpers"], path = "../../client/service" } +sc-service = { version = "0.8.0-rc4", default-features = false, optional = true, features = ["test-helpers"], path = "../../client/service" } # 3rd party cfg-if = "0.1.10" @@ -49,10 +49,10 @@ log = { version = "0.4.8", optional = true } serde = { version = "1.0.101", optional = true, features = ["derive"] } [dev-dependencies] -sc-block-builder = { version = "0.8.0-rc3", path = "../../client/block-builder" } -sc-executor = { version = "0.8.0-rc3", path = "../../client/executor" } -substrate-test-runtime-client = { version = "2.0.0-rc3", path = "./client" } -sp-state-machine = { version = "0.8.0-rc3", path = "../../primitives/state-machine" } +sc-block-builder = { version = "0.8.0-rc4", path = "../../client/block-builder" } +sc-executor = { version = "0.8.0-rc4", path = "../../client/executor" } +substrate-test-runtime-client = { version = "2.0.0-rc4", path = "./client" } +sp-state-machine = { version = "0.8.0-rc4", path = "../../primitives/state-machine" } [build-dependencies] wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../utils/wasm-builder-runner" } diff --git a/test-utils/runtime/client/Cargo.toml b/test-utils/runtime/client/Cargo.toml index 7a69f5ed22..09f2c3f152 100644 --- a/test-utils/runtime/client/Cargo.toml +++ b/test-utils/runtime/client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-test-runtime-client" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,17 +12,17 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-light = { version = "2.0.0-rc3", path = "../../../client/light" } -sp-consensus = { version = "0.8.0-rc3", path = "../../../primitives/consensus/common" } -sc-block-builder = { version = "0.8.0-rc3", path = "../../../client/block-builder" } -substrate-test-client = { version = "2.0.0-rc3", path = "../../client" } -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } -substrate-test-runtime = { version = "2.0.0-rc3", path = "../../runtime" } -sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0-rc3", path = "../../../primitives/api" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../../primitives/blockchain" } +sc-light = { version = "2.0.0-rc4", path = "../../../client/light" } +sp-consensus = { version = "0.8.0-rc4", path = "../../../primitives/consensus/common" } +sc-block-builder = { version = "0.8.0-rc4", path = "../../../client/block-builder" } +substrate-test-client = { version = "2.0.0-rc4", path = "../../client" } +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } +substrate-test-runtime = { version = "2.0.0-rc4", path = "../../runtime" } +sp-runtime = { version = "2.0.0-rc4", path 
= "../../../primitives/runtime" } +sp-api = { version = "2.0.0-rc4", path = "../../../primitives/api" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../../primitives/blockchain" } codec = { package = "parity-scale-codec", version = "1.3.1" } -sc-client-api = { version = "2.0.0-rc3", path = "../../../client/api" } -sc-consensus = { version = "0.8.0-rc3", path = "../../../client/consensus/common" } -sc-service = { version = "0.8.0-rc3", default-features = false, path = "../../../client/service" } +sc-client-api = { version = "2.0.0-rc4", path = "../../../client/api" } +sc-consensus = { version = "0.8.0-rc4", path = "../../../client/consensus/common" } +sc-service = { version = "0.8.0-rc4", default-features = false, path = "../../../client/service" } futures = "0.3.4" diff --git a/test-utils/runtime/transaction-pool/Cargo.toml b/test-utils/runtime/transaction-pool/Cargo.toml index e5c93ef8ad..f29ae2b7bf 100644 --- a/test-utils/runtime/transaction-pool/Cargo.toml +++ b/test-utils/runtime/transaction-pool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-test-runtime-transaction-pool" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,12 +12,12 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../client" } +substrate-test-runtime-client = { version = "2.0.0-rc4", path = "../client" } parking_lot = "0.10.0" codec = { package = "parity-scale-codec", version = "1.3.1" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } -sp-transaction-pool = { version = "2.0.0-rc3", path = "../../../primitives/transaction-pool" } -sc-transaction-graph = { version = "2.0.0-rc3", path = "../../../client/transaction-pool/graph" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } +sp-transaction-pool = { version = "2.0.0-rc4", path = "../../../primitives/transaction-pool" } +sc-transaction-graph = { version = "2.0.0-rc4", path = "../../../client/transaction-pool/graph" } futures = { version = "0.3.1", features = ["compat"] } derive_more = "0.99.2" diff --git a/utils/browser/Cargo.toml b/utils/browser/Cargo.toml index ee4634f0d1..ed02e8e2fa 100644 --- a/utils/browser/Cargo.toml +++ b/utils/browser/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-browser-utils" -version = "0.8.0-rc3" +version = "0.8.0-rc4" authors = ["Parity Technologies "] description = "Utilities for creating a browser light-client." 
edition = "2018" @@ -22,11 +22,11 @@ js-sys = "0.3.34" wasm-bindgen = "0.2.57" wasm-bindgen-futures = "0.4.7" kvdb-web = "0.6" -sp-database = { version = "2.0.0-rc3", path = "../../primitives/database" } -sc-informant = { version = "0.8.0-rc3", path = "../../client/informant" } -sc-service = { version = "0.8.0-rc3", path = "../../client/service", default-features = false } -sc-network = { path = "../../client/network", version = "0.8.0-rc3"} -sc-chain-spec = { path = "../../client/chain-spec", version = "2.0.0-rc3"} +sp-database = { version = "2.0.0-rc4", path = "../../primitives/database" } +sc-informant = { version = "0.8.0-rc4", path = "../../client/informant" } +sc-service = { version = "0.8.0-rc4", path = "../../client/service", default-features = false } +sc-network = { path = "../../client/network", version = "0.8.0-rc4"} +sc-chain-spec = { path = "../../client/chain-spec", version = "2.0.0-rc4"} # Imported just for the `no_cc` feature clear_on_drop = { version = "0.2.3", features = ["no_cc"] } diff --git a/utils/build-script-utils/Cargo.toml b/utils/build-script-utils/Cargo.toml index a1f31f83e8..9eada7bf82 100644 --- a/utils/build-script-utils/Cargo.toml +++ b/utils/build-script-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-build-script-utils" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/utils/fork-tree/Cargo.toml b/utils/fork-tree/Cargo.toml index 6c8410ab76..a1aaea70b1 100644 --- a/utils/fork-tree/Cargo.toml +++ b/utils/fork-tree/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fork-tree" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index 364dc472cb..003b4d9c05 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-benchmarking-cli" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,15 +12,15 @@ description = "CLI for benchmarking FRAME" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -frame-benchmarking = { version = "2.0.0-rc3", path = "../../../frame/benchmarking" } -sp-core = { version = "2.0.0-rc3", path = "../../../primitives/core" } -sc-service = { version = "0.8.0-rc3", default-features = false, path = "../../../client/service" } -sc-cli = { version = "0.8.0-rc3", path = "../../../client/cli" } -sc-client-db = { version = "0.8.0-rc3", path = "../../../client/db" } -sc-executor = { version = "0.8.0-rc3", path = "../../../client/executor" } -sp-externalities = { version = "0.8.0-rc3", path = "../../../primitives/externalities" } -sp-runtime = { version = "2.0.0-rc3", path = "../../../primitives/runtime" } -sp-state-machine = { version = "0.8.0-rc3", path = "../../../primitives/state-machine" } +frame-benchmarking = { version = "2.0.0-rc4", path = "../../../frame/benchmarking" } +sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } +sc-service = { version = "0.8.0-rc4", default-features = false, path = "../../../client/service" } +sc-cli = { version = "0.8.0-rc4", path = "../../../client/cli" } +sc-client-db = { version = "0.8.0-rc4", path = "../../../client/db" } +sc-executor = { version = "0.8.0-rc4", path = "../../../client/executor" } +sp-externalities = { version = "0.8.0-rc4", path = "../../../primitives/externalities" } 
+sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } +sp-state-machine = { version = "0.8.0-rc4", path = "../../../primitives/state-machine" } structopt = "0.3.8" codec = { version = "1.3.1", package = "parity-scale-codec" } diff --git a/utils/frame/rpc/support/Cargo.toml b/utils/frame/rpc/support/Cargo.toml index d7e4259635..ec4d06c93c 100644 --- a/utils/frame/rpc/support/Cargo.toml +++ b/utils/frame/rpc/support/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-frame-rpc-support" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies ", "Andrew Dirksen "] edition = "2018" license = "Apache-2.0" @@ -17,10 +17,10 @@ jsonrpc-client-transports = { version = "14.2.0", default-features = false, feat jsonrpc-core = "14.2.0" codec = { package = "parity-scale-codec", version = "1.3.1" } serde = "1" -frame-support = { version = "2.0.0-rc3", path = "../../../../frame/support" } -sp-storage = { version = "2.0.0-rc3", path = "../../../../primitives/storage" } -sc-rpc-api = { version = "0.8.0-rc3", path = "../../../../client/rpc-api" } +frame-support = { version = "2.0.0-rc4", path = "../../../../frame/support" } +sp-storage = { version = "2.0.0-rc4", path = "../../../../primitives/storage" } +sc-rpc-api = { version = "0.8.0-rc4", path = "../../../../client/rpc-api" } [dev-dependencies] -frame-system = { version = "2.0.0-rc3", path = "../../../../frame/system" } +frame-system = { version = "2.0.0-rc4", path = "../../../../frame/system" } tokio = "0.2" diff --git a/utils/frame/rpc/system/Cargo.toml b/utils/frame/rpc/system/Cargo.toml index a03a08b3ff..1d655bcca3 100644 --- a/utils/frame/rpc/system/Cargo.toml +++ b/utils/frame/rpc/system/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-frame-rpc-system" -version = "2.0.0-rc3" +version = "2.0.0-rc4" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,7 +12,7 @@ description = "FRAME's system exposed over Substrate RPC" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-client-api = { version = "2.0.0-rc3", path = "../../../../client/api" } +sc-client-api = { version = "2.0.0-rc4", path = "../../../../client/api" } codec = { package = "parity-scale-codec", version = "1.3.1" } futures = { version = "0.3.4", features = ["compat"] } jsonrpc-core = "14.2.0" @@ -20,16 +20,16 @@ jsonrpc-core-client = "14.2.0" jsonrpc-derive = "14.2.1" log = "0.4.8" serde = { version = "1.0.101", features = ["derive"] } -sp-runtime = { version = "2.0.0-rc3", path = "../../../../primitives/runtime" } -sp-api = { version = "2.0.0-rc3", path = "../../../../primitives/api" } -frame-system-rpc-runtime-api = { version = "2.0.0-rc3", path = "../../../../frame/system/rpc/runtime-api" } -sp-core = { version = "2.0.0-rc3", path = "../../../../primitives/core" } -sp-blockchain = { version = "2.0.0-rc3", path = "../../../../primitives/blockchain" } -sp-transaction-pool = { version = "2.0.0-rc3", path = "../../../../primitives/transaction-pool" } -sp-block-builder = { version = "2.0.0-rc3", path = "../../../../primitives/block-builder" } -sc-rpc-api = { version = "0.8.0-rc3", path = "../../../../client/rpc-api" } +sp-runtime = { version = "2.0.0-rc4", path = "../../../../primitives/runtime" } +sp-api = { version = "2.0.0-rc4", path = "../../../../primitives/api" } +frame-system-rpc-runtime-api = { version = "2.0.0-rc4", path = "../../../../frame/system/rpc/runtime-api" } +sp-core = { version = "2.0.0-rc4", path = "../../../../primitives/core" } +sp-blockchain = { version = "2.0.0-rc4", path = 
"../../../../primitives/blockchain" } +sp-transaction-pool = { version = "2.0.0-rc4", path = "../../../../primitives/transaction-pool" } +sp-block-builder = { version = "2.0.0-rc4", path = "../../../../primitives/block-builder" } +sc-rpc-api = { version = "0.8.0-rc4", path = "../../../../client/rpc-api" } [dev-dependencies] -substrate-test-runtime-client = { version = "2.0.0-rc3", path = "../../../../test-utils/runtime/client" } +substrate-test-runtime-client = { version = "2.0.0-rc4", path = "../../../../test-utils/runtime/client" } env_logger = "0.7.0" -sc-transaction-pool = { version = "2.0.0-rc3", path = "../../../../client/transaction-pool" } +sc-transaction-pool = { version = "2.0.0-rc4", path = "../../../../client/transaction-pool" } diff --git a/utils/prometheus/Cargo.toml b/utils/prometheus/Cargo.toml index c8dd98656b..322935a884 100644 --- a/utils/prometheus/Cargo.toml +++ b/utils/prometheus/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Endpoint to expose Prometheus metrics" name = "substrate-prometheus-endpoint" -version = "0.8.0-rc3" +version = "0.8.0-rc4" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2018" -- GitLab From 77c4f859e87fdceaf545d9e867bcd0b5299fdf7e Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 26 Jun 2020 10:05:24 +0200 Subject: [PATCH 084/144] Fix an extra semi-colon yielding a wrong error (#6520) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix an extra semi-colon yielding a wrong error * Update client/cli/src/commands/run_cmd.rs Co-authored-by: Bastian Köcher --- client/cli/src/commands/run_cmd.rs | 2 +- client/cli/src/error.rs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index 16bae1ea96..690cb868c5 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -325,7 +325,7 @@ impl CliConfiguration for RunCmd { Error::Input(format!( "Invalid node name '{}'. Reason: {}. If unsure, use none.", name, msg - )); + )) })?; Ok(name) diff --git a/client/cli/src/error.rs b/client/cli/src/error.rs index 31f6e1c1ff..f091354be1 100644 --- a/client/cli/src/error.rs +++ b/client/cli/src/error.rs @@ -37,6 +37,7 @@ pub enum Error { Input(String), /// Invalid listen multiaddress #[display(fmt="Invalid listen multiaddress")] + #[from(ignore)] InvalidListenMultiaddress, /// Other uncategorized error. #[from(ignore)] -- GitLab From 93a6a53061b9ecb8660c291ab43d083cf51c1f89 Mon Sep 17 00:00:00 2001 From: Gavin Wood Date: Fri, 26 Jun 2020 11:03:02 +0200 Subject: [PATCH 085/144] Refactor as_sub to make things clearer. (#6503) * Refactor as_sub to make things clearer. - `as_sub` becomes `as_alternative` - `as_sub_limited` becomes `as_derivative` - `as_alternative` and `as_derivative` generate a mutually exclusive set of accounts. * Test fix * Add test * Fix test * Remove `as_alternative`. * Docs. 
--- frame/proxy/src/tests.rs | 14 ++------- frame/utility/src/benchmarking.rs | 11 ++----- frame/utility/src/lib.rs | 49 ++++++++++--------------------- frame/utility/src/tests.rs | 12 ++++---- 4 files changed, 27 insertions(+), 59 deletions(-) diff --git a/frame/proxy/src/tests.rs b/frame/proxy/src/tests.rs index 72c9c0d577..63d5c9e575 100644 --- a/frame/proxy/src/tests.rs +++ b/frame/proxy/src/tests.rs @@ -201,19 +201,11 @@ fn filtering_works() { assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); expect_event(RawEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); - let sub_id = Utility::sub_account_id(1, 0); - Balances::mutate_account(&sub_id, |a| a.free = 1000); + let derivative_id = Utility::derivative_account_id(1, 0); + Balances::mutate_account(&derivative_id, |a| a.free = 1000); let inner = Box::new(Call::Balances(BalancesCall::transfer(6, 1))); - let call = Box::new(Call::Utility(UtilityCall::as_sub(0, inner.clone()))); - assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Ok(()))); - assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); - assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Ok(()))); - - let call = Box::new(Call::Utility(UtilityCall::as_limited_sub(0, inner.clone()))); + let call = Box::new(Call::Utility(UtilityCall::as_derivative(0, inner.clone()))); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); expect_event(RawEvent::ProxyExecuted(Ok(()))); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); diff --git a/frame/utility/src/benchmarking.rs b/frame/utility/src/benchmarking.rs index 27696404bf..8d98178957 100644 --- a/frame/utility/src/benchmarking.rs +++ b/frame/utility/src/benchmarking.rs @@ -38,13 +38,7 @@ benchmarks! { let caller = account("caller", 0, SEED); }: _(RawOrigin::Signed(caller), calls) - as_sub { - let u in 0 .. 1000; - let caller = account("caller", u, SEED); - let call = Box::new(frame_system::Call::remark(vec![]).into()); - }: _(RawOrigin::Signed(caller), u as u16, call) - - as_limited_sub { + as_derivative { let u in 0 .. 1000; let caller = account("caller", u, SEED); let call = Box::new(frame_system::Call::remark(vec![]).into()); @@ -61,8 +55,7 @@ mod tests { fn test_benchmarks() { new_test_ext().execute_with(|| { assert_ok!(test_benchmark_batch::()); - assert_ok!(test_benchmark_as_sub::()); - assert_ok!(test_benchmark_as_limited_sub::()); + assert_ok!(test_benchmark_as_derivative::()); }); } } diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index 3759a2afcd..47ca4f13e7 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -16,7 +16,7 @@ // limitations under the License. //! # Utility Module -//! A stateless module with helpers for dispatch management. +//! A stateless module with helpers for dispatch management which does no re-authentication. //! //! - [`utility::Trait`](./trait.Trait.html) //! - [`Call`](./enum.Call.html) @@ -29,10 +29,15 @@ //! corresponding `set_storage`s, for efficient multiple payouts with just a single signature //! verify, or in combination with one of the other two dispatch functionality. //! - Pseudonymal dispatch: A stateless operation, allowing a signed origin to execute a call from -//! an alternative signed origin. Each account has 2**16 possible "pseudonyms" (alternative +//! an alternative signed origin. 
Each account has 2 * 2**16 possible "pseudonyms" (alternative
//! account IDs) and these can be stacked. This can be useful as a key management tool, where you
//! need multiple distinct accounts (e.g. as controllers for many staking accounts), but where
//! it's perfectly fine to have each of them controlled by the same underlying keypair.
+//! Derivative accounts are, for the purposes of proxy filtering considered exactly the same as
+//! the origin and are thus hampered with the origin's filters.
+//!
+//! Since proxy filters are respected in all dispatches of this module, it should never need to be
+//! filtered by any proxy.
//!
//! ## Interface
//!
@@ -42,7 +47,7 @@
//! * `batch` - Dispatch multiple calls from the sender's origin.
//!
//! #### For pseudonymal dispatch
-//! * `as_sub` - Dispatch a call from a secondary ("sub") signed origin.
+//! * `as_derivative` - Dispatch a call from a derivative signed origin.
//!
//! [`Call`]: ./enum.Call.html
//! [`Trait`]: ./trait.Trait.html
@@ -155,31 +160,6 @@ decl_module! {
Self::deposit_event(Event::BatchCompleted);
}
- /// Send a call through an indexed pseudonym of the sender.
- ///
- /// NOTE: If you need to ensure that any account-based filtering is honored (i.e. because
- /// you expect `proxy` to have been used prior in the call stack and you want it to apply to
- /// any sub-accounts), then use `as_limited_sub` instead.
- ///
- /// The dispatch origin for this call must be _Signed_.
- ///
- /// #
- /// - Base weight: 2.861 µs
- /// - Plus the weight of the `call`
- /// #
- #[weight = (
- call.get_dispatch_info().weight.saturating_add(3_000_000),
- call.get_dispatch_info().class,
- )]
- fn as_sub(origin, index: u16, call: Box<::Call>) -> DispatchResult {
- let who = ensure_signed(origin)?;
-
- // This is a freshly authenticated new account, the origin restrictions doesn't apply.
- let pseudonym = Self::sub_account_id(who, index);
- call.dispatch(frame_system::RawOrigin::Signed(pseudonym).into())
- .map(|_| ()).map_err(|e| e.error)
- }
-
/// Send a call through an indexed pseudonym of the sender.
///
/// Filter from origin are passed along. The call will be dispatched with an origin which
///
/// NOTE: If you need to ensure that any account-based filtering is not honored (i.e.
/// because you expect `proxy` to have been used prior in the call stack and you do not want
- /// the call restrictions to apply to any sub-accounts), then use `as_multi_threshold_1`
+ /// in the Multisig pallet instead.
+ ///
+ /// NOTE: Prior to version *12, this was called `as_limited_sub`.
///
/// The dispatch origin for this call must be _Signed_.
///
@@ -199,10 +182,10 @@
call.get_dispatch_info().weight.saturating_add(3_000_000),
call.get_dispatch_info().class,
)]
- fn as_limited_sub(origin, index: u16, call: Box<::Call>) -> DispatchResult {
+ fn as_derivative(origin, index: u16, call: Box<::Call>) -> DispatchResult {
let mut origin = origin;
let who = ensure_signed(origin.clone())?;
- let pseudonym = Self::sub_account_id(who, index);
+ let pseudonym = Self::derivative_account_id(who, index);
origin.set_caller_from(frame_system::RawOrigin::Signed(pseudonym));
call.dispatch(origin).map(|_| ()).map_err(|e| e.error)
}
@@ -210,8 +193,8 @@
}
impl Module {
- /// Derive a sub-account ID from the owner account and the sub-account index.
- pub fn sub_account_id(who: T::AccountId, index: u16) -> T::AccountId {
+ /// Derive a derivative account ID from the owner account and the sub-account index.
+ pub fn derivative_account_id(who: T::AccountId, index: u16) -> T::AccountId {
let entropy = (b"modlpy/utilisuba", who, index).using_encoded(blake2_256);
T::AccountId::decode(&mut &entropy[..]).unwrap_or_default()
}
diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs
index e0f8426d28..c0a6499250 100644
--- a/frame/utility/src/tests.rs
+++ b/frame/utility/src/tests.rs
@@ -138,16 +138,16 @@ fn expect_event>(e: E) {
}
#[test]
-fn as_sub_works() {
+fn as_derivative_works() {
new_test_ext().execute_with(|| {
- let sub_1_0 = Utility::sub_account_id(1, 0);
+ let sub_1_0 = Utility::derivative_account_id(1, 0);
assert_ok!(Balances::transfer(Origin::signed(1), sub_1_0, 5));
- assert_noop!(Utility::as_sub(
+ assert_noop!(Utility::as_derivative(
Origin::signed(1),
1,
Box::new(Call::Balances(BalancesCall::transfer(6, 3))),
), BalancesError::::InsufficientBalance);
- assert_ok!(Utility::as_sub(
+ assert_ok!(Utility::as_derivative(
Origin::signed(1),
0,
Box::new(Call::Balances(BalancesCall::transfer(2, 3))),
@@ -158,9 +158,9 @@
}
#[test]
-fn as_sub_filters() {
+fn as_derivative_filters() {
new_test_ext().execute_with(|| {
- assert_noop!(Utility::as_sub(
+ assert_noop!(Utility::as_derivative(
Origin::signed(1),
1,
Box::new(Call::System(frame_system::Call::remark(vec![]))),
--
GitLab

From 397068219c7a43a743f3979947efec35a8e93536 Mon Sep 17 00:00:00 2001
From: s3krit
Date: Fri, 26 Jun 2020 13:13:00 +0200
Subject: [PATCH 086/144] [CI] Fix warning in polkadot-companion-label action (#6514)

---
 .github/workflows/polkadot-companion-labels.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/polkadot-companion-labels.yml b/.github/workflows/polkadot-companion-labels.yml
index 20aaa98a23..27f743e1bd 100644
--- a/.github/workflows/polkadot-companion-labels.yml
+++ b/.github/workflows/polkadot-companion-labels.yml
@@ -17,7 +17,7 @@ jobs:
contexts: 'continuous-integration/gitlab-check-polkadot-companion-build'
timeout: 1800
notPresentTimeout: 3600 # It can take quite a while before the job starts...
- failedStates: failure
+ failureStates: failure
interruptedStates: error # Error = job was probably cancelled.
We don't want to label the PR in that case - name: Label success uses: andymckay/labeler@master -- GitLab From 1a5ebf548590844faac6264a879b72b822a50173 Mon Sep 17 00:00:00 2001 From: Cecile Tonglet Date: Fri, 26 Jun 2020 15:35:43 +0200 Subject: [PATCH 087/144] Remove @cecton from CODEOWNERS (#6524) * Initial commit Forked at: 397068219c7a43a743f3979947efec35a8e93536 Parent branch: origin/master * Remove @cecton from CODEOWNERS --- docs/CODEOWNERS | 4 ---- 1 file changed, 4 deletions(-) diff --git a/docs/CODEOWNERS b/docs/CODEOWNERS index b86846aefe..d9342de399 100644 --- a/docs/CODEOWNERS +++ b/docs/CODEOWNERS @@ -66,7 +66,3 @@ # Prometheus endpoint /utils/prometheus/ @mxinden - -# CLI API -/client/cli @cecton -/client/cli-derive @cecton -- GitLab From 4cc4b76e361f55de8ae5dd2bae8226cacf4addcb Mon Sep 17 00:00:00 2001 From: Cecile Tonglet Date: Fri, 26 Jun 2020 15:36:03 +0200 Subject: [PATCH 088/144] Impl From for OpaqueExtrinsic (#6522) --- bin/node/cli/src/service.rs | 12 +++------ .../src/generic/unchecked_extrinsic.rs | 27 +++++++++++++++++++ primitives/runtime/src/lib.rs | 9 ++++++- 3 files changed, 39 insertions(+), 9 deletions(-) diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index afc9e23d68..3279490363 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -424,13 +424,12 @@ mod tests { use node_primitives::{Block, DigestItem, Signature}; use node_runtime::{BalancesCall, Call, UncheckedExtrinsic, Address}; use node_runtime::constants::{currency::CENTS, time::SLOT_DURATION}; - use codec::{Encode, Decode}; + use codec::Encode; use sp_core::{crypto::Pair as CryptoPair, H256}; use sp_runtime::{ generic::{BlockId, Era, Digest, SignedPayload}, traits::{Block as BlockT, Header as HeaderT}, traits::Verify, - OpaqueExtrinsic, }; use sp_timestamp; use sp_finality_tracker; @@ -605,16 +604,13 @@ mod tests { signer.sign(payload) }); let (function, extra, _) = raw_payload.deconstruct(); - let xt = UncheckedExtrinsic::new_signed( + index += 1; + UncheckedExtrinsic::new_signed( function, from.into(), signature.into(), extra, - ).encode(); - let v: Vec = Decode::decode(&mut xt.as_slice()).unwrap(); - - index += 1; - OpaqueExtrinsic(v) + ).into() }, ); } diff --git a/primitives/runtime/src/generic/unchecked_extrinsic.rs b/primitives/runtime/src/generic/unchecked_extrinsic.rs index 41ff2609fc..d16d404ddf 100644 --- a/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -27,6 +27,7 @@ use crate::{ }, generic::CheckedExtrinsic, transaction_validity::{TransactionValidityError, InvalidTransaction}, + OpaqueExtrinsic, }; const TRANSACTION_VERSION: u8 = 4; @@ -316,6 +317,23 @@ where } } +impl From> + for OpaqueExtrinsic +where + Address: Encode, + Signature: Encode, + Call: Encode, + Extra: SignedExtension, +{ + fn from(extrinsic: UncheckedExtrinsic) -> Self { + OpaqueExtrinsic::from_bytes(extrinsic.encode().as_slice()) + .expect( + "both OpaqueExtrinsic and UncheckedExtrinsic have encoding that is compatible with \ + raw Vec encoding; qed" + ) + } +} + #[cfg(test)] mod tests { use super::*; @@ -424,4 +442,13 @@ mod tests { let as_vec: Vec = Decode::decode(&mut encoded.as_slice()).unwrap(); assert_eq!(as_vec.encode(), encoded); } + + #[test] + fn conversion_to_opaque() { + let ux = Ex::new_unsigned(vec![0u8; 0]); + let encoded = ux.encode(); + let opaque: OpaqueExtrinsic = ux.into(); + let opaque_encoded = opaque.encode(); + assert_eq!(opaque_encoded, encoded); + } } diff --git 
a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 881ba3d724..b27cb0c633 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -714,7 +714,14 @@ macro_rules! assert_eq_error_rate { /// Simple blob to hold an extrinsic without committing to its format and ensure it is serialized /// correctly. #[derive(PartialEq, Eq, Clone, Default, Encode, Decode)] -pub struct OpaqueExtrinsic(pub Vec); +pub struct OpaqueExtrinsic(Vec); + +impl OpaqueExtrinsic { + /// Convert an encoded extrinsic to an `OpaqueExtrinsic`. + pub fn from_bytes(mut bytes: &[u8]) -> Result { + OpaqueExtrinsic::decode(&mut bytes) + } +} #[cfg(feature = "std")] impl parity_util_mem::MallocSizeOf for OpaqueExtrinsic { -- GitLab From 67513a9adb5053a878ed0d04efcca3c2fa9bb856 Mon Sep 17 00:00:00 2001 From: Xiliang Chen Date: Sat, 27 Jun 2020 01:37:45 +1200 Subject: [PATCH 089/144] Implement Contains for pallet-membership (#6518) * implement Contains for pallet-membership * bump version --- bin/node/runtime/src/lib.rs | 2 +- frame/membership/src/lib.rs | 12 +++++++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index e3c9c2b95f..c5c11fe577 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -98,7 +98,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. spec_version: 254, - impl_version: 0, + impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, }; diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index 71b0902838..c8563b52f8 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -26,7 +26,7 @@ use sp_std::prelude::*; use frame_support::{ decl_module, decl_storage, decl_event, decl_error, - traits::{ChangeMembers, InitializeMembers, EnsureOrigin}, + traits::{ChangeMembers, InitializeMembers, EnsureOrigin, Contains}, }; use frame_system::{self as system, ensure_signed}; @@ -264,6 +264,16 @@ impl, I: Instance> Module { } } +impl, I: Instance> Contains for Module { + fn sorted_members() -> Vec { + Self::members() + } + + fn count() -> usize { + Members::::decode_len().unwrap_or(0) + } +} + #[cfg(test)] mod tests { use super::*; -- GitLab From 11d2899793b27e4fe6695a7b3d9cd2962b536258 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 26 Jun 2020 16:48:11 +0200 Subject: [PATCH 090/144] Increase the limit for the maximum size of the telemetry name (#6523) * Increase the limit for the maximum size of the telemetry name * Fix test --- client/cli/src/commands/run_cmd.rs | 4 +++- client/cli/src/config.rs | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index 690cb868c5..de5589196f 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -610,7 +610,9 @@ mod tests { #[test] fn tests_node_name_bad() { - assert!(is_node_name_valid("long names are not very cool for the ui").is_err()); + assert!(is_node_name_valid( + "very very long names are really not very cool for the ui at all, really they're not" + ).is_err()); assert!(is_node_name_valid("Dots.not.Ok").is_err()); assert!(is_node_name_valid("http://visit.me").is_err()); assert!(is_node_name_valid("https://visit.me").is_err()); diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 598acd0ab9..5563f46115 100644 --- 
a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -36,7 +36,7 @@ use std::net::SocketAddr; use std::path::PathBuf; /// The maximum number of characters for a node name. -pub(crate) const NODE_NAME_MAX_LENGTH: usize = 32; +pub(crate) const NODE_NAME_MAX_LENGTH: usize = 64; /// default sub directory to store network config pub(crate) const DEFAULT_NETWORK_CONFIG_PATH: &'static str = "network"; -- GitLab From 0a91a5f71224d05990b50d1aa1de5478fa31039e Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Sun, 28 Jun 2020 19:53:58 +0200 Subject: [PATCH 091/144] Fix some broken benchmarks (#6528) --- frame/democracy/src/benchmarking.rs | 6 +++--- frame/multisig/src/benchmarking.rs | 5 ++--- frame/scheduler/src/benchmarking.rs | 13 +++++++------ 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/frame/democracy/src/benchmarking.rs b/frame/democracy/src/benchmarking.rs index d0bd732448..ba3b9a0b13 100644 --- a/frame/democracy/src/benchmarking.rs +++ b/frame/democracy/src/benchmarking.rs @@ -74,13 +74,13 @@ fn add_referendum(n: u32) -> Result { 0.into(), ); let referendum_index: ReferendumIndex = ReferendumCount::get() - 1; - let _ = T::Scheduler::schedule_named( + T::Scheduler::schedule_named( (DEMOCRACY_ID, referendum_index).encode(), - 0.into(), + 1.into(), None, 63, Call::enact_proposal(proposal_hash, referendum_index).into(), - ); + ).map_err(|_| "failed to schedule named")?; Ok(referendum_index) } diff --git a/frame/multisig/src/benchmarking.rs b/frame/multisig/src/benchmarking.rs index 9479c16cb2..8113d179cd 100644 --- a/frame/multisig/src/benchmarking.rs +++ b/frame/multisig/src/benchmarking.rs @@ -22,7 +22,7 @@ use super::*; use frame_system::RawOrigin; use frame_benchmarking::{benchmarks, account}; -use sp_runtime::traits::{Bounded, Saturating}; +use sp_runtime::traits::Bounded; use core::convert::TryInto; use crate::Module as Multisig; @@ -36,8 +36,7 @@ fn setup_multi(s: u32, z: u32) for i in 0 .. s { let signatory = account("signatory", i, SEED); // Give them some balance for a possible deposit - let deposit = T::DepositBase::get() + T::DepositFactor::get() * s.into(); - let balance = T::Currency::minimum_balance().saturating_mul(100.into()) + deposit; + let balance = BalanceOf::::max_value(); T::Currency::make_free_balance_be(&signatory, balance); signatories.push(signatory); } diff --git a/frame/scheduler/src/benchmarking.rs b/frame/scheduler/src/benchmarking.rs index 975c10e3b6..5c580b5525 100644 --- a/frame/scheduler/src/benchmarking.rs +++ b/frame/scheduler/src/benchmarking.rs @@ -29,6 +29,7 @@ use crate::Module as Scheduler; use frame_system::Module as System; const MAX_SCHEDULED: u32 = 50; +const BLOCK_NUMBER: u32 = 2; // Add `n` named items to the schedule fn fill_schedule (when: T::BlockNumber, n: u32) -> Result<(), &'static str> { @@ -55,7 +56,7 @@ benchmarks! { schedule { let s in 0 .. MAX_SCHEDULED; - let when = T::BlockNumber::one(); + let when = BLOCK_NUMBER.into(); let periodic = Some((T::BlockNumber::one(), 100)); let priority = 0; // Essentially a no-op call. @@ -72,7 +73,7 @@ benchmarks! { cancel { let s in 1 .. MAX_SCHEDULED; - let when: T::BlockNumber = 2.into(); + let when = BLOCK_NUMBER.into(); fill_schedule::(when, s)?; assert_eq!(Agenda::::get(when).len(), s as usize); @@ -92,7 +93,7 @@ benchmarks! { schedule_named { let s in 0 .. MAX_SCHEDULED; let id = s.encode(); - let when = T::BlockNumber::one(); + let when = BLOCK_NUMBER.into(); let periodic = Some((T::BlockNumber::one(), 100)); let priority = 0; // Essentially a no-op call. 
@@ -109,7 +110,7 @@ benchmarks! { cancel_named { let s in 1 .. MAX_SCHEDULED; - let when = T::BlockNumber::one(); + let when = BLOCK_NUMBER.into(); fill_schedule::(when, s)?; }: _(RawOrigin::Root, 0.encode()) @@ -127,9 +128,9 @@ benchmarks! { on_initialize { let s in 0 .. MAX_SCHEDULED; - let when = T::BlockNumber::one(); + let when = BLOCK_NUMBER.into(); fill_schedule::(when, s)?; - }: { Scheduler::::on_initialize(T::BlockNumber::one()); } + }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } verify { assert_eq!(System::::event_count(), s); // Next block should have all the schedules again -- GitLab From 99ee2d7f57c6cc6e307330ed6b7891e0179813d6 Mon Sep 17 00:00:00 2001 From: chenwei Date: Mon, 29 Jun 2020 02:17:15 +0800 Subject: [PATCH 092/144] Implement `()` for `Happened` (#6529) --- frame/support/src/traits.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index f25ff67efb..b36559c363 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -329,6 +329,10 @@ pub trait Happened { fn happened(t: &T); } +impl Happened for () { + fn happened(_: &T) {} +} + /// A shim for placing around a storage item in order to use it as a `StoredValue`. Ideally this /// wouldn't be needed as `StorageValue`s should blanket implement `StoredValue`s, however this /// would break the ability to have custom impls of `StoredValue`. The other workaround is to -- GitLab From a273b48a0f32e7f7a670d3698453e3249521865b Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Mon, 29 Jun 2020 15:59:32 +0200 Subject: [PATCH 093/144] Allow `retract_tip` on `tip_new` (#6511) * Allow `retract_tip` on `tip_new` * initial migration code * test migration * make pub * bump spec --- Cargo.lock | 1 + bin/node/runtime/src/lib.rs | 4 +- frame/treasury/Cargo.toml | 1 + frame/treasury/src/lib.rs | 97 ++++++++++++++++++++++++++++----- frame/treasury/src/tests.rs | 106 ++++++++++++++++++++++++++++++++++++ 5 files changed, 194 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 89b24d0826..0df37db5ed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4665,6 +4665,7 @@ dependencies = [ "sp-io", "sp-runtime", "sp-std", + "sp-storage", ] [[package]] diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index c5c11fe577..969e66653e 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -97,8 +97,8 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 254, - impl_version: 1, + spec_version: 255, + impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, }; diff --git a/frame/treasury/Cargo.toml b/frame/treasury/Cargo.toml index 28f972d458..dfab1aca43 100644 --- a/frame/treasury/Cargo.toml +++ b/frame/treasury/Cargo.toml @@ -25,6 +25,7 @@ frame-benchmarking = { version = "2.0.0-rc4", default-features = false, path = " [dev-dependencies] sp-io ={ version = "2.0.0-rc4", path = "../../primitives/io" } sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +sp-storage = { version = "2.0.0-rc4", path = "../../primitives/storage" } [features] default = ["std"] diff --git a/frame/treasury/src/lib.rs b/frame/treasury/src/lib.rs index e67ace5475..bb139c4cc6 100644 --- a/frame/treasury/src/lib.rs +++ b/frame/treasury/src/lib.rs @@ -192,13 +192,17 @@ pub struct OpenTip< reason: Hash, /// The account to be tipped. 
who: AccountId, - /// The account who began this tip and the amount held on deposit. - finder: Option<(AccountId, Balance)>, + /// The account who began this tip. + finder: AccountId, + /// The amount held on deposit for this tip. + deposit: Balance, /// The block number at which this tip will close if `Some`. If `None`, then no closing is /// scheduled. closes: Option, /// The members who have voted for this tip. Sorted by AccountId. tips: Vec<(AccountId, Balance)>, + /// Whether this tip should result in the finder taking a fee. + finders_fee: bool, } decl_storage! { @@ -428,8 +432,15 @@ decl_module! { T::Currency::reserve(&finder, deposit)?; Reasons::::insert(&reason_hash, &reason); - let finder = Some((finder, deposit)); - let tip = OpenTip { reason: reason_hash, who, finder, closes: None, tips: vec![] }; + let tip = OpenTip { + reason: reason_hash, + who, + finder, + deposit, + closes: None, + tips: vec![], + finders_fee: true + }; Tips::::insert(&hash, tip); Self::deposit_event(RawEvent::NewTip(hash)); } @@ -457,12 +468,13 @@ decl_module! { fn retract_tip(origin, hash: T::Hash) { let who = ensure_signed(origin)?; let tip = Tips::::get(&hash).ok_or(Error::::UnknownTip)?; - let (finder, deposit) = tip.finder.ok_or(Error::::NotFinder)?; - ensure!(finder == who, Error::::NotFinder); + ensure!(tip.finder == who, Error::::NotFinder); Reasons::::remove(&tip.reason); Tips::::remove(&hash); - let _ = T::Currency::unreserve(&who, deposit); + if !tip.deposit.is_zero() { + let _ = T::Currency::unreserve(&who, tip.deposit); + } Self::deposit_event(RawEvent::TipRetracted(hash)); } @@ -501,8 +513,16 @@ decl_module! { Reasons::::insert(&reason_hash, &reason); Self::deposit_event(RawEvent::NewTip(hash.clone())); - let tips = vec![(tipper, tip_value)]; - let tip = OpenTip { reason: reason_hash, who, finder: None, closes: None, tips }; + let tips = vec![(tipper.clone(), tip_value)]; + let tip = OpenTip { + reason: reason_hash, + who, + finder: tipper, + deposit: Zero::zero(), + closes: None, + tips, + finders_fee: false, + }; Tips::::insert(&hash, tip); } @@ -667,15 +687,17 @@ impl Module { let treasury = Self::account_id(); let max_payout = Self::pot(); let mut payout = tips[tips.len() / 2].1.min(max_payout); - if let Some((finder, deposit)) = tip.finder { - let _ = T::Currency::unreserve(&finder, deposit); - if finder != tip.who { + if !tip.deposit.is_zero() { + let _ = T::Currency::unreserve(&tip.finder, tip.deposit); + } + if tip.finders_fee { + if tip.finder != tip.who { // pay out the finder's fee. let finders_fee = T::TipFindersFee::get() * payout; payout -= finders_fee; // this should go through given we checked it's at most the free balance, but still // we only make a best-effort. - let _ = T::Currency::transfer(&treasury, &finder, finders_fee, KeepAlive); + let _ = T::Currency::transfer(&treasury, &tip.finder, finders_fee, KeepAlive); } } // same as above: best-effort only. @@ -753,6 +775,55 @@ impl Module { // Must never be less than 0 but better be safe. .saturating_sub(T::Currency::minimum_balance()) } + + pub fn migrate_retract_tip_for_tip_new() { + /// An open tipping "motion". Retains all details of a tip including information on the finder + /// and the members who have voted. + #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] + pub struct OldOpenTip< + AccountId: Parameter, + Balance: Parameter, + BlockNumber: Parameter, + Hash: Parameter, + > { + /// The hash of the reason for the tip. The reason should be a human-readable UTF-8 encoded string. 
A URL would be + /// sensible. + reason: Hash, + /// The account to be tipped. + who: AccountId, + /// The account who began this tip and the amount held on deposit. + finder: Option<(AccountId, Balance)>, + /// The block number at which this tip will close if `Some`. If `None`, then no closing is + /// scheduled. + closes: Option, + /// The members who have voted for this tip. Sorted by AccountId. + tips: Vec<(AccountId, Balance)>, + } + + use frame_support::{Twox64Concat, migration::StorageKeyIterator}; + + for (hash, old_tip) in StorageKeyIterator::< + T::Hash, + OldOpenTip, T::BlockNumber, T::Hash>, + Twox64Concat, + >::new(b"Treasury", b"Tips").drain() + { + let (finder, deposit, finders_fee) = match old_tip.finder { + Some((finder, deposit)) => (finder, deposit, true), + None => (T::AccountId::default(), Zero::zero(), false), + }; + let new_tip = OpenTip { + reason: old_tip.reason, + who: old_tip.who, + finder, + deposit, + closes: old_tip.closes, + tips: old_tip.tips, + finders_fee + }; + Tips::::insert(hash, new_tip) + } + } } impl OnUnbalanced> for Module { diff --git a/frame/treasury/src/tests.rs b/frame/treasury/src/tests.rs index 027e52c1bf..68820ffd5d 100644 --- a/frame/treasury/src/tests.rs +++ b/frame/treasury/src/tests.rs @@ -293,6 +293,7 @@ fn close_tip_works() { #[test] fn retract_tip_works() { new_test_ext().execute_with(|| { + // with report awesome Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!(Treasury::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); let h = tip_hash(); @@ -303,6 +304,17 @@ fn retract_tip_works() { assert_ok!(Treasury::retract_tip(Origin::signed(0), h.clone())); System::set_block_number(2); assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::::UnknownTip); + + // with tip new + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); + let h = tip_hash(); + assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); + assert_noop!(Treasury::retract_tip(Origin::signed(0), h.clone()), Error::::NotFinder); + assert_ok!(Treasury::retract_tip(Origin::signed(10), h.clone())); + System::set_block_number(2); + assert_noop!(Treasury::close_tip(Origin::signed(10), h.into()), Error::::UnknownTip); }); } @@ -544,3 +556,97 @@ fn inexistent_account_works() { assert_eq!(Balances::free_balance(3), 99); // Balance of `3` has changed }); } + +#[test] +fn test_last_reward_migration() { + use sp_storage::Storage; + + let mut s = Storage::default(); + + #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] + pub struct OldOpenTip< + AccountId: Parameter, + Balance: Parameter, + BlockNumber: Parameter, + Hash: Parameter, + > { + /// The hash of the reason for the tip. The reason should be a human-readable UTF-8 encoded string. A URL would be + /// sensible. + reason: Hash, + /// The account to be tipped. + who: AccountId, + /// The account who began this tip and the amount held on deposit. + finder: Option<(AccountId, Balance)>, + /// The block number at which this tip will close if `Some`. If `None`, then no closing is + /// scheduled. + closes: Option, + /// The members who have voted for this tip. Sorted by AccountId. 
+ tips: Vec<(AccountId, Balance)>, + } + + let reason1 = BlakeTwo256::hash(b"reason1"); + let hash1 = BlakeTwo256::hash_of(&(reason1, 10u64)); + + let old_tip_finder = OldOpenTip:: { + reason: reason1, + who: 10, + finder: Some((20, 30)), + closes: Some(13), + tips: vec![(40, 50), (60, 70)] + }; + + let reason2 = BlakeTwo256::hash(b"reason2"); + let hash2 = BlakeTwo256::hash_of(&(reason2, 20u64)); + + let old_tip_no_finder = OldOpenTip:: { + reason: reason2, + who: 20, + finder: None, + closes: Some(13), + tips: vec![(40, 50), (60, 70)] + }; + + let data = vec![ + ( + Tips::::hashed_key_for(hash1), + old_tip_finder.encode().to_vec() + ), + ( + Tips::::hashed_key_for(hash2), + old_tip_no_finder.encode().to_vec() + ), + ]; + + s.top = data.into_iter().collect(); + sp_io::TestExternalities::new(s).execute_with(|| { + Treasury::migrate_retract_tip_for_tip_new(); + + // Test w/ finder + assert_eq!( + Tips::::get(hash1), + Some(OpenTip { + reason: reason1, + who: 10, + finder: 20, + deposit: 30, + closes: Some(13), + tips: vec![(40, 50), (60, 70)], + finders_fee: true, + }) + ); + + // Test w/o finder + assert_eq!( + Tips::::get(hash2), + Some(OpenTip { + reason: reason2, + who: 20, + finder: Default::default(), + deposit: 0, + closes: Some(13), + tips: vec![(40, 50), (60, 70)], + finders_fee: false, + }) + ); + }); +} -- GitLab From fd55c45a0a1fd705297e9a0888acdf277e1149a5 Mon Sep 17 00:00:00 2001 From: Toralf Wittner Date: Tue, 30 Jun 2020 10:02:51 +0200 Subject: [PATCH 094/144] Update to libp2p v0.20.1 (#6465) * Update to libp2p-0.20.0 * Update to `libp2p-0.20.1`. Co-authored-by: Pierre Krieger --- Cargo.lock | 426 +++++++++++++++---------- Cargo.toml | 2 +- bin/node/browser-testing/Cargo.toml | 2 +- bin/utils/subkey/Cargo.toml | 2 +- client/authority-discovery/Cargo.toml | 2 +- client/network-gossip/Cargo.toml | 2 +- client/network/Cargo.toml | 4 +- client/network/src/discovery.rs | 2 +- client/network/test/Cargo.toml | 2 +- client/peerset/Cargo.toml | 2 +- client/telemetry/Cargo.toml | 3 +- client/telemetry/src/worker.rs | 21 +- client/telemetry/src/worker/node.rs | 13 +- primitives/consensus/common/Cargo.toml | 2 +- 14 files changed, 291 insertions(+), 194 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0df37db5ed..8b0273d199 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -16,6 +16,26 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d2e7343e7fc9de883d1b0341e0b13970f764c14101234857d2ddafa1cb1cac2" +[[package]] +name = "aead" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cf01b9b56e767bb57b94ebf91a58b338002963785cdd7013e21c0d4679471e4" +dependencies = [ + "generic-array", +] + +[[package]] +name = "aes" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54eb1d8fe354e5fc611daf4f2ea97dd45a765f4f1e4512306ec183ae2e8f20c9" +dependencies = [ + "aes-soft", + "aesni", + "block-cipher-trait", +] + [[package]] name = "aes-ctr" version = "0.3.0" @@ -28,6 +48,20 @@ dependencies = [ "stream-cipher", ] +[[package]] +name = "aes-gcm" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "834a6bda386024dbb7c8fc51322856c10ffe69559f972261c868485f5759c638" +dependencies = [ + "aead", + "aes", + "block-cipher-trait", + "ghash", + "subtle 2.2.2", + "zeroize", +] + [[package]] name = "aes-soft" version = "0.3.3" @@ -225,7 +259,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "95fd83426b89b034bf4e9ceb9c533c2f2386b813fd3dcae0a425ec6f1837d78a" dependencies = [ - "futures 0.3.4", + "futures 0.3.5", "rustls", "webpki", "webpki-roots 0.19.0", @@ -558,12 +592,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" [[package]] -name = "chacha20-poly1305-aead" -version = "0.1.2" +name = "chacha20" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77d2058ba29594f69c75e8a9018e0485e3914ca5084e3613cd64529042f5423b" +checksum = "f6a7ae4c498f8447d86baef0fa0831909333f558866fabcb21600625ac5a31c7" dependencies = [ - "constant_time_eq", + "stream-cipher", + "zeroize", +] + +[[package]] +name = "chacha20poly1305" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48901293601228db2131606f741db33561f7576b5d19c99cd66222380a7dc863" +dependencies = [ + "aead", + "chacha20", + "poly1305", + "stream-cipher", + "zeroize", ] [[package]] @@ -1251,7 +1299,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e43f2f1833d64e33f15592464d6fdd70f349dda7b1a53088eb83cd94014008c5" dependencies = [ - "futures 0.3.4", + "futures 0.3.5", ] [[package]] @@ -1329,7 +1377,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8feb87a63249689640ac9c011742c33139204e3c134293d3054022276869133b" dependencies = [ "either", - "futures 0.3.4", + "futures 0.3.5", "futures-timer 2.0.2", "log", "num-traits 0.2.11", @@ -1618,9 +1666,9 @@ checksum = "1b980f2816d6ee8673b6517b52cb0e808a180efc92e5c19d02cdda79066703ef" [[package]] name = "futures" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c329ae8753502fb44ae4fc2b622fa2a94652c41e795143765ba0927f92ab780" +checksum = "1e05b85ec287aac0dc34db7d4a569323df697f9c55b99b15d6b4ef8cde49f613" dependencies = [ "futures-channel", "futures-core", @@ -1633,9 +1681,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c77d04ce8edd9cb903932b608268b3fffec4163dc053b3b402bf47eac1f1a8" +checksum = "f366ad74c28cca6ba456d95e6422883cfb4b252a83bed929c83abfdbbf2967d5" dependencies = [ "futures-core", "futures-sink", @@ -1652,9 +1700,9 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f25592f769825e89b92358db00d26f965761e094951ac44d3663ef25b7ac464a" +checksum = "59f5fff90fd5d971f936ad674802482ba441b6f09ba5e15fd8b39145582ca399" [[package]] name = "futures-core-preview" @@ -1679,7 +1727,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdcef58a173af8148b182684c9f2d5250875adbcaff7b5794073894f9d8634a9" dependencies = [ "futures 0.1.29", - "futures 0.3.4", + "futures 0.3.5", "lazy_static", "log", "parking_lot 0.9.0", @@ -1690,9 +1738,9 @@ dependencies = [ [[package]] name = "futures-executor" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f674f3e1bcb15b37284a90cedf55afdba482ab061c407a9c0ebbd0f3109741ba" +checksum = "10d6bb888be1153d3abeb9006b11b02cf5e9b209fda28693c31ae1e4e012e314" dependencies = [ "futures-core", "futures-task", @@ -1702,15 +1750,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.4" +version 
= "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a638959aa96152c7a4cddf50fcb1e3fede0583b27157c26e67d6f99904090dc6" +checksum = "de27142b013a8e869c14957e6d2edeef89e97c289e69d042ee3a49acd8b51789" [[package]] name = "futures-macro" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a5081aa3de1f7542a794a397cde100ed903b0630152d0973479018fd85423a7" +checksum = "d0b5a30a4328ab5473878237c447333c093297bded83a4983d10f4deea240d39" dependencies = [ "proc-macro-hack", "proc-macro2", @@ -1720,15 +1768,18 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3466821b4bc114d95b087b850a724c6f83115e929bc88f1fa98a3304a944c8a6" +checksum = "3f2032893cb734c7a05d85ce0cc8b8c4075278e93b24b66f9de99d6eb0fa8acc" [[package]] name = "futures-task" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0a34e53cf6cdcd0178aa573aed466b646eb3db769570841fda0c7ede375a27" +checksum = "bdb66b5f09e22019b1ab0830f7785bcea8e7a42148683f99214f73f8ec21a626" +dependencies = [ + "once_cell", +] [[package]] name = "futures-timer" @@ -1748,9 +1799,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22766cf25d64306bedf0384da004d05c9974ab104fcc4528f1236181c18004c5" +checksum = "8764574ff08b701a084482c3c7031349104b07ac897393010494beaa18ce32c6" dependencies = [ "futures 0.1.29", "futures-channel", @@ -1760,6 +1811,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", + "pin-project", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -1785,7 +1837,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0a73299e4718f5452e45980fc1d6957a070abe308d3700b63b8673f47e1c2b3" dependencies = [ "bytes 0.5.4", - "futures 0.3.4", + "futures 0.3.5", + "memchr", + "pin-project", +] + +[[package]] +name = "futures_codec" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce54d63f8b0c75023ed920d46fd71d0cbbb830b0ee012726b5b4f506fb6dea5b" +dependencies = [ + "bytes 0.5.4", + "futures 0.3.5", "memchr", "pin-project", ] @@ -1839,6 +1903,15 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "ghash" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f0930ed19a7184089ea46d2fedead2f6dc2b674c5db4276b7da336c7cd83252" +dependencies = [ + "polyval", +] + [[package]] name = "gimli" version = "0.20.0" @@ -2260,7 +2333,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64fa110ec7b8f493f416eed552740d10e7030ad5f63b2308f82c9608ec2df275" dependencies = [ - "futures 0.3.4", + "futures 0.3.5", "futures-timer 2.0.2", ] @@ -2531,7 +2604,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c7f36acb1841d4c701d30ae1f2cfd242e805991443f75f6935479ed3de64903" dependencies = [ - "futures 0.3.4", + "futures 0.3.5", "js-sys", "kvdb", "kvdb-memorydb", @@ -2596,12 +2669,12 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] name = "libp2p" -version = "0.19.1" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"057eba5432d3e740e313c6e13c9153d0cb76b4f71bfc2e5242ae5bdb7d41af67" +checksum = "db81113df355dea9dddfcb01cd867555298dca29d915f25d1b1a0aad2e29338b" dependencies = [ "bytes 0.5.4", - "futures 0.3.4", + "futures 0.3.5", "lazy_static", "libp2p-core", "libp2p-core-derive", @@ -2619,7 +2692,7 @@ dependencies = [ "libp2p-websocket", "libp2p-yamux", "multihash", - "parity-multiaddr 0.9.0", + "parity-multiaddr 0.9.1", "parking_lot 0.10.2", "pin-project", "smallvec 1.4.0", @@ -2628,23 +2701,23 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.19.0" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80a6000296bdbff540b6c00ef82108ef23aa68d195b9333823ea491562c338d7" +checksum = "3a0387b930c3d4c2533dc4893c1e0394185ddcc019846121b1b27491e45a2c08" dependencies = [ "asn1_der", "bs58", "ed25519-dalek", "either", "fnv", - "futures 0.3.4", + "futures 0.3.5", "futures-timer 3.0.2", "lazy_static", "libsecp256k1", "log", "multihash", "multistream-select", - "parity-multiaddr 0.9.0", + "parity-multiaddr 0.9.1", "parking_lot 0.10.2", "pin-project", "prost", @@ -2655,7 +2728,7 @@ dependencies = [ "sha2", "smallvec 1.4.0", "thiserror", - "unsigned-varint", + "unsigned-varint 0.4.0", "void", "zeroize", ] @@ -2676,18 +2749,18 @@ version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3cc186d9a941fd0207cf8f08ef225a735e2d7296258f570155e525f6ee732f87" dependencies = [ - "futures 0.3.4", + "futures 0.3.5", "libp2p-core", "log", ] [[package]] name = "libp2p-identify" -version = "0.19.1" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6438ed8ca240c7635c9caa3be6c5258bc0058553ae97ba81737f04e5d33804f5" +checksum = "62f76075b170d908bae616f550ade410d9d27c013fa69042551dbfc757c7c094" dependencies = [ - "futures 0.3.4", + "futures 0.3.5", "libp2p-core", "libp2p-swarm", "log", @@ -2699,16 +2772,16 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.19.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d6c1d5100973527ae70d82687465b17049c1b717a7964de38b8e65000878ff" +checksum = "f7c819a5425b2eb3416d67e9c868c5c1e922b6658655e06b9eeafaa41304b876" dependencies = [ "arrayvec 0.5.1", "bytes 0.5.4", "either", "fnv", - "futures 0.3.4", - "futures_codec", + "futures 0.3.5", + "futures_codec 0.4.1", "libp2p-core", "libp2p-swarm", "log", @@ -2719,22 +2792,22 @@ dependencies = [ "sha2", "smallvec 1.4.0", "uint", - "unsigned-varint", + "unsigned-varint 0.4.0", "void", "wasm-timer", ] [[package]] name = "libp2p-mdns" -version = "0.19.1" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51b00163d13f705aae67c427bea0575f8aaf63da6524f9bd4a5a093b8bda0b38" +checksum = "7f55b2d4b80986e5bf158270ab23268ec0e7f644ece5436fbaabc5155472f357" dependencies = [ "async-std", "data-encoding", "dns-parser", "either", - "futures 0.3.4", + "futures 0.3.5", "lazy_static", "libp2p-core", "libp2p-swarm", @@ -2748,28 +2821,28 @@ dependencies = [ [[package]] name = "libp2p-mplex" -version = "0.19.1" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34ce63313ad4bce2d76e54c292a1293ea47a0ebbe16708f1513fa62184992f53" +checksum = "be7d913a4cd57de2013257ec73f07d77bfce390b370023e2d59083e5ca079864" dependencies = [ "bytes 0.5.4", "fnv", - "futures 0.3.4", - "futures_codec", + "futures 0.3.5", + "futures_codec 0.4.1", "libp2p-core", "log", "parking_lot 0.10.2", - 
"unsigned-varint", + "unsigned-varint 0.4.0", ] [[package]] name = "libp2p-noise" -version = "0.19.0" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84fd504e27b0eadd451e06b67694ef714bd8374044e7db339bb0cdb83755ddf4" +checksum = "a03db664653369f46ee03fcec483a378c20195089bb43a26cb9fb0058009ac88" dependencies = [ "curve25519-dalek", - "futures 0.3.4", + "futures 0.3.5", "lazy_static", "libp2p-core", "log", @@ -2785,11 +2858,11 @@ dependencies = [ [[package]] name = "libp2p-ping" -version = "0.19.2" +version = "0.19.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb3c4f9273313357d4977799aec69f581cfe9568854919c5b8066018ccf59f5" +checksum = "b8dedd34e35a9728d52d59ef36a218e411359a353f9011b2574b86ee790978f6" dependencies = [ - "futures 0.3.4", + "futures 0.3.5", "libp2p-core", "libp2p-swarm", "log", @@ -2800,13 +2873,13 @@ dependencies = [ [[package]] name = "libp2p-secio" -version = "0.19.1" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b73f0cc119c83a5b619d6d11074a319fdb4aa4daf8088ade00d511418566e28" +checksum = "c99b3c33e96bb402486d5b4f7cbeab14e66e6a2ed010abbb5bb032a05460bfda" dependencies = [ "aes-ctr", "ctr", - "futures 0.3.4", + "futures 0.3.5", "hmac", "js-sys", "lazy_static", @@ -2830,11 +2903,11 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.19.0" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4a8101a0e0d5f04562137a476bf5f5423cd5bdab2f7e43a75909668e63cb102" +checksum = "ce53ff4d127cf8b39adf84dbd381ca32d49bd85788cee08e6669da2495993930" dependencies = [ - "futures 0.3.4", + "futures 0.3.5", "libp2p-core", "log", "rand 0.7.3", @@ -2845,12 +2918,12 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.19.1" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "309f95fce9bec755eff5406f8b822fd3969990830c2b54f752e1fc181d5ace3e" +checksum = "9481500c5774c62e8c413e9535b3f33a0e3dbacf2da63b8d3056c686a9df4146" dependencies = [ "async-std", - "futures 0.3.4", + "futures 0.3.5", "futures-timer 3.0.2", "get_if_addrs", "ipnet", @@ -2865,7 +2938,7 @@ version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f59fdbb5706f2723ca108c088b1c7a37f735a8c328021f0508007162627e9885" dependencies = [ - "futures 0.3.4", + "futures 0.3.5", "js-sys", "libp2p-core", "parity-send-wrapper", @@ -2875,14 +2948,13 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "085fbe4c05c4116c2164ab4d5a521eb6e00516c444f61b3ee9f68c7b1e53580b" +checksum = "7e4440551bf6519e0a684cd859ea809aec6d798f686e0d6ed03a28c3e76849b8" dependencies = [ "async-tls", - "bytes 0.5.4", "either", - "futures 0.3.4", + "futures 0.3.5", "libp2p-core", "log", "quicksink", @@ -2896,11 +2968,11 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.19.0" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b305d3a8981e68f11c0e17f2d11d5c52fae95e0d7c283f9e462b5b2dab413b2" +checksum = "8da33e7b5f49c75c6a8afb0b8d1e229f5fa48be9f39bd14cdbc21459a02ac6fc" dependencies = [ - "futures 0.3.4", + "futures 0.3.5", "libp2p-core", "parking_lot 0.10.2", "thiserror", @@ -3197,7 +3269,7 @@ dependencies = [ "sha-1", "sha2", "sha3", - "unsigned-varint", + "unsigned-varint 0.3.3", ] [[package]] @@ -3208,16 
+3280,16 @@ checksum = "d8883adfde9756c1d30b0f519c9b8c502a94b41ac62f696453c37c7fc0a958ce" [[package]] name = "multistream-select" -version = "0.8.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74cdcf7cfb3402881e15a1f95116cb033d69b33c83d481e1234777f5ef0c3d2c" +checksum = "c9157e87afbc2ef0d84cc0345423d715f445edde00141c93721c162de35a05e5" dependencies = [ "bytes 0.5.4", - "futures 0.3.4", + "futures 0.3.5", "log", "pin-project", "smallvec 1.4.0", - "unsigned-varint", + "unsigned-varint 0.4.0", ] [[package]] @@ -3318,7 +3390,7 @@ dependencies = [ name = "node-browser-testing" version = "2.0.0-rc4" dependencies = [ - "futures 0.3.4", + "futures 0.3.5", "futures-timer 3.0.2", "jsonrpc-core", "libp2p", @@ -3339,7 +3411,7 @@ dependencies = [ "frame-benchmarking-cli", "frame-support", "frame-system", - "futures 0.3.4", + "futures 0.3.5", "hex-literal", "jsonrpc-core", "log", @@ -3581,7 +3653,7 @@ dependencies = [ name = "node-template" version = "2.0.0-rc4" dependencies = [ - "futures 0.3.4", + "futures 0.3.5", "log", "node-template-runtime", "parking_lot 0.10.2", @@ -3646,7 +3718,7 @@ dependencies = [ "frame-support", "frame-system", "fs_extra", - "futures 0.3.4", + "futures 0.3.5", "log", "node-executor", "node-primitives", @@ -4731,15 +4803,15 @@ dependencies = [ "percent-encoding 2.1.0", "serde", "static_assertions", - "unsigned-varint", + "unsigned-varint 0.3.3", "url 2.1.1", ] [[package]] name = "parity-multiaddr" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ca96399f4a01aa89c59220c4f52ac371940eb4e53e3ce990da796f364bdf69" +checksum = "cc20af3143a62c16e7c9e92ea5c6ae49f7d271d97d4d8fe73afc28f0514a3d0f" dependencies = [ "arrayref", "bs58", @@ -4749,7 +4821,7 @@ dependencies = [ "percent-encoding 2.1.0", "serde", "static_assertions", - "unsigned-varint", + "unsigned-varint 0.4.0", "url 2.1.1", ] @@ -4765,7 +4837,7 @@ dependencies = [ "sha-1", "sha2", "sha3", - "unsigned-varint", + "unsigned-varint 0.3.3", ] [[package]] @@ -4977,18 +5049,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "0.4.9" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f6a7f5eee6292c559c793430c55c00aea9d3b3d1905e855806ca4d7253426a2" +checksum = "12e3a6cdbfe94a5e4572812a0201f8c0ed98c1c452c7b8563ce2276988ef9c17" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "0.4.9" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8988430ce790d8682672117bc06dda364c0be32d3abd738234f19f3240bad99a" +checksum = "6a0ffd45cf79d88737d7cc85bfd5d2894bee1139b356e616fe85dc389c61aaf7" dependencies = [ "proc-macro2", "quote 1.0.6", @@ -5003,9 +5075,9 @@ checksum = "237844750cfbb86f67afe27eee600dfbbcb6188d734139b534cbfbf4f96792ae" [[package]] name = "pin-utils" -version = "0.1.0-alpha.4" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" @@ -5037,6 +5109,25 @@ dependencies = [ "web-sys", ] +[[package]] +name = "poly1305" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5829f50f48e9ddb79f3f7c3097029d0caee30f8286accb241416df603b080b8" +dependencies = [ + "universal-hash", +] + +[[package]] +name = "polyval" 
+version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ec3341498978de3bfd12d1b22f1af1de22818f5473a11e8a6ef997989e3a212" +dependencies = [ + "cfg-if", + "universal-hash", +] + [[package]] name = "ppv-lite86" version = "0.2.6" @@ -5799,7 +5890,7 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" dependencies = [ - "futures 0.3.4", + "futures 0.3.5", "pin-project", "static_assertions", ] @@ -5835,7 +5926,7 @@ dependencies = [ "bytes 0.5.4", "derive_more", "env_logger 0.7.1", - "futures 0.3.4", + "futures 0.3.5", "futures-timer 3.0.2", "libp2p", "log", @@ -5862,7 +5953,7 @@ dependencies = [ name = "sc-basic-authorship" version = "0.8.0-rc4" dependencies = [ - "futures 0.3.4", + "futures 0.3.5", "futures-timer 3.0.2", "log", "parity-scale-codec", @@ -5936,7 +6027,7 @@ dependencies = [ "derive_more", "env_logger 0.7.1", "fdlimit", - "futures 0.3.4", + "futures 0.3.5", "lazy_static", "log", "names", @@ -5972,7 +6063,7 @@ version = "2.0.0-rc4" dependencies = [ "derive_more", "fnv", - "futures 0.3.4", + "futures 0.3.5", "hash-db", "hex-literal", "kvdb", @@ -6053,7 +6144,7 @@ version = "0.8.0-rc4" dependencies = [ "derive_more", "env_logger 0.7.1", - "futures 0.3.4", + "futures 0.3.5", "futures-timer 3.0.2", "log", "parity-scale-codec", @@ -6092,7 +6183,7 @@ dependencies = [ "derive_more", "env_logger 0.7.1", "fork-tree", - "futures 0.3.4", + "futures 0.3.5", "futures-timer 3.0.2", "log", "merlin", @@ -6141,7 +6232,7 @@ name = "sc-consensus-babe-rpc" version = "0.8.0-rc4" dependencies = [ "derive_more", - "futures 0.3.4", + "futures 0.3.5", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", @@ -6183,7 +6274,7 @@ dependencies = [ "assert_matches", "derive_more", "env_logger 0.7.1", - "futures 0.3.4", + "futures 0.3.5", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", @@ -6211,7 +6302,7 @@ name = "sc-consensus-pow" version = "0.8.0-rc4" dependencies = [ "derive_more", - "futures 0.3.4", + "futures 0.3.5", "log", "parity-scale-codec", "sc-client-api", @@ -6231,7 +6322,7 @@ dependencies = [ name = "sc-consensus-slots" version = "0.8.0-rc4" dependencies = [ - "futures 0.3.4", + "futures 0.3.5", "futures-timer 3.0.2", "log", "parity-scale-codec", @@ -6360,7 +6451,7 @@ dependencies = [ "env_logger 0.7.1", "finality-grandpa", "fork-tree", - "futures 0.3.4", + "futures 0.3.5", "futures-timer 3.0.2", "log", "parity-scale-codec", @@ -6402,7 +6493,7 @@ version = "0.8.0-rc4" dependencies = [ "derive_more", "finality-grandpa", - "futures 0.3.4", + "futures 0.3.5", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", @@ -6418,7 +6509,7 @@ name = "sc-informant" version = "0.8.0-rc4" dependencies = [ "ansi_term 0.12.1", - "futures 0.3.4", + "futures 0.3.5", "log", "parity-util-mem", "parking_lot 0.10.2", @@ -6480,9 +6571,9 @@ dependencies = [ "erased-serde", "fnv", "fork-tree", - "futures 0.3.4", + "futures 0.3.5", "futures-timer 3.0.2", - "futures_codec", + "futures_codec 0.3.4", "hex", "ip_network", "libp2p", @@ -6519,7 +6610,7 @@ dependencies = [ "substrate-test-runtime-client", "tempfile", "thiserror", - "unsigned-varint", + "unsigned-varint 0.3.3", "void", "wasm-timer", "zeroize", @@ -6530,7 +6621,7 @@ name = "sc-network-gossip" version = "0.8.0-rc4" dependencies = [ "async-std", - "futures 0.3.4", + "futures 0.3.5", "futures-timer 3.0.2", "libp2p", "log", @@ -6548,7 +6639,7 @@ name = "sc-network-test" version = "0.8.0-rc4" dependencies = 
[ "env_logger 0.7.1", - "futures 0.3.4", + "futures 0.3.5", "futures-timer 3.0.2", "libp2p", "log", @@ -6576,7 +6667,7 @@ dependencies = [ "bytes 0.5.4", "env_logger 0.7.1", "fnv", - "futures 0.3.4", + "futures 0.3.5", "futures-timer 3.0.2", "hyper 0.13.4", "hyper-rustls", @@ -6606,7 +6697,7 @@ dependencies = [ name = "sc-peerset" version = "2.0.0-rc4" dependencies = [ - "futures 0.3.4", + "futures 0.3.5", "libp2p", "log", "rand 0.7.3", @@ -6629,7 +6720,7 @@ version = "2.0.0-rc4" dependencies = [ "assert_matches", "futures 0.1.29", - "futures 0.3.4", + "futures 0.3.5", "hash-db", "jsonrpc-core", "jsonrpc-pubsub", @@ -6667,7 +6758,7 @@ name = "sc-rpc-api" version = "0.8.0-rc4" dependencies = [ "derive_more", - "futures 0.3.4", + "futures 0.3.5", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", @@ -6721,7 +6812,7 @@ dependencies = [ "directories", "exit-future", "futures 0.1.29", - "futures 0.3.4", + "futures 0.3.5", "futures-timer 3.0.2", "hash-db", "jsonrpc-pubsub", @@ -6786,7 +6877,7 @@ dependencies = [ "env_logger 0.7.1", "fdlimit", "futures 0.1.29", - "futures 0.3.4", + "futures 0.3.5", "hex-literal", "log", "parity-scale-codec", @@ -6833,8 +6924,7 @@ dependencies = [ name = "sc-telemetry" version = "2.0.0-rc4" dependencies = [ - "bytes 0.5.4", - "futures 0.3.4", + "futures 0.3.5", "futures-timer 3.0.2", "libp2p", "log", @@ -6874,7 +6964,7 @@ dependencies = [ "assert_matches", "criterion 0.3.1", "derive_more", - "futures 0.3.4", + "futures 0.3.5", "linked-hash-map", "log", "parity-scale-codec", @@ -6896,7 +6986,7 @@ version = "2.0.0-rc4" dependencies = [ "assert_matches", "derive_more", - "futures 0.3.4", + "futures 0.3.5", "futures-diagnose", "hex", "intervalier", @@ -7101,12 +7191,6 @@ dependencies = [ "opaque-debug", ] -[[package]] -name = "sha1" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" - [[package]] name = "sha2" version = "0.8.1" @@ -7215,13 +7299,13 @@ checksum = "c7cb5678e1615754284ec264d9bb5b4c27d2018577fd90ac0ceb578591ed5ee4" [[package]] name = "snow" -version = "0.6.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afb767eee7d257ba202f0b9b08673bc13b22281632ef45267b19f13100accd2f" +checksum = "ce0f91be479494dd92e69d9971bd23ed27037dd1c94fcf558f6c6e74e6afa654" dependencies = [ - "arrayref", - "blake2-rfc", - "chacha20-poly1305-aead", + "aes-gcm", + "blake2", + "chacha20poly1305", "rand 0.7.3", "rand_core 0.5.1", "ring", @@ -7245,22 +7329,18 @@ dependencies = [ [[package]] name = "soketto" -version = "0.3.2" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c9dab3f95c9ebdf3a88268c19af668f637a3c5039c2c56ff2d40b1b2d64a25b" +checksum = "85457366ae0c6ce56bf05a958aef14cd38513c236568618edbcd9a8c52cb80b0" dependencies = [ - "base64 0.11.0", + "base64 0.12.0", "bytes 0.5.4", "flate2", - "futures 0.3.4", - "http 0.2.1", + "futures 0.3.5", "httparse", "log", "rand 0.7.3", - "sha1", - "smallvec 1.4.0", - "static_assertions", - "thiserror", + "sha-1", ] [[package]] @@ -7428,7 +7508,7 @@ name = "sp-consensus" version = "0.8.0-rc4" dependencies = [ "derive_more", - "futures 0.3.4", + "futures 0.3.5", "futures-timer 3.0.2", "libp2p", "log", @@ -7509,7 +7589,7 @@ dependencies = [ "criterion 0.2.11", "derive_more", "ed25519-dalek", - "futures 0.3.4", + "futures 0.3.5", "hash-db", "hash256-std-hasher", "hex", @@ -7612,7 +7692,7 @@ dependencies = [ name = "sp-io" version = 
"2.0.0-rc4" dependencies = [ - "futures 0.3.4", + "futures 0.3.5", "hash-db", "libsecp256k1", "log", @@ -7911,7 +7991,7 @@ name = "sp-transaction-pool" version = "2.0.0-rc4" dependencies = [ "derive_more", - "futures 0.3.4", + "futures 0.3.5", "log", "parity-scale-codec", "serde", @@ -7943,7 +8023,7 @@ dependencies = [ name = "sp-utils" version = "2.0.0-rc4" dependencies = [ - "futures 0.3.4", + "futures 0.3.5", "futures-core", "futures-timer 3.0.2", "lazy_static", @@ -8127,7 +8207,7 @@ dependencies = [ "console_error_panic_hook", "console_log", "futures 0.1.29", - "futures 0.3.4", + "futures 0.3.5", "futures-timer 3.0.2", "js-sys", "kvdb-web", @@ -8157,7 +8237,7 @@ version = "2.0.0-rc4" dependencies = [ "frame-support", "frame-system", - "futures 0.3.4", + "futures 0.3.5", "jsonrpc-client-transports", "jsonrpc-core", "parity-scale-codec", @@ -8173,7 +8253,7 @@ version = "2.0.0-rc4" dependencies = [ "env_logger 0.7.1", "frame-system-rpc-runtime-api", - "futures 0.3.4", + "futures 0.3.5", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", @@ -8209,7 +8289,7 @@ dependencies = [ name = "substrate-test-client" version = "2.0.0-rc4" dependencies = [ - "futures 0.3.4", + "futures 0.3.5", "hash-db", "parity-scale-codec", "sc-client-api", @@ -8273,7 +8353,7 @@ dependencies = [ name = "substrate-test-runtime-client" version = "2.0.0-rc4" dependencies = [ - "futures 0.3.4", + "futures 0.3.5", "parity-scale-codec", "sc-block-builder", "sc-client-api", @@ -8294,7 +8374,7 @@ name = "substrate-test-runtime-transaction-pool" version = "2.0.0-rc4" dependencies = [ "derive_more", - "futures 0.3.4", + "futures 0.3.5", "parity-scale-codec", "parking_lot 0.10.2", "sc-transaction-graph", @@ -9153,6 +9233,16 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" +[[package]] +name = "universal-hash" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df0c900f2f9b4116803415878ff48b63da9edb268668e08cf9292d7503114a01" +dependencies = [ + "generic-array", + "subtle 2.2.2", +] + [[package]] name = "unsigned-varint" version = "0.3.3" @@ -9162,7 +9252,17 @@ dependencies = [ "bytes 0.5.4", "futures-io", "futures-util", - "futures_codec", + "futures_codec 0.3.4", +] + +[[package]] +name = "unsigned-varint" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "669d776983b692a906c881fcd0cfb34271a48e197e4d6cb8df32b05bfc3d3fa5" +dependencies = [ + "bytes 0.5.4", + "futures_codec 0.4.1", ] [[package]] @@ -9396,7 +9496,7 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "324c5e65a08699c9c4334ba136597ab22b85dccd4b65dd1e36ccf8f723a95b54" dependencies = [ - "futures 0.3.4", + "futures 0.3.5", "js-sys", "parking_lot 0.9.0", "pin-utils", @@ -9636,11 +9736,11 @@ dependencies = [ [[package]] name = "yamux" -version = "0.4.5" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84300bb493cc878f3638b981c62b4632ec1a5c52daaa3036651e8c106d3b55ea" +checksum = "cd37e58a1256a0b328ce9c67d8b62ecdd02f4803ba443df478835cb1a41a637c" dependencies = [ - "futures 0.3.4", + "futures 0.3.5", "log", "nohash-hasher", "parking_lot 0.10.2", diff --git a/Cargo.toml b/Cargo.toml index d1c7339b99..ba146e55bc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -206,7 +206,7 @@ blake2 = { opt-level = 3 } blake2-rfc = { opt-level = 3 } blake2b_simd = { opt-level = 3 } 
blake2s_simd = { opt-level = 3 } -chacha20-poly1305-aead = { opt-level = 3 } +chacha20poly1305 = { opt-level = 3 } cranelift-codegen = { opt-level = 3 } cranelift-wasm = { opt-level = 3 } crc32fast = { opt-level = 3 } diff --git a/bin/node/browser-testing/Cargo.toml b/bin/node/browser-testing/Cargo.toml index d8710b0b4b..0fa2c4d51a 100644 --- a/bin/node/browser-testing/Cargo.toml +++ b/bin/node/browser-testing/Cargo.toml @@ -8,7 +8,7 @@ license = "Apache-2.0" [dependencies] futures-timer = "3.0.2" -libp2p = { version = "0.19.1", default-features = false } +libp2p = { version = "0.20.1", default-features = false } jsonrpc-core = "14.2.0" serde = "1.0.106" serde_json = "1.0.48" diff --git a/bin/utils/subkey/Cargo.toml b/bin/utils/subkey/Cargo.toml index 92fffe898f..5ade94275e 100644 --- a/bin/utils/subkey/Cargo.toml +++ b/bin/utils/subkey/Cargo.toml @@ -33,7 +33,7 @@ derive_more = { version = "0.99.2" } sc-rpc = { version = "2.0.0-rc4", path = "../../../client/rpc" } jsonrpc-core-client = { version = "14.2.0", features = ["http"] } hyper = "0.12.35" -libp2p = { version = "0.19.1", default-features = false } +libp2p = { version = "0.20.1", default-features = false } serde_json = "1.0" [features] diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index 84a37bd16c..a3ff17d9e0 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -21,7 +21,7 @@ codec = { package = "parity-scale-codec", default-features = false, version = "1 derive_more = "0.99.2" futures = "0.3.4" futures-timer = "3.0.1" -libp2p = { version = "0.19.1", default-features = false, features = ["kad"] } +libp2p = { version = "0.20.1", default-features = false, features = ["kad"] } log = "0.4.8" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc4"} prost = "0.6.1" diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index 51e15e24ce..aba5b49563 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.4" futures-timer = "3.0.1" -libp2p = { version = "0.19.1", default-features = false } +libp2p = { version = "0.20.1", default-features = false } log = "0.4.8" lru = "0.4.3" sc-network = { version = "0.8.0-rc4", path = "../network" } diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index f0ba362e48..495895c740 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -63,7 +63,7 @@ wasm-timer = "0.2" zeroize = "1.0.0" [dependencies.libp2p] -version = "0.19.1" +version = "0.20.1" default-features = false features = ["identify", "kad", "mdns", "mplex", "noise", "ping", "tcp-async-std", "websocket", "yamux"] @@ -71,7 +71,7 @@ features = ["identify", "kad", "mdns", "mplex", "noise", "ping", "tcp-async-std" async-std = "1.5" assert_matches = "1.3" env_logger = "0.7.0" -libp2p = { version = "0.19.1", default-features = false, features = ["secio"] } +libp2p = { version = "0.20.1", default-features = false, features = ["secio"] } quickcheck = "0.9.0" rand = "0.7.2" sp-keyring = { version = "2.0.0-rc4", path = "../../primitives/keyring" } diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index 73a5916947..c48722c0f7 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -601,7 +601,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { Ok(ok) => { let results = 
ok.records .into_iter() - .map(|r| (r.key, r.value)) + .map(|r| (r.record.key, r.record.value)) .collect(); DiscoveryOut::ValueFound(results) diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index 393887572c..6527d093bd 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -19,7 +19,7 @@ parking_lot = "0.10.0" futures = "0.3.4" futures-timer = "3.0.1" rand = "0.7.2" -libp2p = { version = "0.19.1", default-features = false } +libp2p = { version = "0.20.1", default-features = false } sp-consensus = { version = "0.8.0-rc4", path = "../../../primitives/consensus/common" } sc-consensus = { version = "0.8.0-rc4", path = "../../../client/consensus/common" } sc-client-api = { version = "2.0.0-rc4", path = "../../api" } diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index eb7f237548..bdec765eda 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.4" -libp2p = { version = "0.19.1", default-features = false } +libp2p = { version = "0.20.1", default-features = false } sp-utils = { version = "2.0.0-rc4", path = "../../primitives/utils"} log = "0.4.8" serde_json = "1.0.41" diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index 95c430dad7..8d4aecc468 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -14,12 +14,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -bytes = "0.5" parking_lot = "0.10.0" futures = "0.3.4" futures-timer = "3.0.1" wasm-timer = "0.2.0" -libp2p = { version = "0.19.1", default-features = false, features = ["dns", "tcp-async-std", "wasm-ext", "websocket"] } +libp2p = { version = "0.20.1", default-features = false, features = ["dns", "tcp-async-std", "wasm-ext", "websocket"] } log = "0.4.8" pin-project = "0.4.6" rand = "0.7.2" diff --git a/client/telemetry/src/worker.rs b/client/telemetry/src/worker.rs index 68d4c4e209..e01ac62d12 100644 --- a/client/telemetry/src/worker.rs +++ b/client/telemetry/src/worker.rs @@ -28,7 +28,6 @@ //! events indicating what happened since the latest polling. //! -use bytes::BytesMut; use futures::{prelude::*, ready}; use libp2p::{core::transport::OptionalTransport, Multiaddr, Transport, wasm_ext}; use log::{trace, warn, error}; @@ -61,8 +60,8 @@ impl, I> StreamAndSink for T {} type WsTrans = libp2p::core::transport::boxed::Boxed< Pin, + Vec, + Item = Result, io::Error>, Error = io::Error > + Send>>, io::Error @@ -92,12 +91,12 @@ impl TelemetryWorker { libp2p::websocket::framed::WsConfig::new(inner) .and_then(|connec, _| { let connec = connec - .with(|item: BytesMut| { + .with(|item| { let item = libp2p::websocket::framed::OutgoingData::Binary(item); future::ready(Ok::<_, io::Error>(item)) }) .try_filter(|item| future::ready(item.is_data())) - .map_ok(|data| BytesMut::from(data.as_ref())); + .map_ok(|data| data.into_bytes()); future::ready(Ok::<_, io::Error>(connec)) }) }); @@ -189,7 +188,7 @@ impl TelemetryWorker { /// For some context, we put this object around the `wasm_ext::ExtTransport` in order to make sure /// that each telemetry message maps to one single call to `write` in the WASM FFI. 
#[pin_project::pin_project] -struct StreamSink(#[pin] T, Option); +struct StreamSink(#[pin] T, Option>); impl From for StreamSink { fn from(inner: T) -> StreamSink { @@ -198,15 +197,15 @@ impl From for StreamSink { } impl Stream for StreamSink { - type Item = Result; + type Item = Result, io::Error>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { let this = self.project(); - let mut buf = [0; 128]; + let mut buf = vec![0; 128]; match ready!(AsyncRead::poll_read(this.0, cx, &mut buf)) { Ok(0) => Poll::Ready(None), Ok(n) => { - let buf: BytesMut = buf[..n].into(); + buf.truncate(n); Poll::Ready(Some(Ok(buf))) }, Err(err) => Poll::Ready(Some(Err(err))), @@ -232,7 +231,7 @@ impl StreamSink { } } -impl Sink for StreamSink { +impl Sink> for StreamSink { type Error = io::Error; fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { @@ -240,7 +239,7 @@ impl Sink for StreamSink { Poll::Ready(Ok(())) } - fn start_send(self: Pin<&mut Self>, item: BytesMut) -> Result<(), Self::Error> { + fn start_send(self: Pin<&mut Self>, item: Vec) -> Result<(), Self::Error> { let this = self.project(); debug_assert!(this.1.is_none()); *this.1 = Some(item); diff --git a/client/telemetry/src/worker/node.rs b/client/telemetry/src/worker/node.rs index 6b1a0f62b1..eef7ca7e81 100644 --- a/client/telemetry/src/worker/node.rs +++ b/client/telemetry/src/worker/node.rs @@ -18,7 +18,6 @@ //! Contains the `Node` struct, which handles communications with a single telemetry endpoint. -use bytes::BytesMut; use futures::prelude::*; use futures_timer::Delay; use libp2p::Multiaddr; @@ -57,7 +56,7 @@ struct NodeSocketConnected { /// Where to send data. sink: TTrans::Output, /// Queue of packets to send. - pending: VecDeque, + pending: VecDeque>, /// If true, we need to flush the sink. need_flush: bool, /// A timeout for the socket to write data. @@ -103,15 +102,15 @@ impl Node { impl Node where TTrans: Clone + Unpin, TTrans::Dial: Unpin, - TTrans::Output: Sink - + Stream> + TTrans::Output: Sink, Error = TSinkErr> + + Stream, TSinkErr>> + Unpin, TSinkErr: fmt::Debug { /// Sends a WebSocket frame to the node. Returns an error if we are not connected to the node. /// /// After calling this method, you should call `poll` in order for it to be properly processed. - pub fn send_message(&mut self, payload: impl Into) -> Result<(), ()> { + pub fn send_message(&mut self, payload: impl Into>) -> Result<(), ()> { if let NodeSocket::Connected(NodeSocketConnected { pending, .. }) = &mut self.socket { if pending.len() <= MAX_PENDING { trace!(target: "telemetry", "Adding log entry to queue for {:?}", self.addr); @@ -203,8 +202,8 @@ fn gen_rand_reconnect_delay() -> Delay { } impl NodeSocketConnected -where TTrans::Output: Sink - + Stream> +where TTrans::Output: Sink, Error = TSinkErr> + + Stream, TSinkErr>> + Unpin { /// Processes the queue of messages for the connected socket. 
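The `StreamSink` rework above boils down to one pattern: read into a scratch buffer, truncate to the number of bytes actually received, and hand the result on as an owned `Vec<u8>` instead of a `BytesMut`. A minimal, self-contained sketch of that pattern follows; the helper name `next_chunk` and the 128-byte buffer size are illustrative only and not part of the patch.

use futures::io::{AsyncRead, AsyncReadExt};
use std::io;

/// Pull one chunk from any `AsyncRead` and yield it as an owned `Vec<u8>`.
async fn next_chunk<R: AsyncRead + Unpin>(reader: &mut R) -> io::Result<Option<Vec<u8>>> {
    // Scratch buffer; 128 bytes mirrors the patch, any size works.
    let mut buf = vec![0u8; 128];
    let n = reader.read(&mut buf).await?;
    if n == 0 {
        // A read of zero bytes signals end of stream.
        return Ok(None);
    }
    // Keep only the bytes that were actually read.
    buf.truncate(n);
    Ok(Some(buf))
}
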
diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index eff425e440..39c47545c2 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" -libp2p = { version = "0.19.1", default-features = false } +libp2p = { version = "0.20.1", default-features = false } log = "0.4.8" sp-core = { path= "../../core", version = "2.0.0-rc4"} sp-inherents = { version = "2.0.0-rc4", path = "../../inherents" } -- GitLab From 9dd12f98c5f349e3d71b9419f8104f4e5414ddaf Mon Sep 17 00:00:00 2001 From: Thomas Scholtes Date: Tue, 30 Jun 2020 10:10:18 +0200 Subject: [PATCH 095/144] Remove unecessary &mut in call argument (#6540) --- primitives/api/proc-macro/src/impl_runtime_apis.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 4b5c1c4706..a4c35dcf42 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -83,7 +83,7 @@ fn generate_impl_call( let (#( #pnames ),*) : ( #( #ptypes ),* ) = match #c::DecodeLimit::decode_all_with_depth_limit( #c::MAX_EXTRINSIC_DEPTH, - &mut #input, + &#input, ) { Ok(res) => res, Err(e) => panic!("Bad input data provided to {}: {}", #fn_name_str, e.what()), -- GitLab From 0d0a84db85c71631c11fbf1bb4a996319654b9c3 Mon Sep 17 00:00:00 2001 From: Luke Schoen Date: Tue, 30 Jun 2020 10:31:32 +0200 Subject: [PATCH 096/144] feat: Allocate ss58 address format to DataHighway (#6530) --- primitives/core/src/crypto.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 9b84bd84ca..aa77345993 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -474,6 +474,8 @@ ss58_address_format!( (20, "stafi", "Stafi mainnet, standard account (*25519).") RobonomicsAccount => (32, "robonomics", "Any Robonomics network standard account (*25519).") + DataHighwayAccount => + (33, "datahighway", "DataHighway mainnet, standard account (*25519).") CentrifugeAccount => (36, "centrifuge", "Centrifuge Chain mainnet, standard account (*25519).") SubstrateAccount => -- GitLab From 4eaea348c9ea2568e486be475075d111971e85e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 30 Jun 2020 11:02:46 +0200 Subject: [PATCH 097/144] Fix tx-pool returning the same transaction multiple times (#6535) * Fix tx-pool returning the same transaction multiple times This fixes a bug that lead to returning the same transaction multiple times when iterating the `ready` iterator. Internally the transaction was kept in the `best` list and could be duplicated in that list be re-inserting it again. This `best` list is using a `TransactionRef` which internally uses a `insertion_id`. This `insertion_id` could lead to the same transaction being inserted multiple times into the `best` list. 
* Update client/transaction-pool/src/testing/pool.rs Co-authored-by: Nikolay Volf Co-authored-by: Nikolay Volf --- client/transaction-pool/graph/src/ready.rs | 24 ++++++++---------- client/transaction-pool/src/revalidation.rs | 8 +++--- client/transaction-pool/src/testing/pool.rs | 25 +++++++++++++++++++ test-utils/runtime/src/lib.rs | 14 +++++++++-- .../runtime/transaction-pool/src/lib.rs | 16 ++++++++---- 5 files changed, 63 insertions(+), 24 deletions(-) diff --git a/client/transaction-pool/graph/src/ready.rs b/client/transaction-pool/graph/src/ready.rs index 47289f26f0..b98512b05d 100644 --- a/client/transaction-pool/graph/src/ready.rs +++ b/client/transaction-pool/graph/src/ready.rs @@ -275,12 +275,7 @@ impl ReadyTransactions { ) -> Vec>> { let mut removed = vec![]; let mut ready = self.ready.write(); - loop { - let hash = match to_remove.pop() { - Some(hash) => hash, - None => return removed, - }; - + while let Some(hash) = to_remove.pop() { if let Some(mut tx) = ready.remove(&hash) { let invalidated = tx.transaction.transaction.provides .iter() @@ -319,6 +314,8 @@ impl ReadyTransactions { removed.push(tx.transaction.transaction); } } + + removed } /// Removes transactions that provide given tag. @@ -330,17 +327,16 @@ impl ReadyTransactions { let mut removed = vec![]; let mut to_remove = vec![tag]; - loop { - let tag = match to_remove.pop() { - Some(tag) => tag, - None => return removed, - }; - + while let Some(tag) = to_remove.pop() { let res = self.provided_tags.remove(&tag) - .and_then(|hash| self.ready.write().remove(&hash)); + .and_then(|hash| self.ready.write().remove(&hash)); if let Some(tx) = res { let unlocks = tx.unlocks; + + // Make sure we remove it from best txs + self.best.remove(&tx.transaction); + let tx = tx.transaction.transaction; // prune previous transactions as well @@ -403,6 +399,8 @@ impl ReadyTransactions { removed.push(tx); } } + + removed } /// Checks if the transaction is providing the same tags as other transactions. 
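The core of the fix above is the added `self.best.remove(&tx.transaction)` call: `best` is ordered by `TransactionRef`, whose ordering involves an `insertion_id`, so a transaction that is pruned and later re-imported compares as a distinct element and can be yielded twice when iterating `ready()`. The standalone sketch below uses a plain `BTreeSet` and a made-up `TxRef` type, not the pool's real structures, to illustrate why the stale entry has to be removed explicitly.

use std::collections::BTreeSet;

// Stand-in for the pool's `TransactionRef`: the derived ordering includes an
// insertion id, so the "same" transaction re-imported later is a new element.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
struct TxRef {
    insertion_id: u64,
    hash: &'static str,
}

fn main() {
    let mut best = BTreeSet::new();
    best.insert(TxRef { insertion_id: 1, hash: "0xab" });
    // The transaction is pruned but left in `best`, then re-imported with a
    // new insertion id; without an explicit removal the old entry lingers.
    best.insert(TxRef { insertion_id: 7, hash: "0xab" });
    assert_eq!(best.len(), 2); // "0xab" would now be returned twice.

    // What the patch does conceptually: drop the pruned entry from `best`
    // as well, so only the freshly imported one remains.
    best.remove(&TxRef { insertion_id: 1, hash: "0xab" });
    assert_eq!(best.len(), 1);
}
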
diff --git a/client/transaction-pool/src/revalidation.rs b/client/transaction-pool/src/revalidation.rs index cb49560662..af9a76c055 100644 --- a/client/transaction-pool/src/revalidation.rs +++ b/client/transaction-pool/src/revalidation.rs @@ -141,14 +141,14 @@ impl RevalidationWorker { // which they got into the pool while left > 0 { let first_block = match self.block_ordered.keys().next().cloned() { - Some(bn) => bn, - None => break, + Some(bn) => bn, + None => break, }; let mut block_drained = false; if let Some(extrinsics) = self.block_ordered.get_mut(&first_block) { let to_queue = extrinsics.iter().take(left).cloned().collect::>(); if to_queue.len() == extrinsics.len() { - block_drained = true; + block_drained = true; } else { for xt in &to_queue { extrinsics.remove(xt); @@ -159,7 +159,7 @@ impl RevalidationWorker { } if block_drained { - self.block_ordered.remove(&first_block); + self.block_ordered.remove(&first_block); } } diff --git a/client/transaction-pool/src/testing/pool.rs b/client/transaction-pool/src/testing/pool.rs index 61aba5efe3..5ad79a6f75 100644 --- a/client/transaction-pool/src/testing/pool.rs +++ b/client/transaction-pool/src/testing/pool.rs @@ -1066,3 +1066,28 @@ fn import_notification_to_pool_maintain_works() { block_on(pool.maintain(evt.into())); assert_eq!(pool.status().ready, 0); } + +// When we prune transactions, we need to make sure that we remove +#[test] +fn pruning_a_transaction_should_remove_it_from_best_transaction() { + let (pool, _guard, _notifier) = maintained_pool(); + + let xt1 = Extrinsic::IncludeData(Vec::new()); + + block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt1.clone())).expect("1. Imported"); + let header = pool.api.push_block(1, vec![xt1.clone()]); + + // This will prune `xt1`. + block_on(pool.maintain(block_event(header))); + + // Submit the tx again. + block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt1.clone())).expect("2. Imported"); + + let mut iterator = block_on(pool.ready_at(1)); + + assert_eq!(iterator.next().unwrap().data, xt1.clone()); + + // If the tx was not removed from the best txs, the tx would be + // returned a second time by the iterator. + assert!(iterator.next().is_none()); +} diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 1d376a0940..06054c1240 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -194,10 +194,20 @@ impl sp_runtime::traits::Dispatchable for Extrinsic { } impl Extrinsic { + /// Convert `&self` into `&Transfer`. + /// + /// Panics if this is no `Transfer` extrinsic. pub fn transfer(&self) -> &Transfer { + self.try_transfer().expect("cannot convert to transfer ref") + } + + /// Try to convert `&self` into `&Transfer`. + /// + /// Returns `None` if this is no `Transfer` extrinsic. + pub fn try_transfer(&self) -> Option<&Transfer> { match self { - Extrinsic::Transfer { ref transfer, .. } => transfer, - _ => panic!("cannot convert to transfer ref"), + Extrinsic::Transfer { ref transfer, .. 
} => Some(transfer), + _ => None, } } } diff --git a/test-utils/runtime/transaction-pool/src/lib.rs b/test-utils/runtime/transaction-pool/src/lib.rs index 5140cb8b92..17cecd394a 100644 --- a/test-utils/runtime/transaction-pool/src/lib.rs +++ b/test-utils/runtime/transaction-pool/src/lib.rs @@ -209,13 +209,19 @@ impl sc_transaction_graph::ChainApi for TestApi { ) -> Self::ValidationFuture { self.validation_requests.write().push(uxt.clone()); - let chain_nonce = self.chain.read().nonces.get(&uxt.transfer().from).cloned().unwrap_or(0); - let requires = if chain_nonce == uxt.transfer().nonce { - vec![] + let (requires, provides) = if let Some(transfer) = uxt.try_transfer() { + let chain_nonce = self.chain.read().nonces.get(&transfer.from).cloned().unwrap_or(0); + let requires = if chain_nonce == transfer.nonce { + vec![] + } else { + vec![vec![chain_nonce as u8]] + }; + let provides = vec![vec![transfer.nonce as u8]]; + + (requires, provides) } else { - vec![vec![chain_nonce as u8]] + (Vec::new(), vec![uxt.encode()]) }; - let provides = vec![vec![uxt.transfer().nonce as u8]]; if self.chain.read().invalid_hashes.contains(&self.hash_and_length(&uxt).0) { return futures::future::ready(Ok( -- GitLab From ec2ab7978a54e7f20af2f9ac12bde7719bb744fb Mon Sep 17 00:00:00 2001 From: Ashley Date: Tue, 30 Jun 2020 12:00:42 +0200 Subject: [PATCH 098/144] Remove the service, replacing it with a struct of individual chain components (#6352) * WIP * Making progress * Almost ready * Get service tests compiling * Fix node screenshot * Line widths * Fix node cli tests * Fix node cli warning * ChainComponents -> ServiceComponents, fix tests * make spawn_handle public * Remove spawnnamed impl for taskmanager * Move the keep alive stuff to the task manager * Move the telemetry, base path, rpc keep_alive to the service builder * Make the task manager keep alive an internal detail * Rewrite the browser start_client future * Remove run_node etc * Revert my personal changes to browser-demo/build.sh * use |config| * Add a runtime_version function to SubstrateCli * Reexport role and runtime version from sc cli * Update Cargo.lock * runtime_version -> native_runtime_version * Pass chain spec to native_runtime_version for polkadot * Fix line widths * Traitify ServiceComponents Client --- bin/node-template/node/src/command.rs | 15 +- bin/node-template/node/src/service.rs | 48 +-- bin/node/cli/src/browser.rs | 8 +- bin/node/cli/src/chain_spec.rs | 12 +- bin/node/cli/src/command.rs | 15 +- bin/node/cli/src/service.rs | 371 +++++++++++--------- bin/node/testing/src/bench.rs | 2 +- client/api/src/client.rs | 3 + client/cli/src/lib.rs | 7 +- client/cli/src/runner.rs | 104 +----- client/finality-grandpa/src/light_import.rs | 2 +- client/service/src/builder.rs | 169 ++++----- client/service/src/client/client.rs | 11 +- client/service/src/lib.rs | 324 ++++------------- client/service/src/task_manager.rs | 62 +++- client/service/test/src/client/light.rs | 8 +- client/service/test/src/lib.rs | 211 +++++++---- test-utils/client/src/client_ext.rs | 1 + utils/browser/src/lib.rs | 43 +-- 19 files changed, 640 insertions(+), 776 deletions(-) diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index 18e1b22a53..4f2fd3aad6 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -18,7 +18,7 @@ use crate::chain_spec; use crate::cli::Cli; use crate::service; -use sc_cli::SubstrateCli; +use sc_cli::{SubstrateCli, RuntimeVersion, Role, ChainSpec}; impl SubstrateCli 
for Cli { fn impl_name() -> &'static str { @@ -58,6 +58,10 @@ impl SubstrateCli for Cli { )?), }) } + + fn native_runtime_version(_: &Box) -> &'static RuntimeVersion { + &node_template_runtime::VERSION + } } /// Parse and run command line arguments @@ -71,11 +75,10 @@ pub fn run() -> sc_cli::Result<()> { } None => { let runner = cli.create_runner(&cli.run)?; - runner.run_node( - service::new_light, - service::new_full, - node_template_runtime::VERSION - ) + runner.run_node_until_exit(|config| match config.role { + Role::Light => service::new_light(config), + _ => service::new_full(config), + }) } } } diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index e330c17b24..89bf159927 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -5,7 +5,10 @@ use std::time::Duration; use sc_client_api::ExecutorProvider; use sc_consensus::LongestChain; use node_template_runtime::{self, opaque::Block, RuntimeApi}; -use sc_service::{error::{Error as ServiceError}, AbstractService, Configuration, ServiceBuilder}; +use sc_service::{ + error::{Error as ServiceError}, Configuration, ServiceBuilder, ServiceComponents, + TaskManager, +}; use sp_inherents::InherentDataProviders; use sc_executor::native_executor_instance; pub use sc_executor::NativeExecutor; @@ -93,7 +96,7 @@ macro_rules! new_full_start { } /// Builds a new service for a full client. -pub fn new_full(config: Configuration) -> Result { +pub fn new_full(config: Configuration) -> Result { let role = config.role.clone(); let force_authoring = config.force_authoring; let name = config.network.node_name.clone(); @@ -105,7 +108,10 @@ pub fn new_full(config: Configuration) -> Result>; @@ -115,13 +121,12 @@ pub fn new_full(config: Configuration) -> Result Result( sc_consensus_aura::slot_duration(&*client)?, - client, + client.clone(), select_chain, block_import, proposer, - service.network(), + network.clone(), inherent_data_providers.clone(), force_authoring, - service.keystore(), + keystore.clone(), can_author_with, )?; // the AURA authoring task is considered essential, i.e. if it // fails we take down the service with it. - service.spawn_essential_task_handle().spawn_blocking("aura", aura); + task_manager.spawn_essential_handle().spawn_blocking("aura", aura); } // if the node isn't actively participating in consensus then it doesn't // need a keystore, regardless of which protocol we use below. let keystore = if role.is_authority() { - Some(service.keystore() as sp_core::traits::BareCryptoStorePtr) + Some(keystore.clone() as sp_core::traits::BareCryptoStorePtr) } else { None }; @@ -174,33 +179,33 @@ pub fn new_full(config: Configuration) -> Result Result { +pub fn new_light(config: Configuration) -> Result { let inherent_data_providers = InherentDataProviders::new(); ServiceBuilder::new_light::(config)? 
@@ -265,4 +270,5 @@ pub fn new_light(config: Configuration) -> Result, log_level: String) -> Result ChainSpec { #[cfg(test)] pub(crate) mod tests { use super::*; - use crate::service::{new_full, new_light}; + use crate::service::{new_full_base, new_light_base}; use sc_service_test; use sp_runtime::BuildStorage; @@ -430,8 +430,14 @@ pub(crate) mod tests { fn test_connectivity() { sc_service_test::connectivity( integration_test_config_with_two_authorities(), - |config| new_full(config), - |config| new_light(config), + |config| { + let (keep_alive, _, client, network, transaction_pool) = new_full_base(config,|_, _| ())?; + Ok(sc_service_test::TestNetComponents::new(keep_alive, client, network, transaction_pool)) + }, + |config| { + let (keep_alive, _, client, network, transaction_pool) = new_light_base(config)?; + Ok(sc_service_test::TestNetComponents::new(keep_alive, client, network, transaction_pool)) + } ); } diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index bd5483f2cd..b07e0cdc90 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -19,7 +19,7 @@ use crate::{chain_spec, service, Cli, Subcommand}; use node_executor::Executor; use node_runtime::{Block, RuntimeApi}; -use sc_cli::{Result, SubstrateCli}; +use sc_cli::{Result, SubstrateCli, RuntimeVersion, Role, ChainSpec}; impl SubstrateCli for Cli { fn impl_name() -> &'static str { @@ -61,6 +61,10 @@ impl SubstrateCli for Cli { )?), }) } + + fn native_runtime_version(_: &Box) -> &'static RuntimeVersion { + &node_runtime::VERSION + } } /// Parse command line arguments into service configuration. @@ -70,11 +74,10 @@ pub fn run() -> Result<()> { match &cli.subcommand { None => { let runner = cli.create_runner(&cli.run)?; - runner.run_node( - service::new_light, - service::new_full, - node_runtime::VERSION - ) + runner.run_node_until_exit(|config| match config.role { + Role::Light => service::new_light(config), + _ => service::new_full(config), + }) } Some(Subcommand::Inspect(cmd)) => { let runner = cli.create_runner(cmd)?; diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 3279490363..9707e3d8ca 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -29,10 +29,16 @@ use node_executor; use node_primitives::Block; use node_runtime::RuntimeApi; use sc_service::{ - AbstractService, ServiceBuilder, config::Configuration, error::{Error as ServiceError}, + ServiceBuilder, config::{Role, Configuration}, error::{Error as ServiceError}, + RpcHandlers, ServiceComponents, TaskManager, }; use sp_inherents::InherentDataProviders; use sc_consensus::LongestChain; +use sc_network::{Event, NetworkService}; +use sp_runtime::traits::Block as BlockT; +use futures::prelude::*; +use sc_client_api::ExecutorProvider; +use sp_core::traits::BareCryptoStorePtr; /// Starts a `ServiceBuilder` for a full service. /// @@ -147,183 +153,197 @@ macro_rules! new_full_start { }} } -/// Creates a full service from the configuration. -/// -/// We need to use a macro because the test suit doesn't work with an opaque service. It expects -/// concrete types instead. -macro_rules! 
new_full { - ($config:expr, $with_startup_data: expr) => {{ - use futures::prelude::*; - use sc_network::Event; - use sc_client_api::ExecutorProvider; - use sp_core::traits::BareCryptoStorePtr; - - let ( - role, - force_authoring, - name, - disable_grandpa, - ) = ( - $config.role.clone(), - $config.force_authoring, - $config.network.node_name.clone(), - $config.disable_grandpa, - ); +type FullClient = sc_service::TFullClient; +type FullBackend = sc_service::TFullBackend; +type GrandpaBlockImport = grandpa::GrandpaBlockImport< + FullBackend, Block, FullClient, sc_consensus::LongestChain +>; +type BabeBlockImport = sc_consensus_babe::BabeBlockImport; - let (builder, mut import_setup, inherent_data_providers, mut rpc_setup) = - new_full_start!($config); +/// Creates a full service from the configuration. +pub fn new_full_base( + config: Configuration, + with_startup_data: impl FnOnce(&BabeBlockImport, &sc_consensus_babe::BabeLink) +) -> Result<( + TaskManager, + InherentDataProviders, + Arc, Arc::Hash>>, + Arc, Block>> +), ServiceError> { + let ( + role, + force_authoring, + name, + disable_grandpa, + ) = ( + config.role.clone(), + config.force_authoring, + config.network.node_name.clone(), + config.disable_grandpa, + ); + + let (builder, mut import_setup, inherent_data_providers, mut rpc_setup) = + new_full_start!(config); + + let ServiceComponents { + client, transaction_pool, task_manager, keystore, network, select_chain, + prometheus_registry, telemetry_on_connect_sinks, .. + } = builder + .with_finality_proof_provider(|client, backend| { + // GenesisAuthoritySetProvider is implemented for StorageAndProofProvider + let provider = client as Arc>; + Ok(Arc::new(grandpa::FinalityProofProvider::new(backend, provider)) as _) + })? + .build_full()?; - let service = builder - .with_finality_proof_provider(|client, backend| { - // GenesisAuthoritySetProvider is implemented for StorageAndProofProvider - let provider = client as Arc>; - Ok(Arc::new(grandpa::FinalityProofProvider::new(backend, provider)) as _) - })? - .build_full()?; + let (block_import, grandpa_link, babe_link) = import_setup.take() + .expect("Link Half and Block Import are present for Full Services or setup failed before. qed"); - let (block_import, grandpa_link, babe_link) = import_setup.take() - .expect("Link Half and Block Import are present for Full Services or setup failed before. qed"); + let shared_voter_state = rpc_setup.take() + .expect("The SharedVoterState is present for Full Services or setup failed before. qed"); - let shared_voter_state = rpc_setup.take() - .expect("The SharedVoterState is present for Full Services or setup failed before. qed"); + (with_startup_data)(&block_import, &babe_link); - ($with_startup_data)(&block_import, &babe_link); + if let sc_service::config::Role::Authority { .. } = &role { + let proposer = sc_basic_authorship::ProposerFactory::new( + client.clone(), + transaction_pool.clone(), + prometheus_registry.as_ref(), + ); - if let sc_service::config::Role::Authority { .. 
} = &role { - let proposer = sc_basic_authorship::ProposerFactory::new( - service.client(), - service.transaction_pool(), - service.prometheus_registry().as_ref(), - ); + let select_chain = select_chain + .ok_or(sc_service::Error::SelectChainRequired)?; - let client = service.client(); - let select_chain = service.select_chain() - .ok_or(sc_service::Error::SelectChainRequired)?; + let can_author_with = + sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); - let can_author_with = - sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); + let babe_config = sc_consensus_babe::BabeParams { + keystore: keystore.clone(), + client: client.clone(), + select_chain, + env: proposer, + block_import, + sync_oracle: network.clone(), + inherent_data_providers: inherent_data_providers.clone(), + force_authoring, + babe_link, + can_author_with, + }; - let babe_config = sc_consensus_babe::BabeParams { - keystore: service.keystore(), - client, - select_chain, - env: proposer, - block_import, - sync_oracle: service.network(), - inherent_data_providers: inherent_data_providers.clone(), - force_authoring, - babe_link, - can_author_with, - }; + let babe = sc_consensus_babe::start_babe(babe_config)?; + task_manager.spawn_essential_handle().spawn_blocking("babe-proposer", babe); + } - let babe = sc_consensus_babe::start_babe(babe_config)?; - service.spawn_essential_task_handle().spawn_blocking("babe-proposer", babe); - } - - // Spawn authority discovery module. - if matches!(role, sc_service::config::Role::Authority{..} | sc_service::config::Role::Sentry {..}) { - let (sentries, authority_discovery_role) = match role { - sc_service::config::Role::Authority { ref sentry_nodes } => ( - sentry_nodes.clone(), - sc_authority_discovery::Role::Authority ( - service.keystore(), - ), - ), - sc_service::config::Role::Sentry {..} => ( - vec![], - sc_authority_discovery::Role::Sentry, + // Spawn authority discovery module. + if matches!(role, Role::Authority{..} | Role::Sentry {..}) { + let (sentries, authority_discovery_role) = match role { + sc_service::config::Role::Authority { ref sentry_nodes } => ( + sentry_nodes.clone(), + sc_authority_discovery::Role::Authority ( + keystore.clone(), ), - _ => unreachable!("Due to outer matches! constraint; qed.") - }; + ), + sc_service::config::Role::Sentry {..} => ( + vec![], + sc_authority_discovery::Role::Sentry, + ), + _ => unreachable!("Due to outer matches! constraint; qed.") + }; - let network = service.network(); - let dht_event_stream = network.event_stream("authority-discovery").filter_map(|e| async move { match e { + let dht_event_stream = network.event_stream("authority-discovery") + .filter_map(|e| async move { match e { Event::Dht(e) => Some(e), _ => None, }}).boxed(); - let authority_discovery = sc_authority_discovery::AuthorityDiscovery::new( - service.client(), - network, - sentries, - dht_event_stream, - authority_discovery_role, - service.prometheus_registry(), - ); + let authority_discovery = sc_authority_discovery::AuthorityDiscovery::new( + client.clone(), + network.clone(), + sentries, + dht_event_stream, + authority_discovery_role, + prometheus_registry.clone(), + ); - service.spawn_task_handle().spawn("authority-discovery", authority_discovery); - } + task_manager.spawn_handle().spawn("authority-discovery", authority_discovery); + } - // if the node isn't actively participating in consensus then it doesn't - // need a keystore, regardless of which protocol we use below. 
- let keystore = if role.is_authority() { - Some(service.keystore() as BareCryptoStorePtr) - } else { - None - }; + // if the node isn't actively participating in consensus then it doesn't + // need a keystore, regardless of which protocol we use below. + let keystore = if role.is_authority() { + Some(keystore.clone() as BareCryptoStorePtr) + } else { + None + }; - let config = grandpa::Config { - // FIXME #1578 make this available through chainspec - gossip_duration: std::time::Duration::from_millis(333), - justification_period: 512, - name: Some(name), - observer_enabled: false, - keystore, - is_authority: role.is_network_authority(), - }; + let config = grandpa::Config { + // FIXME #1578 make this available through chainspec + gossip_duration: std::time::Duration::from_millis(333), + justification_period: 512, + name: Some(name), + observer_enabled: false, + keystore, + is_authority: role.is_network_authority(), + }; - let enable_grandpa = !disable_grandpa; - if enable_grandpa { - // start the full GRANDPA voter - // NOTE: non-authorities could run the GRANDPA observer protocol, but at - // this point the full voter should provide better guarantees of block - // and vote data availability than the observer. The observer has not - // been tested extensively yet and having most nodes in a network run it - // could lead to finality stalls. - let grandpa_config = grandpa::GrandpaParams { - config, - link: grandpa_link, - network: service.network(), - inherent_data_providers: inherent_data_providers.clone(), - telemetry_on_connect: Some(service.telemetry_on_connect_stream()), - voting_rule: grandpa::VotingRulesBuilder::default().build(), - prometheus_registry: service.prometheus_registry(), - shared_voter_state, - }; + let enable_grandpa = !disable_grandpa; + if enable_grandpa { + // start the full GRANDPA voter + // NOTE: non-authorities could run the GRANDPA observer protocol, but at + // this point the full voter should provide better guarantees of block + // and vote data availability than the observer. The observer has not + // been tested extensively yet and having most nodes in a network run it + // could lead to finality stalls. + let grandpa_config = grandpa::GrandpaParams { + config, + link: grandpa_link, + network: network.clone(), + inherent_data_providers: inherent_data_providers.clone(), + telemetry_on_connect: Some(telemetry_on_connect_sinks.on_connect_stream()), + voting_rule: grandpa::VotingRulesBuilder::default().build(), + prometheus_registry: prometheus_registry.clone(), + shared_voter_state, + }; - // the GRANDPA voter task is considered infallible, i.e. - // if it fails we take down the service with it. - service.spawn_essential_task_handle().spawn_blocking( - "grandpa-voter", - grandpa::run_grandpa_voter(grandpa_config)? - ); - } else { - grandpa::setup_disabled_grandpa( - service.client(), - &inherent_data_providers, - service.network(), - )?; - } + // the GRANDPA voter task is considered infallible, i.e. + // if it fails we take down the service with it. + task_manager.spawn_essential_handle().spawn_blocking( + "grandpa-voter", + grandpa::run_grandpa_voter(grandpa_config)? + ); + } else { + grandpa::setup_disabled_grandpa( + client.clone(), + &inherent_data_providers, + network.clone(), + )?; + } - Ok((service, inherent_data_providers)) - }}; - ($config:expr) => {{ - new_full!($config, |_, _| {}) - }} + Ok((task_manager, inherent_data_providers, client, network, transaction_pool)) } /// Builds a new service for a full client. 
pub fn new_full(config: Configuration) --> Result -{ - new_full!(config).map(|(service, _)| service) +-> Result { + new_full_base(config, |_, _| ()).map(|(task_manager, _, _, _, _)| { + task_manager + }) } -/// Builds a new service for a light client. -pub fn new_light(config: Configuration) --> Result { +type LightClient = sc_service::TLightClient; +type LightFetcher = sc_network::config::OnDemand; + +pub fn new_light_base(config: Configuration) -> Result<( + TaskManager, Arc, Arc, + Arc::Hash>>, + Arc, Block + >> +), ServiceError> { let inherent_data_providers = InherentDataProviders::new(); - let service = ServiceBuilder::new_light::(config)? + let ServiceComponents { + task_manager, rpc_handlers, client, network, transaction_pool, .. + } = ServiceBuilder::new_light::(config)? .with_select_chain(|_config, backend| { Ok(LongestChain::new(backend.clone())) })? @@ -406,16 +426,21 @@ pub fn new_light(config: Configuration) Ok(node_rpc::create_light(light_deps)) })? .build_light()?; + + Ok((task_manager, rpc_handlers, client, network, transaction_pool)) +} - Ok(service) +/// Builds a new service for a light client. +pub fn new_light(config: Configuration) -> Result { + new_light_base(config).map(|(task_manager, _, _, _, _)| { + task_manager + }) } #[cfg(test)] mod tests { use std::{sync::Arc, borrow::Cow, any::Any}; - use sc_consensus_babe::{ - CompatibleDigestItem, BabeIntermediate, INTERMEDIATE_KEY - }; + use sc_consensus_babe::{CompatibleDigestItem, BabeIntermediate, INTERMEDIATE_KEY}; use sc_consensus_epochs::descendent_query; use sp_consensus::{ Environment, Proposer, BlockImportParams, BlockOrigin, ForkChoiceStrategy, BlockImport, @@ -434,10 +459,11 @@ mod tests { use sp_timestamp; use sp_finality_tracker; use sp_keyring::AccountKeyring; - use sc_service::AbstractService; - use crate::service::{new_full, new_light}; + use sc_service_test::TestNetNode; + use crate::service::{new_full_base, new_light_base}; use sp_runtime::traits::IdentifyAccount; use sp_transaction_pool::{MaintainedTransactionPool, ChainEvent}; + use sc_client_api::BlockBackend; type AccountPublic = ::Signer; @@ -466,14 +492,25 @@ mod tests { chain_spec, |config| { let mut setup_handles = None; - new_full!(config, | - block_import: &sc_consensus_babe::BabeBlockImport, - babe_link: &sc_consensus_babe::BabeLink, - | { - setup_handles = Some((block_import.clone(), babe_link.clone())); - }).map(move |(node, x)| (node, (x, setup_handles.unwrap()))) + let (keep_alive, inherent_data_providers, client, network, transaction_pool) = + new_full_base(config, + | + block_import: &sc_consensus_babe::BabeBlockImport, + babe_link: &sc_consensus_babe::BabeLink, + | { + setup_handles = Some((block_import.clone(), babe_link.clone())); + } + )?; + + let node = sc_service_test::TestNetComponents::new( + keep_alive, client, network, transaction_pool + ); + Ok((node, (inherent_data_providers, setup_handles.unwrap()))) + }, + |config| { + let (keep_alive, _, client, network, transaction_pool) = new_light_base(config)?; + Ok(sc_service_test::TestNetComponents::new(keep_alive, client, network, transaction_pool)) }, - |config| new_light(config), |service, &mut (ref inherent_data_providers, (ref mut block_import, ref babe_link))| { let mut inherent_data = inherent_data_providers .create_inherent_data() @@ -620,8 +657,14 @@ mod tests { fn test_consensus() { sc_service_test::consensus( crate::chain_spec::tests::integration_test_config_with_two_authorities(), - |config| new_full(config), - |config| new_light(config), + |config| { + let (keep_alive, 
_, client, network, transaction_pool) = new_full_base(config, |_, _| ())?; + Ok(sc_service_test::TestNetComponents::new(keep_alive, client, network, transaction_pool)) + }, + |config| { + let (keep_alive, _, client, network, transaction_pool) = new_light_base(config)?; + Ok(sc_service_test::TestNetComponents::new(keep_alive, client, network, transaction_pool)) + }, vec![ "//Alice".into(), "//Bob".into(), diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index fc5daa80ad..5df2709f87 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -55,7 +55,7 @@ use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder; use sp_inherents::InherentData; use sc_client_api::{ - ExecutionStrategy, + ExecutionStrategy, BlockBackend, execution_extensions::{ExecutionExtensions, ExecutionStrategies}, }; use sp_core::{Pair, Public, sr25519, ed25519}; diff --git a/client/api/src/client.rs b/client/api/src/client.rs index 42dd5d53b1..35d40965e6 100644 --- a/client/api/src/client.rs +++ b/client/api/src/client.rs @@ -90,6 +90,9 @@ pub trait BlockBackend { /// Get block justification set by id. fn justification(&self, id: &BlockId) -> sp_blockchain::Result>; + + /// Get block hash by number. + fn block_hash(&self, number: NumberFor) -> sp_blockchain::Result>; } /// Provide a list of potential uncle headers for a given block. diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index 9623b08bfb..a702edba65 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -37,7 +37,9 @@ use log::info; pub use params::*; use regex::Regex; pub use runner::*; -use sc_service::{ChainSpec, Configuration, TaskExecutor}; +use sc_service::{Configuration, TaskExecutor}; +pub use sc_service::{ChainSpec, Role}; +pub use sp_version::RuntimeVersion; use std::io::Write; pub use structopt; use structopt::{ @@ -207,6 +209,9 @@ pub trait SubstrateCli: Sized { command.init::()?; Runner::new(self, command) } + + /// Native runtime version. + fn native_runtime_version(chain_spec: &Box) -> &'static RuntimeVersion; } /// Initialize the logger diff --git a/client/cli/src/runner.rs b/client/cli/src/runner.rs index 51ea2d2186..fcc869dc87 100644 --- a/client/cli/src/runner.rs +++ b/client/cli/src/runner.rs @@ -25,10 +25,9 @@ use futures::pin_mut; use futures::select; use futures::{future, future::FutureExt, Future}; use log::info; -use sc_service::{AbstractService, Configuration, Role, ServiceBuilderCommand, TaskType}; +use sc_service::{Configuration, ServiceBuilderCommand, TaskType, TaskManager}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use sp_utils::metrics::{TOKIO_THREADS_ALIVE, TOKIO_THREADS_TOTAL}; -use sp_version::RuntimeVersion; use std::{fmt::Debug, marker::PhantomData, str::FromStr}; #[cfg(target_family = "unix")] @@ -153,7 +152,7 @@ impl Runner { /// 2020-06-03 16:14:21 💾 Database: RocksDb at /tmp/c/chains/flamingfir7/db /// 2020-06-03 16:14:21 ⛓ Native runtime: node-251 (substrate-node-1.tx1.au10) /// ``` - pub fn print_node_infos(&self, runtime_version: RuntimeVersion) { + fn print_node_infos(&self) { info!("{}", C::impl_name()); info!("✌️ version {}", C::impl_version()); info!( @@ -169,64 +168,7 @@ impl Runner { self.config.database, self.config.database.path().map_or_else(|| "".to_owned(), |p| p.display().to_string()) ); - info!("⛓ Native runtime: {}", runtime_version); - } - - /// A helper function that runs an `AbstractService` with tokio and stops if the process - /// receives the signal `SIGTERM` or `SIGINT`. 
It can run a full or a light node depending on - /// the node's configuration. - pub fn run_node( - self, - new_light: impl FnOnce(Configuration) -> sc_service::error::Result, - new_full: impl FnOnce(Configuration) -> sc_service::error::Result, - runtime_version: RuntimeVersion, - ) -> Result<()> - where - SL: AbstractService + Unpin, - SF: AbstractService + Unpin, - { - match self.config.role { - Role::Light => self.run_light_node(new_light, runtime_version), - _ => self.run_full_node(new_full, runtime_version), - } - } - - /// A helper function that runs an `AbstractService` with tokio and stops if the process - /// receives the signal `SIGTERM` or `SIGINT`. It can only run a "full" node and will fail if - /// the node's configuration uses a "light" role. - pub fn run_full_node( - self, - new_full: impl FnOnce(Configuration) -> sc_service::error::Result, - runtime_version: RuntimeVersion, - ) -> Result<()> - where - S: AbstractService + Unpin, - { - if matches!(self.config.role, Role::Light) { - return Err("Light node has been requested but this is not implemented".into()); - } - - self.print_node_infos(runtime_version); - self.run_service_until_exit(new_full) - } - - /// A helper function that runs an `AbstractService` with tokio and stops if the process - /// receives the signal `SIGTERM` or `SIGINT`. It can only run a "light" node and will fail if - /// the node's configuration uses a "full" role. - pub fn run_light_node( - self, - new_light: impl FnOnce(Configuration) -> sc_service::error::Result, - runtime_version: RuntimeVersion, - ) -> Result<()> - where - S: AbstractService + Unpin, - { - if !matches!(self.config.role, Role::Light) { - return Err("Full node has been requested but this is not implemented".into()); - } - - self.print_node_infos(runtime_version); - self.run_service_until_exit(new_light) + info!("⛓ Native runtime: {}", C::native_runtime_version(&self.config.chain_spec)); } /// A helper function that runs a future with tokio and stops if the process receives the signal @@ -257,34 +199,18 @@ impl Runner { } } - fn run_service_until_exit(mut self, service_builder: F) -> Result<()> - where - F: FnOnce(Configuration) -> std::result::Result, - T: AbstractService + Unpin, - { - let service = service_builder(self.config)?; - - // we eagerly drop the service so that the internal exit future is fired, - // but we need to keep holding a reference to the global telemetry guard - // and drop the runtime first. - let _telemetry = service.telemetry(); - - // we hold a reference to the base path so if the base path is a temporary directory it will - // not be deleted before the tokio runtime finish to clean up - let _base_path = service.base_path(); - - { - let f = service.fuse(); - self.tokio_runtime - .block_on(main(f)) - .map_err(|e| e.to_string())?; - } - - // The `service` **must** have been destroyed here for the shutdown signal to propagate - // to all the tasks. Dropping `tokio_runtime` will block the thread until all tasks have - // shut down. - drop(self.tokio_runtime); - + /// A helper function that runs a node with tokio and stops if the process receives the signal + /// `SIGTERM` or `SIGINT`. 
+ pub fn run_node_until_exit( + mut self, + initialise: impl FnOnce(Configuration) -> sc_service::error::Result, + ) -> Result<()> { + self.print_node_infos(); + let mut task_manager = initialise(self.config)?; + self.tokio_runtime.block_on(main(task_manager.future().fuse())) + .map_err(|e| e.to_string())?; + task_manager.terminate(); + drop(task_manager); Ok(()) } diff --git a/client/finality-grandpa/src/light_import.rs b/client/finality-grandpa/src/light_import.rs index b63c6f0bd7..a7c9a65546 100644 --- a/client/finality-grandpa/src/light_import.rs +++ b/client/finality-grandpa/src/light_import.rs @@ -573,7 +573,7 @@ pub mod tests { use sp_consensus::{import_queue::CacheKeyId, ForkChoiceStrategy, BlockImport}; use sp_finality_grandpa::AuthorityId; use sp_core::{H256, crypto::Public}; - use sc_client_api::{in_mem::Blockchain as InMemoryAuxStore, StorageProof}; + use sc_client_api::{in_mem::Blockchain as InMemoryAuxStore, StorageProof, BlockBackend}; use substrate_test_runtime_client::runtime::{Block, Header}; use crate::tests::TestApi; use crate::finality_proof::{ diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 1fbf301f5b..8c96f514dd 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -17,21 +17,20 @@ // along with this program. If not, see . use crate::{ - Service, NetworkStatus, NetworkState, error::Error, DEFAULT_PROTOCOL_ID, MallocSizeOfWasm, + NetworkStatus, NetworkState, error::Error, DEFAULT_PROTOCOL_ID, MallocSizeOfWasm, start_rpc_servers, build_network_future, TransactionPoolAdapter, TaskManager, SpawnTaskHandle, status_sinks, metrics::MetricsService, client::{light, Client, ClientConfig}, config::{Configuration, KeystoreConfig, PrometheusConfig, OffchainWorkerConfig}, }; use sc_client_api::{ - self, light::RemoteBlockchain, execution_extensions::ExtensionsFactory, - ExecutorProvider, CallExecutor, ForkBlocks, BadBlocks, CloneableSpawn, UsageProvider, - backend::RemoteBackend, + self, light::RemoteBlockchain, execution_extensions::ExtensionsFactory, ExecutorProvider, + ForkBlocks, BadBlocks, CloneableSpawn, UsageProvider, backend::RemoteBackend, }; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; use sc_chain_spec::get_extension; use sp_consensus::{ - block_validation::{BlockAnnounceValidator, DefaultBlockAnnounceValidator}, + block_validation::{BlockAnnounceValidator, DefaultBlockAnnounceValidator, Chain}, import_queue::ImportQueue, }; use futures::{ @@ -46,9 +45,9 @@ use sc_network::NetworkService; use parking_lot::{Mutex, RwLock}; use sp_runtime::generic::BlockId; use sp_runtime::traits::{ - Block as BlockT, NumberFor, SaturatedConversion, HashFor, Zero, + Block as BlockT, NumberFor, SaturatedConversion, HashFor, Zero, BlockIdTo, }; -use sp_api::ProvideRuntimeApi; +use sp_api::{ProvideRuntimeApi, CallApiAt}; use sc_executor::{NativeExecutor, NativeExecutionDispatch, RuntimeInfo}; use std::{ collections::HashMap, @@ -62,8 +61,15 @@ use prometheus_endpoint::Registry; use sc_client_db::{Backend, DatabaseSettings}; use sp_core::traits::CodeExecutor; use sp_runtime::BuildStorage; -use sc_client_api::execution_extensions::ExecutionExtensions; +use sc_client_api::{ + BlockBackend, BlockchainEvents, + backend::StorageProvider, + proof_provider::ProofProvider, + execution_extensions::ExecutionExtensions +}; use sp_core::storage::Storage; +use sp_blockchain::{HeaderMetadata, HeaderBackend}; +use crate::{ServiceComponents, TelemetryOnConnectSinks, RpcHandlers, NetworkStatusSinks}; pub 
type BackgroundTask = Pin + Send>>; @@ -878,11 +884,11 @@ pub trait ServiceBuilderCommand { ) -> Result; } -impl +impl ServiceBuilder< TBl, TRtApi, - Client, + TCl, Arc>, TSc, TImpQu, @@ -892,8 +898,12 @@ ServiceBuilder< TRpc, TBackend, > where - Client: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: + TCl: ProvideRuntimeApi + HeaderMetadata + Chain + + BlockBackend + BlockIdTo + ProofProvider + + HeaderBackend + BlockchainEvents + ExecutorProvider + UsageProvider + + StorageProvider + CallApiAt + + Send + 'static, + >::Api: sp_api::Metadata + sc_offchain::OffchainWorkerApi + sp_transaction_pool::runtime_api::TaggedTransactionQueue + @@ -903,7 +913,6 @@ ServiceBuilder< TBl: BlockT, TRtApi: 'static + Send + Sync, TBackend: 'static + sc_client_api::backend::Backend + Send, - TExec: 'static + CallExecutor + Send + Sync + Clone, TSc: Clone, TImpQu: 'static + ImportQueue, TExPool: MaintainedTransactionPool::Hash> + MallocSizeOfWasm + 'static, @@ -916,26 +925,12 @@ ServiceBuilder< Ok(self) } - fn build_common(self) -> Result, - TSc, - NetworkStatus, - NetworkService::Hash>, - TExPool, - sc_offchain::OffchainWorkers< - Client, - TBackend::OffchainStorage, - TBl - >, - >, Error> - where TExec: CallExecutor, - { + fn build_common(self) -> Result, Error> { let ServiceBuilder { marker: _, mut config, client, - task_manager, + mut task_manager, fetcher: on_demand, backend, keystore, @@ -949,17 +944,14 @@ ServiceBuilder< block_announce_validator_builder, } = self; + let chain_info = client.usage_info().chain; + sp_session::generate_initial_session_keys( client.clone(), - &BlockId::Hash(client.chain_info().best_hash), + &BlockId::Hash(chain_info.best_hash), config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default(), )?; - // A side-channel for essential tasks to communicate shutdown. - let (essential_failed_tx, essential_failed_rx) = tracing_unbounded("mpsc_essential_tasks"); - - let chain_info = client.chain_info(); - info!("📦 Highest known block at #{}", chain_info.best_number); telemetry!( SUBSTRATE_INFO; @@ -968,15 +960,16 @@ ServiceBuilder< "best" => ?chain_info.best_hash ); - let spawn_handle = task_manager.spawn_handle(); let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc"); let (network, network_status_sinks, network_future) = build_network( - &config, client.clone(), transaction_pool.clone(), Clone::clone(&spawn_handle), on_demand.clone(), - block_announce_validator_builder, finality_proof_request_builder, finality_proof_provider, - system_rpc_rx, import_queue + &config, client.clone(), transaction_pool.clone(), task_manager.spawn_handle(), + on_demand.clone(), block_announce_validator_builder, finality_proof_request_builder, + finality_proof_provider, system_rpc_rx, import_queue )?; + let spawn_handle = task_manager.spawn_handle(); + // The network worker is responsible for gathering all network messages and processing // them. 
This is quite a heavy task, and at the time of the writing of this comment it // frequently happens that this future takes several seconds or in some situations @@ -1064,7 +1057,7 @@ ServiceBuilder< ); let rpc = start_rpc_servers(&config, gen_handler)?; // This is used internally, so don't restrict access to unsafe RPC - let rpc_handlers = gen_handler(sc_rpc::DenyUnsafe::No); + let rpc_handlers = Arc::new(RpcHandlers(gen_handler(sc_rpc::DenyUnsafe::No))); let telemetry_connection_sinks: Arc>>> = Default::default(); @@ -1110,52 +1103,34 @@ ServiceBuilder< config.informant_output_format, )); - Ok(Service { + task_manager.keep_alive((telemetry, config.base_path, rpc, rpc_handlers.clone())); + + Ok(ServiceComponents { client, task_manager, network, - network_status_sinks, select_chain, transaction_pool, - essential_failed_tx, - essential_failed_rx, rpc_handlers, - _rpc: rpc, - _telemetry: telemetry, - _offchain_workers: offchain_workers, - _telemetry_on_connect_sinks: telemetry_connection_sinks.clone(), keystore, - marker: PhantomData::, + offchain_workers, + telemetry_on_connect_sinks: TelemetryOnConnectSinks(telemetry_connection_sinks), + network_status_sinks: NetworkStatusSinks::new(network_status_sinks), prometheus_registry: config.prometheus_config.map(|config| config.registry), - _base_path: config.base_path.map(Arc::new), }) } /// Builds the light service. - pub fn build_light(self) -> Result, - TSc, - NetworkStatus, - NetworkService::Hash>, - TExPool, - sc_offchain::OffchainWorkers< - Client, - TBackend::OffchainStorage, - TBl - >, - >, Error> - where TExec: CallExecutor, - { + pub fn build_light(self) -> Result, Error> { self.build_common() } } -impl +impl ServiceBuilder< TBl, TRtApi, - Client, + TCl, Arc>, TSc, TImpQu, @@ -1165,8 +1140,12 @@ ServiceBuilder< TRpc, TBackend, > where - Client: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: + TCl: ProvideRuntimeApi + HeaderMetadata + Chain + + BlockBackend + BlockIdTo + ProofProvider + + HeaderBackend + BlockchainEvents + ExecutorProvider + UsageProvider + + StorageProvider + CallApiAt + + Send + 'static, + >::Api: sp_api::Metadata + sc_offchain::OffchainWorkerApi + sp_transaction_pool::runtime_api::TaggedTransactionQueue + @@ -1176,7 +1155,6 @@ ServiceBuilder< TBl: BlockT, TRtApi: 'static + Send + Sync, TBackend: 'static + sc_client_api::backend::Backend + Send, - TExec: 'static + CallExecutor + Send + Sync + Clone, TSc: Clone, TImpQu: 'static + ImportQueue, TExPool: MaintainedTransactionPool::Hash> + @@ -1187,21 +1165,7 @@ ServiceBuilder< { /// Builds the full service. - pub fn build_full(self) -> Result, - TSc, - NetworkStatus, - NetworkService::Hash>, - TExPool, - sc_offchain::OffchainWorkers< - Client, - TBackend::OffchainStorage, - TBl - >, - >, Error> - where TExec: CallExecutor, - { + pub fn build_full(self) -> Result, Error> { // make transaction pool available for off-chain runtime calls. self.client.execution_extensions() .register_transaction_pool(Arc::downgrade(&self.transaction_pool) as _); @@ -1233,18 +1197,16 @@ async fn transaction_notifications( } // Periodically notify the telemetry. 
-async fn telemetry_periodic_send( - client: Arc>, +async fn telemetry_periodic_send( + client: Arc, transaction_pool: Arc, mut metrics_service: MetricsService, network_status_sinks: Arc, NetworkState)>>> ) where TBl: BlockT, - TExec: CallExecutor, - Client: ProvideRuntimeApi, + TCl: ProvideRuntimeApi + UsageProvider, TExPool: MaintainedTransactionPool::Hash>, - TBackend: sc_client_api::backend::Backend, { let (state_tx, state_rx) = tracing_unbounded::<(NetworkStatus<_>, NetworkState)>("mpsc_netstat1"); network_status_sinks.lock().push(std::time::Duration::from_millis(5000), state_tx); @@ -1322,11 +1284,11 @@ fn build_telemetry( (telemetry, future) } -fn gen_handler( +fn gen_handler( deny_unsafe: sc_rpc::DenyUnsafe, config: &Configuration, task_manager: &TaskManager, - client: Arc>, + client: Arc, transaction_pool: Arc, keystore: Arc>, on_demand: Option>>, @@ -1337,13 +1299,14 @@ fn gen_handler( ) -> jsonrpc_pubsub::PubSubHandler where TBl: BlockT, - TExec: CallExecutor + Send + Sync + 'static, - TRtApi: Send + Sync + 'static, - Client: ProvideRuntimeApi, + TCl: ProvideRuntimeApi + BlockchainEvents + HeaderBackend + + HeaderMetadata + ExecutorProvider + + CallApiAt + ProofProvider + + StorageProvider + BlockBackend + Send + Sync + 'static, TExPool: MaintainedTransactionPool::Hash> + 'static, TBackend: sc_client_api::backend::Backend + 'static, TRpc: sc_rpc::RpcExtension, - as ProvideRuntimeApi>::Api: + >::Api: sp_session::SessionKeys + sp_api::Metadata, { @@ -1412,15 +1375,14 @@ fn gen_handler( )) } -fn build_network( +fn build_network( config: &Configuration, - client: Arc>, + client: Arc, transaction_pool: Arc, spawn_handle: SpawnTaskHandle, on_demand: Option>>, block_announce_validator_builder: Option>) -> - Box + Send> + Send + dyn FnOnce(Arc) -> Box + Send> + Send >>, finality_proof_request_builder: Option>, finality_proof_provider: Option>>, @@ -1436,11 +1398,10 @@ fn build_network( > where TBl: BlockT, - TExec: CallExecutor + Send + Sync + 'static, - TRtApi: Send + Sync + 'static, - Client: ProvideRuntimeApi, + TCl: ProvideRuntimeApi + HeaderMetadata + Chain + + BlockBackend + BlockIdTo + ProofProvider + + HeaderBackend + BlockchainEvents + 'static, TExPool: MaintainedTransactionPool::Hash> + 'static, - TBackend: sc_client_api::backend::Backend + 'static, TImpQu: ImportQueue + 'static, { let transaction_pool_adapter = Arc::new(TransactionPoolAdapter { diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 922f34b656..2f101465d5 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -353,13 +353,6 @@ impl Client where self.executor.runtime_version(id) } - /// Get block hash by number. - pub fn block_hash(&self, - block_number: <::Header as HeaderT>::Number - ) -> sp_blockchain::Result> { - self.backend.blockchain().hash(block_number) - } - /// Reads given header and generates CHT-based header proof for CHT of given size. 
pub fn header_proof_with_cht_size( &self, @@ -1925,6 +1918,10 @@ impl BlockBackend for Client fn justification(&self, id: &BlockId) -> sp_blockchain::Result> { self.backend.blockchain().justification(*id) } + + fn block_hash(&self, number: NumberFor) -> sp_blockchain::Result> { + self.backend.blockchain().hash(number) + } } impl backend::AuxStore for Client diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 036c957773..c3c8f60e68 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -36,22 +36,15 @@ mod client; mod task_manager; use std::{io, pin::Pin}; -use std::marker::PhantomData; use std::net::SocketAddr; use std::collections::HashMap; use std::time::Duration; use wasm_timer::Instant; -use std::task::{Poll, Context}; +use std::task::Poll; use parking_lot::Mutex; -use client::Client; -use futures::{ - Future, FutureExt, Stream, StreamExt, - compat::*, - sink::SinkExt, - task::{Spawn, FutureObj, SpawnError}, -}; -use sc_network::{NetworkService, NetworkStatus, network_state::NetworkState, PeerId}; +use futures::{Future, FutureExt, Stream, StreamExt, compat::*}; +use sc_network::{NetworkStatus, network_state::NetworkState, PeerId}; use log::{log, warn, debug, error, Level}; use codec::{Encode, Decode}; use sp_runtime::generic::BlockId; @@ -84,14 +77,9 @@ pub use sc_network::config::{ TransactionImportFuture, }; pub use sc_tracing::TracingReceiver; -pub use task_manager::{SpawnEssentialTaskHandle, SpawnTaskHandle}; -use task_manager::TaskManager; -use sp_blockchain::{HeaderBackend, HeaderMetadata}; -use sp_api::{ApiExt, ConstructRuntimeApi, ApiErrorExt}; -use sc_client_api::{ - Backend as BackendT, BlockchainEvents, CallExecutor, UsageProvider, -}; -use sp_block_builder::BlockBuilder; +pub use task_manager::SpawnTaskHandle; +pub use task_manager::TaskManager; +use sc_client_api::{Backend, BlockchainEvents}; const DEFAULT_PROTOCOL_ID: &str = "sup"; @@ -105,88 +93,10 @@ impl MallocSizeOfWasm for T {} #[cfg(target_os = "unknown")] impl MallocSizeOfWasm for T {} -/// Substrate service. -pub struct Service { - client: Arc, - task_manager: TaskManager, - select_chain: Option, - network: Arc, - // Sinks to propagate network status updates. - // For each element, every time the `Interval` fires we push an element on the sender. - network_status_sinks: Arc>>, - transaction_pool: Arc, - // Send a signal when a spawned essential task has concluded. The next time - // the service future is polled it should complete with an error. - essential_failed_tx: TracingUnboundedSender<()>, - // A receiver for spawned essential-tasks concluding. - essential_failed_rx: TracingUnboundedReceiver<()>, - rpc_handlers: sc_rpc_server::RpcHandler, - _rpc: Box, - _telemetry: Option, - _telemetry_on_connect_sinks: Arc>>>, - _offchain_workers: Option>, - keystore: sc_keystore::KeyStorePtr, - marker: PhantomData, - prometheus_registry: Option, - // The base path is kept here because it can be a temporary directory which will be deleted - // when dropped - _base_path: Option>, -} - -impl Unpin for Service {} - -/// Abstraction over a Substrate service. -pub trait AbstractService: Future> + Send + Unpin + Spawn + 'static { - /// Type of block of this chain. - type Block: BlockT; - /// Backend storage for the client. - type Backend: 'static + BackendT; - /// How to execute calls towards the runtime. - type CallExecutor: 'static + CallExecutor + Send + Sync + Clone; - /// API that the runtime provides. - type RuntimeApi: Send + Sync; - /// Chain selection algorithm. 
- type SelectChain: sp_consensus::SelectChain; - /// Transaction pool. - type TransactionPool: TransactionPool + MallocSizeOfWasm; - /// The generic Client type, the bounds here are the ones specifically required by - /// internal crates like sc_informant. - type Client: - HeaderMetadata + UsageProvider - + BlockchainEvents + HeaderBackend + Send + Sync; - - /// Get event stream for telemetry connection established events. - fn telemetry_on_connect_stream(&self) -> TracingUnboundedReceiver<()>; - - /// return a shared instance of Telemetry (if enabled) - fn telemetry(&self) -> Option; - - /// Spawns a task in the background that runs the future passed as parameter. - /// - /// Information about this task will be reported to Prometheus. - /// - /// The task name is a `&'static str` as opposed to a `String`. The reason for that is that - /// in order to avoid memory consumption issues with the Prometheus metrics, the set of - /// possible task names has to be bounded. - #[deprecated(note = "Use `spawn_task_handle().spawn() instead.")] - fn spawn_task(&self, name: &'static str, task: impl Future + Send + 'static); - - /// Spawns a task in the background that runs the future passed as - /// parameter. The given task is considered essential, i.e. if it errors we - /// trigger a service exit. - #[deprecated(note = "Use `spawn_essential_task_handle().spawn() instead.")] - fn spawn_essential_task(&self, name: &'static str, task: impl Future + Send + 'static); - - /// Returns a handle for spawning essential tasks. Any task spawned through this handle is - /// considered essential, i.e. if it errors we trigger a service exit. - fn spawn_essential_task_handle(&self) -> SpawnEssentialTaskHandle; - - /// Returns a handle for spawning tasks. - fn spawn_task_handle(&self) -> SpawnTaskHandle; - - /// Returns the keystore that stores keys. - fn keystore(&self) -> sc_keystore::KeyStorePtr; +/// RPC handlers that can perform RPC queries. +pub struct RpcHandlers(sc_rpc_server::RpcHandler); +impl RpcHandlers { /// Starts an RPC query. /// /// The query is passed as a string and must be a JSON text similar to what an HTTP client @@ -196,178 +106,76 @@ pub trait AbstractService: Future> + Send + Unpin + S /// /// If the request subscribes you to events, the `Sender` in the `RpcSession` object is used to /// send back spontaneous events. - fn rpc_query(&self, mem: &RpcSession, request: &str) -> Pin> + Send>>; - - /// Get shared client instance. - fn client(&self) -> Arc; - - /// Get clone of select chain. - fn select_chain(&self) -> Option; - - /// Get shared network instance. - fn network(&self) - -> Arc::Hash>>; - - /// Returns a receiver that periodically receives a status of the network. - fn network_status(&self, interval: Duration) -> TracingUnboundedReceiver<(NetworkStatus, NetworkState)>; - - /// Get shared transaction pool instance. - fn transaction_pool(&self) -> Arc; - - /// Get a handle to a future that will resolve on exit. - #[deprecated(note = "Use `spawn_task`/`spawn_essential_task` instead, those functions will attach on_exit signal.")] - fn on_exit(&self) -> ::exit_future::Exit; - - /// Get the prometheus metrics registry, if available. 
- fn prometheus_registry(&self) -> Option; - - /// Get a clone of the base_path - fn base_path(&self) -> Option>; -} - -impl AbstractService for - Service, TSc, NetworkStatus, - NetworkService, TExPool, TOc> -where - TBl: BlockT, - TBackend: 'static + BackendT, - TExec: 'static + CallExecutor + Send + Sync + Clone, - TRtApi: 'static + Send + Sync + ConstructRuntimeApi>, - >>::RuntimeApi: - sp_api::Core - + ApiExt - + ApiErrorExt - + BlockBuilder, - TSc: sp_consensus::SelectChain + 'static + Clone + Send + Unpin, - TExPool: 'static + TransactionPool + MallocSizeOfWasm, - TOc: 'static + Send + Sync, -{ - type Block = TBl; - type Backend = TBackend; - type CallExecutor = TExec; - type RuntimeApi = TRtApi; - type SelectChain = TSc; - type TransactionPool = TExPool; - type Client = Client; - - fn telemetry_on_connect_stream(&self) -> TracingUnboundedReceiver<()> { - let (sink, stream) = tracing_unbounded("mpsc_telemetry_on_connect"); - self._telemetry_on_connect_sinks.lock().push(sink); - stream - } - - fn telemetry(&self) -> Option { - self._telemetry.clone() - } - - fn keystore(&self) -> sc_keystore::KeyStorePtr { - self.keystore.clone() - } - - fn spawn_task(&self, name: &'static str, task: impl Future + Send + 'static) { - self.task_manager.spawn(name, task) - } - - fn spawn_essential_task(&self, name: &'static str, task: impl Future + Send + 'static) { - let mut essential_failed = self.essential_failed_tx.clone(); - let essential_task = std::panic::AssertUnwindSafe(task) - .catch_unwind() - .map(move |_| { - error!("Essential task `{}` failed. Shutting down service.", name); - let _ = essential_failed.send(()); - }); - - let _ = self.spawn_task_handle().spawn(name, essential_task); - } - - fn spawn_task_handle(&self) -> SpawnTaskHandle { - self.task_manager.spawn_handle() - } - - fn spawn_essential_task_handle(&self) -> SpawnEssentialTaskHandle { - SpawnEssentialTaskHandle::new( - self.essential_failed_tx.clone(), - self.task_manager.spawn_handle(), - ) - } - - fn rpc_query(&self, mem: &RpcSession, request: &str) -> Pin> + Send>> { - Box::pin( - self.rpc_handlers.handle_request(request, mem.metadata.clone()) - .compat() - .map(|res| res.expect("this should never fail")) - ) - } - - fn client(&self) -> Arc { - self.client.clone() - } - - fn select_chain(&self) -> Option { - self.select_chain.clone() + pub fn rpc_query(&self, mem: &RpcSession, request: &str) + -> Pin> + Send>> { + self.0.handle_request(request, mem.metadata.clone()) + .compat() + .map(|res| res.expect("this should never fail")) + .boxed() } +} - fn network(&self) - -> Arc::Hash>> - { - self.network.clone() +/// Sinks to propagate network status updates. +/// For each element, every time the `Interval` fires we push an element on the sender. +pub struct NetworkStatusSinks( + Arc, NetworkState)>>>, +); + +impl NetworkStatusSinks { + fn new( + sinks: Arc, NetworkState)>>> + ) -> Self { + Self(sinks) } - fn network_status(&self, interval: Duration) -> TracingUnboundedReceiver<(NetworkStatus, NetworkState)> { + /// Returns a receiver that periodically receives a status of the network. 
+ pub fn network_status(&self, interval: Duration) + -> TracingUnboundedReceiver<(NetworkStatus, NetworkState)> { let (sink, stream) = tracing_unbounded("mpsc_network_status"); - self.network_status_sinks.lock().push(interval, sink); + self.0.lock().push(interval, sink); stream } - - fn transaction_pool(&self) -> Arc { - self.transaction_pool.clone() - } - - fn on_exit(&self) -> exit_future::Exit { - self.task_manager.on_exit() - } - - fn prometheus_registry(&self) -> Option { - self.prometheus_registry.clone() - } - - fn base_path(&self) -> Option> { - self._base_path.clone() - } } -impl Future for - Service -{ - type Output = Result<(), Error>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { - let this = Pin::into_inner(self); +/// Sinks to propagate telemetry connection established events. +pub struct TelemetryOnConnectSinks(pub Arc>>>); - match Pin::new(&mut this.essential_failed_rx).poll_next(cx) { - Poll::Pending => {}, - Poll::Ready(_) => { - // Ready(None) should not be possible since we hold a live - // sender. - return Poll::Ready(Err(Error::Other("Essential task failed.".into()))); - } - } - - // The service future never ends. - Poll::Pending +impl TelemetryOnConnectSinks { + /// Get event stream for telemetry connection established events. + pub fn on_connect_stream(&self) -> TracingUnboundedReceiver<()> { + let (sink, stream) =tracing_unbounded("mpsc_telemetry_on_connect"); + self.0.lock().push(sink); + stream } } -impl Spawn for - Service -{ - fn spawn_obj( - &self, - future: FutureObj<'static, ()> - ) -> Result<(), SpawnError> { - self.task_manager.spawn_handle().spawn("unnamed", future); - Ok(()) - } +/// The individual components of the chain, built by the service builder. You are encouraged to +/// deconstruct this into its fields. +pub struct ServiceComponents, TSc, TExPool, TCl> { + /// A blockchain client. + pub client: Arc, + /// A shared transaction pool instance. + pub transaction_pool: Arc, + /// The chain task manager. + pub task_manager: TaskManager, + /// A keystore that stores keys. + pub keystore: sc_keystore::KeyStorePtr, + /// A shared network instance. + pub network: Arc::Hash>>, + /// RPC handlers that can perform RPC queries. + pub rpc_handlers: Arc, + /// A shared instance of the chain selection algorithm. + pub select_chain: Option, + /// Sinks to propagate network status updates. + pub network_status_sinks: NetworkStatusSinks, + /// A prometheus metrics registry, (if enabled). + pub prometheus_registry: Option, + /// Shared Telemetry connection sinks, + pub telemetry_on_connect_sinks: TelemetryOnConnectSinks, + /// A shared offchain workers instance. + pub offchain_workers: Option>>, } /// Builds a never-ending future that continuously polls the network. diff --git a/client/service/src/task_manager.rs b/client/service/src/task_manager.rs index 544d76fc47..b6cc260055 100644 --- a/client/service/src/task_manager.rs +++ b/client/service/src/task_manager.rs @@ -13,14 +13,15 @@ //! Substrate service tasks management module. 
-use std::{panic, result::Result}; +use std::{panic, result::Result, pin::Pin}; use exit_future::Signal; use log::debug; use futures::{ - Future, FutureExt, + Future, FutureExt, StreamExt, future::{select, Either, BoxFuture}, compat::*, task::{Spawn, FutureObj, SpawnError}, + sink::SinkExt, }; use prometheus_endpoint::{ exponential_buckets, register, @@ -28,8 +29,8 @@ use prometheus_endpoint::{ CounterVec, HistogramOpts, HistogramVec, Opts, Registry, U64 }; use sc_client_api::CloneableSpawn; -use sp_utils::mpsc::TracingUnboundedSender; -use crate::config::{TaskExecutor, TaskType}; +use sp_utils::mpsc::{TracingUnboundedSender, TracingUnboundedReceiver, tracing_unbounded}; +use crate::{config::{TaskExecutor, TaskType}, Error}; mod prometheus_future; @@ -192,7 +193,6 @@ impl SpawnEssentialTaskHandle { task: impl Future + Send + 'static, task_type: TaskType, ) { - use futures::sink::SinkExt; let mut essential_failed = self.essential_failed_tx.clone(); let essential_task = std::panic::AssertUnwindSafe(task) .catch_unwind() @@ -216,6 +216,13 @@ pub struct TaskManager { executor: TaskExecutor, /// Prometheus metric where to report the polling times. metrics: Option, + /// Send a signal when a spawned essential task has concluded. The next time + /// the service future is polled it should complete with an error. + essential_failed_tx: TracingUnboundedSender<()>, + /// A receiver for spawned essential-tasks concluding. + essential_failed_rx: TracingUnboundedReceiver<()>, + /// Things to keep alive until the task manager is dropped. + keep_alive: Box, } impl TaskManager { @@ -226,6 +233,8 @@ impl TaskManager { prometheus_registry: Option<&Registry> ) -> Result { let (signal, on_exit) = exit_future::signal(); + // A side-channel for essential tasks to communicate shutdown. + let (essential_failed_tx, essential_failed_rx) = tracing_unbounded("mpsc_essential_tasks"); let metrics = prometheus_registry.map(Metrics::register).transpose()?; @@ -234,17 +243,15 @@ impl TaskManager { signal: Some(signal), executor, metrics, + essential_failed_tx, + essential_failed_rx, + keep_alive: Box::new(()), }) } - /// Spawn background/async task, which will be aware on exit signal. - /// - /// See also the documentation of [`SpawnTaskHandler::spawn`]. - pub(super) fn spawn(&self, name: &'static str, task: impl Future + Send + 'static) { - self.spawn_handle().spawn(name, task) - } - pub(super) fn spawn_handle(&self) -> SpawnTaskHandle { + /// Get a handle for spawning tasks. + pub fn spawn_handle(&self) -> SpawnTaskHandle { SpawnTaskHandle { on_exit: self.on_exit.clone(), executor: self.executor.clone(), @@ -252,18 +259,37 @@ impl TaskManager { } } - /// Clone on exit signal. - pub(super) fn on_exit(&self) -> exit_future::Exit { - self.on_exit.clone() + /// Get a handle for spawning essential tasks. + pub fn spawn_essential_handle(&self) -> SpawnEssentialTaskHandle { + SpawnEssentialTaskHandle::new(self.essential_failed_tx.clone(), self.spawn_handle()) + } + + /// Return a future that will end if an essential task fails. + pub fn future<'a>(&'a mut self) -> Pin> + Send + 'a>> { + Box::pin(async move { + self.essential_failed_rx.next().await; + + Err(Error::Other("Essential task failed.".into())) + }) + } + + /// Signal to terminate all the running tasks. 
+	pub fn terminate(&mut self) {
+		if let Some(signal) = self.signal.take() {
+			let _ = signal.fire();
+		}
+	}
+
+	/// Set what the task manager should keep alive.
+	pub(super) fn keep_alive(&mut self, to_keep_alive: T) {
+		self.keep_alive = Box::new(to_keep_alive);
 	}
 }
 
 impl Drop for TaskManager {
 	fn drop(&mut self) {
 		debug!(target: "service", "Tasks manager shutdown");
-		if let Some(signal) = self.signal.take() {
-			let _ = signal.fire();
-		}
+		self.terminate();
 	}
 }
diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs
index 994d846c6a..e72c290d43 100644
--- a/client/service/test/src/client/light.rs
+++ b/client/service/test/src/client/light.rs
@@ -40,7 +40,13 @@ use sp_api::{InitializeBlock, StorageTransactionCache, ProofRecorder, OffchainOv
 use sp_consensus::{BlockOrigin};
 use sc_executor::{NativeExecutor, WasmExecutionMethod, RuntimeVersion, NativeVersion};
 use sp_core::{H256, tasks::executor as tasks_executor, NativeOrEncoded};
-use sc_client_api::{blockchain::Info, backend::NewBlockState, Backend as ClientBackend, ProofProvider, in_mem::{Backend as InMemBackend, Blockchain as InMemoryBlockchain}, AuxStore, Storage, CallExecutor, cht, ExecutionStrategy, StorageProof, BlockImportOperation, RemoteCallRequest, StorageProvider, ChangesProof, RemoteBodyRequest, RemoteReadRequest, RemoteChangesRequest, FetchChecker, RemoteReadChildRequest, RemoteHeaderRequest};
+use sc_client_api::{
+	blockchain::Info, backend::NewBlockState, Backend as ClientBackend, ProofProvider,
+	in_mem::{Backend as InMemBackend, Blockchain as InMemoryBlockchain},
+	AuxStore, Storage, CallExecutor, cht, ExecutionStrategy, StorageProof, BlockImportOperation,
+	RemoteCallRequest, StorageProvider, ChangesProof, RemoteBodyRequest, RemoteReadRequest,
+	RemoteChangesRequest, FetchChecker, RemoteReadChildRequest, RemoteHeaderRequest, BlockBackend,
+};
 use sp_externalities::Extensions;
 use sc_block_builder::BlockBuilderProvider;
 use sp_blockchain::{
diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs
index 4ff89f5319..5a676e5263 100644
--- a/client/service/test/src/lib.rs
+++ b/client/service/test/src/lib.rs
@@ -19,18 +19,18 @@
 //! Service integration test utils.
use std::iter; -use std::sync::{Arc, Mutex, MutexGuard}; +use std::sync::Arc; use std::net::Ipv4Addr; use std::pin::Pin; use std::time::Duration; -use log::info; +use log::{info, debug}; use futures01::{Future, Stream, Poll}; use futures::{FutureExt as _, TryFutureExt as _}; use tempfile::TempDir; use tokio::{runtime::Runtime, prelude::FutureExt}; use tokio::timer::Interval; use sc_service::{ - AbstractService, + TaskManager, GenericChainSpec, ChainSpecExtension, Configuration, @@ -39,12 +39,15 @@ use sc_service::{ Role, Error, TaskExecutor, + client::Client, }; use sp_blockchain::HeaderBackend; use sc_network::{multiaddr, Multiaddr}; use sc_network::config::{NetworkConfiguration, TransportConfig}; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use sp_transaction_pool::TransactionPool; +use sc_client_api::{Backend, CallExecutor}; +use parking_lot::Mutex; #[cfg(test)] mod client; @@ -54,47 +57,100 @@ const MAX_WAIT_TIME: Duration = Duration::from_secs(60 * 3); struct TestNet { runtime: Runtime, - authority_nodes: Vec<(usize, SyncService, U, Multiaddr)>, - full_nodes: Vec<(usize, SyncService, U, Multiaddr)>, - light_nodes: Vec<(usize, SyncService, Multiaddr)>, + authority_nodes: Vec<(usize, F, U, Multiaddr)>, + full_nodes: Vec<(usize, F, U, Multiaddr)>, + light_nodes: Vec<(usize, L, Multiaddr)>, chain_spec: GenericChainSpec, base_port: u16, nodes: usize, } -/// Wraps around an `Arc` and implements `Future`. -pub struct SyncService(Arc>); +pub trait TestNetNode: Clone + Future + Send + 'static { + type Block: BlockT; + type Backend: Backend; + type Executor: CallExecutor + Send + Sync; + type RuntimeApi: Send + Sync; + type TransactionPool: TransactionPool; -impl SyncService { - pub fn get(&self) -> MutexGuard { - self.0.lock().unwrap() - } + fn client(&self) -> Arc>; + fn transaction_pool(&self) -> Arc; + fn network(&self) -> Arc::Hash>>; } -impl Clone for SyncService { - fn clone(&self) -> Self { - Self(self.0.clone()) +pub struct TestNetComponents { + task_manager: Arc>, + client: Arc>, + transaction_pool: Arc, + network: Arc::Hash>>, +} + +impl +TestNetComponents { + pub fn new( + task_manager: TaskManager, + client: Arc>, + network: Arc::Hash>>, + transaction_pool: Arc, + ) -> Self { + Self { + client, transaction_pool, network, + task_manager: Arc::new(Mutex::new(task_manager)), + } } } -impl From for SyncService { - fn from(service: T) -> Self { - SyncService(Arc::new(Mutex::new(service))) + +impl Clone for +TestNetComponents { + fn clone(&self) -> Self { + Self { + task_manager: self.task_manager.clone(), + client: self.client.clone(), + transaction_pool: self.transaction_pool.clone(), + network: self.network.clone(), + } } } -impl> + Unpin> Future for SyncService { +impl Future for + TestNetComponents +{ type Item = (); type Error = sc_service::Error; fn poll(&mut self) -> Poll { - let mut f = self.0.lock().unwrap(); - futures::compat::Compat::new(&mut *f).poll() + futures::compat::Compat::new(&mut self.task_manager.lock().future()).poll() + } +} + +impl TestNetNode for +TestNetComponents + where + TBl: BlockT, + TBackend: sc_client_api::Backend + Send + Sync + 'static, + TExec: CallExecutor + Send + Sync + 'static, + TRtApi: Send + Sync + 'static, + TExPool: TransactionPool + Send + Sync + 'static, +{ + type Block = TBl; + type Backend = TBackend; + type Executor = TExec; + type RuntimeApi = TRtApi; + type TransactionPool = TExPool; + + fn client(&self) -> Arc> { + self.client.clone() + } + fn transaction_pool(&self) -> Arc { + self.transaction_pool.clone() + } + fn 
network(&self) -> Arc::Hash>> { + self.network.clone() } } impl TestNet -where F: Send + 'static, L: Send +'static, U: Clone + Send + 'static +where F: Clone + Send + 'static, L: Clone + Send +'static, U: Clone + Send + 'static { pub fn run_until_all_full( &mut self, @@ -102,8 +158,8 @@ where F: Send + 'static, L: Send +'static, U: Clone + Send + 'static light_predicate: LP, ) where - FP: Send + Fn(usize, &SyncService) -> bool + 'static, - LP: Send + Fn(usize, &SyncService) -> bool + 'static, + FP: Send + Fn(usize, &F) -> bool + 'static, + LP: Send + Fn(usize, &L) -> bool + 'static, { let full_nodes = self.full_nodes.clone(); let light_nodes = self.light_nodes.clone(); @@ -217,8 +273,8 @@ fn node_config TestNet where - F: AbstractService, - L: AbstractService, + F: TestNetNode, + L: TestNetNode, E: ChainSpecExtension + Clone + 'static + Send, G: RuntimeGenesis + 'static, { @@ -276,10 +332,9 @@ impl TestNet where ); let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); let (service, user_data) = authority(node_config).expect("Error creating test node service"); - let service = SyncService::from(service); executor.spawn(service.clone().map_err(|_| ())); - let addr = addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().clone().into())); + let addr = addr.with(multiaddr::Protocol::P2p(service.network().local_peer_id().clone().into())); self.authority_nodes.push((self.nodes, service, user_data, addr)); self.nodes += 1; } @@ -296,10 +351,9 @@ impl TestNet where ); let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); let (service, user_data) = full(node_config).expect("Error creating test node service"); - let service = SyncService::from(service); executor.spawn(service.clone().map_err(|_| ())); - let addr = addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().clone().into())); + let addr = addr.with(multiaddr::Protocol::P2p(service.network().local_peer_id().clone().into())); self.full_nodes.push((self.nodes, service, user_data, addr)); self.nodes += 1; } @@ -315,10 +369,10 @@ impl TestNet where &temp, ); let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); - let service = SyncService::from(light(node_config).expect("Error creating test node service")); + let service = light(node_config).expect("Error creating test node service"); executor.spawn(service.clone().map_err(|_| ())); - let addr = addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().clone().into())); + let addr = addr.with(multiaddr::Protocol::P2p(service.network().local_peer_id().clone().into())); self.light_nodes.push((self.nodes, service, addr)); self.nodes += 1; } @@ -337,9 +391,9 @@ pub fn connectivity( E: ChainSpecExtension + Clone + 'static + Send, G: RuntimeGenesis + 'static, Fb: Fn(Configuration) -> Result, - F: AbstractService, + F: TestNetNode, Lb: Fn(Configuration) -> Result, - L: AbstractService, + L: TestNetNode, { const NUM_FULL_NODES: usize = 5; const NUM_LIGHT_NODES: usize = 5; @@ -363,19 +417,25 @@ pub fn connectivity( info!("Checking star topology"); let first_address = network.full_nodes[0].3.clone(); for (_, service, _, _) in network.full_nodes.iter().skip(1) { - service.get().network().add_reserved_peer(first_address.to_string()) + service.network().add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } for (_, service, _) in network.light_nodes.iter() { - service.get().network().add_reserved_peer(first_address.to_string()) + 
service.network().add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } network.run_until_all_full( - move |_index, service| service.get().network().num_connected() - == expected_full_connections, - move |_index, service| service.get().network().num_connected() - == expected_light_connections, + move |_index, service| { + let connected = service.network().num_connected(); + debug!("Got {}/{} full connections...", connected, expected_full_connections); + connected == expected_full_connections + }, + move |_index, service| { + let connected = service.network().num_connected(); + debug!("Got {}/{} light connections...", connected, expected_light_connections); + connected == expected_light_connections + }, ); network.runtime @@ -404,24 +464,30 @@ pub fn connectivity( for i in 0..max_nodes { if i != 0 { if let Some((_, service, _, node_id)) = network.full_nodes.get(i) { - service.get().network().add_reserved_peer(address.to_string()) + service.network().add_reserved_peer(address.to_string()) .expect("Error adding reserved peer"); address = node_id.clone(); } } if let Some((_, service, node_id)) = network.light_nodes.get(i) { - service.get().network().add_reserved_peer(address.to_string()) + service.network().add_reserved_peer(address.to_string()) .expect("Error adding reserved peer"); address = node_id.clone(); } } network.run_until_all_full( - move |_index, service| service.get().network().num_connected() - == expected_full_connections, - move |_index, service| service.get().network().num_connected() - == expected_light_connections, + move |_index, service| { + let connected = service.network().num_connected(); + debug!("Got {}/{} full connections...", connected, expected_full_connections); + connected == expected_full_connections + }, + move |_index, service| { + let connected = service.network().num_connected(); + debug!("Got {}/{} light connections...", connected, expected_light_connections); + connected == expected_light_connections + }, ); } temp.close().expect("Error removing temp dir"); @@ -436,9 +502,9 @@ pub fn sync( mut extrinsic_factory: ExF ) where Fb: Fn(Configuration) -> Result<(F, U), Error>, - F: AbstractService, + F: TestNetNode, Lb: Fn(Configuration) -> Result, - L: AbstractService, + L: TestNetNode, B: FnMut(&F, &mut U), ExF: FnMut(&F, &U) -> ::Extrinsic, U: Clone + Send + 'static, @@ -468,39 +534,41 @@ pub fn sync( info!("Generating #{}", i + 1); } - make_block_and_import(&first_service.get(), first_user_data); + make_block_and_import(&first_service, first_user_data); } - (network.full_nodes[0].1).0.lock().unwrap().network().update_chain(); + network.full_nodes[0].1.network().update_chain(); network.full_nodes[0].3.clone() }; info!("Running sync"); for (_, service, _, _) in network.full_nodes.iter().skip(1) { - service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); + service.network().add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); } for (_, service, _) in network.light_nodes.iter() { - service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); + service.network().add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); } network.run_until_all_full( |_index, service| - service.get().client().info().best_number == (NUM_BLOCKS as u32).into(), + service.client().info().best_number == (NUM_BLOCKS as u32).into(), |_index, service| - service.get().client().info().best_number == 
(NUM_BLOCKS as u32).into(), + service.client().info().best_number == (NUM_BLOCKS as u32).into(), ); info!("Checking extrinsic propagation"); let first_service = network.full_nodes[0].1.clone(); let first_user_data = &network.full_nodes[0].2; - let best_block = BlockId::number(first_service.get().client().info().best_number); - let extrinsic = extrinsic_factory(&first_service.get(), first_user_data); + let best_block = BlockId::number(first_service.client().info().best_number); + let extrinsic = extrinsic_factory(&first_service, first_user_data); let source = sp_transaction_pool::TransactionSource::External; futures::executor::block_on( - first_service.get().transaction_pool().submit_one(&best_block, source, extrinsic) + first_service.transaction_pool().submit_one(&best_block, source, extrinsic) ).expect("failed to submit extrinsic"); network.run_until_all_full( - |_index, service| service.get().transaction_pool().ready().count() == 1, + |_index, service| service.transaction_pool().ready().count() == 1, |_index, _service| true, ); } @@ -512,9 +580,9 @@ pub fn consensus( authorities: impl IntoIterator ) where Fb: Fn(Configuration) -> Result, - F: AbstractService, + F: TestNetNode, Lb: Fn(Configuration) -> Result, - L: AbstractService, + L: TestNetNode, E: ChainSpecExtension + Clone + 'static + Send, G: RuntimeGenesis + 'static, { @@ -534,19 +602,22 @@ pub fn consensus( info!("Checking consensus"); let first_address = network.authority_nodes[0].3.clone(); for (_, service, _, _) in network.full_nodes.iter() { - service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); + service.network().add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); } for (_, service, _) in network.light_nodes.iter() { - service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); + service.network().add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); } for (_, service, _, _) in network.authority_nodes.iter().skip(1) { - service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); + service.network().add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); } network.run_until_all_full( |_index, service| - service.get().client().info().finalized_number >= (NUM_BLOCKS as u32 / 2).into(), + service.client().info().finalized_number >= (NUM_BLOCKS as u32 / 2).into(), |_index, service| - service.get().client().info().best_number >= (NUM_BLOCKS as u32 / 2).into(), + service.client().info().best_number >= (NUM_BLOCKS as u32 / 2).into(), ); info!("Adding more peers"); @@ -559,15 +630,17 @@ pub fn consensus( (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), ); for (_, service, _, _) in network.full_nodes.iter() { - service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); + service.network().add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); } for (_, service, _) in network.light_nodes.iter() { - service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); + service.network().add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); } network.run_until_all_full( |_index, service| - service.get().client().info().finalized_number >= (NUM_BLOCKS as u32).into(), + service.client().info().finalized_number >= 
(NUM_BLOCKS as u32).into(), |_index, service| - service.get().client().info().best_number >= (NUM_BLOCKS as u32).into(), + service.client().info().best_number >= (NUM_BLOCKS as u32).into(), ); } diff --git a/test-utils/client/src/client_ext.rs b/test-utils/client/src/client_ext.rs index 706a7b6e95..a74bd3258e 100644 --- a/test-utils/client/src/client_ext.rs +++ b/test-utils/client/src/client_ext.rs @@ -19,6 +19,7 @@ use sc_service::client::Client; use sc_client_api::backend::Finalizer; +use sc_client_api::client::BlockBackend; use sp_consensus::{ BlockImportParams, BlockImport, BlockOrigin, Error as ConsensusError, ForkChoiceStrategy, diff --git a/utils/browser/src/lib.rs b/utils/browser/src/lib.rs index 799fe9788c..c8034d9466 100644 --- a/utils/browser/src/lib.rs +++ b/utils/browser/src/lib.rs @@ -19,14 +19,15 @@ use futures01::sync::mpsc as mpsc01; use log::{debug, info}; use sc_network::config::TransportConfig; use sc_service::{ - AbstractService, RpcSession, Role, Configuration, + RpcSession, Role, Configuration, TaskManager, RpcHandlers, config::{DatabaseConfig, KeystoreConfig, NetworkConfiguration}, GenericChainSpec, RuntimeGenesis }; use wasm_bindgen::prelude::*; -use futures::{prelude::*, channel::{oneshot, mpsc}, future::{poll_fn, ok}, compat::*}; -use std::task::Poll; -use std::pin::Pin; +use futures::{ + prelude::*, channel::{oneshot, mpsc}, compat::*, future::{ready, ok, select} +}; +use std::{sync::Arc, pin::Pin}; use sc_chain_spec::Extension; use libp2p_wasm_ext::{ExtTransport, ffi}; @@ -120,31 +121,25 @@ struct RpcMessage { } /// Create a Client object that connects to a service. -pub fn start_client(mut service: impl AbstractService) -> Client { +pub fn start_client(mut task_manager: TaskManager, rpc_handlers: Arc) -> Client { // We dispatch a background task responsible for processing the service. // // The main action performed by the code below consists in polling the service with // `service.poll()`. // The rest consists in handling RPC requests. 
- let (rpc_send_tx, mut rpc_send_rx) = mpsc::unbounded::(); - wasm_bindgen_futures::spawn_local(poll_fn(move |cx| { - loop { - match Pin::new(&mut rpc_send_rx).poll_next(cx) { - Poll::Ready(Some(message)) => { - let fut = service - .rpc_query(&message.session, &message.rpc_json) - .boxed(); - let _ = message.send_back.send(fut); - }, - Poll::Pending => break, - Poll::Ready(None) => return Poll::Ready(()), - } - } - - Pin::new(&mut service) - .poll(cx) - .map(drop) - })); + let (rpc_send_tx, rpc_send_rx) = mpsc::unbounded::(); + wasm_bindgen_futures::spawn_local( + select( + rpc_send_rx.for_each(move |message| { + let fut = rpc_handlers.rpc_query(&message.session, &message.rpc_json); + let _ = message.send_back.send(fut); + ready(()) + }), + Box::pin(async move { + let _ = task_manager.future().await; + }), + ).map(drop) + ); Client { rpc_send_tx, -- GitLab From 1b34df8bf27a8e49fdf928890491253144099351 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Tue, 30 Jun 2020 16:06:16 +0300 Subject: [PATCH 099/144] Block production integration benchmark (#6468) * proposer benchmark * update cargo.lock --- Cargo.lock | 7 + bin/node/bench/Cargo.toml | 7 + bin/node/bench/src/common.rs | 48 ++++ bin/node/bench/src/construct.rs | 296 ++++++++++++++++++++++++ bin/node/bench/src/import.rs | 38 +-- bin/node/bench/src/main.rs | 38 ++- bin/node/testing/src/bench.rs | 209 ++++++++++------- primitives/transaction-pool/src/pool.rs | 4 +- 8 files changed, 523 insertions(+), 124 deletions(-) create mode 100644 bin/node/bench/src/common.rs create mode 100644 bin/node/bench/src/construct.rs diff --git a/Cargo.lock b/Cargo.lock index 8b0273d199..94f3f5effe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3362,6 +3362,7 @@ version = "0.8.0-rc4" dependencies = [ "derive_more", "fs_extra", + "futures 0.3.5", "hash-db", "hex", "kvdb", @@ -3374,13 +3375,19 @@ dependencies = [ "parity-db", "parity-util-mem", "rand 0.7.3", + "sc-basic-authorship", "sc-cli", "sc-client-api", "serde", "serde_json", + "sp-consensus", "sp-core", + "sp-finality-tracker", + "sp-inherents", "sp-runtime", "sp-state-machine", + "sp-timestamp", + "sp-transaction-pool", "sp-trie", "structopt", "tempfile", diff --git a/bin/node/bench/Cargo.toml b/bin/node/bench/Cargo.toml index ab156635ec..07db27a1f1 100644 --- a/bin/node/bench/Cargo.toml +++ b/bin/node/bench/Cargo.toml @@ -25,6 +25,12 @@ kvdb = "0.6" kvdb-rocksdb = "0.8" sp-trie = { version = "2.0.0-rc4", path = "../../../primitives/trie" } sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } +sp-consensus = { version = "0.8.0-rc4", path = "../../../primitives/consensus/common" } +sp-transaction-pool = { version = "2.0.0-rc4", path = "../../../primitives/transaction-pool" } +sc-basic-authorship = { version = "0.8.0-rc4", path = "../../../client/basic-authorship" } +sp-inherents = { version = "2.0.0-rc4", path = "../../../primitives/inherents" } +sp-finality-tracker = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/finality-tracker" } +sp-timestamp = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/timestamp" } hash-db = "0.15.2" tempfile = "3.1.0" fs_extra = "1" @@ -33,3 +39,4 @@ rand = { version = "0.7.2", features = ["small_rng"] } lazy_static = "1.4.0" parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } parity-db = { version = "0.1.2" } +futures = "0.3.1" diff --git a/bin/node/bench/src/common.rs b/bin/node/bench/src/common.rs new file mode 100644 index 0000000000..2637d6e9bd 
--- /dev/null +++ b/bin/node/bench/src/common.rs @@ -0,0 +1,48 @@ + +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#[derive(Clone, Copy, Debug, derive_more::Display)] +pub enum SizeType { + #[display(fmt = "empty")] + Empty, + #[display(fmt = "small")] + Small, + #[display(fmt = "medium")] + Medium, + #[display(fmt = "large")] + Large, + #[display(fmt = "full")] + Full, + #[display(fmt = "custom")] + Custom(usize), +} + +impl SizeType { + pub fn transactions(&self) -> Option { + match self { + SizeType::Empty => Some(0), + SizeType::Small => Some(10), + SizeType::Medium => Some(100), + SizeType::Large => Some(500), + SizeType::Full => None, + // Custom SizeType will use the `--transactions` input parameter + SizeType::Custom(val) => Some(*val), + } + } +} \ No newline at end of file diff --git a/bin/node/bench/src/construct.rs b/bin/node/bench/src/construct.rs new file mode 100644 index 0000000000..e23594dd43 --- /dev/null +++ b/bin/node/bench/src/construct.rs @@ -0,0 +1,296 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Block construction benchmark. +//! +//! This benchmark is expected to measure block construction. +//! We want to protect against cold-cache attacks, and so this +//! benchmark should not rely on any caching (except those entries that +//! DO NOT depend on user input). Thus transaction generation should be +//! based on randomized data. 
+ +use std::{ + borrow::Cow, + collections::HashMap, + pin::Pin, + sync::Arc, +}; +use futures::Future; + +use node_primitives::Block; +use node_testing::bench::{BenchDb, Profile, BlockType, KeyTypes, DatabaseType}; +use sp_runtime::{ + generic::BlockId, + traits::NumberFor, + OpaqueExtrinsic, +}; +use sp_transaction_pool::{ + ImportNotificationStream, + PoolFuture, + PoolStatus, + TransactionFor, + TransactionSource, + TransactionStatusStreamFor, + TxHash, +}; +use sp_consensus::{Environment, Proposer, RecordProof}; + +use crate::{ + common::SizeType, + core::{self, Path, Mode}, +}; + +pub struct ConstructionBenchmarkDescription { + pub profile: Profile, + pub key_types: KeyTypes, + pub block_type: BlockType, + pub size: SizeType, + pub database_type: DatabaseType, +} + +pub struct ConstructionBenchmark { + profile: Profile, + database: BenchDb, + transactions: Transactions, +} + +impl core::BenchmarkDescription for ConstructionBenchmarkDescription { + fn path(&self) -> Path { + + let mut path = Path::new(&["node", "proposer"]); + + match self.profile { + Profile::Wasm => path.push("wasm"), + Profile::Native => path.push("native"), + } + + match self.key_types { + KeyTypes::Sr25519 => path.push("sr25519"), + KeyTypes::Ed25519 => path.push("ed25519"), + } + + match self.block_type { + BlockType::RandomTransfersKeepAlive => path.push("transfer"), + BlockType::RandomTransfersReaping => path.push("transfer_reaping"), + BlockType::Noop => path.push("noop"), + } + + match self.database_type { + DatabaseType::RocksDb => path.push("rocksdb"), + DatabaseType::ParityDb => path.push("paritydb"), + } + + path.push(&format!("{}", self.size)); + + path + } + + fn setup(self: Box) -> Box { + let mut extrinsics: Vec> = Vec::new(); + + let mut bench_db = BenchDb::with_key_types( + self.database_type, + 50_000, + self.key_types + ); + + let client = bench_db.client(); + + let content_type = self.block_type.to_content(self.size.transactions()); + for transaction in bench_db.block_content(content_type, &client) { + extrinsics.push(Arc::new(transaction.into())); + } + + Box::new(ConstructionBenchmark { + profile: self.profile, + database: bench_db, + transactions: Transactions(extrinsics), + }) + } + + fn name(&self) -> Cow<'static, str> { + format!( + "Block construction ({:?}/{}, {:?}, {:?} backend)", + self.block_type, + self.size, + self.profile, + self.database_type, + ).into() + } +} + +impl core::Benchmark for ConstructionBenchmark { + fn run(&mut self, mode: Mode) -> std::time::Duration { + let context = self.database.create_context(self.profile); + + let _ = context.client.runtime_version_at(&BlockId::Number(0)) + .expect("Failed to get runtime version") + .spec_version; + + if mode == Mode::Profile { + std::thread::park_timeout(std::time::Duration::from_secs(3)); + } + + let mut proposer_factory = sc_basic_authorship::ProposerFactory::new( + context.client.clone(), + self.transactions.clone().into(), + None, + ); + let inherent_data_providers = sp_inherents::InherentDataProviders::new(); + inherent_data_providers + .register_provider(sp_timestamp::InherentDataProvider) + .expect("Failed to register timestamp data provider"); + + let start = std::time::Instant::now(); + + let proposer = futures::executor::block_on(proposer_factory.init( + &context.client.header(&BlockId::number(0)) + .expect("Database error querying block #0") + .expect("Block #0 should exist"), + )).expect("Proposer initialization failed"); + + let _block = futures::executor::block_on( + proposer.propose( + 
inherent_data_providers.create_inherent_data().expect("Create inherent data failed"), + Default::default(), + std::time::Duration::from_secs(20), + RecordProof::Yes, + ), + ).map(|r| r.block).expect("Proposing failed"); + + let elapsed = start.elapsed(); + + if mode == Mode::Profile { + std::thread::park_timeout(std::time::Duration::from_secs(1)); + } + + elapsed + } +} + +#[derive(Clone, Debug)] +pub struct PoolTransaction { + data: OpaqueExtrinsic, + hash: node_primitives::Hash, +} + +impl From for PoolTransaction { + fn from(e: OpaqueExtrinsic) -> Self { + PoolTransaction { + data: e, + hash: node_primitives::Hash::zero(), + } + } +} + +impl sp_transaction_pool::InPoolTransaction for PoolTransaction { + type Transaction = OpaqueExtrinsic; + type Hash = node_primitives::Hash; + + fn data(&self) -> &Self::Transaction { + &self.data + } + + fn hash(&self) -> &Self::Hash { + &self.hash + } + + fn priority(&self) -> &u64 { unimplemented!() } + + fn longevity(&self) -> &u64 { unimplemented!() } + + fn requires(&self) -> &[Vec] { unimplemented!() } + + fn provides(&self) -> &[Vec] { unimplemented!() } + + fn is_propagable(&self) -> bool { unimplemented!() } +} + +#[derive(Clone, Debug)] +pub struct Transactions(Vec>); + +impl sp_transaction_pool::TransactionPool for Transactions { + type Block = Block; + type Hash = node_primitives::Hash; + type InPoolTransaction = PoolTransaction; + type Error = sp_transaction_pool::error::Error; + + /// Returns a future that imports a bunch of unverified transactions to the pool. + fn submit_at( + &self, + _at: &BlockId, + _source: TransactionSource, + _xts: Vec>, + ) -> PoolFuture>, Self::Error> { + unimplemented!() + } + + /// Returns a future that imports one unverified transaction to the pool. + fn submit_one( + &self, + _at: &BlockId, + _source: TransactionSource, + _xt: TransactionFor, + ) -> PoolFuture, Self::Error> { + unimplemented!() + } + + fn submit_and_watch( + &self, + _at: &BlockId, + _source: TransactionSource, + _xt: TransactionFor, + ) -> PoolFuture>, Self::Error> { + unimplemented!() + } + + fn ready_at(&self, _at: NumberFor) + -> Pin> + Send>> + Send>> + { + let iter: Box> + Send> = Box::new(self.0.clone().into_iter()); + Box::pin(futures::future::ready(iter)) + } + + fn ready(&self) -> Box> + Send> { + unimplemented!() + } + + fn remove_invalid(&self, _hashes: &[TxHash]) -> Vec> { + Default::default() + } + + fn status(&self) -> PoolStatus { + unimplemented!() + } + + fn import_notification_stream(&self) -> ImportNotificationStream> { + unimplemented!() + } + + fn on_broadcasted(&self, _propagations: HashMap, Vec>) { + unimplemented!() + } + + fn hash_of(&self, _xt: &TransactionFor) -> TxHash { + unimplemented!() + } + + fn ready_transaction(&self, _hash: &TxHash) -> Option> { + unimplemented!() + } +} \ No newline at end of file diff --git a/bin/node/bench/src/import.rs b/bin/node/bench/src/import.rs index c1b324c03c..e49a359fb6 100644 --- a/bin/node/bench/src/import.rs +++ b/bin/node/bench/src/import.rs @@ -38,37 +38,10 @@ use sc_client_api::backend::Backend; use sp_runtime::generic::BlockId; use sp_state_machine::InspectState; -use crate::core::{self, Path, Mode}; - -#[derive(Clone, Copy, Debug, derive_more::Display)] -pub enum SizeType { - #[display(fmt = "empty")] - Empty, - #[display(fmt = "small")] - Small, - #[display(fmt = "medium")] - Medium, - #[display(fmt = "large")] - Large, - #[display(fmt = "full")] - Full, - #[display(fmt = "custom")] - Custom(usize), -} - -impl SizeType { - pub fn transactions(&self) -> Option { - match 
self { - SizeType::Empty => Some(0), - SizeType::Small => Some(10), - SizeType::Medium => Some(100), - SizeType::Large => Some(500), - SizeType::Full => None, - // Custom SizeType will use the `--transactions` input parameter - SizeType::Custom(val) => Some(*val), - } - } -} +use crate::{ + common::SizeType, + core::{self, Path, Mode}, +}; pub struct ImportBenchmarkDescription { pub profile: Profile, @@ -134,8 +107,9 @@ impl core::BenchmarkDescription for ImportBenchmarkDescription { fn name(&self) -> Cow<'static, str> { format!( - "Import benchmark ({:?}, {:?}, {:?} backend)", + "Block import ({:?}/{}, {:?}, {:?} backend)", self.block_type, + self.size, self.profile, self.database_type, ).into() diff --git a/bin/node/bench/src/main.rs b/bin/node/bench/src/main.rs index 5c5af37038..1182024711 100644 --- a/bin/node/bench/src/main.rs +++ b/bin/node/bench/src/main.rs @@ -16,21 +16,29 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +mod common; +mod construct; #[macro_use] mod core; mod import; -mod trie; -mod simple_trie; mod generator; -mod tempdb; +mod simple_trie; mod state_sizes; +mod tempdb; +mod trie; -use crate::core::{run_benchmark, Mode as BenchmarkMode}; -use crate::tempdb::DatabaseType; -use import::{ImportBenchmarkDescription, SizeType}; -use trie::{TrieReadBenchmarkDescription, TrieWriteBenchmarkDescription, DatabaseSize}; -use node_testing::bench::{Profile, KeyTypes, BlockType, DatabaseType as BenchDataBaseType}; use structopt::StructOpt; +use node_testing::bench::{Profile, KeyTypes, BlockType, DatabaseType as BenchDataBaseType}; + +use crate::{ + common::SizeType, + core::{run_benchmark, Mode as BenchmarkMode}, + tempdb::DatabaseType, + import::ImportBenchmarkDescription, + trie::{TrieReadBenchmarkDescription, TrieWriteBenchmarkDescription, DatabaseSize}, + construct::ConstructionBenchmarkDescription, +}; + #[derive(Debug, StructOpt)] #[structopt(name = "node-bench", about = "Node integration benchmarks")] struct Opt { @@ -126,6 +134,20 @@ fn main() { ] .iter().map(move |db_type| (size, db_type))) => TrieWriteBenchmarkDescription { database_size: *size, database_type: *db_type }, + ConstructionBenchmarkDescription { + profile: Profile::Wasm, + key_types: KeyTypes::Sr25519, + block_type: BlockType::RandomTransfersKeepAlive, + size: SizeType::Medium, + database_type: BenchDataBaseType::RocksDb, + }, + ConstructionBenchmarkDescription { + profile: Profile::Wasm, + key_types: KeyTypes::Sr25519, + block_type: BlockType::RandomTransfersKeepAlive, + size: SizeType::Large, + database_type: BenchDataBaseType::RocksDb, + }, ); if opt.list { diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index 5df2709f87..507d3420d8 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -152,20 +152,12 @@ impl BlockType { } /// Content of the generated block. +#[derive(Clone, Debug)] pub struct BlockContent { block_type: BlockType, size: Option, } -impl BlockContent { - fn iter_while(&self, mut f: impl FnMut(usize) -> bool) { - match self.size { - Some(v) => { for i in 0..v { if !f(i) { break; }}} - None => { for i in 0.. { if !f(i) { break; }}} - } - } -} - /// Type of backend database. #[derive(Debug, PartialEq, Clone, Copy)] pub enum DatabaseType { @@ -219,6 +211,93 @@ impl CloneableSpawn for TaskExecutor { } } +/// Iterator for block content. 
+pub struct BlockContentIterator<'a> { + iteration: usize, + content: BlockContent, + runtime_version: sc_executor::RuntimeVersion, + genesis_hash: node_primitives::Hash, + keyring: &'a BenchKeyring, +} + +impl<'a> BlockContentIterator<'a> { + fn new(content: BlockContent, keyring: &'a BenchKeyring, client: &Client) -> Self { + let runtime_version = client.runtime_version_at(&BlockId::number(0)) + .expect("There should be runtime version at 0"); + + let genesis_hash = client.block_hash(Zero::zero()) + .expect("Database error?") + .expect("Genesis block always exists; qed") + .into(); + + BlockContentIterator { + iteration: 0, + content, + keyring, + runtime_version, + genesis_hash, + } + } +} + +impl<'a> Iterator for BlockContentIterator<'a> { + type Item = OpaqueExtrinsic; + + fn next(&mut self) -> Option { + if self.content.size.map(|size| size <= self.iteration).unwrap_or(false) { + return None; + } + + let sender = self.keyring.at(self.iteration); + let receiver = get_account_id_from_seed::( + &format!("random-user//{}", self.iteration) + ); + + let signed = self.keyring.sign( + CheckedExtrinsic { + signed: Some((sender, signed_extra(0, node_runtime::ExistentialDeposit::get() + 1))), + function: match self.content.block_type { + BlockType::RandomTransfersKeepAlive => { + Call::Balances( + BalancesCall::transfer_keep_alive( + pallet_indices::address::Address::Id(receiver), + node_runtime::ExistentialDeposit::get() + 1, + ) + ) + }, + BlockType::RandomTransfersReaping => { + Call::Balances( + BalancesCall::transfer( + pallet_indices::address::Address::Id(receiver), + // Transfer so that ending balance would be 1 less than existential deposit + // so that we kill the sender account. + 100*DOLLARS - (node_runtime::ExistentialDeposit::get() - 1), + ) + ) + }, + BlockType::Noop => { + Call::System( + SystemCall::remark(Vec::new()) + ) + }, + }, + }, + self.runtime_version.spec_version, + self.runtime_version.transaction_version, + self.genesis_hash.into(), + ); + + let encoded = Encode::encode(&signed); + + let opaque = OpaqueExtrinsic::decode(&mut &encoded[..]) + .expect("Failed to decode opaque"); + + self.iteration += 1; + + Some(opaque) + } +} + impl BenchDb { /// New immutable benchmarking database. /// @@ -288,8 +367,33 @@ impl BenchDb { (client, backend) } - /// Generate new block using this database. - pub fn generate_block(&mut self, content: BlockContent) -> Block { + /// Generate list of required inherents. + /// + /// Uses already instantiated Client. + pub fn generate_inherents(&mut self, client: &Client) -> Vec { + let mut inherent_data = InherentData::new(); + let timestamp = 1 * MinimumPeriod::get(); + + inherent_data.put_data(sp_timestamp::INHERENT_IDENTIFIER, ×tamp) + .expect("Put timestamp failed"); + inherent_data.put_data(sp_finality_tracker::INHERENT_IDENTIFIER, &0) + .expect("Put finality tracker failed"); + + client.runtime_api() + .inherent_extrinsics_with_context( + &BlockId::number(0), + ExecutionContext::BlockConstruction, + inherent_data, + ).expect("Get inherents failed") + } + + /// Iterate over some block content with transaction signed using this database keyring. + pub fn block_content(&self, content: BlockContent, client: &Client) -> BlockContentIterator { + BlockContentIterator::new(content, &self.keyring, client) + } + + /// Get cliet for this database operations. 
+ pub fn client(&mut self) -> Client { let (client, _backend) = Self::bench_client( self.database_type, self.directory_guard.path(), @@ -297,92 +401,33 @@ impl BenchDb { &self.keyring, ); - let runtime_version = client.runtime_version_at(&BlockId::number(0)) - .expect("There should be runtime version at 0"); + client + } - let genesis_hash = client.block_hash(Zero::zero()) - .expect("Database error?") - .expect("Genesis block always exists; qed") - .into(); + /// Generate new block using this database. + pub fn generate_block(&mut self, content: BlockContent) -> Block { + let client = self.client(); let mut block = client .new_block(Default::default()) .expect("Block creation failed"); - let timestamp = 1 * MinimumPeriod::get(); - - let mut inherent_data = InherentData::new(); - inherent_data.put_data(sp_timestamp::INHERENT_IDENTIFIER, ×tamp) - .expect("Put timestamp failed"); - inherent_data.put_data(sp_finality_tracker::INHERENT_IDENTIFIER, &0) - .expect("Put finality tracker failed"); - - for extrinsic in client.runtime_api() - .inherent_extrinsics_with_context( - &BlockId::number(0), - ExecutionContext::BlockConstruction, - inherent_data, - ).expect("Get inherents failed") - { + for extrinsic in self.generate_inherents(&client) { block.push(extrinsic).expect("Push inherent failed"); } let start = std::time::Instant::now(); - content.iter_while(|iteration| { - let sender = self.keyring.at(iteration); - let receiver = get_account_id_from_seed::( - &format!("random-user//{}", iteration) - ); - - let signed = self.keyring.sign( - CheckedExtrinsic { - signed: Some((sender, signed_extra(0, node_runtime::ExistentialDeposit::get() + 1))), - function: match content.block_type { - BlockType::RandomTransfersKeepAlive => { - Call::Balances( - BalancesCall::transfer_keep_alive( - pallet_indices::address::Address::Id(receiver), - node_runtime::ExistentialDeposit::get() + 1, - ) - ) - }, - BlockType::RandomTransfersReaping => { - Call::Balances( - BalancesCall::transfer( - pallet_indices::address::Address::Id(receiver), - // Transfer so that ending balance would be 1 less than existential deposit - // so that we kill the sender account. - 100*DOLLARS - (node_runtime::ExistentialDeposit::get() - 1), - ) - ) - }, - BlockType::Noop => { - Call::System( - SystemCall::remark(Vec::new()) - ) - }, - }, - }, - runtime_version.spec_version, - runtime_version.transaction_version, - genesis_hash, - ); - - let encoded = Encode::encode(&signed); - - let opaque = OpaqueExtrinsic::decode(&mut &encoded[..]) - .expect("Failed to decode opaque"); - + for opaque in self.block_content(content, &client) { match block.push(opaque) { Err(sp_blockchain::Error::ApplyExtrinsicFailed( sp_blockchain::ApplyExtrinsicFailed::Validity(e) )) if e.exhausted_resources() => { - return false; + break; }, Err(err) => panic!("Error pushing transaction: {:?}", err), - Ok(_) => true, + Ok(_) => {}, } - }); + }; let block = block.build().expect("Block build failed").block; @@ -411,7 +456,7 @@ impl BenchDb { ); BenchContext { - client, backend, db_guard: directory_guard, + client: Arc::new(client), backend, db_guard: directory_guard, } } } @@ -543,7 +588,7 @@ impl Guard { /// Benchmarking/test context holding instantiated client and backend references. pub struct BenchContext { /// Node client. - pub client: Client, + pub client: Arc, /// Node backend. 
pub backend: Arc, diff --git a/primitives/transaction-pool/src/pool.rs b/primitives/transaction-pool/src/pool.rs index b00c283ac7..848c6f9e17 100644 --- a/primitives/transaction-pool/src/pool.rs +++ b/primitives/transaction-pool/src/pool.rs @@ -23,7 +23,7 @@ use std::{ sync::Arc, pin::Pin, }; -use futures::{Future, Stream,}; +use futures::{Future, Stream}; use serde::{Deserialize, Serialize}; use sp_utils::mpsc; use sp_runtime::{ @@ -164,7 +164,7 @@ pub trait InPoolTransaction { /// Get priority of the transaction. fn priority(&self) -> &TransactionPriority; /// Get longevity of the transaction. - fn longevity(&self) ->&TransactionLongevity; + fn longevity(&self) -> &TransactionLongevity; /// Get transaction dependencies. fn requires(&self) -> &[TransactionTag]; /// Get tags that transaction provides. -- GitLab From 329d538781390cb896692f03e966e159b7602e9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 30 Jun 2020 16:04:15 +0200 Subject: [PATCH 100/144] Make the `OnRuntimeUpgrade` docs more clear (#6542) --- frame/support/src/traits.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index b36559c363..f7e7710b32 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -1431,11 +1431,19 @@ impl OnInitialize for Tuple { } } -/// The runtime upgrade trait. Implementing this lets you express what should happen -/// when the runtime upgrades, and changes may need to occur to your module. +/// The runtime upgrade trait. +/// +/// Implementing this lets you express what should happen when the runtime upgrades, +/// and changes may need to occur to your module. pub trait OnRuntimeUpgrade { /// Perform a module upgrade. /// + /// # Warning + /// + /// This function will be called before we initialized any runtime state, aka `on_initialize` + /// wasn't called yet. So, information like the block number and any other + /// block local data are not accessible. + /// /// Return the non-negotiable weight consumed for runtime upgrade. fn on_runtime_upgrade() -> crate::weights::Weight { 0 } } -- GitLab From e8f901868997be15635cba9b21a99b212009adc8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 30 Jun 2020 16:44:52 +0200 Subject: [PATCH 101/144] Support synching of blocks that are not `new_best` (#6508) * Start * Remove debug println * Add tests --- client/finality-grandpa/src/tests.rs | 2 +- client/network/src/protocol.rs | 4 +- client/network/src/protocol/sync.rs | 48 ++++++---- client/network/src/service/tests.rs | 2 +- client/network/test/src/block_import.rs | 8 +- client/network/test/src/lib.rs | 92 +++++++++++++++++-- client/network/test/src/sync.rs | 48 +++++++++- client/service/src/builder.rs | 2 +- .../consensus/common/src/block_validation.rs | 19 ++-- 9 files changed, 172 insertions(+), 53 deletions(-) diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index ffd8f1c8c6..b94c37d07e 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -106,7 +106,7 @@ impl TestNetFactory for GrandpaTestNet { _cfg: &ProtocolConfig, _: &PeerData, ) -> Self::Verifier { - PassThroughVerifier(false) // use non-instant finality. + PassThroughVerifier::new(false) // use non-instant finality. 
} fn make_block_import(&self, client: PeersClient) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 90076552a7..ff3748bd55 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1287,7 +1287,7 @@ impl Protocol { } let is_best = self.context_data.chain.info().best_hash == hash; - debug!(target: "sync", "Reannouncing block {:?}", hash); + debug!(target: "sync", "Reannouncing block {:?} is_best: {}", hash, is_best); self.send_announcement(&header, data, is_best, true) } @@ -2160,7 +2160,7 @@ mod tests { reserved_only: false, priority_groups: Vec::new(), }, - Box::new(DefaultBlockAnnounceValidator::new(client.clone())), + Box::new(DefaultBlockAnnounceValidator), None, Default::default(), None, diff --git a/client/network/src/protocol/sync.rs b/client/network/src/protocol/sync.rs index c3e87ca19a..bfd8c4fe21 100644 --- a/client/network/src/protocol/sync.rs +++ b/client/network/src/protocol/sync.rs @@ -48,6 +48,7 @@ use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, Header, NumberFor, Zero, One, CheckedSub, SaturatedConversion, Hash, HashFor} }; +use sp_arithmetic::traits::Saturating; use std::{fmt, ops::Range, collections::{HashMap, HashSet, VecDeque}, sync::Arc}; mod blocks; @@ -388,7 +389,7 @@ impl ChainSync { /// Returns the current sync status. pub fn status(&self) -> Status { - let best_seen = self.peers.values().max_by_key(|p| p.best_number).map(|p| p.best_number); + let best_seen = self.peers.values().map(|p| p.best_number).max(); let sync_state = if let Some(n) = best_seen { // A chain is classified as downloading if the provided best block is @@ -1186,6 +1187,21 @@ impl ChainSync { peer.recently_announced.pop_front(); } peer.recently_announced.push_back(hash.clone()); + + // Let external validator check the block announcement. + let assoc_data = announce.data.as_ref().map_or(&[][..], |v| v.as_slice()); + let is_best = match self.block_announce_validator.validate(&header, assoc_data) { + Ok(Validation::Success { is_new_best }) => is_new_best || is_best, + Ok(Validation::Failure) => { + debug!(target: "sync", "Block announcement validation of block {} from {} failed", hash, who); + return OnBlockAnnounce::Nothing + } + Err(e) => { + error!(target: "sync", "💔 Block announcement validation errored: {}", e); + return OnBlockAnnounce::Nothing + } + }; + if is_best { // update their best block peer.best_number = number; @@ -1216,20 +1232,6 @@ impl ChainSync { return OnBlockAnnounce::Nothing } - // Let external validator check the block announcement. - let assoc_data = announce.data.as_ref().map_or(&[][..], |v| v.as_slice()); - match self.block_announce_validator.validate(&header, assoc_data) { - Ok(Validation::Success) => (), - Ok(Validation::Failure) => { - debug!(target: "sync", "Block announcement validation of block {} from {} failed", hash, who); - return OnBlockAnnounce::Nothing - } - Err(e) => { - error!(target: "sync", "💔 Block announcement validation errored: {}", e); - return OnBlockAnnounce::Nothing - } - } - if ancient_parent { trace!(target: "sync", "Ignored ancient block announced from {}: {} {:?}", who, hash, header); return OnBlockAnnounce::Nothing @@ -1428,14 +1430,24 @@ fn peer_block_request( max_parallel_downloads, MAX_DOWNLOAD_AHEAD, ) { + // The end is not part of the range. 
+ let last = range.end.saturating_sub(One::one()); + + let from = if peer.best_number == last { + message::FromBlock::Hash(peer.best_hash) + } else { + message::FromBlock::Number(last) + }; + let request = message::generic::BlockRequest { id: 0, fields: attrs.clone(), - from: message::FromBlock::Number(range.start), + from, to: None, - direction: message::Direction::Ascending, + direction: message::Direction::Descending, max: Some((range.end - range.start).saturated_into::()) }; + Some((range, request)) } else { None @@ -1558,7 +1570,7 @@ mod test { let client = Arc::new(TestClientBuilder::new().build()); let info = client.info(); - let block_announce_validator = Box::new(DefaultBlockAnnounceValidator::new(client.clone())); + let block_announce_validator = Box::new(DefaultBlockAnnounceValidator); let peer_id = PeerId::random(); let mut sync = ChainSync::new( diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index c027c3be73..17d9553fa6 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -104,7 +104,7 @@ fn build_test_full_node(config: config::NetworkConfiguration) protocol_id: config::ProtocolId::from(&b"/test-protocol-name"[..]), import_queue, block_announce_validator: Box::new( - sp_consensus::block_validation::DefaultBlockAnnounceValidator::new(client.clone()), + sp_consensus::block_validation::DefaultBlockAnnounceValidator, ), metrics_registry: None, }) diff --git a/client/network/test/src/block_import.rs b/client/network/test/src/block_import.rs index 46a395700c..6762b74b6b 100644 --- a/client/network/test/src/block_import.rs +++ b/client/network/test/src/block_import.rs @@ -59,7 +59,7 @@ fn import_single_good_block_works() { &mut substrate_test_runtime_client::new(), BlockOrigin::File, block, - &mut PassThroughVerifier(true) + &mut PassThroughVerifier::new(true) ) { Ok(BlockImportResult::ImportedUnknown(ref num, ref aux, ref org)) if *num == number && *aux == expected_aux && *org == Some(peer_id) => {} @@ -74,7 +74,7 @@ fn import_single_good_known_block_is_ignored() { &mut client, BlockOrigin::File, block, - &mut PassThroughVerifier(true) + &mut PassThroughVerifier::new(true) ) { Ok(BlockImportResult::ImportedKnown(ref n)) if *n == number => {} _ => panic!() @@ -89,7 +89,7 @@ fn import_single_good_block_without_header_fails() { &mut substrate_test_runtime_client::new(), BlockOrigin::File, block, - &mut PassThroughVerifier(true) + &mut PassThroughVerifier::new(true) ) { Err(BlockImportError::IncompleteHeader(ref org)) if *org == Some(peer_id) => {} _ => panic!() @@ -101,7 +101,7 @@ fn async_import_queue_drops() { let executor = sp_core::testing::SpawnBlockingExecutor::new(); // Perform this test multiple times since it exhibits non-deterministic behavior. 
for _ in 0..100 { - let verifier = PassThroughVerifier(true); + let verifier = PassThroughVerifier::new(true); let queue = BasicQueue::new( verifier, diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index a3e644558b..2896c4e3e1 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -39,7 +39,7 @@ use sc_client_api::{ use sc_consensus::LongestChain; use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sc_network::config::Role; -use sp_consensus::block_validation::DefaultBlockAnnounceValidator; +use sp_consensus::block_validation::{DefaultBlockAnnounceValidator, BlockAnnounceValidator}; use sp_consensus::import_queue::{ BasicQueue, BoxJustificationImport, Verifier, BoxFinalityProofImport, }; @@ -67,7 +67,33 @@ type AuthorityId = sp_consensus_babe::AuthorityId; /// A Verifier that accepts all blocks and passes them on with the configured /// finality to be imported. #[derive(Clone)] -pub struct PassThroughVerifier(pub bool); +pub struct PassThroughVerifier { + finalized: bool, + fork_choice: ForkChoiceStrategy, +} + +impl PassThroughVerifier { + /// Create a new instance. + /// + /// Every verified block will use `finalized` for the `BlockImportParams`. + pub fn new(finalized: bool) -> Self { + Self { + finalized, + fork_choice: ForkChoiceStrategy::LongestChain, + } + } + + /// Create a new instance. + /// + /// Every verified block will use `finalized` for the `BlockImportParams` and + /// the given [`ForkChoiceStrategy`]. + pub fn new_with_fork_choice(finalized: bool, fork_choice: ForkChoiceStrategy) -> Self { + Self { + finalized, + fork_choice, + } + } +} /// This `Verifier` accepts all data as valid. impl Verifier for PassThroughVerifier { @@ -85,9 +111,9 @@ impl Verifier for PassThroughVerifier { .map(|blob| vec![(well_known_cache_keys::AUTHORITIES, blob.to_vec())]); let mut import = BlockImportParams::new(origin, header); import.body = body; - import.finalized = self.0; + import.finalized = self.finalized; import.justification = justification; - import.fork_choice = Some(ForkChoiceStrategy::LongestChain); + import.fork_choice = Some(self.fork_choice.clone()); Ok((import, maybe_keys)) } @@ -294,6 +320,7 @@ impl Peer { } else { Default::default() }; + self.block_import.import_block(import_block, cache).expect("block_import failed"); self.network.service().announce_block(hash, Vec::new()); at = hash; @@ -519,6 +546,15 @@ impl VerifierAdapter { } } +/// Configuration for a full peer. +#[derive(Default)] +pub struct FullPeerConfig { + /// Pruning window size. + pub keep_blocks: Option, + /// Block announce validator. + pub block_announce_validator: Option + Send + Sync>>, +} + pub trait TestNetFactory: Sized { type Verifier: 'static + Verifier; type PeerData: Default; @@ -579,12 +615,12 @@ pub trait TestNetFactory: Sized { } fn add_full_peer(&mut self) { - self.add_full_peer_with_states(None) + self.add_full_peer_with_config(Default::default()) } /// Add a full peer. 
- fn add_full_peer_with_states(&mut self, keep_blocks: Option) { - let test_client_builder = match keep_blocks { + fn add_full_peer_with_config(&mut self, config: FullPeerConfig) { + let test_client_builder = match config.keep_blocks { Some(keep_blocks) => TestClientBuilder::with_pruning_window(keep_blocks), None => TestClientBuilder::with_default_backend(), }; @@ -641,7 +677,8 @@ pub trait TestNetFactory: Sized { transaction_pool: Arc::new(EmptyTransactionPool), protocol_id: ProtocolId::from(&b"test-protocol-name"[..]), import_queue, - block_announce_validator: Box::new(DefaultBlockAnnounceValidator::new(client.clone())), + block_announce_validator: config.block_announce_validator + .unwrap_or(Box::new(DefaultBlockAnnounceValidator)), metrics_registry: None, }).unwrap(); @@ -720,7 +757,7 @@ pub trait TestNetFactory: Sized { transaction_pool: Arc::new(EmptyTransactionPool), protocol_id: ProtocolId::from(&b"test-protocol-name"[..]), import_queue, - block_announce_validator: Box::new(DefaultBlockAnnounceValidator::new(client.clone())), + block_announce_validator: Box::new(DefaultBlockAnnounceValidator), metrics_registry: None, }).unwrap(); @@ -787,6 +824,20 @@ pub trait TestNetFactory: Sized { Poll::Ready(()) } + /// Polls the testnet until all peers are connected to each other. + /// + /// Must be executed in a task context. + fn poll_until_connected(&mut self, cx: &mut FutureContext) -> Poll<()> { + self.poll(cx); + + let num_peers = self.peers().len(); + if self.peers().iter().all(|p| p.num_peers() == num_peers - 1) { + return Poll::Ready(()) + } + + Poll::Pending + } + /// Blocks the current thread until we are sync'ed. /// /// Calls `poll_until_sync` repeatedly. @@ -801,6 +852,15 @@ pub trait TestNetFactory: Sized { futures::executor::block_on(futures::future::poll_fn::<(), _>(|cx| self.poll_until_idle(cx))); } + /// Blocks the current thread until all peers are connected to each other. + /// + /// Calls `poll_until_connected` repeatedly with the runtime passed as parameter. + fn block_until_connected(&mut self) { + futures::executor::block_on( + futures::future::poll_fn::<(), _>(|cx| self.poll_until_connected(cx)), + ); + } + /// Polls the testnet. Processes all the pending actions and returns `NotReady`. fn poll(&mut self, cx: &mut FutureContext) { self.mut_peers(|peers| { @@ -831,6 +891,17 @@ pub trait TestNetFactory: Sized { pub struct TestNet { peers: Vec>, + fork_choice: ForkChoiceStrategy, +} + +impl TestNet { + /// Create a `TestNet` that used the given fork choice rule. 
+ pub fn with_fork_choice(fork_choice: ForkChoiceStrategy) -> Self { + Self { + peers: Vec::new(), + fork_choice, + } + } } impl TestNetFactory for TestNet { @@ -841,13 +912,14 @@ impl TestNetFactory for TestNet { fn from_config(_config: &ProtocolConfig) -> Self { TestNet { peers: Vec::new(), + fork_choice: ForkChoiceStrategy::LongestChain, } } fn make_verifier(&self, _client: PeersClient, _config: &ProtocolConfig, _peer_data: &()) -> Self::Verifier { - PassThroughVerifier(false) + PassThroughVerifier::new_with_fork_choice(false, self.fork_choice.clone()) } fn peer(&mut self, i: usize) -> &mut Peer<()> { diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 0269eb3562..1cf2a8fee3 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -20,6 +20,8 @@ use sp_consensus::BlockOrigin; use std::time::Duration; use futures::executor::block_on; use super::*; +use sp_consensus::block_validation::Validation; +use substrate_test_runtime::Header; fn test_ancestor_search_when_common_is(n: usize) { let _ = ::env_logger::try_init(); @@ -582,10 +584,10 @@ fn can_sync_explicit_forks() { #[test] fn syncs_header_only_forks() { - let _ = ::env_logger::try_init(); + let _ = env_logger::try_init(); let mut net = TestNet::new(0); - net.add_full_peer_with_states(None); - net.add_full_peer_with_states(Some(3)); + net.add_full_peer_with_config(Default::default()); + net.add_full_peer_with_config(FullPeerConfig { keep_blocks: Some(3), ..Default::default() }); net.peer(0).push_blocks(2, false); net.peer(1).push_blocks(2, false); @@ -683,7 +685,7 @@ fn imports_stale_once() { #[test] fn can_sync_to_peers_with_wrong_common_block() { - let _ = ::env_logger::try_init(); + let _ = env_logger::try_init(); let mut net = TestNet::new(2); net.peer(0).push_blocks(2, true); @@ -710,3 +712,41 @@ fn can_sync_to_peers_with_wrong_common_block() { assert!(net.peer(1).client().header(&BlockId::Hash(final_hash)).unwrap().is_some()); } +/// Returns `is_new_best = true` for each validated announcement. +struct NewBestBlockAnnounceValidator; + +impl BlockAnnounceValidator for NewBestBlockAnnounceValidator { + fn validate( + &mut self, + _: &Header, + _: &[u8], + ) -> Result> { + Ok(Validation::Success { is_new_best: true }) + } +} + +#[test] +fn sync_blocks_when_block_announce_validator_says_it_is_new_best() { + let _ = env_logger::try_init(); + log::trace!(target: "sync", "Test"); + let mut net = TestNet::with_fork_choice(ForkChoiceStrategy::Custom(false)); + net.add_full_peer_with_config(Default::default()); + net.add_full_peer_with_config(Default::default()); + net.add_full_peer_with_config(FullPeerConfig { + block_announce_validator: Some(Box::new(NewBestBlockAnnounceValidator)), + ..Default::default() + }); + + net.block_until_connected(); + + let block_hash = net.peer(0).push_blocks(1, false); + + while !net.peer(2).has_block(&block_hash) { + net.block_until_idle(); + } + + // Peer1 should not have the block, because peer 0 did not reported the block + // as new best. However, peer2 has a special block announcement validator + // that flags all blocks as `is_new_best` and thus, it should have synced the blocks. 
+ assert!(!net.peer(1).has_block(&block_hash)); +} diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 8c96f514dd..234356856b 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -1426,7 +1426,7 @@ fn build_network( let block_announce_validator = if let Some(f) = block_announce_validator_builder { f(client.clone()) } else { - Box::new(DefaultBlockAnnounceValidator::new(client.clone())) + Box::new(DefaultBlockAnnounceValidator) }; let network_params = sc_network::config::Params { diff --git a/primitives/consensus/common/src/block_validation.rs b/primitives/consensus/common/src/block_validation.rs index e8054f3ae4..66f960f16f 100644 --- a/primitives/consensus/common/src/block_validation.rs +++ b/primitives/consensus/common/src/block_validation.rs @@ -36,7 +36,10 @@ impl, B: Block> Chain for Arc { #[derive(Debug, PartialEq, Eq)] pub enum Validation { /// Valid block announcement. - Success, + Success { + /// Is this the new best block of the node? + is_new_best: bool, + }, /// Invalid block announcement. Failure, } @@ -49,18 +52,10 @@ pub trait BlockAnnounceValidator { /// Default implementation of `BlockAnnounceValidator`. #[derive(Debug)] -pub struct DefaultBlockAnnounceValidator { - chain: C -} - -impl DefaultBlockAnnounceValidator { - pub fn new(chain: C) -> Self { - Self { chain } - } -} +pub struct DefaultBlockAnnounceValidator; -impl> BlockAnnounceValidator for DefaultBlockAnnounceValidator { +impl BlockAnnounceValidator for DefaultBlockAnnounceValidator { fn validate(&mut self, _h: &B::Header, _d: &[u8]) -> Result> { - Ok(Validation::Success) + Ok(Validation::Success { is_new_best: false }) } } -- GitLab From 996a86caed165cbb405dfaebe52e681763167609 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Tue, 30 Jun 2020 18:59:36 +0100 Subject: [PATCH 102/144] grandpa: minor cleanups in communication module (#6371) * grandpa: replace Result<(), ()> with Option<()> * grandpa: replace &Option with Option<&T> * grandpa: cleanup local id and keystore usages * grandpa: return bool on check_message_signature * grandpa: fix erroneous log message on startup * grandpa: fix test --- .../src/communication/gossip.rs | 12 ++-- .../finality-grandpa/src/communication/mod.rs | 61 ++++++++++++------- client/finality-grandpa/src/environment.rs | 10 ++- client/finality-grandpa/src/justification.rs | 4 +- client/finality-grandpa/src/lib.rs | 6 +- client/finality-grandpa/src/observer.rs | 2 +- client/finality-grandpa/src/tests.rs | 3 +- frame/grandpa/src/equivocation.rs | 2 +- primitives/finality-grandpa/src/lib.rs | 30 ++++----- 9 files changed, 79 insertions(+), 51 deletions(-) diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index c96301ede8..7d9fe4e7f2 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -750,7 +750,11 @@ impl Inner { Round(1), )), Some(ref mut v) => if v.set_id == set_id { - if self.authorities != authorities { + let diff_authorities = + self.authorities.iter().collect::>() != + authorities.iter().collect(); + + if diff_authorities { debug!(target: "afg", "Gossip validator noted set {:?} twice with different authorities. 
\ Was the authority set hard forked?", @@ -829,7 +833,7 @@ impl Inner { return Action::Discard(cost::UNKNOWN_VOTER); } - if let Err(()) = sp_finality_grandpa::check_message_signature( + if !sp_finality_grandpa::check_message_signature( &full.message.message, &full.message.id, &full.message.signature, @@ -2620,12 +2624,12 @@ mod tests { fn allow_noting_different_authorities_for_same_set() { let (val, _) = GossipValidator::::new(config(), voter_set_state(), None); - let a1 = vec![AuthorityId::default()]; + let a1 = vec![AuthorityId::from_slice(&[0; 32])]; val.note_set(SetId(1), a1.clone(), |_, _| {}); assert_eq!(val.inner().read().authorities, a1); - let a2 = vec![AuthorityId::default(), AuthorityId::default()]; + let a2 = vec![AuthorityId::from_slice(&[1; 32]), AuthorityId::from_slice(&[2; 32])]; val.note_set(SetId(1), a2.clone(), |_, _| {}); assert_eq!(val.inner().read().authorities, a2); diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index e331d8b089..b7bbad9f8e 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -105,6 +105,34 @@ mod benefit { pub(super) const PER_EQUIVOCATION: i32 = 10; } +/// A type that ties together our local authority id and a keystore where it is +/// available for signing. +pub struct LocalIdKeystore((AuthorityId, BareCryptoStorePtr)); + +impl LocalIdKeystore { + /// Returns a reference to our local authority id. + fn local_id(&self) -> &AuthorityId { + &(self.0).0 + } + + /// Returns a reference to the keystore. + fn keystore(&self) -> &BareCryptoStorePtr { + &(self.0).1 + } +} + +impl AsRef for LocalIdKeystore { + fn as_ref(&self) -> &BareCryptoStorePtr { + self.keystore() + } +} + +impl From<(AuthorityId, BareCryptoStorePtr)> for LocalIdKeystore { + fn from(inner: (AuthorityId, BareCryptoStorePtr)) -> LocalIdKeystore { + LocalIdKeystore(inner) + } +} + /// If the voter set is larger than this value some telemetry events are not /// sent to avoid increasing usage resource on the node and flooding the /// telemetry server (e.g. received votes, received commits.) @@ -272,11 +300,10 @@ impl> NetworkBridge { /// network all within the current set. 
pub(crate) fn round_communication( &self, - keystore: Option, + keystore: Option, round: Round, set_id: SetId, voters: Arc>, - local_key: Option, has_voted: HasVoted, ) -> ( impl Stream> + Unpin, @@ -288,9 +315,10 @@ impl> NetworkBridge { &*voters, ); - let local_id = local_key.and_then(|id| { - if voters.contains(&id) { - Some(id) + let keystore = keystore.and_then(|ks| { + let id = ks.local_id(); + if voters.contains(id) { + Some(ks) } else { None } @@ -350,11 +378,10 @@ impl> NetworkBridge { let (tx, out_rx) = mpsc::channel(0); let outgoing = OutgoingMessages:: { - keystore: keystore.clone(), + keystore, round: round.0, set_id: set_id.0, network: self.gossip_engine.clone(), - local_id, sender: tx, has_voted, }; @@ -629,11 +656,10 @@ pub struct SetId(pub SetIdNumber); pub(crate) struct OutgoingMessages { round: RoundNumber, set_id: SetIdNumber, - local_id: Option, + keystore: Option, sender: mpsc::Sender>, network: Arc>>, has_voted: HasVoted, - keystore: Option, } impl Unpin for OutgoingMessages {} @@ -667,19 +693,12 @@ impl Sink> for OutgoingMessages } // when locals exist, sign messages on import - if let Some(ref public) = self.local_id { - let keystore = match &self.keystore { - Some(keystore) => keystore.clone(), - None => { - return Err(Error::Signing("Cannot sign without a keystore".to_string())) - } - }; - + if let Some(ref keystore) = self.keystore { let target_hash = *(msg.target().0); let signed = sp_finality_grandpa::sign_message( - keystore, + keystore.as_ref(), msg, - public.clone(), + keystore.local_id().clone(), self.round, self.set_id, ).ok_or( @@ -774,7 +793,7 @@ fn check_compact_commit( use crate::communication::gossip::Misbehavior; use finality_grandpa::Message as GrandpaMessage; - if let Err(()) = sp_finality_grandpa::check_message_signature_with_buffer( + if !sp_finality_grandpa::check_message_signature_with_buffer( &GrandpaMessage::Precommit(precommit.clone()), id, sig, @@ -862,7 +881,7 @@ fn check_catch_up( for (msg, id, sig) in messages { signatures_checked += 1; - if let Err(()) = sp_finality_grandpa::check_message_signature_with_buffer( + if !sp_finality_grandpa::check_message_signature_with_buffer( &msg, id, sig, diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index 6db854bacc..cc6497fc72 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -716,12 +716,18 @@ where HasVoted::No => HasVoted::No, }; + // we can only sign when we have a local key in the authority set + // and we have a reference to the keystore. 
+ let keystore = match (local_key.as_ref(), self.config.keystore.as_ref()) { + (Some(id), Some(keystore)) => Some((id.clone(), keystore.clone()).into()), + _ => None, + }; + let (incoming, outgoing) = self.network.round_communication( - self.config.keystore.clone(), + keystore, crate::communication::Round(round), crate::communication::SetId(self.set_id), self.voters.clone(), - local_key.clone(), has_voted, ); diff --git a/client/finality-grandpa/src/justification.rs b/client/finality-grandpa/src/justification.rs index b4db81f8a4..0e51a230c5 100644 --- a/client/finality-grandpa/src/justification.rs +++ b/client/finality-grandpa/src/justification.rs @@ -133,14 +133,14 @@ impl GrandpaJustification { let mut buf = Vec::new(); let mut visited_hashes = HashSet::new(); for signed in self.commit.precommits.iter() { - if sp_finality_grandpa::check_message_signature_with_buffer( + if !sp_finality_grandpa::check_message_signature_with_buffer( &finality_grandpa::Message::Precommit(signed.precommit.clone()), &signed.id, &signed.signature, self.round, set_id, &mut buf, - ).is_err() { + ) { return Err(ClientError::BadJustification( "invalid signature for precommit in grandpa justification".to_string())); } diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index 481544b5c6..fa2a6fedd8 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -593,7 +593,7 @@ fn global_communication( voters: &Arc>, client: Arc, network: &NetworkBridge, - keystore: &Option, + keystore: Option<&BareCryptoStorePtr>, metrics: Option, ) -> ( impl Stream< @@ -609,7 +609,7 @@ fn global_communication( N: NetworkT, NumberFor: BlockNumberOps, { - let is_voter = is_voter(voters, keystore.as_ref()).is_some(); + let is_voter = is_voter(voters, keystore).is_some(); // verification stream let (global_in, global_out) = network.global_communication( @@ -907,7 +907,7 @@ where &self.env.voters, self.env.client.clone(), &self.env.network, - &self.env.config.keystore, + self.env.config.keystore.as_ref(), self.metrics.as_ref().map(|m| m.until_imported.clone()), ); diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index f7179d70e7..6a7a1f07b0 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -260,7 +260,7 @@ where &voters, self.client.clone(), &self.network, - &self.keystore, + self.keystore.as_ref(), None, ); diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index b94c37d07e..50f9e8eba2 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -1160,11 +1160,10 @@ fn voter_persists_its_votes() { ); let (round_rx, round_tx) = network.round_communication( - Some(keystore), + Some((peers[1].public().into(), keystore).into()), communication::Round(1), communication::SetId(0), Arc::new(VoterSet::new(voters).unwrap()), - Some(peers[1].public().into()), HasVoted::No, ); diff --git a/frame/grandpa/src/equivocation.rs b/frame/grandpa/src/equivocation.rs index 7c6e5c6d66..1cc1620125 100644 --- a/frame/grandpa/src/equivocation.rs +++ b/frame/grandpa/src/equivocation.rs @@ -145,7 +145,7 @@ where // validate equivocation proof (check votes are different and // signatures are valid). 
- if let Err(_) = sp_finality_grandpa::check_equivocation_proof(equivocation_proof.clone()) { + if !sp_finality_grandpa::check_equivocation_proof(equivocation_proof.clone()) { return Err(ReportEquivocationValidityError::InvalidEquivocationProof.into()); } diff --git a/primitives/finality-grandpa/src/lib.rs b/primitives/finality-grandpa/src/lib.rs index 889468a352..f99880041c 100644 --- a/primitives/finality-grandpa/src/lib.rs +++ b/primitives/finality-grandpa/src/lib.rs @@ -257,7 +257,7 @@ impl Equivocation { /// Verifies the equivocation proof by making sure that both votes target /// different blocks and that its signatures are valid. -pub fn check_equivocation_proof(report: EquivocationProof) -> Result<(), ()> +pub fn check_equivocation_proof(report: EquivocationProof) -> bool where H: Clone + Encode + PartialEq, N: Clone + Encode + PartialEq, @@ -270,27 +270,27 @@ where if $equivocation.first.0.target_hash == $equivocation.second.0.target_hash && $equivocation.first.0.target_number == $equivocation.second.0.target_number { - return Err(()); + return false; } // check signatures on both votes are valid - check_message_signature( + let valid_first = check_message_signature( &$message($equivocation.first.0), &$equivocation.identity, &$equivocation.first.1, $equivocation.round_number, report.set_id, - )?; + ); - check_message_signature( + let valid_second = check_message_signature( &$message($equivocation.second.0), &$equivocation.identity, &$equivocation.second.1, $equivocation.round_number, report.set_id, - )?; + ); - return Ok(()); + return valid_first && valid_second; }; } @@ -332,7 +332,7 @@ pub fn check_message_signature( signature: &AuthoritySignature, round: RoundNumber, set_id: SetId, -) -> Result<(), ()> +) -> bool where H: Encode, N: Encode, @@ -351,7 +351,7 @@ pub fn check_message_signature_with_buffer( round: RoundNumber, set_id: SetId, buf: &mut Vec, -) -> Result<(), ()> +) -> bool where H: Encode, N: Encode, @@ -360,20 +360,20 @@ where localized_payload_with_buffer(round, set_id, message, buf); - if id.verify(&buf, signature) { - Ok(()) - } else { + let valid = id.verify(&buf, signature); + + if !valid { #[cfg(feature = "std")] debug!(target: "afg", "Bad signature on message from {:?}", id); - - Err(()) } + + valid } /// Localizes the message to the given set and round and signs the payload. 
#[cfg(feature = "std")] pub fn sign_message( - keystore: BareCryptoStorePtr, + keystore: &BareCryptoStorePtr, message: grandpa::Message, public: AuthorityId, round: RoundNumber, -- GitLab From fccb92391ea75c8d8310b293fbd9e099191166ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 1 Jul 2020 08:49:51 +0200 Subject: [PATCH 103/144] Update to make cargo-deny happy (#6547) * Update to make cargo-deny happy * Remove cargo deny from CI * change (ci): run cargo deny only on tags and schedules Co-authored-by: Denis P --- .gitlab-ci.yml | 4 ++++ Cargo.lock | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 594c9d1dde..69a9d94c08 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -158,6 +158,10 @@ cargo-audit: cargo-deny: stage: test <<: *docker-env + only: + - schedules + - tags + - web script: - cargo deny check --hide-inclusion-graph -c .maintain/deny.toml after_script: diff --git a/Cargo.lock b/Cargo.lock index 94f3f5effe..2563350f6e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8901,9 +8901,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4adb8b3e5f86b707f1b54e7c15b6de52617a823608ccda98a15d3a24222f265a" +checksum = "15cb62a0d2770787abc96e99c1cd98fcf17f94959f3af63ca85bdfb203f051b4" dependencies = [ "futures-core", "rustls", -- GitLab From e6c3388b770b3840b4509b3bdc6ccd3727309171 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 1 Jul 2020 09:00:12 +0200 Subject: [PATCH 104/144] pallet-evm: customizable chain id (#6537) --- frame/evm/src/backend.rs | 3 ++- frame/evm/src/lib.rs | 13 ++++++++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/frame/evm/src/backend.rs b/frame/evm/src/backend.rs index c610f24bb1..09f31d8aeb 100644 --- a/frame/evm/src/backend.rs +++ b/frame/evm/src/backend.rs @@ -5,6 +5,7 @@ use serde::{Serialize, Deserialize}; use codec::{Encode, Decode}; use sp_core::{U256, H256, H160}; use sp_runtime::traits::UniqueSaturatedInto; +use frame_support::traits::Get; use frame_support::storage::{StorageMap, StorageDoubleMap}; use sha3::{Keccak256, Digest}; use evm::backend::{Backend as BackendT, ApplyBackend, Apply}; @@ -91,7 +92,7 @@ impl<'vicinity, T: Trait> BackendT for Backend<'vicinity, T> { } fn chain_id(&self) -> U256 { - U256::from(sp_io::misc::chain_id()) + U256::from(T::ChainId::get()) } fn exists(&self, _address: H160) -> bool { diff --git a/frame/evm/src/lib.rs b/frame/evm/src/lib.rs index 72392629d6..eebdc66b38 100644 --- a/frame/evm/src/lib.rs +++ b/frame/evm/src/lib.rs @@ -118,6 +118,15 @@ impl Precompiles for () { } } +/// Substrate system chain ID. +pub struct SystemChainId; + +impl Get for SystemChainId { + fn get() -> u64 { + sp_io::misc::chain_id() + } +} + static ISTANBUL_CONFIG: Config = Config::istanbul(); /// EVM module trait @@ -134,6 +143,8 @@ pub trait Trait: frame_system::Trait + pallet_timestamp::Trait { type Event: From> + Into<::Event>; /// Precompiles associated with this EVM engine. type Precompiles: Precompiles; + /// Chain ID of EVM. + type ChainId: Get; /// EVM config used in the module. fn config() -> &'static Config { @@ -159,7 +170,7 @@ decl_storage! 
{ trait Store for Module as EVM { Accounts get(fn accounts): map hasher(blake2_128_concat) H160 => Account; AccountCodes get(fn account_codes): map hasher(blake2_128_concat) H160 => Vec; - AccountStorages get(fn account_storages): + AccountStorages get(fn account_storages): double_map hasher(blake2_128_concat) H160, hasher(blake2_128_concat) H256 => H256; } -- GitLab From 23055a9ef77f6c577090ff57eebf510fe7a2d938 Mon Sep 17 00:00:00 2001 From: Denis Pisarev Date: Wed, 1 Jul 2020 09:36:12 +0200 Subject: [PATCH 105/144] Fix runtime benchmarks CI (#6545) * debug (ci): ci config [skip ci] * debug (ci): fix runtime benchmarks * fix identity benchmarks * fix utility benchmarks * Revert "debug (ci): ci config [skip ci]" This reverts commit 081b175b5e95604520c79ea4e5822b84ea35ddaa. * change (ci): touch ci config to run CI Co-authored-by: Shawn Tabrizi Co-authored-by: Gav Wood --- .gitlab-ci.yml | 4 ++-- frame/identity/src/lib.rs | 16 +++++++++++++--- frame/utility/src/benchmarking.rs | 13 ++++++++++++- frame/utility/src/tests.rs | 6 ++++-- 4 files changed, 31 insertions(+), 8 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 69a9d94c08..d3a7f36980 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -263,7 +263,7 @@ test-wasmtime: variables: <<: *default-vars # Enable debug assertions since we are running optimized builds for testing - # but still want to have debug assertions. + # but still want to have debug assertions. RUSTFLAGS: -Cdebug-assertions=y RUST_BACKTRACE: 1 except: @@ -289,7 +289,7 @@ test-runtime-benchmarks: - $DEPLOY_TAG script: - cd bin/node/cli - - WASM_BUILD_NO_COLOR=1 time cargo test --release --verbose --features runtime-benchmarks + - WASM_BUILD_NO_COLOR=1 time cargo test --workspace --release --verbose --features runtime-benchmarks - sccache -s test-linux-stable-int: diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index 2768340403..19b23a644d 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -1151,7 +1151,7 @@ mod tests { ord_parameter_types, }; use sp_core::H256; - use frame_system::EnsureSignedBy; + use frame_system::{EnsureSignedBy, EnsureOneOf, EnsureRoot}; // The testing primitives are very useful for avoiding having to work with signatures // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. 
use sp_runtime::{ @@ -1221,6 +1221,16 @@ mod tests { pub const One: u64 = 1; pub const Two: u64 = 2; } + type EnsureOneOrRoot = EnsureOneOf< + u64, + EnsureRoot, + EnsureSignedBy + >; + type EnsureTwoOrRoot = EnsureOneOf< + u64, + EnsureRoot, + EnsureSignedBy + >; impl Trait for Test { type Event = (); type Currency = Balances; @@ -1231,8 +1241,8 @@ mod tests { type MaxSubAccounts = MaxSubAccounts; type MaxAdditionalFields = MaxAdditionalFields; type MaxRegistrars = MaxRegistrars; - type RegistrarOrigin = EnsureSignedBy; - type ForceOrigin = EnsureSignedBy; + type RegistrarOrigin = EnsureOneOrRoot; + type ForceOrigin = EnsureTwoOrRoot; } type System = frame_system::Module; type Balances = pallet_balances::Module; diff --git a/frame/utility/src/benchmarking.rs b/frame/utility/src/benchmarking.rs index 8d98178957..155a279807 100644 --- a/frame/utility/src/benchmarking.rs +++ b/frame/utility/src/benchmarking.rs @@ -20,11 +20,19 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use frame_system::RawOrigin; +use frame_system::{RawOrigin, EventRecord}; use frame_benchmarking::{benchmarks, account}; const SEED: u32 = 0; +fn assert_last_event(generic_event: ::Event) { + let events = frame_system::Module::::events(); + let system_event: ::Event = generic_event.into(); + // compare to the last event record + let EventRecord { event, .. } = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + benchmarks! { _ { } @@ -37,6 +45,9 @@ benchmarks! { } let caller = account("caller", 0, SEED); }: _(RawOrigin::Signed(caller), calls) + verify { + assert_last_event::(Event::BatchCompleted.into()) + } as_derivative { let u in 0 .. 1000; diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index c0a6499250..349d748a37 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -104,6 +104,8 @@ impl Filter for TestBaseCallFilter { fn filter(c: &Call) -> bool { match *c { Call::Balances(_) => true, + // For benchmarking, this acts as a noop call + Call::System(frame_system::Call::remark(..)) => true, _ => false, } } @@ -163,7 +165,7 @@ fn as_derivative_filters() { assert_noop!(Utility::as_derivative( Origin::signed(1), 1, - Box::new(Call::System(frame_system::Call::remark(vec![]))), + Box::new(Call::System(frame_system::Call::suicide())), ), DispatchError::BadOrigin); }); } @@ -208,7 +210,7 @@ fn batch_with_signed_filters() { new_test_ext().execute_with(|| { assert_ok!( Utility::batch(Origin::signed(1), vec![ - Call::System(frame_system::Call::remark(vec![])) + Call::System(frame_system::Call::suicide()) ]), ); expect_event(Event::BatchInterrupted(0, DispatchError::BadOrigin)); -- GitLab From d855a5e90e9938593edff12c588e37fb201e6a08 Mon Sep 17 00:00:00 2001 From: David Date: Wed, 1 Jul 2020 10:22:47 +0200 Subject: [PATCH 106/144] Fix mocking multiple http calls in the same function call (#6510) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix mocking multiple http calls in the same function call Fixes an issue where a function call would perform more than one http request and wait for each to complete before proceeding. The `RequestId` comes from the length of the `requests` collection in the `OffchainState` and if a request is completed before the next one starts it will be removed and the "next expected" will be off by one. This PR tries to fix that by using a request counter that tracks how many requests have been performed so that we can `remove()` items from the `expected_requests` at the right index. 
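In its final shape (after the review notes below), the fix lets a test that performs several HTTP calls in one function register its mocks in the order the calls will be made; a minimal sketch with illustrative URIs and payloads:

    let (offchain, state) = testing::TestOffchainExt::new();
    let mut t = sp_io::TestExternalities::default();
    t.register_extension(OffchainExt::new(offchain));

    {
        let mut state = state.write();
        // First expected call...
        state.expect_request(testing::PendingRequest {
            method: "GET".into(),
            uri: "https://example.com/first".into(),
            response: Some(b"one".to_vec()),
            sent: true,
            ..Default::default()
        });
        // ...and the second; expectations are matched strictly in insertion
        // order, so no explicit request id is needed any more.
        state.expect_request(testing::PendingRequest {
            method: "GET".into(),
            uri: "https://example.com/second".into(),
            response: Some(b"two".to_vec()),
            sent: true,
            ..Default::default()
        });
    }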
I suspect that this is a sub-optimal soluton and perhaps requests and their mocks should live side by side in the same collection, e.g. in a tuple of `(PendingRequest, Option)`. * Update primitives/core/src/offchain/testing.rs Co-authored-by: Bernhard Schuster * Update primitives/core/src/offchain/testing.rs Co-authored-by: Bernhard Schuster * Panic on overflow * Update primitives/core/src/offchain/testing.rs Co-authored-by: Bastian Köcher * Use a Deque and push/pop expected requests * fix test Co-authored-by: Bernhard Schuster Co-authored-by: Bastian Köcher --- client/executor/src/integration_tests/mod.rs | 4 +- frame/example-offchain-worker/src/tests.rs | 48 +++++++++++++++++++- primitives/core/src/offchain/testing.rs | 14 +++--- 3 files changed, 56 insertions(+), 10 deletions(-) diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index f07e98178b..21924270b8 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -497,9 +497,7 @@ fn offchain_http_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let (offchain, state) = testing::TestOffchainExt::new(); ext.register_extension(OffchainExt::new(offchain)); - state.write().expect_request( - 0, - testing::PendingRequest { + state.write().expect_request(testing::PendingRequest { method: "POST".into(), uri: "http://localhost:12345".into(), body: vec![1, 2, 3, 4], diff --git a/frame/example-offchain-worker/src/tests.rs b/frame/example-offchain-worker/src/tests.rs index ef910b95ff..b300809f41 100644 --- a/frame/example-offchain-worker/src/tests.rs +++ b/frame/example-offchain-worker/src/tests.rs @@ -154,6 +154,52 @@ fn should_make_http_call_and_parse_result() { }); } +#[test] +fn knows_how_to_mock_several_http_calls() { + let (offchain, state) = testing::TestOffchainExt::new(); + let mut t = sp_io::TestExternalities::default(); + t.register_extension(OffchainExt::new(offchain)); + + { + let mut state = state.write(); + state.expect_request(testing::PendingRequest { + method: "GET".into(), + uri: "https://min-api.cryptocompare.com/data/price?fsym=BTC&tsyms=USD".into(), + response: Some(br#"{"USD": 1}"#.to_vec()), + sent: true, + ..Default::default() + }); + + state.expect_request(testing::PendingRequest { + method: "GET".into(), + uri: "https://min-api.cryptocompare.com/data/price?fsym=BTC&tsyms=USD".into(), + response: Some(br#"{"USD": 2}"#.to_vec()), + sent: true, + ..Default::default() + }); + + state.expect_request(testing::PendingRequest { + method: "GET".into(), + uri: "https://min-api.cryptocompare.com/data/price?fsym=BTC&tsyms=USD".into(), + response: Some(br#"{"USD": 3}"#.to_vec()), + sent: true, + ..Default::default() + }); + } + + + t.execute_with(|| { + let price1 = Example::fetch_price().unwrap(); + let price2 = Example::fetch_price().unwrap(); + let price3 = Example::fetch_price().unwrap(); + + assert_eq!(price1, 100); + assert_eq!(price2, 200); + assert_eq!(price3, 300); + }) + +} + #[test] fn should_submit_signed_transaction_on_chain() { const PHRASE: &str = "news slush supreme milk chapter athlete soap sausage put clutch what kitten"; @@ -319,7 +365,7 @@ fn should_submit_raw_unsigned_transaction_on_chain() { } fn price_oracle_response(state: &mut testing::OffchainState) { - state.expect_request(0, testing::PendingRequest { + state.expect_request(testing::PendingRequest { method: "GET".into(), uri: "https://min-api.cryptocompare.com/data/price?fsym=BTC&tsyms=USD".into(), response: 
Some(br#"{"USD": 155.23}"#.to_vec()), diff --git a/primitives/core/src/offchain/testing.rs b/primitives/core/src/offchain/testing.rs index a14e906f54..9145477722 100644 --- a/primitives/core/src/offchain/testing.rs +++ b/primitives/core/src/offchain/testing.rs @@ -21,7 +21,7 @@ //! the extra APIs. use std::{ - collections::BTreeMap, + collections::{BTreeMap, VecDeque}, sync::Arc, }; use crate::offchain::{ @@ -120,7 +120,8 @@ impl OffchainStorage for TestPersistentOffchainDB { pub struct OffchainState { /// A list of pending requests. pub requests: BTreeMap, - expected_requests: BTreeMap, + // Queue of requests that the test is expected to perform (in order). + expected_requests: VecDeque, /// Persistent local storage pub persistent_storage: TestPersistentOffchainDB, /// Local storage @@ -156,8 +157,8 @@ impl OffchainState { } fn fulfill_expected(&mut self, id: u16) { - if let Some(mut req) = self.expected_requests.remove(&RequestId(id)) { - let response = req.response.take().expect("Response checked while added."); + if let Some(mut req) = self.expected_requests.pop_back() { + let response = req.response.take().expect("Response checked when added."); let headers = std::mem::take(&mut req.response_headers); self.fulfill_pending_request(id, req, response, headers); } @@ -169,11 +170,12 @@ impl OffchainState { /// before running the actual code that utilizes them (for instance before calling into runtime). /// Expected request has to be fulfilled before this struct is dropped, /// the `response` and `response_headers` fields will be used to return results to the callers. - pub fn expect_request(&mut self, id: u16, expected: PendingRequest) { + /// Requests are expected to be performed in the insertion order. + pub fn expect_request(&mut self, expected: PendingRequest) { if expected.response.is_none() { panic!("Expected request needs to have a response."); } - self.expected_requests.insert(RequestId(id), expected); + self.expected_requests.push_front(expected); } } -- GitLab From d5d630447aa5463944e314b54f44c00333d666e1 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 1 Jul 2020 10:31:56 +0200 Subject: [PATCH 107/144] .maintain/monitoring/alerting-rules: Adjust transaction queue size alert (#6426) The transaction queue size alert has been firing with a constant 10 transactions in the queue. While maybe problematic those 10 transactions don't need to be the same across scrape intervals. Instead of alerting with a size above 10, alert based on two things: 1. Monotonically increasing queue size 2. Upper limit queue size reached --- .../alerting-rules/alerting-rule-tests.yaml | 59 +++++++++++++------ .../alerting-rules/alerting-rules.yaml | 29 +++++---- 2 files changed, 60 insertions(+), 28 deletions(-) diff --git a/.maintain/monitoring/alerting-rules/alerting-rule-tests.yaml b/.maintain/monitoring/alerting-rules/alerting-rule-tests.yaml index 069cfaf977..288750be3c 100644 --- a/.maintain/monitoring/alerting-rules/alerting-rule-tests.yaml +++ b/.maintain/monitoring/alerting-rules/alerting-rule-tests.yaml @@ -18,14 +18,14 @@ tests: pod="polkadot-abcdef01234-abcdef", instance="polkadot-abcdef01234-abcdef", }' - values: '10+1x30' # 10 11 12 13 .. 40 + values: '11+1x10 22+2x30 10043x5' - series: 'polkadot_sub_txpool_validations_finished{ job="polkadot", pod="polkadot-abcdef01234-abcdef", instance="polkadot-abcdef01234-abcdef", }' - values: '0x30' # 0 0 0 0 .. 
0 + values: '0+1x42 42x5' - series: 'polkadot_block_height{ status="best", job="polkadot", @@ -161,11 +161,17 @@ tests: # Transaction queue ###################################################################### - - eval_time: 10m - alertname: TransactionQueueSize - exp_alerts: - eval_time: 11m - alertname: TransactionQueueSize + alertname: TransactionQueueSizeIncreasing + # Number of validations scheduled and finished both grow at a rate + # of 1 in the first 10 minutes, thereby the queue is not increasing + # in size, thus don't expect an alert. + exp_alerts: + - eval_time: 22m + alertname: TransactionQueueSizeIncreasing + # Number of validations scheduled is growing twice as fast as the + # number of validations finished after minute 10. Thus expect + # warning alert after 20 minutes. exp_alerts: - exp_labels: severity: warning @@ -173,12 +179,14 @@ tests: instance: polkadot-abcdef01234-abcdef job: polkadot exp_annotations: - message: "The node polkadot-abcdef01234-abcdef has more - than 10 transactions in the queue for more than 10 - minutes" - - - eval_time: 31m - alertname: TransactionQueueSize + message: "The transaction pool size on node + polkadot-abcdef01234-abcdef has been monotonically + increasing for the last 10 minutes." + - eval_time: 43m + alertname: TransactionQueueSizeIncreasing + # Number of validations scheduled is growing twice as fast as the + # number of validations finished after minute 10. Thus expect + # both warning and critical alert after 40 minutes. exp_alerts: - exp_labels: severity: warning @@ -186,18 +194,33 @@ tests: instance: polkadot-abcdef01234-abcdef job: polkadot exp_annotations: - message: "The node polkadot-abcdef01234-abcdef has more - than 10 transactions in the queue for more than 10 - minutes" + message: "The transaction pool size on node + polkadot-abcdef01234-abcdef has been monotonically + increasing for the last 10 minutes." + - exp_labels: + severity: critical + pod: polkadot-abcdef01234-abcdef + instance: polkadot-abcdef01234-abcdef + job: polkadot + exp_annotations: + message: "The transaction pool size on node + polkadot-abcdef01234-abcdef has been monotonically + increasing for the last 30 minutes." + - eval_time: 49m + alertname: TransactionQueueSizeHigh + # After minute 43 the number of validations scheduled jumps up + # drastically while the number of validations finished stays the + # same. Thus expect an alert. + exp_alerts: - exp_labels: severity: critical pod: polkadot-abcdef01234-abcdef instance: polkadot-abcdef01234-abcdef job: polkadot exp_annotations: - message: "The node polkadot-abcdef01234-abcdef has more - than 10 transactions in the queue for more than 30 - minutes" + message: "The transaction pool size on node + polkadot-abcdef01234-abcdef has been above 10_000 for the + last 5 minutes." 
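  # Note on the `values` strings above: promtool's expanding notation 'a+bxn'
  # starts at `a` and adds `b` for each of `n` further samples (so '22+2x30'
  # expands to 22, 24, ..., 82), while a bare 'vxn' such as '10043x5' repeats
  # the constant value `v`.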
###################################################################### # Networking diff --git a/.maintain/monitoring/alerting-rules/alerting-rules.yaml b/.maintain/monitoring/alerting-rules/alerting-rules.yaml index 06d204f7af..2ed3889a2c 100644 --- a/.maintain/monitoring/alerting-rules/alerting-rules.yaml +++ b/.maintain/monitoring/alerting-rules/alerting-rules.yaml @@ -73,24 +73,33 @@ groups: # Transaction queue ############################################################################## - - alert: TransactionQueueSize - expr: 'polkadot_sub_txpool_validations_scheduled - - polkadot_sub_txpool_validations_finished > 10' + - alert: TransactionQueueSizeIncreasing + expr: 'increase(polkadot_sub_txpool_validations_scheduled[5m]) - + increase(polkadot_sub_txpool_validations_finished[5m]) > 0' for: 10m labels: severity: warning annotations: - message: 'The node {{ $labels.instance }} has more than 10 transactions in - the queue for more than 10 minutes' - - alert: TransactionQueueSize - expr: 'polkadot_sub_txpool_validations_scheduled - - polkadot_sub_txpool_validations_finished > 10' + message: 'The transaction pool size on node {{ $labels.instance }} has + been monotonically increasing for the last 10 minutes.' + - alert: TransactionQueueSizeIncreasing + expr: 'increase(polkadot_sub_txpool_validations_scheduled[5m]) - + increase(polkadot_sub_txpool_validations_finished[5m]) > 0' for: 30m labels: severity: critical annotations: - message: 'The node {{ $labels.instance }} has more than 10 transactions in - the queue for more than 30 minutes' + message: 'The transaction pool size on node {{ $labels.instance }} has + been monotonically increasing for the last 30 minutes.' + - alert: TransactionQueueSizeHigh + expr: 'polkadot_sub_txpool_validations_scheduled - + polkadot_sub_txpool_validations_finished > 10000' + for: 5m + labels: + severity: critical + annotations: + message: 'The transaction pool size on node {{ $labels.instance }} has + been above 10_000 for the last 5 minutes.' 
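  # A quick way to exercise these rules and the unit tests above locally,
  # assuming `promtool` from a recent Prometheus release is on the PATH
  # (paths as in this repository):
  #
  #   promtool check rules .maintain/monitoring/alerting-rules/alerting-rules.yaml
  #   promtool test rules .maintain/monitoring/alerting-rules/alerting-rule-tests.yaml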
############################################################################## # Networking -- GitLab From 176bda52a49c1a5d07963327a9fef960ca6a34a3 Mon Sep 17 00:00:00 2001 From: s3krit Date: Wed, 1 Jul 2020 11:33:10 +0200 Subject: [PATCH 108/144] Fix auto-label-issues.yml (#6536) statements in github actions cannot use ", must use ' Co-authored-by: Gav Wood --- .github/workflows/auto-label-issues.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/auto-label-issues.yml b/.github/workflows/auto-label-issues.yml index ce0bad59d1..cd889b5941 100644 --- a/.github/workflows/auto-label-issues.yml +++ b/.github/workflows/auto-label-issues.yml @@ -8,10 +8,10 @@ on: jobs: label-new-issues: - runs-on: ubuntu-latest + runs-on: ubuntu-latest steps: - name: Label drafts uses: andymckay/labeler@master - if: github.event.issue.author_association == "NONE" + if: github.event.issue.author_association == 'NONE' with: add-labels: 'Z0-unconfirmed' -- GitLab From 4919c808cb75618d95762944aa6f5664c1aa3b59 Mon Sep 17 00:00:00 2001 From: s3krit Date: Wed, 1 Jul 2020 11:33:28 +0200 Subject: [PATCH 109/144] [CI] Add Github Action to notify devops of PRs labelled with A1-needsburnin (#6525) * add burnin-label-notification.yml * fix burnin-label-notification.yml * fix burnin-label-notification.yml * fix burnin-label-notification.yml * fix burnin-label-notification.yml * Update .github/workflows/burnin-label-notification.yml Co-authored-by: Benjamin Kampmann Co-authored-by: Benjamin Kampmann Co-authored-by: Gav Wood --- .github/workflows/burnin-label-notification.yml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 .github/workflows/burnin-label-notification.yml diff --git a/.github/workflows/burnin-label-notification.yml b/.github/workflows/burnin-label-notification.yml new file mode 100644 index 0000000000..da422a659e --- /dev/null +++ b/.github/workflows/burnin-label-notification.yml @@ -0,0 +1,17 @@ +name: Notify devops when burn-in label applied +on: + pull_request: + types: [labeled] + +jobs: + notify-devops: + runs-on: ubuntu-latest + steps: + - name: Notify devops + if: github.event.label.name == 'A1-needsburnin' + uses: s3krit/matrix-message-action@v0.0.2 + with: + room_id: ${{ secrets.POLKADOT_DEVOPS_MATRIX_ROOM_ID }} + access_token: ${{ secrets.POLKADOT_DEVOPS_MATRIX_ACCESS_TOKEN }} + message: "@room Burn-in request received for [${{ github.event.pull_request.title }}](${{ github.event.pull_request.html_url }})" + server: "matrix.parity.io" -- GitLab From 8ef1ac0ee13d2a72cc1c391d4624dfaaafe641e8 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 1 Jul 2020 11:59:07 +0200 Subject: [PATCH 110/144] Restrict `Protected` to some heap types. (#6471) * Restrict `Protected` to some heap types. * Comment abut Protected usage. * Remove Protected from crypto, use secrecy crate for existing uses. * use a parse function * fix error convert * Rename and move secretY string function. 
* std result --- Cargo.lock | 10 +++++ client/cli/src/params/keystore_params.rs | 33 ++++++++++----- client/keystore/src/lib.rs | 27 ++++++++---- client/service/src/config.rs | 4 +- primitives/core/Cargo.toml | 2 + primitives/core/src/crypto.rs | 53 ++++-------------------- 6 files changed, 62 insertions(+), 67 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2563350f6e..afdfb5e81c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7089,6 +7089,15 @@ dependencies = [ "untrusted", ] +[[package]] +name = "secrecy" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9182278ed645df3477a9c27bfee0621c621aa16f6972635f7f795dae3d81070f" +dependencies = [ + "zeroize", +] + [[package]] name = "security-framework" version = "0.4.2" @@ -7616,6 +7625,7 @@ dependencies = [ "rand_chacha 0.2.2", "regex", "schnorrkel", + "secrecy", "serde", "serde_json", "sha2", diff --git a/client/cli/src/params/keystore_params.rs b/client/cli/src/params/keystore_params.rs index 840cc51dff..8b20dd247a 100644 --- a/client/cli/src/params/keystore_params.rs +++ b/client/cli/src/params/keystore_params.rs @@ -21,6 +21,7 @@ use sc_service::config::KeystoreConfig; use std::fs; use std::path::PathBuf; use structopt::StructOpt; +use sp_core::crypto::SecretString; /// default sub directory for the key store const DEFAULT_KEYSTORE_CONFIG_PATH: &'static str = "keystore"; @@ -42,9 +43,10 @@ pub struct KeystoreParams { /// Password used by the keystore. #[structopt( long = "password", + parse(try_from_str = secret_string_from_str), conflicts_with_all = &[ "password-interactive", "password-filename" ] )] - pub password: Option, + pub password: Option, /// File that contains the password used by the keystore. #[structopt( @@ -56,26 +58,37 @@ pub struct KeystoreParams { pub password_filename: Option, } +/// Parse a sercret string, returning a displayable error. +pub fn secret_string_from_str(s: &str) -> std::result::Result { + Ok(std::str::FromStr::from_str(s) + .map_err(|_e| "Could not get SecretString".to_string())?) +} + impl KeystoreParams { /// Get the keystore configuration for the parameters pub fn keystore_config(&self, base_path: &PathBuf) -> Result { let password = if self.password_interactive { #[cfg(not(target_os = "unknown"))] { - Some(input_keystore_password()?.into()) + let mut password = input_keystore_password()?; + let secret = std::str::FromStr::from_str(password.as_str()) + .map_err(|()| "Error reading password")?; + use sp_core::crypto::Zeroize; + password.zeroize(); + Some(secret) } #[cfg(target_os = "unknown")] None } else if let Some(ref file) = self.password_filename { - Some( - fs::read_to_string(file) - .map_err(|e| format!("{}", e))? 
- .into(), - ) - } else if let Some(ref password) = self.password { - Some(password.clone().into()) + let mut password = fs::read_to_string(file) + .map_err(|e| format!("{}", e))?; + let secret = std::str::FromStr::from_str(password.as_str()) + .map_err(|()| "Error reading password")?; + use sp_core::crypto::Zeroize; + password.zeroize(); + Some(secret) } else { - None + self.password.clone() }; let path = self diff --git a/client/keystore/src/lib.rs b/client/keystore/src/lib.rs index aed60ab0cf..7fec32bae2 100644 --- a/client/keystore/src/lib.rs +++ b/client/keystore/src/lib.rs @@ -19,7 +19,7 @@ #![warn(missing_docs)] use std::{collections::{HashMap, HashSet}, path::PathBuf, fs::{self, File}, io::{self, Write}, sync::Arc}; use sp_core::{ - crypto::{IsWrappedBy, CryptoTypePublicPair, KeyTypeId, Pair as PairT, Protected, Public}, + crypto::{IsWrappedBy, CryptoTypePublicPair, KeyTypeId, Pair as PairT, ExposeSecret, SecretString, Public}, traits::{BareCryptoStore, Error as TraitError}, sr25519::{Public as Sr25519Public, Pair as Sr25519Pair}, vrf::{VRFTranscriptData, VRFSignature, make_transcript}, @@ -95,14 +95,14 @@ pub struct Store { path: Option, /// Map over `(KeyTypeId, Raw public key)` -> `Key phrase/seed` additional: HashMap<(KeyTypeId, Vec), String>, - password: Option>, + password: Option, } impl Store { /// Open the store at the given path. /// /// Optionally takes a password that will be used to encrypt/decrypt the keys. - pub fn open>(path: T, password: Option>) -> Result { + pub fn open>(path: T, password: Option) -> Result { let path = path.into(); fs::create_dir_all(&path)?; @@ -155,7 +155,7 @@ impl Store { pub fn insert_by_type(&self, key_type: KeyTypeId, suri: &str) -> Result { let pair = Pair::from_string( suri, - self.password.as_ref().map(|p| &***p) + self.password() ).map_err(|_| Error::InvalidSeed)?; self.insert_unknown(key_type, suri, pair.public().as_slice()) .map_err(|_| Error::Unavailable)?; @@ -173,7 +173,7 @@ impl Store { /// /// Places it into the file system store. 
pub fn generate_by_type(&self, key_type: KeyTypeId) -> Result { - let (pair, phrase, _) = Pair::generate_with_phrase(self.password.as_ref().map(|p| &***p)); + let (pair, phrase, _) = Pair::generate_with_phrase(self.password()); if let Some(path) = self.key_file_path(pair.public().as_slice(), key_type) { let mut file = File::create(path)?; serde_json::to_writer(&file, &phrase)?; @@ -229,7 +229,7 @@ impl Store { let phrase = self.key_phrase_by_type(public.as_slice(), key_type)?; let pair = Pair::from_string( &phrase, - self.password.as_ref().map(|p| &***p), + self.password(), ).map_err(|_| Error::InvalidPhrase)?; if &pair.public() == public { @@ -434,7 +434,9 @@ impl BareCryptoStore for Store { } fn password(&self) -> Option<&str> { - self.password.as_ref().map(|x| x.as_str()) + self.password.as_ref() + .map(|p| p.expose_secret()) + .map(|p| p.as_str()) } fn has_keys(&self, public_keys: &[(Vec, KeyTypeId)]) -> bool { @@ -464,6 +466,7 @@ mod tests { use super::*; use tempfile::TempDir; use sp_core::{testing::SR25519, crypto::Ss58Codec}; + use std::str::FromStr; #[test] fn basic_store() { @@ -504,7 +507,10 @@ mod tests { fn password_being_used() { let password = String::from("password"); let temp_dir = TempDir::new().unwrap(); - let store = Store::open(temp_dir.path(), Some(password.clone().into())).unwrap(); + let store = Store::open( + temp_dir.path(), + Some(FromStr::from_str(password.as_str()).unwrap()), + ).unwrap(); let pair: ed25519::AppPair = store.write().generate().unwrap(); assert_eq!( @@ -516,7 +522,10 @@ mod tests { let store = Store::open(temp_dir.path(), None).unwrap(); assert!(store.read().key_pair::(&pair.public()).is_err()); - let store = Store::open(temp_dir.path(), Some(password.into())).unwrap(); + let store = Store::open( + temp_dir.path(), + Some(FromStr::from_str(password.as_str()).unwrap()), + ).unwrap(); assert_eq!( pair.public(), store.read().key_pair::(&pair.public()).unwrap().public(), diff --git a/client/service/src/config.rs b/client/service/src/config.rs index fb4dbc666a..5015ce7fac 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -27,7 +27,7 @@ use sc_client_api::execution_extensions::ExecutionStrategies; use std::{io, future::Future, path::{PathBuf, Path}, pin::Pin, net::SocketAddr, sync::Arc}; pub use sc_transaction_pool::txpool::Options as TransactionPoolOptions; use sc_chain_spec::ChainSpec; -use sp_core::crypto::Protected; +use sp_core::crypto::SecretString; pub use sc_telemetry::TelemetryEndpoints; use prometheus_endpoint::Registry; #[cfg(not(target_os = "unknown"))] @@ -130,7 +130,7 @@ pub enum KeystoreConfig { /// The path of the keystore. path: PathBuf, /// Node keystore's password. - password: Option> + password: Option }, /// In-memory keystore. Recommended for in-browser nodes. 
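// A minimal sketch of building this variant now that the password is a
// `secrecy::SecretString` instead of `Protected<String>`; the path and the
// literal password are illustrative:
//
//     use sp_core::crypto::{ExposeSecret, SecretString};
//     use std::str::FromStr;
//
//     let password = SecretString::from_str("correct horse battery staple")
//         .expect("parsing a secret string does not fail");
//     let keystore = KeystoreConfig::Path {
//         path: "/tmp/keystore".into(),
//         password: Some(password),
//     };
//
//     // The plain text is only reachable through `ExposeSecret::expose_secret`.
//     if let KeystoreConfig::Path { password: Some(ref secret), .. } = keystore {
//         assert_eq!(secret.expose_secret().as_str(), "correct horse battery staple");
//     }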
InMemory, diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 33b4a7bc82..6a7568a626 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -31,6 +31,7 @@ tiny-bip39 = { version = "0.7", optional = true } regex = { version = "1.3.1", optional = true } num-traits = { version = "0.2.8", default-features = false } zeroize = { version = "1.0.0", default-features = false } +secrecy = { version = "0.6.0", default-features = false } lazy_static = { version = "1.4.0", default-features = false, optional = true } parking_lot = { version = "0.10.0", optional = true } sp-debug-derive = { version = "2.0.0-rc4", path = "../debug-derive" } @@ -106,6 +107,7 @@ std = [ "sp-storage/std", "sp-runtime-interface/std", "zeroize/alloc", + "secrecy/alloc", "futures", "futures/thread-pool", "libsecp256k1/std", diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index aa77345993..745f5776fe 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -37,10 +37,16 @@ use regex::Regex; use base58::{FromBase58, ToBase58}; #[cfg(feature = "std")] use crate::hexdisplay::HexDisplay; -use zeroize::Zeroize; #[doc(hidden)] pub use sp_std::ops::Deref; use sp_runtime_interface::pass_by::PassByInner; +/// Trait to zeroize a memory buffer. +pub use zeroize::Zeroize; +/// Trait for accessing reference to `SecretString`. +pub use secrecy::ExposeSecret; +/// A store for sensitive data. +#[cfg(feature = "std")] +pub use secrecy::SecretString; /// The root phrase for our publicly known keys. pub const DEV_PHRASE: &str = "bottom drive obey lake curtain smoke basket hold race lonely fit walk"; @@ -79,51 +85,6 @@ impl> UncheckedInto for S { } } -/// A store for sensitive data. -/// -/// Calls `Zeroize::zeroize` upon `Drop`. -#[derive(Clone)] -pub struct Protected(T); - -impl AsRef for Protected { - fn as_ref(&self) -> &T { - &self.0 - } -} - -impl sp_std::ops::Deref for Protected { - type Target = T; - - fn deref(&self) -> &T { - &self.0 - } -} - -#[cfg(feature = "std")] -impl std::fmt::Debug for Protected { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(fmt, "") - } -} - -impl From for Protected { - fn from(t: T) -> Self { - Protected(t) - } -} - -impl Zeroize for Protected { - fn zeroize(&mut self) { - self.0.zeroize() - } -} - -impl Drop for Protected { - fn drop(&mut self) { - self.zeroize() - } -} - /// An error with the interpretation of a secret. #[derive(Debug, Clone, PartialEq, Eq)] #[cfg(feature = "full_crypto")] -- GitLab From 440d25337e5699929e7029d47329721854ada707 Mon Sep 17 00:00:00 2001 From: Ashley Date: Thu, 2 Jul 2020 12:57:56 +0200 Subject: [PATCH 111/144] Remove `ServiceBuilderCommand` and implement the chain ops as standalone functions instead. 
(#6543) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * :) * Slight tidy * Remove ServiceBuilderCommand * Remove whitespace * Keep task manager alive for check_block/import_blocks * Pass task_manager to run_until_exit * Make task_manager in run_until_exit and make subcommands async * Change the async_run fn to return a future and task manager * async_run should take a result fn * Apply suggestions from code review Co-authored-by: Bastian Köcher * Fix spaces in export_raw_state Co-authored-by: Bastian Köcher --- Cargo.lock | 101 +-- bin/node-template/node/src/command.rs | 5 +- bin/node/cli/src/command.rs | 6 +- client/cli/Cargo.toml | 1 + client/cli/src/commands/build_spec_cmd.rs | 11 +- client/cli/src/commands/check_block_cmd.rs | 26 +- client/cli/src/commands/export_blocks_cmd.rs | 24 +- client/cli/src/commands/export_state_cmd.rs | 27 +- client/cli/src/commands/import_blocks_cmd.rs | 23 +- client/cli/src/commands/purge_chain_cmd.rs | 6 +- client/cli/src/commands/revert_cmd.rs | 22 +- client/cli/src/runner.rs | 67 +- client/service/src/builder.rs | 58 +- client/service/src/chain_ops.rs | 614 ------------------ client/service/src/chain_ops/check_block.rs | 51 ++ client/service/src/chain_ops/export_blocks.rs | 104 +++ .../service/src/chain_ops/export_raw_state.rs | 71 ++ client/service/src/chain_ops/import_blocks.rs | 472 ++++++++++++++ client/service/src/chain_ops/mod.rs | 29 + client/service/src/chain_ops/revert_chain.rs | 43 ++ client/service/src/lib.rs | 4 +- 21 files changed, 960 insertions(+), 805 deletions(-) delete mode 100644 client/service/src/chain_ops.rs create mode 100644 client/service/src/chain_ops/check_block.rs create mode 100644 client/service/src/chain_ops/export_blocks.rs create mode 100644 client/service/src/chain_ops/export_raw_state.rs create mode 100644 client/service/src/chain_ops/import_blocks.rs create mode 100644 client/service/src/chain_ops/mod.rs create mode 100644 client/service/src/chain_ops/revert_chain.rs diff --git a/Cargo.lock b/Cargo.lock index afdfb5e81c..b3dd853538 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -195,7 +195,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d0864d84b8e07b145449be9a8537db86bf9de5ce03b913214694643b4743502" dependencies = [ "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -1040,7 +1040,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47c5e5ac752e18207b12e16b10631ae5f7f68f8805f335f9b817ead83d9ffce1" dependencies = [ "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -1080,7 +1080,7 @@ checksum = "e2323f3f47db9a0e77ce7a300605d8d2098597fc451ed1a97bb1f6411bb550a7" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -1182,7 +1182,7 @@ checksum = "2ed9afacaea0301eefb738c9deea725e6d53938004597cdc518a8cf9a7aa2f03" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -1335,7 +1335,7 @@ checksum = "030a733c8287d6213886dd487564ff5c8f6aae10278b3588ed177f9d18f8d231" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", "synstructure", ] @@ -1526,7 +1526,7 @@ dependencies = [ "frame-support-procedural-tools", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -1537,7 +1537,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -1546,7 +1546,7 @@ version = "2.0.0-rc4" dependencies = [ "proc-macro2", 
"quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -1763,7 +1763,7 @@ dependencies = [ "proc-macro-hack", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -2309,7 +2309,7 @@ checksum = "7ef5550a42e3740a0e71f909d4c861056a284060af885ae7aa6242820f920d9d" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -2448,7 +2448,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -2740,7 +2740,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f09548626b737ed64080fde595e06ce1117795b8b9fc4d2629fa36561c583171" dependencies = [ "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -4635,7 +4635,7 @@ dependencies = [ "proc-macro2", "quote 1.0.6", "sp-runtime", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -4869,7 +4869,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -4919,7 +4919,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" dependencies = [ "proc-macro2", - "syn 1.0.17", + "syn 1.0.33", "synstructure", ] @@ -5007,7 +5007,7 @@ dependencies = [ "proc-macro-hack", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -5071,7 +5071,7 @@ checksum = "6a0ffd45cf79d88737d7cc85bfd5d2894bee1139b356e616fe85dc389c61aaf7" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -5210,7 +5210,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", "version_check", ] @@ -5222,7 +5222,7 @@ checksum = "4f5444ead4e9935abd7f27dc51f7e852a0569ac888096d5ec2499470794e2e53" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", "syn-mid", "version_check", ] @@ -5241,9 +5241,9 @@ checksum = "8e946095f9d3ed29ec38de908c22f95d9ac008e424c7bcae54c75a79c527c694" [[package]] name = "proc-macro2" -version = "1.0.10" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df246d292ff63439fea9bc8c0a270bed0e390d5ebd4db4ba15aba81111b5abe3" +checksum = "beae6331a816b1f65d04c45b078fd8e6c93e8071771f41b8163255bbd8d7c8fa" dependencies = [ "unicode-xid 0.2.0", ] @@ -5315,7 +5315,7 @@ dependencies = [ "itertools 0.8.2", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -5683,7 +5683,7 @@ checksum = "602eb59cda66fcb9aec25841fb76bc01d2b34282dcdd705028da297db6f3eec8" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -5763,7 +5763,7 @@ checksum = "475e68978dc5b743f2f40d8e0a8fdc83f1c5e78cbf4b8fa5e74e73beebc340de" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -5888,7 +5888,7 @@ checksum = "b3bba175698996010c4f6dce5e7f173b6eb781fce25d2cfc45e27091ce0b79f6" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -6021,7 +6021,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -6048,6 +6048,7 @@ dependencies = [ "sc-service", "sc-telemetry", "sc-tracing", + "serde", "serde_json", "sp-blockchain", "sp-core", @@ -7076,7 +7077,7 @@ checksum = "f8584eea9b9ff42825b46faf46a8c24d2cff13ec152fa2a50df788b87c07ee28" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 
1.0.33", ] [[package]] @@ -7166,22 +7167,22 @@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "serde" -version = "1.0.110" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99e7b308464d16b56eba9964e4972a3eee817760ab60d88c3f86e1fecb08204c" +checksum = "5317f7588f0a5078ee60ef675ef96735a1442132dc645eb1d12c018620ed8cd3" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.110" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "818fbf6bfa9a42d3bfcaca148547aa00c7b915bec71d1757aa2d44ca68771984" +checksum = "2a0be94b04690fbaed37cddffc5c134bf537c8e3329d53e982fe04c374978f8e" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -7295,7 +7296,7 @@ checksum = "a945ec7f7ce853e89ffa36be1e27dce9a43e82ff9093bf3461c30d5da74ed11b" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -7393,7 +7394,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -7657,7 +7658,7 @@ version = "2.0.0-rc4" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -7756,7 +7757,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -7848,7 +7849,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -8149,7 +8150,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -8170,7 +8171,7 @@ dependencies = [ "heck", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -8538,9 +8539,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.17" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0df0eb663f387145cab623dea85b09c2c5b4b0aef44e945d928e682fce71bb03" +checksum = "e8d5d96e8cbb005d6959f119f773bfaebb5684296108fb32600c00cde305b2cd" dependencies = [ "proc-macro2", "quote 1.0.6", @@ -8555,7 +8556,7 @@ checksum = "7be3539f6c128a931cf19dcee741c1af532c7fd387baa739c03dd2e96479338a" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -8575,7 +8576,7 @@ checksum = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", "unicode-xid 0.2.0", ] @@ -8638,7 +8639,7 @@ dependencies = [ "lazy_static", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", "version_check", ] @@ -8668,7 +8669,7 @@ checksum = "ca972988113b7715266f91250ddb98070d033c62a011fa0fcc57434a649310dd" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -8874,7 +8875,7 @@ checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -9075,7 +9076,7 @@ checksum = "99bbad0de3fd923c9c3232ead88510b783e5a4d16a6154adffa3d53308de984c" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", ] [[package]] @@ -9427,7 +9428,7 @@ dependencies = [ "log", "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", "wasm-bindgen-shared", ] @@ -9461,7 +9462,7 @@ checksum = "8eb197bd3a47553334907ffd2f16507b4f4f01bbec3ac921a7719e0decdfe72a" 
dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9782,7 +9783,7 @@ checksum = "de251eec69fc7c1bc3923403d18ececb929380e016afe103da75f396704f8ca2" dependencies = [ "proc-macro2", "quote 1.0.6", - "syn 1.0.17", + "syn 1.0.33", "synstructure", ] diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index 4f2fd3aad6..1bc436a063 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -71,7 +71,10 @@ pub fn run() -> sc_cli::Result<()> { match &cli.subcommand { Some(subcommand) => { let runner = cli.create_runner(subcommand)?; - runner.run_subcommand(subcommand, |config| Ok(new_full_start!(config).0)) + runner.run_subcommand(subcommand, |config| { + let (builder, _, _) = new_full_start!(config); + Ok(builder.to_chain_ops_parts()) + }) } None => { let runner = cli.create_runner(&cli.run)?; diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index b07e0cdc90..4ac796370c 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -97,8 +97,10 @@ pub fn run() -> Result<()> { } Some(Subcommand::Base(subcommand)) => { let runner = cli.create_runner(subcommand)?; - - runner.run_subcommand(subcommand, |config| Ok(new_full_start!(config).0)) + runner.run_subcommand(subcommand, |config| { + let (builder, _, _, _) = new_full_start!(config); + Ok(builder.to_chain_ops_parts()) + }) } } } diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 616b4f3481..6ebf2f9bf8 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -43,6 +43,7 @@ structopt = "0.3.8" sc-tracing = { version = "2.0.0-rc4", path = "../tracing" } chrono = "0.4.10" parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } +serde = "1.0.111" [target.'cfg(not(target_os = "unknown"))'.dependencies] rpassword = "4.0.1" diff --git a/client/cli/src/commands/build_spec_cmd.rs b/client/cli/src/commands/build_spec_cmd.rs index 23626359ff..616c5139f6 100644 --- a/client/cli/src/commands/build_spec_cmd.rs +++ b/client/cli/src/commands/build_spec_cmd.rs @@ -22,7 +22,7 @@ use crate::params::SharedParams; use crate::CliConfiguration; use log::info; use sc_network::config::build_multiaddr; -use sc_service::{config::MultiaddrWithPeerId, Configuration}; +use sc_service::{config::{MultiaddrWithPeerId, NetworkConfiguration}, ChainSpec}; use structopt::StructOpt; use std::io::Write; @@ -51,13 +51,16 @@ pub struct BuildSpecCmd { impl BuildSpecCmd { /// Run the build-spec command - pub fn run(&self, config: Configuration) -> error::Result<()> { + pub fn run( + &self, + mut spec: Box, + network_config: NetworkConfiguration, + ) -> error::Result<()> { info!("Building chain spec"); - let mut spec = config.chain_spec; let raw_output = self.raw; if spec.boot_nodes().is_empty() && !self.disable_default_bootnode { - let keys = config.network.node_key.into_keypair()?; + let keys = network_config.node_key.into_keypair()?; let peer_id = keys.public().into_peer_id(); let addr = MultiaddrWithPeerId { multiaddr: build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(30333u16)], diff --git a/client/cli/src/commands/check_block_cmd.rs b/client/cli/src/commands/check_block_cmd.rs index c000ea7fb1..b536d4f26b 100644 --- a/client/cli/src/commands/check_block_cmd.rs +++ b/client/cli/src/commands/check_block_cmd.rs @@ -19,9 +19,9 @@ use crate::{ CliConfiguration, error, params::{ImportParams, SharedParams, BlockNumberOrHash}, }; -use 
sc_service::{Configuration, ServiceBuilderCommand}; -use sp_runtime::traits::{Block as BlockT, NumberFor}; -use std::{fmt::Debug, str::FromStr}; +use sc_client_api::{BlockBackend, UsageProvider}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use std::{fmt::Debug, str::FromStr, sync::Arc}; use structopt::StructOpt; /// The `check-block` command used to validate blocks. @@ -48,21 +48,21 @@ pub struct CheckBlockCmd { impl CheckBlockCmd { /// Run the check-block command - pub async fn run( + pub async fn run( &self, - config: Configuration, - builder: B, + client: Arc, + import_queue: IQ, ) -> error::Result<()> where - B: FnOnce(Configuration) -> Result, - BC: ServiceBuilderCommand + Unpin, - BB: BlockT + Debug, - as FromStr>::Err: std::fmt::Debug, - BB::Hash: FromStr, - ::Err: std::fmt::Debug, + B: BlockT + for<'de> serde::Deserialize<'de>, + C: BlockBackend + UsageProvider + Send + Sync + 'static, + IQ: sc_service::ImportQueue + 'static, + B::Hash: FromStr, + ::Err: Debug, + <::Number as FromStr>::Err: Debug, { let start = std::time::Instant::now(); - builder(config)?.check_block(self.input.parse()?).await?; + sc_service::chain_ops::check_block(client, import_queue, self.input.parse()?).await?; println!("Completed in {} ms.", start.elapsed().as_millis()); Ok(()) diff --git a/client/cli/src/commands/export_blocks_cmd.rs b/client/cli/src/commands/export_blocks_cmd.rs index 7c523c0555..118832a79d 100644 --- a/client/cli/src/commands/export_blocks_cmd.rs +++ b/client/cli/src/commands/export_blocks_cmd.rs @@ -21,13 +21,16 @@ use crate::params::{BlockNumber, DatabaseParams, PruningParams, SharedParams}; use crate::CliConfiguration; use log::info; use sc_service::{ - config::DatabaseConfig, Configuration, ServiceBuilderCommand, + config::DatabaseConfig, chain_ops::export_blocks, }; +use sc_client_api::{BlockBackend, UsageProvider}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use std::fmt::Debug; use std::fs; use std::io; use std::path::PathBuf; +use std::str::FromStr; +use std::sync::Arc; use structopt::StructOpt; /// The `export-blocks` command used to export blocks. @@ -68,19 +71,17 @@ pub struct ExportBlocksCmd { impl ExportBlocksCmd { /// Run the export-blocks command - pub async fn run( + pub async fn run( &self, - config: Configuration, - builder: B, + client: Arc, + database_config: DatabaseConfig, ) -> error::Result<()> where - B: FnOnce(Configuration) -> Result, - BC: ServiceBuilderCommand + Unpin, - BB: sp_runtime::traits::Block + Debug, - <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, - ::Hash: std::str::FromStr, + B: BlockT, + C: BlockBackend + UsageProvider + 'static, + <::Number as FromStr>::Err: Debug, { - if let DatabaseConfig::RocksDb { ref path, .. } = &config.database { + if let DatabaseConfig::RocksDb { ref path, .. } = database_config { info!("DB path: {}", path.display()); } @@ -94,8 +95,7 @@ impl ExportBlocksCmd { None => Box::new(io::stdout()), }; - builder(config)? 
- .export_blocks(file, from.into(), to, binary) + export_blocks(client, file, from.into(), to, binary) .await .map_err(Into::into) } diff --git a/client/cli/src/commands/export_state_cmd.rs b/client/cli/src/commands/export_state_cmd.rs index 23a43a178a..c078db0d8a 100644 --- a/client/cli/src/commands/export_state_cmd.rs +++ b/client/cli/src/commands/export_state_cmd.rs @@ -20,10 +20,10 @@ use crate::{ CliConfiguration, error, params::{PruningParams, SharedParams, BlockNumberOrHash}, }; use log::info; -use sc_service::{Configuration, ServiceBuilderCommand}; -use sp_runtime::traits::{Block as BlockT, NumberFor}; -use std::{fmt::Debug, str::FromStr, io::Write}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use std::{fmt::Debug, str::FromStr, io::Write, sync::Arc}; use structopt::StructOpt; +use sc_client_api::{StorageProvider, UsageProvider}; /// The `export-state` command used to export the state of a given block into /// a chain spec. @@ -44,23 +44,22 @@ pub struct ExportStateCmd { impl ExportStateCmd { /// Run the `export-state` command - pub fn run( + pub async fn run( &self, - config: Configuration, - builder: B, + client: Arc, + mut input_spec: Box, ) -> error::Result<()> where - B: FnOnce(Configuration) -> Result, - BC: ServiceBuilderCommand + Unpin, - BB: BlockT + Debug, - as FromStr>::Err: std::fmt::Debug, - BB::Hash: FromStr, - ::Err: std::fmt::Debug, + B: BlockT, + C: UsageProvider + StorageProvider, + BA: sc_client_api::backend::Backend, + B::Hash: FromStr, + ::Err: Debug, + <::Number as FromStr>::Err: Debug, { info!("Exporting raw state..."); - let mut input_spec = config.chain_spec.cloned_box(); let block_id = self.input.as_ref().map(|b| b.parse()).transpose()?; - let raw_state = builder(config)?.export_raw_state(block_id)?; + let raw_state = sc_service::chain_ops::export_raw_state(client, block_id)?; input_spec.set_storage(raw_state); info!("Generating new chain spec..."); diff --git a/client/cli/src/commands/import_blocks_cmd.rs b/client/cli/src/commands/import_blocks_cmd.rs index 8e178c4b97..00f8ec43b0 100644 --- a/client/cli/src/commands/import_blocks_cmd.rs +++ b/client/cli/src/commands/import_blocks_cmd.rs @@ -20,13 +20,15 @@ use crate::error; use crate::params::ImportParams; use crate::params::SharedParams; use crate::CliConfiguration; -use sc_service::{Configuration, ServiceBuilderCommand}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use sc_service::chain_ops::import_blocks; +use sp_runtime::traits::Block as BlockT; use std::fmt::Debug; use std::fs; use std::io::{self, Read, Seek}; use std::path::PathBuf; +use std::sync::Arc; use structopt::StructOpt; +use sc_client_api::UsageProvider; /// The `import-blocks` command used to import blocks. #[derive(Debug, StructOpt)] @@ -61,17 +63,15 @@ impl ReadPlusSeek for T {} impl ImportBlocksCmd { /// Run the import-blocks command - pub async fn run( + pub async fn run( &self, - config: Configuration, - builder: B, + client: Arc, + import_queue: IQ, ) -> error::Result<()> where - B: FnOnce(Configuration) -> Result, - BC: ServiceBuilderCommand + Unpin, - BB: sp_runtime::traits::Block + Debug, - <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, - ::Hash: std::str::FromStr, + C: UsageProvider + Send + Sync + 'static, + B: BlockT + for<'de> serde::Deserialize<'de>, + IQ: sc_service::ImportQueue + 'static, { let file: Box = match &self.input { Some(filename) => Box::new(fs::File::open(filename)?), @@ -82,8 +82,7 @@ impl ImportBlocksCmd { } }; - builder(config)? 
- .import_blocks(file, false, self.binary) + import_blocks(client, import_queue, file, false, self.binary) .await .map_err(Into::into) } diff --git a/client/cli/src/commands/purge_chain_cmd.rs b/client/cli/src/commands/purge_chain_cmd.rs index 053f427309..9c9c6e91fb 100644 --- a/client/cli/src/commands/purge_chain_cmd.rs +++ b/client/cli/src/commands/purge_chain_cmd.rs @@ -19,7 +19,7 @@ use crate::error; use crate::params::{DatabaseParams, SharedParams}; use crate::CliConfiguration; -use sc_service::Configuration; +use sc_service::DatabaseConfig; use std::fmt::Debug; use std::fs; use std::io::{self, Write}; @@ -43,8 +43,8 @@ pub struct PurgeChainCmd { impl PurgeChainCmd { /// Run the purge command - pub fn run(&self, config: Configuration) -> error::Result<()> { - let db_path = config.database.path() + pub fn run(&self, database_config: DatabaseConfig) -> error::Result<()> { + let db_path = database_config.path() .ok_or_else(|| error::Error::Input("Cannot purge custom database implementation".into()) )?; diff --git a/client/cli/src/commands/revert_cmd.rs b/client/cli/src/commands/revert_cmd.rs index 1b5489df70..bbfb0d2ff9 100644 --- a/client/cli/src/commands/revert_cmd.rs +++ b/client/cli/src/commands/revert_cmd.rs @@ -19,10 +19,13 @@ use crate::error; use crate::params::{BlockNumber, PruningParams, SharedParams}; use crate::CliConfiguration; -use sc_service::{Configuration, ServiceBuilderCommand}; +use sc_service::chain_ops::revert_chain; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use std::fmt::Debug; +use std::str::FromStr; +use std::sync::Arc; use structopt::StructOpt; +use sc_client_api::{Backend, UsageProvider}; /// The `revert` command used revert the chain to a previous state. #[derive(Debug, StructOpt)] @@ -42,16 +45,19 @@ pub struct RevertCmd { impl RevertCmd { /// Run the revert command - pub fn run(&self, config: Configuration, builder: B) -> error::Result<()> + pub async fn run( + &self, + client: Arc, + backend: Arc, + ) -> error::Result<()> where - B: FnOnce(Configuration) -> Result, - BC: ServiceBuilderCommand + Unpin, - BB: sp_runtime::traits::Block + Debug, - <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, - ::Hash: std::str::FromStr, + B: BlockT, + BA: Backend, + C: UsageProvider, + <<::Header as HeaderT>::Number as FromStr>::Err: Debug, { let blocks = self.num.parse()?; - builder(config)?.revert_chain(blocks)?; + revert_chain(client, backend, blocks)?; Ok(()) } diff --git a/client/cli/src/runner.rs b/client/cli/src/runner.rs index fcc869dc87..807a5620ec 100644 --- a/client/cli/src/runner.rs +++ b/client/cli/src/runner.rs @@ -25,10 +25,11 @@ use futures::pin_mut; use futures::select; use futures::{future, future::FutureExt, Future}; use log::info; -use sc_service::{Configuration, ServiceBuilderCommand, TaskType, TaskManager}; +use sc_service::{Configuration, TaskType, TaskManager}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use sp_utils::metrics::{TOKIO_THREADS_ALIVE, TOKIO_THREADS_TOTAL}; -use std::{fmt::Debug, marker::PhantomData, str::FromStr}; +use std::{fmt::Debug, marker::PhantomData, str::FromStr, sync::Arc}; +use sc_client_api::{UsageProvider, BlockBackend, StorageProvider}; #[cfg(target_family = "unix")] async fn main(func: F) -> std::result::Result<(), Box> @@ -92,7 +93,11 @@ pub fn build_runtime() -> std::result::Result(mut tokio_runtime: tokio::runtime::Runtime, future: FUT) -> Result<()> +fn run_until_exit( + mut tokio_runtime: tokio::runtime::Runtime, + future: FUT, + mut task_manager: 
TaskManager, +) -> Result<()> where FUT: Future> + future::Future, ERR: 'static + std::error::Error, @@ -102,6 +107,9 @@ where tokio_runtime.block_on(main(f)).map_err(|e| e.to_string())?; + task_manager.terminate(); + drop(tokio_runtime); + Ok(()) } @@ -173,29 +181,47 @@ impl Runner { /// A helper function that runs a future with tokio and stops if the process receives the signal /// `SIGTERM` or `SIGINT`. - pub fn run_subcommand(self, subcommand: &Subcommand, builder: B) -> Result<()> + pub fn run_subcommand(self, subcommand: &Subcommand, builder: BU) + -> Result<()> where - B: FnOnce(Configuration) -> sc_service::error::Result, - BC: ServiceBuilderCommand + Unpin, - BB: sp_runtime::traits::Block + Debug, - <<::Header as HeaderT>::Number as FromStr>::Err: Debug, - ::Hash: FromStr, - <::Hash as FromStr>::Err: Debug, + BU: FnOnce(Configuration) + -> sc_service::error::Result<(Arc, Arc, IQ, TaskManager)>, + B: BlockT + for<'de> serde::Deserialize<'de>, + BA: sc_client_api::backend::Backend + 'static, + IQ: sc_service::ImportQueue + 'static, + ::Hash: FromStr, + <::Hash as FromStr>::Err: Debug, + <<::Header as HeaderT>::Number as FromStr>::Err: Debug, + CL: UsageProvider + BlockBackend + StorageProvider + Send + Sync + + 'static, { + let chain_spec = self.config.chain_spec.cloned_box(); + let network_config = self.config.network.clone(); + let db_config = self.config.database.clone(); + match subcommand { - Subcommand::BuildSpec(cmd) => cmd.run(self.config), + Subcommand::BuildSpec(cmd) => cmd.run(chain_spec, network_config), Subcommand::ExportBlocks(cmd) => { - run_until_exit(self.tokio_runtime, cmd.run(self.config, builder)) + let (client, _, _, task_manager) = builder(self.config)?; + run_until_exit(self.tokio_runtime, cmd.run(client, db_config), task_manager) } Subcommand::ImportBlocks(cmd) => { - run_until_exit(self.tokio_runtime, cmd.run(self.config, builder)) + let (client, _, import_queue, task_manager) = builder(self.config)?; + run_until_exit(self.tokio_runtime, cmd.run(client, import_queue), task_manager) } Subcommand::CheckBlock(cmd) => { - run_until_exit(self.tokio_runtime, cmd.run(self.config, builder)) + let (client, _, import_queue, task_manager) = builder(self.config)?; + run_until_exit(self.tokio_runtime, cmd.run(client, import_queue), task_manager) } - Subcommand::Revert(cmd) => cmd.run(self.config, builder), - Subcommand::PurgeChain(cmd) => cmd.run(self.config), - Subcommand::ExportState(cmd) => cmd.run(self.config, builder), + Subcommand::Revert(cmd) => { + let (client, backend, _, task_manager) = builder(self.config)?; + run_until_exit(self.tokio_runtime, cmd.run(client, backend), task_manager) + }, + Subcommand::PurgeChain(cmd) => cmd.run(db_config), + Subcommand::ExportState(cmd) => { + let (client, _, _, task_manager) = builder(self.config)?; + run_until_exit(self.tokio_runtime, cmd.run(client, chain_spec), task_manager) + }, } } @@ -221,11 +247,14 @@ impl Runner { /// A helper function that runs a future with tokio and stops if the process receives /// the signal SIGTERM or SIGINT - pub fn async_run(self, runner: impl FnOnce(Configuration) -> FUT) -> Result<()> + pub fn async_run( + self, runner: impl FnOnce(Configuration) -> Result<(FUT, TaskManager)>, + ) -> Result<()> where FUT: Future>, { - run_until_exit(self.tokio_runtime, runner(self.config)) + let (future, task_manager) = runner(self.config)?; + run_until_exit(self.tokio_runtime, future, task_manager) } /// Get an immutable reference to the node Configuration diff --git a/client/service/src/builder.rs 
b/client/service/src/builder.rs index 234356856b..3a1c5c85af 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -45,15 +45,11 @@ use sc_network::NetworkService; use parking_lot::{Mutex, RwLock}; use sp_runtime::generic::BlockId; use sp_runtime::traits::{ - Block as BlockT, NumberFor, SaturatedConversion, HashFor, Zero, BlockIdTo, + Block as BlockT, SaturatedConversion, HashFor, Zero, BlockIdTo, }; use sp_api::{ProvideRuntimeApi, CallApiAt}; use sc_executor::{NativeExecutor, NativeExecutionDispatch, RuntimeInfo}; -use std::{ - collections::HashMap, - io::{Read, Write, Seek}, - marker::PhantomData, sync::Arc, pin::Pin -}; +use std::{collections::HashMap, marker::PhantomData, sync::Arc, pin::Pin}; use wasm_timer::SystemTime; use sc_telemetry::{telemetry, SUBSTRATE_INFO}; use sp_transaction_pool::{LocalTransactionPool, MaintainedTransactionPool}; @@ -67,7 +63,6 @@ use sc_client_api::{ proof_provider::ProofProvider, execution_extensions::ExecutionExtensions }; -use sp_core::storage::Storage; use sp_blockchain::{HeaderMetadata, HeaderBackend}; use crate::{ServiceComponents, TelemetryOnConnectSinks, RpcHandlers, NetworkStatusSinks}; @@ -523,6 +518,11 @@ impl self.remote_backend.clone() } + /// Consume the builder and return the parts needed for chain operations. + pub fn to_chain_ops_parts(self) -> (Arc, Arc, TImpQu, TaskManager) { + (self.client, self.backend, self.import_queue, self.task_manager) + } + /// Defines which head-of-chain strategy to use. pub fn with_opt_select_chain( self, @@ -840,50 +840,6 @@ impl } } -/// Implemented on `ServiceBuilder`. Allows running block commands, such as import/export/validate -/// components to the builder. -pub trait ServiceBuilderCommand { - /// Block type this API operates on. - type Block: BlockT; - /// Native execution dispatch required by some commands. - type NativeDispatch: NativeExecutionDispatch + 'static; - /// Starts the process of importing blocks. - fn import_blocks( - self, - input: impl Read + Seek + Send + 'static, - force: bool, - binary: bool, - ) -> Pin> + Send>>; - - /// Performs the blocks export. - fn export_blocks( - self, - output: impl Write + 'static, - from: NumberFor, - to: Option>, - binary: bool - ) -> Pin>>>; - - /// Performs a revert of `blocks` blocks. - fn revert_chain( - &self, - blocks: NumberFor - ) -> Result<(), Error>; - - /// Re-validate known block. - fn check_block( - self, - block: BlockId - ) -> Pin> + Send>>; - - /// Export the raw state at the given `block`. If `block` is `None`, the - /// best block will be used. - fn export_raw_state( - &self, - block: Option>, - ) -> Result; -} - impl ServiceBuilder< TBl, diff --git a/client/service/src/chain_ops.rs b/client/service/src/chain_ops.rs deleted file mode 100644 index cb4ed24b60..0000000000 --- a/client/service/src/chain_ops.rs +++ /dev/null @@ -1,614 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Chain utilities. - -use crate::error; -use crate::builder::{ServiceBuilderCommand, ServiceBuilder}; -use crate::error::Error; -use sc_chain_spec::ChainSpec; -use log::{warn, info}; -use futures::{future, prelude::*}; -use sp_runtime::traits::{ - Block as BlockT, NumberFor, One, Zero, Header, SaturatedConversion, MaybeSerializeDeserialize, -}; -use sp_runtime::generic::{BlockId, SignedBlock}; -use codec::{Decode, Encode, IoReader as CodecIoReader}; -use crate::client::{Client, LocalCallExecutor}; -use sp_consensus::{ - BlockOrigin, - import_queue::{IncomingBlock, Link, BlockImportError, BlockImportResult, ImportQueue}, -}; -use sc_executor::{NativeExecutor, NativeExecutionDispatch}; -use sp_core::storage::{StorageKey, well_known_keys, ChildInfo, Storage, StorageChild, StorageMap}; -use sc_client_api::{StorageProvider, BlockBackend, UsageProvider}; - -use std::{io::{Read, Write, Seek}, pin::Pin, collections::HashMap}; -use std::time::{Duration, Instant}; -use futures_timer::Delay; -use std::task::Poll; -use serde_json::{de::IoRead as JsonIoRead, Deserializer, StreamDeserializer}; -use std::convert::{TryFrom, TryInto}; -use sp_runtime::traits::{CheckedDiv, Saturating}; - -/// Number of blocks we will add to the queue before waiting for the queue to catch up. -const MAX_PENDING_BLOCKS: u64 = 1_024; - -/// Number of milliseconds to wait until next poll. -const DELAY_TIME: u64 = 2_000; - -/// Number of milliseconds that must have passed between two updates. -const TIME_BETWEEN_UPDATES: u64 = 3_000; - -/// Build a chain spec json -pub fn build_spec(spec: &dyn ChainSpec, raw: bool) -> error::Result { - spec.as_json(raw).map_err(Into::into) -} - - -/// Helper enum that wraps either a binary decoder (from parity-scale-codec), or a JSON decoder (from serde_json). -/// Implements the Iterator Trait, calling `next()` will decode the next SignedBlock and return it. -enum BlockIter where - R: std::io::Read + std::io::Seek, -{ - Binary { - // Total number of blocks we are expecting to decode. - num_expected_blocks: u64, - // Number of blocks we have decoded thus far. - read_block_count: u64, - // Reader to the data, used for decoding new blocks. - reader: CodecIoReader, - }, - Json { - // Nubmer of blocks we have decoded thus far. - read_block_count: u64, - // Stream to the data, used for decoding new blocks. - reader: StreamDeserializer<'static, JsonIoRead, SignedBlock>, - }, -} - -impl BlockIter where - R: Read + Seek + 'static, - B: BlockT + MaybeSerializeDeserialize, -{ - fn new(input: R, binary: bool) -> Result { - if binary { - let mut reader = CodecIoReader(input); - // If the file is encoded in binary format, it is expected to first specify the number - // of blocks that are going to be decoded. We read it and add it to our enum struct. - let num_expected_blocks: u64 = Decode::decode(&mut reader) - .map_err(|e| format!("Failed to decode the number of blocks: {:?}", e))?; - Ok(BlockIter::Binary { - num_expected_blocks, - read_block_count: 0, - reader, - }) - } else { - let stream_deser = Deserializer::from_reader(input) - .into_iter::>(); - Ok(BlockIter::Json { - reader: stream_deser, - read_block_count: 0, - }) - } - } - - /// Returns the number of blocks read thus far. - fn read_block_count(&self) -> u64 { - match self { - BlockIter::Binary { read_block_count, .. } - | BlockIter::Json { read_block_count, .. 
} - => *read_block_count, - } - } - - /// Returns the total number of blocks to be imported, if possible. - fn num_expected_blocks(&self) -> Option { - match self { - BlockIter::Binary { num_expected_blocks, ..} => Some(*num_expected_blocks), - BlockIter::Json {..} => None - } - } -} - -impl Iterator for BlockIter where - R: Read + Seek + 'static, - B: BlockT + MaybeSerializeDeserialize, -{ - type Item = Result, String>; - - fn next(&mut self) -> Option { - match self { - BlockIter::Binary { num_expected_blocks, read_block_count, reader } => { - if read_block_count < num_expected_blocks { - let block_result: Result, _> = SignedBlock::::decode(reader) - .map_err(|e| e.to_string()); - *read_block_count += 1; - Some(block_result) - } else { - // `read_block_count` == `num_expected_blocks` so we've read enough blocks. - None - } - } - BlockIter::Json { reader, read_block_count } => { - let res = Some(reader.next()?.map_err(|e| e.to_string())); - *read_block_count += 1; - res - } - } - } -} - -/// Imports the SignedBlock to the queue. -fn import_block_to_queue( - signed_block: SignedBlock, - queue: &mut TImpQu, - force: bool -) where - TBl: BlockT + MaybeSerializeDeserialize, - TImpQu: 'static + ImportQueue, -{ - let (header, extrinsics) = signed_block.block.deconstruct(); - let hash = header.hash(); - // import queue handles verification and importing it into the client. - queue.import_blocks(BlockOrigin::File, vec![ - IncomingBlock:: { - hash, - header: Some(header), - body: Some(extrinsics), - justification: signed_block.justification, - origin: None, - allow_missing_state: false, - import_existing: force, - } - ]); -} - -/// Returns true if we have imported every block we were supposed to import, else returns false. -fn importing_is_done( - num_expected_blocks: Option, - read_block_count: u64, - imported_blocks: u64 -) -> bool { - if let Some(num_expected_blocks) = num_expected_blocks { - imported_blocks >= num_expected_blocks - } else { - imported_blocks >= read_block_count - } -} - -/// Structure used to log the block importing speed. -struct Speedometer { - best_number: NumberFor, - last_number: Option>, - last_update: Instant, -} - -impl Speedometer { - /// Creates a fresh Speedometer. - fn new() -> Self { - Self { - best_number: NumberFor::::from(0), - last_number: None, - last_update: Instant::now(), - } - } - - /// Calculates `(best_number - last_number) / (now - last_update)` and - /// logs the speed of import. - fn display_speed(&self) { - // Number of milliseconds elapsed since last time. - let elapsed_ms = { - let elapsed = self.last_update.elapsed(); - let since_last_millis = elapsed.as_secs() * 1000; - let since_last_subsec_millis = elapsed.subsec_millis() as u64; - since_last_millis + since_last_subsec_millis - }; - - // Number of blocks that have been imported since last time. - let diff = match self.last_number { - None => return, - Some(n) => self.best_number.saturating_sub(n) - }; - - if let Ok(diff) = TryInto::::try_into(diff) { - // If the number of blocks can be converted to a regular integer, then it's easy: just - // do the math and turn it into a `f64`. - let speed = diff.saturating_mul(10_000).checked_div(u128::from(elapsed_ms)) - .map_or(0.0, |s| s as f64) / 10.0; - info!("📦 Current best block: {} ({:4.1} bps)", self.best_number, speed); - } else { - // If the number of blocks can't be converted to a regular integer, then we need a more - // algebraic approach and we stay within the realm of integers. 
- let one_thousand = NumberFor::::from(1_000); - let elapsed = NumberFor::::from( - >::try_from(elapsed_ms).unwrap_or(u32::max_value()) - ); - - let speed = diff.saturating_mul(one_thousand).checked_div(&elapsed) - .unwrap_or_else(Zero::zero); - info!("📦 Current best block: {} ({} bps)", self.best_number, speed) - } - } - - /// Updates the Speedometer. - fn update(&mut self, best_number: NumberFor) { - self.last_number = Some(self.best_number); - self.best_number = best_number; - self.last_update = Instant::now(); - } - - // If more than TIME_BETWEEN_UPDATES has elapsed since last update, - // then print and update the speedometer. - fn notify_user(&mut self, best_number: NumberFor) { - let delta = Duration::from_millis(TIME_BETWEEN_UPDATES); - if Instant::now().duration_since(self.last_update) >= delta { - self.display_speed(); - self.update(best_number); - } - } -} - -/// Different State that the `import_blocks` future could be in. -enum ImportState where - R: Read + Seek + 'static, - B: BlockT + MaybeSerializeDeserialize, -{ - /// We are reading from the BlockIter structure, adding those blocks to the queue if possible. - Reading{block_iter: BlockIter}, - /// The queue is full (contains at least MAX_PENDING_BLOCKS blocks) and we are waiting for it to catch up. - WaitingForImportQueueToCatchUp{ - block_iter: BlockIter, - delay: Delay, - block: SignedBlock - }, - // We have added all the blocks to the queue but they are still being processed. - WaitingForImportQueueToFinish{ - num_expected_blocks: Option, - read_block_count: u64, - delay: Delay, - }, -} - -impl< - TBl, TRtApi, TBackend, - TExecDisp, TFchr, TSc, TImpQu, TFprb, TFpp, - TExPool, TRpc, Backend -> ServiceBuilderCommand for ServiceBuilder< - TBl, TRtApi, - Client>, TBl, TRtApi>, - TFchr, TSc, TImpQu, TFprb, TFpp, TExPool, TRpc, Backend -> where - TBl: BlockT + MaybeSerializeDeserialize, - TBackend: 'static + sc_client_api::backend::Backend + Send, - TExecDisp: 'static + NativeExecutionDispatch, - TImpQu: 'static + ImportQueue, - TRtApi: 'static + Send + Sync, - Self: Send + 'static, -{ - type Block = TBl; - type NativeDispatch = TExecDisp; - - fn import_blocks( - mut self, - input: impl Read + Seek + Send + 'static, - force: bool, - binary: bool, - ) -> Pin> + Send>> { - struct WaitLink { - imported_blocks: u64, - has_error: bool, - } - - impl WaitLink { - fn new() -> WaitLink { - WaitLink { - imported_blocks: 0, - has_error: false, - } - } - } - - impl Link for WaitLink { - fn blocks_processed( - &mut self, - imported: usize, - _num_expected_blocks: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)> - ) { - self.imported_blocks += imported as u64; - - for result in results { - if let (Err(err), hash) = result { - warn!("There was an error importing block with hash {:?}: {:?}", hash, err); - self.has_error = true; - break; - } - } - } - } - - let mut link = WaitLink::new(); - let block_iter_res: Result, String> = BlockIter::new(input, binary); - - let block_iter = match block_iter_res { - Ok(block_iter) => block_iter, - Err(e) => { - // We've encountered an error while creating the block iterator - // so we can just return a future that returns an error. - return future::ready(Err(Error::Other(e))).boxed() - } - }; - - let mut state = Some(ImportState::Reading{block_iter}); - let mut speedometer = Speedometer::::new(); - - // Importing blocks is implemented as a future, because we want the operation to be - // interruptible. 
- // - // Every time we read a block from the input or import a bunch of blocks from the import - // queue, the `Future` re-schedules itself and returns `Poll::Pending`. - // This makes it possible either to interleave other operations in-between the block imports, - // or to stop the operation completely. - let import = future::poll_fn(move |cx| { - let client = &self.client; - let queue = &mut self.import_queue; - match state.take().expect("state should never be None; qed") { - ImportState::Reading{mut block_iter} => { - match block_iter.next() { - None => { - // The iterator is over: we now need to wait for the import queue to finish. - let num_expected_blocks = block_iter.num_expected_blocks(); - let read_block_count = block_iter.read_block_count(); - let delay = Delay::new(Duration::from_millis(DELAY_TIME)); - state = Some(ImportState::WaitingForImportQueueToFinish{num_expected_blocks, read_block_count, delay}); - }, - Some(block_result) => { - let read_block_count = block_iter.read_block_count(); - match block_result { - Ok(block) => { - if read_block_count - link.imported_blocks >= MAX_PENDING_BLOCKS { - // The queue is full, so do not add this block and simply wait until - // the queue has made some progress. - let delay = Delay::new(Duration::from_millis(DELAY_TIME)); - state = Some(ImportState::WaitingForImportQueueToCatchUp{block_iter, delay, block}); - } else { - // Queue is not full, we can keep on adding blocks to the queue. - import_block_to_queue(block, queue, force); - state = Some(ImportState::Reading{block_iter}); - } - } - Err(e) => { - return Poll::Ready( - Err(Error::Other(format!("Error reading block #{}: {}", read_block_count, e)))) - } - } - } - } - }, - ImportState::WaitingForImportQueueToCatchUp{block_iter, mut delay, block} => { - let read_block_count = block_iter.read_block_count(); - if read_block_count - link.imported_blocks >= MAX_PENDING_BLOCKS { - // Queue is still full, so wait until there is room to insert our block. - match Pin::new(&mut delay).poll(cx) { - Poll::Pending => { - state = Some(ImportState::WaitingForImportQueueToCatchUp{block_iter, delay, block}); - return Poll::Pending - }, - Poll::Ready(_) => { - delay.reset(Duration::from_millis(DELAY_TIME)); - }, - } - state = Some(ImportState::WaitingForImportQueueToCatchUp{block_iter, delay, block}); - } else { - // Queue is no longer full, so we can add our block to the queue. - import_block_to_queue(block, queue, force); - // Switch back to Reading state. - state = Some(ImportState::Reading{block_iter}); - } - }, - ImportState::WaitingForImportQueueToFinish{num_expected_blocks, read_block_count, mut delay} => { - // All the blocks have been added to the queue, which doesn't mean they - // have all been properly imported. - if importing_is_done(num_expected_blocks, read_block_count, link.imported_blocks) { - // Importing is done, we can log the result and return. - info!( - "🎉 Imported {} blocks. Best: #{}", - read_block_count, client.chain_info().best_number - ); - return Poll::Ready(Ok(())) - } else { - // Importing is not done, we still have to wait for the queue to finish. - // Wait for the delay, because we know the queue is lagging behind. 
- match Pin::new(&mut delay).poll(cx) { - Poll::Pending => { - state = Some(ImportState::WaitingForImportQueueToFinish{num_expected_blocks, read_block_count, delay}); - return Poll::Pending - }, - Poll::Ready(_) => { - delay.reset(Duration::from_millis(DELAY_TIME)); - }, - } - - state = Some(ImportState::WaitingForImportQueueToFinish{num_expected_blocks, read_block_count, delay}); - } - } - } - - queue.poll_actions(cx, &mut link); - - let best_number = client.chain_info().best_number; - speedometer.notify_user(best_number); - - if link.has_error { - return Poll::Ready(Err( - Error::Other( - format!("Stopping after #{} blocks because of an error", link.imported_blocks) - ) - )) - } - - cx.waker().wake_by_ref(); - Poll::Pending - }); - Box::pin(import) - } - - fn export_blocks( - self, - mut output: impl Write + 'static, - from: NumberFor, - to: Option>, - binary: bool - ) -> Pin>>> { - let mut block = from; - - let last = match to { - Some(v) if v.is_zero() => One::one(), - Some(v) => v, - None => self.client.chain_info().best_number, - }; - - let mut wrote_header = false; - - // Exporting blocks is implemented as a future, because we want the operation to be - // interruptible. - // - // Every time we write a block to the output, the `Future` re-schedules itself and returns - // `Poll::Pending`. - // This makes it possible either to interleave other operations in-between the block exports, - // or to stop the operation completely. - let export = future::poll_fn(move |cx| { - let client = &self.client; - - if last < block { - return Poll::Ready(Err("Invalid block range specified".into())); - } - - if !wrote_header { - info!("Exporting blocks from #{} to #{}", block, last); - if binary { - let last_: u64 = last.saturated_into::(); - let block_: u64 = block.saturated_into::(); - let len: u64 = last_ - block_ + 1; - output.write_all(&len.encode())?; - } - wrote_header = true; - } - - match client.block(&BlockId::number(block))? { - Some(block) => { - if binary { - output.write_all(&block.encode())?; - } else { - serde_json::to_writer(&mut output, &block) - .map_err(|e| format!("Error writing JSON: {}", e))?; - } - }, - // Reached end of the chain. - None => return Poll::Ready(Ok(())), - } - if (block % 10000.into()).is_zero() { - info!("#{}", block); - } - if block == last { - return Poll::Ready(Ok(())); - } - block += One::one(); - - // Re-schedule the task in order to continue the operation. - cx.waker().wake_by_ref(); - Poll::Pending - }); - - Box::pin(export) - } - - fn revert_chain( - &self, - blocks: NumberFor - ) -> Result<(), Error> { - let reverted = self.client.revert(blocks)?; - let info = self.client.chain_info(); - - if reverted.is_zero() { - info!("There aren't any non-finalized blocks to revert."); - } else { - info!("Reverted {} blocks. 
Best: #{} ({})", reverted, info.best_number, info.best_hash); - } - Ok(()) - } - - fn check_block( - self, - block_id: BlockId - ) -> Pin> + Send>> { - match self.client.block(&block_id) { - Ok(Some(block)) => { - let mut buf = Vec::new(); - 1u64.encode_to(&mut buf); - block.encode_to(&mut buf); - let reader = std::io::Cursor::new(buf); - self.import_blocks(reader, true, true) - } - Ok(None) => Box::pin(future::err("Unknown block".into())), - Err(e) => Box::pin(future::err(format!("Error reading block: {:?}", e).into())), - } - } - - fn export_raw_state( - &self, - block: Option>, - ) -> Result { - let block = block.unwrap_or_else( - || BlockId::Hash(self.client.usage_info().chain.best_hash) - ); - - let empty_key = StorageKey(Vec::new()); - let mut top_storage = self.client.storage_pairs(&block, &empty_key)?; - let mut children_default = HashMap::new(); - - // Remove all default child storage roots from the top storage and collect the child storage - // pairs. - while let Some(pos) = top_storage - .iter() - .position(|(k, _)| k.0.starts_with(well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX)) { - let (key, _) = top_storage.swap_remove(pos); - - let key = StorageKey( - key.0[well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX.len()..].to_vec(), - ); - let child_info = ChildInfo::new_default(&key.0); - - let keys = self.client.child_storage_keys(&block, &child_info, &empty_key)?; - let mut pairs = StorageMap::new(); - keys.into_iter().try_for_each(|k| { - if let Some(value) = self.client.child_storage(&block, &child_info, &k)? { - pairs.insert(k.0, value.0); - } - - Ok::<_, Error>(()) - })?; - - children_default.insert(key.0, StorageChild { child_info, data: pairs }); - } - - let top = top_storage.into_iter().map(|(k, v)| (k.0, v.0)).collect(); - Ok(Storage { top, children_default }) - } -} diff --git a/client/service/src/chain_ops/check_block.rs b/client/service/src/chain_ops/check_block.rs new file mode 100644 index 0000000000..34baeb5544 --- /dev/null +++ b/client/service/src/chain_ops/check_block.rs @@ -0,0 +1,51 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::error::Error; +use futures::{future, prelude::*}; +use sp_runtime::traits::Block as BlockT; +use sp_runtime::generic::BlockId; +use codec::Encode; +use sp_consensus::import_queue::ImportQueue; +use sc_client_api::{BlockBackend, UsageProvider}; + +use std::pin::Pin; +use std::sync::Arc; +use crate::chain_ops::import_blocks; + +/// Re-validate known block. 
+pub fn check_block( + client: Arc, + import_queue: IQ, + block_id: BlockId +) -> Pin> + Send>> +where + C: BlockBackend + UsageProvider + Send + Sync + 'static, + B: BlockT + for<'de> serde::Deserialize<'de>, + IQ: ImportQueue + 'static, +{ + match client.block(&block_id) { + Ok(Some(block)) => { + let mut buf = Vec::new(); + 1u64.encode_to(&mut buf); + block.encode_to(&mut buf); + let reader = std::io::Cursor::new(buf); + import_blocks(client, import_queue, reader, true, true) + } + Ok(None) => Box::pin(future::err("Unknown block".into())), + Err(e) => Box::pin(future::err(format!("Error reading block: {:?}", e).into())), + } +} diff --git a/client/service/src/chain_ops/export_blocks.rs b/client/service/src/chain_ops/export_blocks.rs new file mode 100644 index 0000000000..2f32cbf7fb --- /dev/null +++ b/client/service/src/chain_ops/export_blocks.rs @@ -0,0 +1,104 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::error::Error; +use log::info; +use futures::{future, prelude::*}; +use sp_runtime::traits::{ + Block as BlockT, NumberFor, One, Zero, SaturatedConversion +}; +use sp_runtime::generic::BlockId; +use codec::Encode; + +use std::{io::Write, pin::Pin}; +use sc_client_api::{BlockBackend, UsageProvider}; +use std::sync::Arc; +use std::task::Poll; + +/// Performs the blocks export. +pub fn export_blocks( + client: Arc, + mut output: impl Write + 'static, + from: NumberFor, + to: Option>, + binary: bool +) -> Pin>>> +where + C: BlockBackend + UsageProvider + 'static, + B: BlockT, +{ + let mut block = from; + + let last = match to { + Some(v) if v.is_zero() => One::one(), + Some(v) => v, + None => client.usage_info().chain.best_number, + }; + + let mut wrote_header = false; + + // Exporting blocks is implemented as a future, because we want the operation to be + // interruptible. + // + // Every time we write a block to the output, the `Future` re-schedules itself and returns + // `Poll::Pending`. + // This makes it possible either to interleave other operations in-between the block exports, + // or to stop the operation completely. + let export = future::poll_fn(move |cx| { + let client = &client; + + if last < block { + return Poll::Ready(Err("Invalid block range specified".into())); + } + + if !wrote_header { + info!("Exporting blocks from #{} to #{}", block, last); + if binary { + let last_: u64 = last.saturated_into::(); + let block_: u64 = block.saturated_into::(); + let len: u64 = last_ - block_ + 1; + output.write_all(&len.encode())?; + } + wrote_header = true; + } + + match client.block(&BlockId::number(block))? { + Some(block) => { + if binary { + output.write_all(&block.encode())?; + } else { + serde_json::to_writer(&mut output, &block) + .map_err(|e| format!("Error writing JSON: {}", e))?; + } + }, + // Reached end of the chain. 
+ None => return Poll::Ready(Ok(())), + } + if (block % 10000.into()).is_zero() { + info!("#{}", block); + } + if block == last { + return Poll::Ready(Ok(())); + } + block += One::one(); + + // Re-schedule the task in order to continue the operation. + cx.waker().wake_by_ref(); + Poll::Pending + }); + + Box::pin(export) +} diff --git a/client/service/src/chain_ops/export_raw_state.rs b/client/service/src/chain_ops/export_raw_state.rs new file mode 100644 index 0000000000..3fe44dbdb1 --- /dev/null +++ b/client/service/src/chain_ops/export_raw_state.rs @@ -0,0 +1,71 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::error::Error; +use sp_runtime::traits::Block as BlockT; +use sp_runtime::generic::BlockId; +use sp_core::storage::{StorageKey, well_known_keys, ChildInfo, Storage, StorageChild, StorageMap}; +use sc_client_api::{StorageProvider, UsageProvider}; + +use std::{collections::HashMap, sync::Arc}; + +/// Export the raw state at the given `block`. If `block` is `None`, the +/// best block will be used. +pub fn export_raw_state( + client: Arc, + block: Option>, +) -> Result +where + C: UsageProvider + StorageProvider, + B: BlockT, + BA: sc_client_api::backend::Backend, +{ + let block = block.unwrap_or_else( + || BlockId::Hash(client.usage_info().chain.best_hash) + ); + + let empty_key = StorageKey(Vec::new()); + let mut top_storage = client.storage_pairs(&block, &empty_key)?; + let mut children_default = HashMap::new(); + + // Remove all default child storage roots from the top storage and collect the child storage + // pairs. + while let Some(pos) = top_storage + .iter() + .position(|(k, _)| k.0.starts_with(well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX)) { + let (key, _) = top_storage.swap_remove(pos); + + let key = StorageKey( + key.0[well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX.len()..].to_vec(), + ); + let child_info = ChildInfo::new_default(&key.0); + + let keys = client.child_storage_keys(&block, &child_info, &empty_key)?; + let mut pairs = StorageMap::new(); + keys.into_iter().try_for_each(|k| { + if let Some(value) = client.child_storage(&block, &child_info, &k)? { + pairs.insert(k.0, value.0); + } + + Ok::<_, Error>(()) + })?; + + children_default.insert(key.0, StorageChild { child_info, data: pairs }); + } + + let top = top_storage.into_iter().map(|(k, v)| (k.0, v.0)).collect(); + Ok(Storage { top, children_default }) +} diff --git a/client/service/src/chain_ops/import_blocks.rs b/client/service/src/chain_ops/import_blocks.rs new file mode 100644 index 0000000000..46ad0d0501 --- /dev/null +++ b/client/service/src/chain_ops/import_blocks.rs @@ -0,0 +1,472 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. 
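As an aside, the binary file format shared by `export_blocks` above and `import_blocks` below is simply a SCALE-encoded `u64` block count followed by the SCALE-encoded blocks themselves. A minimal sketch of that header round trip, not part of the patch, assuming only the `parity-scale-codec` crate (imported as `codec`, as in these files):

use codec::{Decode, Encode};

fn main() {
    // Header written by `export_blocks`: the number of blocks that will follow.
    let num_blocks: u64 = 42;
    let mut file = Vec::new();
    file.extend(num_blocks.encode());
    // ...the SCALE-encoded `SignedBlock`s would be appended here...

    // `import_blocks` reads the count back before decoding the individual blocks.
    let mut reader = &file[..];
    let decoded = u64::decode(&mut reader).expect("header is a valid u64");
    assert_eq!(decoded, 42);
}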
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::error; +use crate::error::Error; +use sc_chain_spec::ChainSpec; +use log::{warn, info}; +use futures::{future, prelude::*}; +use sp_runtime::traits::{ + Block as BlockT, NumberFor, Zero, Header, MaybeSerializeDeserialize, +}; +use sp_runtime::generic::SignedBlock; +use codec::{Decode, IoReader as CodecIoReader}; +use sp_consensus::{ + BlockOrigin, + import_queue::{IncomingBlock, Link, BlockImportError, BlockImportResult, ImportQueue}, +}; + +use std::{io::{Read, Seek}, pin::Pin}; +use std::time::{Duration, Instant}; +use futures_timer::Delay; +use std::task::Poll; +use serde_json::{de::IoRead as JsonIoRead, Deserializer, StreamDeserializer}; +use std::convert::{TryFrom, TryInto}; +use sp_runtime::traits::{CheckedDiv, Saturating}; +use sc_client_api::UsageProvider; + +/// Number of blocks we will add to the queue before waiting for the queue to catch up. +const MAX_PENDING_BLOCKS: u64 = 1_024; + +/// Number of milliseconds to wait until next poll. +const DELAY_TIME: u64 = 2_000; + +/// Number of milliseconds that must have passed between two updates. +const TIME_BETWEEN_UPDATES: u64 = 3_000; + +use std::sync::Arc; + +/// Build a chain spec json +pub fn build_spec(spec: &dyn ChainSpec, raw: bool) -> error::Result { + spec.as_json(raw).map_err(Into::into) +} + + +/// Helper enum that wraps either a binary decoder (from parity-scale-codec), or a JSON decoder +/// (from serde_json). Implements the Iterator Trait, calling `next()` will decode the next +/// SignedBlock and return it. +enum BlockIter where + R: std::io::Read + std::io::Seek, +{ + Binary { + // Total number of blocks we are expecting to decode. + num_expected_blocks: u64, + // Number of blocks we have decoded thus far. + read_block_count: u64, + // Reader to the data, used for decoding new blocks. + reader: CodecIoReader, + }, + Json { + // Nubmer of blocks we have decoded thus far. + read_block_count: u64, + // Stream to the data, used for decoding new blocks. + reader: StreamDeserializer<'static, JsonIoRead, SignedBlock>, + }, +} + +impl BlockIter where + R: Read + Seek + 'static, + B: BlockT + MaybeSerializeDeserialize, +{ + fn new(input: R, binary: bool) -> Result { + if binary { + let mut reader = CodecIoReader(input); + // If the file is encoded in binary format, it is expected to first specify the number + // of blocks that are going to be decoded. We read it and add it to our enum struct. + let num_expected_blocks: u64 = Decode::decode(&mut reader) + .map_err(|e| format!("Failed to decode the number of blocks: {:?}", e))?; + Ok(BlockIter::Binary { + num_expected_blocks, + read_block_count: 0, + reader, + }) + } else { + let stream_deser = Deserializer::from_reader(input) + .into_iter::>(); + Ok(BlockIter::Json { + reader: stream_deser, + read_block_count: 0, + }) + } + } + + /// Returns the number of blocks read thus far. 
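The JSON branch of `BlockIter` above leans on `serde_json`'s streaming deserializer, which decodes one value per `next()` call instead of buffering the whole file. A minimal sketch of that mechanism on plain integers rather than `SignedBlock`s, illustrative only and assuming just the `serde_json` crate already used here:

use serde_json::Deserializer;
use std::io::Cursor;

fn main() {
    // Three whitespace-separated JSON values, decoded lazily one at a time.
    let input = Cursor::new("1 2 3");
    let stream = Deserializer::from_reader(input).into_iter::<u64>();
    let decoded: Result<Vec<u64>, _> = stream.collect();
    assert_eq!(decoded.unwrap(), vec![1, 2, 3]);
}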
+ fn read_block_count(&self) -> u64 { + match self { + BlockIter::Binary { read_block_count, .. } + | BlockIter::Json { read_block_count, .. } + => *read_block_count, + } + } + + /// Returns the total number of blocks to be imported, if possible. + fn num_expected_blocks(&self) -> Option { + match self { + BlockIter::Binary { num_expected_blocks, ..} => Some(*num_expected_blocks), + BlockIter::Json {..} => None + } + } +} + +impl Iterator for BlockIter where + R: Read + Seek + 'static, + B: BlockT + MaybeSerializeDeserialize, +{ + type Item = Result, String>; + + fn next(&mut self) -> Option { + match self { + BlockIter::Binary { num_expected_blocks, read_block_count, reader } => { + if read_block_count < num_expected_blocks { + let block_result: Result, _> = SignedBlock::::decode(reader) + .map_err(|e| e.to_string()); + *read_block_count += 1; + Some(block_result) + } else { + // `read_block_count` == `num_expected_blocks` so we've read enough blocks. + None + } + } + BlockIter::Json { reader, read_block_count } => { + let res = Some(reader.next()?.map_err(|e| e.to_string())); + *read_block_count += 1; + res + } + } + } +} + +/// Imports the SignedBlock to the queue. +fn import_block_to_queue( + signed_block: SignedBlock, + queue: &mut TImpQu, + force: bool +) where + TBl: BlockT + MaybeSerializeDeserialize, + TImpQu: 'static + ImportQueue, +{ + let (header, extrinsics) = signed_block.block.deconstruct(); + let hash = header.hash(); + // import queue handles verification and importing it into the client. + queue.import_blocks(BlockOrigin::File, vec![ + IncomingBlock:: { + hash, + header: Some(header), + body: Some(extrinsics), + justification: signed_block.justification, + origin: None, + allow_missing_state: false, + import_existing: force, + } + ]); +} + +/// Returns true if we have imported every block we were supposed to import, else returns false. +fn importing_is_done( + num_expected_blocks: Option, + read_block_count: u64, + imported_blocks: u64 +) -> bool { + if let Some(num_expected_blocks) = num_expected_blocks { + imported_blocks >= num_expected_blocks + } else { + imported_blocks >= read_block_count + } +} + +/// Structure used to log the block importing speed. +struct Speedometer { + best_number: NumberFor, + last_number: Option>, + last_update: Instant, +} + +impl Speedometer { + /// Creates a fresh Speedometer. + fn new() -> Self { + Self { + best_number: NumberFor::::from(0), + last_number: None, + last_update: Instant::now(), + } + } + + /// Calculates `(best_number - last_number) / (now - last_update)` and + /// logs the speed of import. + fn display_speed(&self) { + // Number of milliseconds elapsed since last time. + let elapsed_ms = { + let elapsed = self.last_update.elapsed(); + let since_last_millis = elapsed.as_secs() * 1000; + let since_last_subsec_millis = elapsed.subsec_millis() as u64; + since_last_millis + since_last_subsec_millis + }; + + // Number of blocks that have been imported since last time. + let diff = match self.last_number { + None => return, + Some(n) => self.best_number.saturating_sub(n) + }; + + if let Ok(diff) = TryInto::::try_into(diff) { + // If the number of blocks can be converted to a regular integer, then it's easy: just + // do the math and turn it into a `f64`. 
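The fixed-point arithmetic the comment above describes can be checked in isolation. A small sketch of the fast path that reports blocks-per-second with one decimal place; it is not part of the patch and uses the standard library only:

fn blocks_per_second(imported_blocks: u128, elapsed_ms: u128) -> f64 {
    // Multiply by 10_000 and divide by 10.0 at the end to keep one decimal place
    // of precision while the division itself stays in integer arithmetic.
    imported_blocks
        .saturating_mul(10_000)
        .checked_div(elapsed_ms)
        .map_or(0.0, |s| s as f64) / 10.0
}

fn main() {
    // 300 blocks over 2.5 seconds comes out as 120.0 bps.
    assert_eq!(blocks_per_second(300, 2_500), 120.0);
    // A zero elapsed time cannot panic; it simply reports 0 bps.
    assert_eq!(blocks_per_second(300, 0), 0.0);
}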
+ let speed = diff.saturating_mul(10_000).checked_div(u128::from(elapsed_ms)) + .map_or(0.0, |s| s as f64) / 10.0; + info!("📦 Current best block: {} ({:4.1} bps)", self.best_number, speed); + } else { + // If the number of blocks can't be converted to a regular integer, then we need a more + // algebraic approach and we stay within the realm of integers. + let one_thousand = NumberFor::::from(1_000); + let elapsed = NumberFor::::from( + >::try_from(elapsed_ms).unwrap_or(u32::max_value()) + ); + + let speed = diff.saturating_mul(one_thousand).checked_div(&elapsed) + .unwrap_or_else(Zero::zero); + info!("📦 Current best block: {} ({} bps)", self.best_number, speed) + } + } + + /// Updates the Speedometer. + fn update(&mut self, best_number: NumberFor) { + self.last_number = Some(self.best_number); + self.best_number = best_number; + self.last_update = Instant::now(); + } + + // If more than TIME_BETWEEN_UPDATES has elapsed since last update, + // then print and update the speedometer. + fn notify_user(&mut self, best_number: NumberFor) { + let delta = Duration::from_millis(TIME_BETWEEN_UPDATES); + if Instant::now().duration_since(self.last_update) >= delta { + self.display_speed(); + self.update(best_number); + } + } +} + +/// Different State that the `import_blocks` future could be in. +enum ImportState where + R: Read + Seek + 'static, + B: BlockT + MaybeSerializeDeserialize, +{ + /// We are reading from the BlockIter structure, adding those blocks to the queue if possible. + Reading{block_iter: BlockIter}, + /// The queue is full (contains at least MAX_PENDING_BLOCKS blocks) and we are waiting for it to + /// catch up. + WaitingForImportQueueToCatchUp{ + block_iter: BlockIter, + delay: Delay, + block: SignedBlock + }, + // We have added all the blocks to the queue but they are still being processed. + WaitingForImportQueueToFinish{ + num_expected_blocks: Option, + read_block_count: u64, + delay: Delay, + }, +} + +/// Starts the process of importing blocks. +pub fn import_blocks( + client: Arc, + mut import_queue: IQ, + input: impl Read + Seek + Send + 'static, + force: bool, + binary: bool, +) -> Pin> + Send>> +where + C: UsageProvider + Send + Sync + 'static, + B: BlockT + for<'de> serde::Deserialize<'de>, + IQ: ImportQueue + 'static, +{ + struct WaitLink { + imported_blocks: u64, + has_error: bool, + } + + impl WaitLink { + fn new() -> WaitLink { + WaitLink { + imported_blocks: 0, + has_error: false, + } + } + } + + impl Link for WaitLink { + fn blocks_processed( + &mut self, + imported: usize, + _num_expected_blocks: usize, + results: Vec<(Result>, BlockImportError>, B::Hash)> + ) { + self.imported_blocks += imported as u64; + + for result in results { + if let (Err(err), hash) = result { + warn!("There was an error importing block with hash {:?}: {:?}", hash, err); + self.has_error = true; + break; + } + } + } + } + + let mut link = WaitLink::new(); + let block_iter_res: Result, String> = BlockIter::new(input, binary); + + let block_iter = match block_iter_res { + Ok(block_iter) => block_iter, + Err(e) => { + // We've encountered an error while creating the block iterator + // so we can just return a future that returns an error. + return future::ready(Err(Error::Other(e))).boxed() + } + }; + + let mut state = Some(ImportState::Reading{block_iter}); + let mut speedometer = Speedometer::::new(); + + // Importing blocks is implemented as a future, because we want the operation to be + // interruptible. 
+ // + // Every time we read a block from the input or import a bunch of blocks from the import + // queue, the `Future` re-schedules itself and returns `Poll::Pending`. + // This makes it possible either to interleave other operations in-between the block imports, + // or to stop the operation completely. + let import = future::poll_fn(move |cx| { + let client = &client; + let queue = &mut import_queue; + match state.take().expect("state should never be None; qed") { + ImportState::Reading{mut block_iter} => { + match block_iter.next() { + None => { + // The iterator is over: we now need to wait for the import queue to finish. + let num_expected_blocks = block_iter.num_expected_blocks(); + let read_block_count = block_iter.read_block_count(); + let delay = Delay::new(Duration::from_millis(DELAY_TIME)); + state = Some(ImportState::WaitingForImportQueueToFinish { + num_expected_blocks, read_block_count, delay + }); + }, + Some(block_result) => { + let read_block_count = block_iter.read_block_count(); + match block_result { + Ok(block) => { + if read_block_count - link.imported_blocks >= MAX_PENDING_BLOCKS { + // The queue is full, so do not add this block and simply wait + // until the queue has made some progress. + let delay = Delay::new(Duration::from_millis(DELAY_TIME)); + state = Some(ImportState::WaitingForImportQueueToCatchUp { + block_iter, delay, block + }); + } else { + // Queue is not full, we can keep on adding blocks to the queue. + import_block_to_queue(block, queue, force); + state = Some(ImportState::Reading{block_iter}); + } + } + Err(e) => { + return Poll::Ready( + Err(Error::Other( + format!("Error reading block #{}: {}", read_block_count, e) + ))) + } + } + } + } + }, + ImportState::WaitingForImportQueueToCatchUp{block_iter, mut delay, block} => { + let read_block_count = block_iter.read_block_count(); + if read_block_count - link.imported_blocks >= MAX_PENDING_BLOCKS { + // Queue is still full, so wait until there is room to insert our block. + match Pin::new(&mut delay).poll(cx) { + Poll::Pending => { + state = Some(ImportState::WaitingForImportQueueToCatchUp { + block_iter, delay, block + }); + return Poll::Pending + }, + Poll::Ready(_) => { + delay.reset(Duration::from_millis(DELAY_TIME)); + }, + } + state = Some(ImportState::WaitingForImportQueueToCatchUp { + block_iter, delay, block + }); + } else { + // Queue is no longer full, so we can add our block to the queue. + import_block_to_queue(block, queue, force); + // Switch back to Reading state. + state = Some(ImportState::Reading{block_iter}); + } + }, + ImportState::WaitingForImportQueueToFinish { + num_expected_blocks, read_block_count, mut delay + } => { + // All the blocks have been added to the queue, which doesn't mean they + // have all been properly imported. + if importing_is_done(num_expected_blocks, read_block_count, link.imported_blocks) { + // Importing is done, we can log the result and return. + info!( + "🎉 Imported {} blocks. Best: #{}", + read_block_count, client.usage_info().chain.best_number + ); + return Poll::Ready(Ok(())) + } else { + // Importing is not done, we still have to wait for the queue to finish. + // Wait for the delay, because we know the queue is lagging behind. 
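The "future that re-schedules itself" technique described in the comments above can be reduced to a few lines. A self-contained sketch, not part of the patch and assuming only the `futures` crate, that yields back to the executor after every unit of work:

use futures::future;
use std::task::Poll;

async fn interruptible_work(steps: u32) {
    let mut done = 0;
    future::poll_fn(move |cx| {
        if done == steps {
            return Poll::Ready(());
        }
        // One unit of work per poll, then ask to be polled again and yield,
        // so other tasks (or a shutdown request) can run in between.
        done += 1;
        cx.waker().wake_by_ref();
        Poll::Pending
    })
    .await
}

fn main() {
    futures::executor::block_on(interruptible_work(3));
}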
+ match Pin::new(&mut delay).poll(cx) { + Poll::Pending => { + state = Some(ImportState::WaitingForImportQueueToFinish { + num_expected_blocks, read_block_count, delay + }); + return Poll::Pending + }, + Poll::Ready(_) => { + delay.reset(Duration::from_millis(DELAY_TIME)); + }, + } + + state = Some(ImportState::WaitingForImportQueueToFinish { + num_expected_blocks, read_block_count, delay + }); + } + } + } + + queue.poll_actions(cx, &mut link); + + let best_number = client.usage_info().chain.best_number; + speedometer.notify_user(best_number); + + if link.has_error { + return Poll::Ready(Err( + Error::Other( + format!("Stopping after #{} blocks because of an error", link.imported_blocks) + ) + )) + } + + cx.waker().wake_by_ref(); + Poll::Pending + }); + Box::pin(import) +} diff --git a/client/service/src/chain_ops/mod.rs b/client/service/src/chain_ops/mod.rs new file mode 100644 index 0000000000..af6e6f632f --- /dev/null +++ b/client/service/src/chain_ops/mod.rs @@ -0,0 +1,29 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Chain utilities. + +mod check_block; +mod export_blocks; +mod export_raw_state; +mod import_blocks; +mod revert_chain; + +pub use check_block::*; +pub use export_blocks::*; +pub use export_raw_state::*; +pub use import_blocks::*; +pub use revert_chain::*; diff --git a/client/service/src/chain_ops/revert_chain.rs b/client/service/src/chain_ops/revert_chain.rs new file mode 100644 index 0000000000..129aea0408 --- /dev/null +++ b/client/service/src/chain_ops/revert_chain.rs @@ -0,0 +1,43 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::error::Error; +use log::info; +use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; +use sc_client_api::{Backend, UsageProvider}; +use std::sync::Arc; + +/// Performs a revert of `blocks` blocks. +pub fn revert_chain( + client: Arc, + backend: Arc, + blocks: NumberFor +) -> Result<(), Error> +where + B: BlockT, + C: UsageProvider, + BA: Backend, +{ + let reverted = backend.revert(blocks, false)?; + let info = client.usage_info().chain; + + if reverted.is_zero() { + info!("There aren't any non-finalized blocks to revert."); + } else { + info!("Reverted {} blocks. 
Best: #{} ({})", reverted, info.best_number, info.best_hash); + } + Ok(()) +} diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index c3c8f60e68..1d41490956 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -23,7 +23,6 @@ #![recursion_limit="128"] pub mod config; -#[macro_use] pub mod chain_ops; pub mod error; @@ -55,7 +54,7 @@ use sp_utils::{status_sinks, mpsc::{tracing_unbounded, TracingUnboundedReceiver, pub use self::error::Error; pub use self::builder::{ new_full_client, new_client, - ServiceBuilder, ServiceBuilderCommand, TFullClient, TLightClient, TFullBackend, TLightBackend, + ServiceBuilder, TFullClient, TLightClient, TFullBackend, TLightBackend, TFullCallExecutor, TLightCallExecutor, RpcExtensionBuilder, }; pub use config::{ @@ -79,6 +78,7 @@ pub use sc_network::config::{ pub use sc_tracing::TracingReceiver; pub use task_manager::SpawnTaskHandle; pub use task_manager::TaskManager; +pub use sp_consensus::import_queue::ImportQueue; use sc_client_api::{Backend, BlockchainEvents}; const DEFAULT_PROTOCOL_ID: &str = "sup"; -- GitLab From 46656f765de3f99ceed967813e7b5260fee264be Mon Sep 17 00:00:00 2001 From: Cecile Tonglet Date: Thu, 2 Jul 2020 13:02:08 +0200 Subject: [PATCH 112/144] Drop the tokio runtime before the task_manager (#6548) * Initial commit Forked at: 4919c808cb75618d95762944aa6f5664c1aa3b59 Parent branch: origin/master * Drop the tokio runtime before the task_manager The tokio runtime must be dropped before the task_manager. Otherwise the objects the task_manager keep alive are dropped before the tasks are finished. --- client/cli/src/runner.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/cli/src/runner.rs b/client/cli/src/runner.rs index 807a5620ec..05445c9d85 100644 --- a/client/cli/src/runner.rs +++ b/client/cli/src/runner.rs @@ -236,7 +236,7 @@ impl Runner { self.tokio_runtime.block_on(main(task_manager.future().fuse())) .map_err(|e| e.to_string())?; task_manager.terminate(); - drop(task_manager); + drop(self.tokio_runtime); Ok(()) } -- GitLab From 958443c5c5cb0c6d4d7560e0ae850df28f98a8c0 Mon Sep 17 00:00:00 2001 From: pscott <30843220+pscott@users.noreply.github.com> Date: Thu, 2 Jul 2020 13:34:56 +0200 Subject: [PATCH 113/144] Update SubstrateCli to return String (#6550) * Update SubstrateCli to return String * Add default implementation for executable_name() * Use display instead of PathBuf * Get file_name in default impl of executable_name * Remove String::from and use .into() * Use default impl for executable_name() * Use .as_str() and remove useless .to_string() * Update only sp-io when running companion build * Remove unneeded update of sp-io in CI Co-authored-by: Cecile Tonglet --- .../gitlab/check_polkadot_companion_build.sh | 4 -- bin/node-template/node/src/command.rs | 24 +++++------ bin/node/cli/src/command.rs | 24 +++++------ client/cli/src/config.rs | 4 +- client/cli/src/lib.rs | 42 ++++++++++++------- client/service/src/builder.rs | 10 ++--- client/service/src/config.rs | 4 +- client/service/test/src/lib.rs | 4 +- primitives/panic-handler/src/lib.rs | 7 ++-- utils/browser/src/lib.rs | 4 +- 10 files changed, 65 insertions(+), 62 deletions(-) diff --git a/.maintain/gitlab/check_polkadot_companion_build.sh b/.maintain/gitlab/check_polkadot_companion_build.sh index 281fa8e1e8..26ee73ef71 100755 --- a/.maintain/gitlab/check_polkadot_companion_build.sh +++ b/.maintain/gitlab/check_polkadot_companion_build.sh @@ -111,9 +111,5 @@ echo "paths = [ \"$SUBSTRATE_PATH\" ]" > .cargo/config mkdir -p 
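The ordering requirement described in the commit message above comes straight from Rust's drop rules: local values are dropped in reverse order of declaration unless dropped explicitly. A minimal, standard-library-only sketch (not part of the patch) of why the explicit `drop` call matters:

struct Noisy(&'static str);

impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

fn main() {
    let task_manager = Noisy("task_manager");
    let tokio_runtime = Noisy("tokio_runtime");

    // Pin the order down explicitly: the runtime goes away first, so the tasks it
    // drives finish before the objects the task manager keeps alive are dropped.
    // In this toy example scope-based drop order would already do the right thing;
    // the explicit calls document the requirement and survive later refactoring.
    drop(tokio_runtime);
    drop(task_manager);
}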
target/debug/wbuild/.cargo cp .cargo/config target/debug/wbuild/.cargo/config -# package, others are updated along the way. -cargo update - # Test Polkadot pr or master branch with this Substrate commit. time cargo test --all --release --verbose - diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index 1bc436a063..3391ad2c89 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -21,34 +21,30 @@ use crate::service; use sc_cli::{SubstrateCli, RuntimeVersion, Role, ChainSpec}; impl SubstrateCli for Cli { - fn impl_name() -> &'static str { - "Substrate Node" + fn impl_name() -> String { + "Substrate Node".into() } - fn impl_version() -> &'static str { - env!("SUBSTRATE_CLI_IMPL_VERSION") + fn impl_version() -> String { + env!("SUBSTRATE_CLI_IMPL_VERSION").into() } - fn description() -> &'static str { - env!("CARGO_PKG_DESCRIPTION") + fn description() -> String { + env!("CARGO_PKG_DESCRIPTION").into() } - fn author() -> &'static str { - env!("CARGO_PKG_AUTHORS") + fn author() -> String { + env!("CARGO_PKG_AUTHORS").into() } - fn support_url() -> &'static str { - "support.anonymous.an" + fn support_url() -> String { + "support.anonymous.an".into() } fn copyright_start_year() -> i32 { 2017 } - fn executable_name() -> &'static str { - env!("CARGO_PKG_NAME") - } - fn load_spec(&self, id: &str) -> Result, String> { Ok(match id { "dev" => Box::new(chain_spec::development_config()), diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index 4ac796370c..91bec64edd 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -22,34 +22,30 @@ use node_runtime::{Block, RuntimeApi}; use sc_cli::{Result, SubstrateCli, RuntimeVersion, Role, ChainSpec}; impl SubstrateCli for Cli { - fn impl_name() -> &'static str { - "Substrate Node" + fn impl_name() -> String { + "Substrate Node".into() } - fn impl_version() -> &'static str { - env!("SUBSTRATE_CLI_IMPL_VERSION") + fn impl_version() -> String { + env!("SUBSTRATE_CLI_IMPL_VERSION").into() } - fn description() -> &'static str { - env!("CARGO_PKG_DESCRIPTION") + fn description() -> String { + env!("CARGO_PKG_DESCRIPTION").into() } - fn author() -> &'static str { - env!("CARGO_PKG_AUTHORS") + fn author() -> String { + env!("CARGO_PKG_AUTHORS").into() } - fn support_url() -> &'static str { - "https://github.com/paritytech/substrate/issues/new" + fn support_url() -> String { + "https://github.com/paritytech/substrate/issues/new".into() } fn copyright_start_year() -> i32 { 2017 } - fn executable_name() -> &'static str { - "substrate" - } - fn load_spec(&self, id: &str) -> std::result::Result, String> { Ok(match id { "dev" => Box::new(chain_spec::development_config()), diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 5563f46115..fa3f09116c 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -413,7 +413,7 @@ pub trait CliConfiguration: Sized { let chain_spec = cli.load_spec(chain_id.as_str())?; let base_path = self .base_path()? 
- .unwrap_or_else(|| BasePath::from_project("", "", C::executable_name())); + .unwrap_or_else(|| BasePath::from_project("", "", &C::executable_name())); let config_dir = base_path .path() .to_path_buf() @@ -498,7 +498,7 @@ pub trait CliConfiguration: Sized { fn init(&self) -> Result<()> { let logger_pattern = self.log_filters()?; - sp_panic_handler::set(C::support_url(), C::impl_version()); + sp_panic_handler::set(&C::support_url(), &C::impl_version()); fdlimit::raise_fd_limit(); init_logger(&logger_pattern); diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index a702edba65..c7f48d2721 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -57,25 +57,33 @@ use structopt::{ /// its own implementation that will fill the necessary field based on the trait's functions. pub trait SubstrateCli: Sized { /// Implementation name. - fn impl_name() -> &'static str; + fn impl_name() -> String; /// Implementation version. /// /// By default this will look like this: 2.0.0-b950f731c-x86_64-linux-gnu where the hash is the /// short commit hash of the commit of in the Git repository. - fn impl_version() -> &'static str; + fn impl_version() -> String; /// Executable file name. - fn executable_name() -> &'static str; + /// + /// Extracts the file name from `std::env::current_exe()`. + /// Resorts to the env var `CARGO_PKG_NAME` in case of Error. + fn executable_name() -> String { + std::env::current_exe().ok() + .and_then(|e| e.file_name().map(|s| s.to_os_string())) + .and_then(|w| w.into_string().ok()) + .unwrap_or_else(|| env!("CARGO_PKG_NAME").into()) + } /// Executable file description. - fn description() -> &'static str; + fn description() -> String; /// Executable file author. - fn author() -> &'static str; + fn author() -> String; /// Support URL. 
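Because `executable_name()` is now derived at runtime rather than baked in as a `&'static str`, a wrapper binary reports its own file name automatically. A stand-alone sketch of the same `Option` chain, illustrative only; `CARGO_PKG_NAME` is just the compile-time fallback, as in the patch:

fn executable_name() -> String {
    std::env::current_exe().ok()
        .and_then(|exe| exe.file_name().map(|name| name.to_os_string()))
        .and_then(|name| name.into_string().ok())
        .unwrap_or_else(|| env!("CARGO_PKG_NAME").into())
}

fn main() {
    // Prints e.g. "running as: my-node", depending on how the binary was named.
    println!("running as: {}", executable_name());
}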
- fn support_url() -> &'static str; + fn support_url() -> String; /// Copyright starting year (x-current year) fn copyright_start_year() -> i32; @@ -116,13 +124,16 @@ pub trait SubstrateCli: Sized { { let app = ::clap(); - let mut full_version = Self::impl_version().to_string(); + let mut full_version = Self::impl_version(); full_version.push_str("\n"); + let name = Self::executable_name(); + let author = Self::author(); + let about = Self::description(); let app = app - .name(Self::executable_name()) - .author(Self::author()) - .about(Self::description()) + .name(name) + .author(author.as_str()) + .about(about.as_str()) .version(full_version.as_str()) .settings(&[ AppSettings::GlobalVersion, @@ -175,13 +186,16 @@ pub trait SubstrateCli: Sized { { let app = ::clap(); - let mut full_version = Self::impl_version().to_string(); + let mut full_version = Self::impl_version(); full_version.push_str("\n"); + let name = Self::executable_name(); + let author = Self::author(); + let about = Self::description(); let app = app - .name(Self::executable_name()) - .author(Self::author()) - .about(Self::description()) + .name(name) + .author(author.as_str()) + .about(about.as_str()) .version(full_version.as_str()); let matches = app.get_matches_from_safe(iter)?; diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 3a1c5c85af..16b41e135a 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -1203,8 +1203,8 @@ fn build_telemetry( let is_authority = config.role.is_authority(); let network_id = network.local_peer_id().to_base58(); let name = config.network.node_name.clone(); - let impl_name = config.impl_name.to_owned(); - let version = config.impl_version; + let impl_name = config.impl_name.clone(); + let impl_version = config.impl_version.clone(); let chain_name = config.chain_spec.name().to_owned(); let telemetry = sc_telemetry::init_telemetry(sc_telemetry::TelemetryConfig { endpoints, @@ -1221,7 +1221,7 @@ fn build_telemetry( telemetry!(SUBSTRATE_INFO; "system.connected"; "name" => name.clone(), "implementation" => impl_name.clone(), - "version" => version, + "version" => impl_version.clone(), "config" => "", "chain" => chain_name.clone(), "genesis_hash" => ?genesis_hash, @@ -1270,8 +1270,8 @@ fn gen_handler( let system_info = sc_rpc::system::SystemInfo { chain_name: config.chain_spec.name().into(), - impl_name: config.impl_name.into(), - impl_version: config.impl_version.into(), + impl_name: config.impl_name.clone(), + impl_version: config.impl_version.clone(), properties: config.chain_spec.properties(), chain_type: config.chain_spec.chain_type(), }; diff --git a/client/service/src/config.rs b/client/service/src/config.rs index 5015ce7fac..f3080005a6 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -37,9 +37,9 @@ use tempfile::TempDir; #[derive(Debug)] pub struct Configuration { /// Implementation name - pub impl_name: &'static str, + pub impl_name: String, /// Implementation version (see sc-cli to see an example of format) - pub impl_version: &'static str, + pub impl_version: String, /// Node role. pub role: Role, /// How to spawn background tasks. Mandatory, otherwise creating a `Service` will error. 
diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 5a676e5263..ac95dd11e8 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -229,8 +229,8 @@ fn node_config"); let line = location.as_ref().map(|l| l.line()).unwrap_or(0); diff --git a/utils/browser/src/lib.rs b/utils/browser/src/lib.rs index c8034d9466..9313d41bf5 100644 --- a/utils/browser/src/lib.rs +++ b/utils/browser/src/lib.rs @@ -79,8 +79,8 @@ where disable_grandpa: Default::default(), execution_strategies: Default::default(), force_authoring: Default::default(), - impl_name: "parity-substrate", - impl_version: "0.0.0", + impl_name: String::from("parity-substrate"), + impl_version: String::from("0.0.0"), offchain_worker: Default::default(), prometheus_config: Default::default(), pruning: Default::default(), -- GitLab From b6e2677419e3eae6be9129667477916499ac5c7a Mon Sep 17 00:00:00 2001 From: Xiliang Chen Date: Fri, 3 Jul 2020 01:05:15 +1200 Subject: [PATCH 114/144] Allow specify schedule dispatch origin (#6387) * allow specify schedule dispatch origin * fix tests * use caller origin for scheduled * fix tests * line width * check origin for cancel * line width * fix some issues for benchmarking * fix doc test * another way to constraint origin * fix build issues * fix cancel * line width * fix benchmarks * bump version * enable runtime upgrade * add migration code and test * Update frame/scheduler/src/lib.rs Co-authored-by: Gavin Wood * expose migration method * add notes * bump version * remove on_runtime_upgrade * fix test Co-authored-by: Gavin Wood --- Cargo.lock | 1 + bin/node/runtime/src/lib.rs | 3 + frame/collective/src/lib.rs | 2 +- frame/democracy/src/benchmarking.rs | 1 + frame/democracy/src/lib.rs | 6 +- frame/democracy/src/tests.rs | 5 +- frame/scheduler/Cargo.toml | 7 +- frame/scheduler/src/benchmarking.rs | 1 + frame/scheduler/src/lib.rs | 544 +++++++++++++++++++++------ frame/support/src/dispatch.rs | 4 +- frame/support/src/metadata.rs | 2 +- frame/support/src/origin.rs | 51 ++- frame/support/src/traits.rs | 9 +- frame/support/test/tests/instance.rs | 7 +- frame/support/test/tests/system.rs | 2 +- frame/system/src/lib.rs | 2 +- 16 files changed, 509 insertions(+), 138 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b3dd853538..540ddaa4a1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4502,6 +4502,7 @@ dependencies = [ "sp-io", "sp-runtime", "sp-std", + "substrate-test-utils", ] [[package]] diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 969e66653e..7bec203f8c 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -255,8 +255,10 @@ parameter_types! { impl pallet_scheduler::Trait for Runtime { type Event = Event; type Origin = Origin; + type PalletsOrigin = OriginCaller; type Call = Call; type MaximumWeight = MaximumSchedulerWeight; + type ScheduleOrigin = EnsureRoot; } parameter_types! { @@ -455,6 +457,7 @@ impl pallet_democracy::Trait for Runtime { type OperationalPreimageOrigin = pallet_collective::EnsureMember; type Slash = Treasury; type Scheduler = Scheduler; + type PalletsOrigin = OriginCaller; type MaxVotes = MaxVotes; } diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index 2be0241243..83116080d0 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -97,7 +97,7 @@ pub trait Trait: frame_system::Trait { } /// Origin for the collective module. 
-#[derive(PartialEq, Eq, Clone, RuntimeDebug)] +#[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode)] pub enum RawOrigin { /// It has been condoned by a given number of members of the collective from a given total. Members(MemberCount, MemberCount), diff --git a/frame/democracy/src/benchmarking.rs b/frame/democracy/src/benchmarking.rs index ba3b9a0b13..77d49c80fc 100644 --- a/frame/democracy/src/benchmarking.rs +++ b/frame/democracy/src/benchmarking.rs @@ -79,6 +79,7 @@ fn add_referendum(n: u32) -> Result { 1.into(), None, 63, + system::RawOrigin::Root.into(), Call::enact_proposal(proposal_hash, referendum_index).into(), ).map_err(|_| "failed to schedule named")?; Ok(referendum_index) diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index 79cc136d45..b005ad3641 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -279,7 +279,10 @@ pub trait Trait: frame_system::Trait + Sized { type Slash: OnUnbalanced>; /// The Scheduler. - type Scheduler: ScheduleNamed; + type Scheduler: ScheduleNamed; + + /// Overarching type of all pallets origins. + type PalletsOrigin: From>; /// The maximum number of votes for an account. /// @@ -1625,6 +1628,7 @@ impl Module { when, None, 63, + system::RawOrigin::Root.into(), Call::enact_proposal(status.proposal_hash, index).into(), ).is_err() { frame_support::print("LOGIC ERROR: bake_referendum/schedule_named failed"); diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index b92f4bd076..2f300ec8bc 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -31,7 +31,7 @@ use sp_runtime::{ testing::Header, Perbill, }; use pallet_balances::{BalanceLock, Error as BalancesError}; -use frame_system::EnsureSignedBy; +use frame_system::{EnsureSignedBy, EnsureRoot}; mod cancellation; mod delegation; @@ -123,8 +123,10 @@ parameter_types! { impl pallet_scheduler::Trait for Test { type Event = Event; type Origin = Origin; + type PalletsOrigin = OriginCaller; type Call = Call; type MaximumWeight = MaximumSchedulerWeight; + type ScheduleOrigin = EnsureRoot; } parameter_types! 
{ pub const ExistentialDeposit: u64 = 1; @@ -196,6 +198,7 @@ impl super::Trait for Test { type Scheduler = Scheduler; type MaxVotes = MaxVotes; type OperationalPreimageOrigin = EnsureSignedBy; + type PalletsOrigin = OriginCaller; } pub fn new_test_ext() -> sp_io::TestExternalities { diff --git a/frame/scheduler/Cargo.toml b/frame/scheduler/Cargo.toml index 43507bd364..003a8c20c5 100644 --- a/frame/scheduler/Cargo.toml +++ b/frame/scheduler/Cargo.toml @@ -21,6 +21,7 @@ frame-benchmarking = { version = "2.0.0-rc4", default-features = false, path = " [dev-dependencies] sp-core = { version = "2.0.0-rc4", path = "../../primitives/core", default-features = false } +substrate-test-utils = { version = "2.0.0-rc4", path = "../../test-utils" } [features] default = ["std"] @@ -34,4 +35,8 @@ std = [ "sp-io/std", "sp-std/std" ] -runtime-benchmarks = ["frame-benchmarking"] +runtime-benchmarks = [ + "frame-benchmarking", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", +] diff --git a/frame/scheduler/src/benchmarking.rs b/frame/scheduler/src/benchmarking.rs index 5c580b5525..748017829f 100644 --- a/frame/scheduler/src/benchmarking.rs +++ b/frame/scheduler/src/benchmarking.rs @@ -44,6 +44,7 @@ fn fill_schedule (when: T::BlockNumber, n: u32) -> Result<(), &'static Some((T::BlockNumber::one(), 100)), // HARD_DEADLINE priority means it gets executed no matter what 0, + frame_system::RawOrigin::Root.into(), call.clone().into(), )?; } diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index 6b47e62587..1b3517382f 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -28,6 +28,14 @@ //! specified block number or at a specified period. These scheduled dispatches //! may be named or anonymous and may be canceled. //! +//! **NOTE:** The scheduled calls will be dispatched with the default filter +//! for the origin: namely `frame_system::Trait::BaseCallFilter` for all origin +//! except root which will get no filter. And not the filter contained in origin +//! use to call `fn schedule`. +//! +//! If a call is scheduled using proxy or whatever mecanism which adds filter, +//! then those filter will not be used when dispatching the schedule call. +//! //! ## Interface //! //! ### Dispatchable Functions @@ -45,16 +53,16 @@ mod benchmarking; -use sp_std::prelude::*; -use codec::{Encode, Decode}; -use sp_runtime::{RuntimeDebug, traits::{Zero, One}}; +use sp_std::{prelude::*, marker::PhantomData, borrow::Borrow}; +use codec::{Encode, Decode, Codec}; +use sp_runtime::{RuntimeDebug, traits::{Zero, One, BadOrigin}}; use frame_support::{ - decl_module, decl_storage, decl_event, decl_error, + decl_module, decl_storage, decl_event, decl_error, IterableStorageMap, dispatch::{Dispatchable, DispatchError, DispatchResult, Parameter}, - traits::{Get, schedule}, + traits::{Get, schedule, OriginTrait, EnsureOrigin, IsType}, weights::{GetDispatchInfo, Weight}, }; -use frame_system::{self as system, ensure_root}; +use frame_system::{self as system}; /// Our pallet's configuration trait. All our types and constants go in here. If the /// pallet is dependent on specific other pallets, then their configuration traits @@ -66,7 +74,11 @@ pub trait Trait: system::Trait { type Event: From> + Into<::Event>; /// The aggregated origin which the dispatch will take. - type Origin: From>; + type Origin: OriginTrait + From + IsType<::Origin>; + + /// The caller origin, overarching type of all pallets origins. 
+ type PalletsOrigin: From> + Codec + Clone + Eq; /// The aggregated call type. type Call: Parameter + Dispatchable::Origin> + GetDispatchInfo + From>; @@ -74,6 +86,9 @@ pub trait Trait: system::Trait { /// The maximum weight that may be scheduled per block for any dispatchables of less priority /// than `schedule::HARD_DEADLINE`. type MaximumWeight: Get; + + /// Required origin to schedule or cancel calls. + type ScheduleOrigin: EnsureOrigin<::Origin>; } /// Just a simple index for naming period tasks. @@ -81,9 +96,19 @@ pub type PeriodicIndex = u32; /// The location of a scheduled task that can be used to remove it. pub type TaskAddress = (BlockNumber, u32); +#[cfg_attr(any(feature = "std", test), derive(PartialEq, Eq))] +#[derive(Clone, RuntimeDebug, Encode, Decode)] +struct ScheduledV1 { + maybe_id: Option>, + priority: schedule::Priority, + call: Call, + maybe_periodic: Option>, +} + /// Information regarding an item to be executed in the future. +#[cfg_attr(any(feature = "std", test), derive(PartialEq, Eq))] #[derive(Clone, RuntimeDebug, Encode, Decode)] -pub struct Scheduled { +pub struct ScheduledV2 { /// The unique identity for this task, if there is one. maybe_id: Option>, /// This task's priority. @@ -92,16 +117,42 @@ pub struct Scheduled { call: Call, /// If the call is periodic, then this points to the information concerning that. maybe_periodic: Option>, + /// The origin to dispatch the call. + origin: PalletsOrigin, + _phantom: PhantomData, +} + +/// The current version of Scheduled struct. +pub type Scheduled = ScheduledV2; + +// A value placed in storage that represents the current version of the Scheduler storage. +// This value is used by the `on_runtime_upgrade` logic to determine whether we run +// storage migration logic. +#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug)] +enum Releases { + V1, + V2, +} + +impl Default for Releases { + fn default() -> Self { + Releases::V1 + } } decl_storage! { trait Store for Module as Scheduler { /// Items to be executed, indexed by the block number that they should be executed on. pub Agenda: map hasher(twox_64_concat) T::BlockNumber - => Vec::Call, T::BlockNumber>>>; + => Vec::Call, T::BlockNumber, T::PalletsOrigin, T::AccountId>>>; /// Lookup from identity to the block number and index of the task. Lookup: map hasher(twox_64_concat) Vec => Option>; + + /// Storage version of the pallet. + /// + /// New networks start with last version. + StorageVersion build(|_| Releases::V2): Releases; } } @@ -127,6 +178,7 @@ decl_error! { decl_module! { /// Scheduler module declaration. pub struct Module for enum Call where origin: ::Origin { + type Error = Error; fn deposit_event() = default; /// Anonymously schedule a task. @@ -146,8 +198,9 @@ decl_module! { priority: schedule::Priority, call: Box<::Call>, ) { - ensure_root(origin)?; - Self::do_schedule(when, maybe_periodic, priority, *call)?; + T::ScheduleOrigin::ensure_origin(origin.clone())?; + let origin = ::Origin::from(origin); + Self::do_schedule(when, maybe_periodic, priority, origin.caller().clone(), *call)?; } /// Cancel an anonymously scheduled task. @@ -162,8 +215,9 @@ decl_module! { /// # #[weight = 100_000_000 + T::DbWeight::get().reads_writes(1, 2)] fn cancel(origin, when: T::BlockNumber, index: u32) { - ensure_root(origin)?; - Self::do_cancel((when, index))?; + T::ScheduleOrigin::ensure_origin(origin.clone())?; + let origin = ::Origin::from(origin); + Self::do_cancel(Some(origin.caller().clone()), (when, index))?; } /// Schedule a named task. 
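Aside: runtimes adopting this change wire the two new associated types roughly as sketched below. The council origin, `AccountId` and the 1/2 threshold are assumptions chosen to mirror the node runtime and the test mock elsewhere in this patch; substitute whatever origins the concrete runtime defines.

    use sp_core::u32_trait::{_1, _2};
    use frame_system::{EnsureOneOf, EnsureRoot};

    impl pallet_scheduler::Trait for Runtime {
        type Event = Event;
        type Origin = Origin;
        // Overarching origin type produced by `construct_runtime!`; the origin
        // captured at scheduling time is what the call is later dispatched with
        // (under the default call filter), not root unconditionally.
        type PalletsOrigin = OriginCaller;
        type Call = Call;
        type MaximumWeight = MaximumSchedulerWeight;
        // Scheduling/cancelling is no longer hard-wired to root: any
        // `EnsureOrigin` works, e.g. root *or* a simple council majority.
        type ScheduleOrigin = EnsureOneOf<
            AccountId,
            EnsureRoot<AccountId>,
            pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>,
        >;
    }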
@@ -184,8 +238,9 @@ decl_module! { priority: schedule::Priority, call: Box<::Call>, ) { - ensure_root(origin)?; - Self::do_schedule_named(id, when, maybe_periodic, priority, *call)?; + T::ScheduleOrigin::ensure_origin(origin.clone())?; + let origin = ::Origin::from(origin); + Self::do_schedule_named(id, when, maybe_periodic, priority, origin.caller().clone(), *call)?; } /// Cancel a named scheduled task. @@ -200,8 +255,9 @@ decl_module! { /// # #[weight = 100_000_000 + T::DbWeight::get().reads_writes(2, 2)] fn cancel_named(origin, id: Vec) { - ensure_root(origin)?; - Self::do_cancel_named(id)?; + T::ScheduleOrigin::ensure_origin(origin.clone())?; + let origin = ::Origin::from(origin); + Self::do_cancel_named(Some(origin.caller().clone()), id)?; } /// Execute the scheduled calls @@ -249,7 +305,7 @@ decl_module! { // - It does not push the weight past the limit. // - It is the first item in the schedule if s.priority <= schedule::HARD_DEADLINE || cumulative_weight <= limit || order == 0 { - let r = s.call.clone().dispatch(system::RawOrigin::Root.into()); + let r = s.call.clone().dispatch(s.origin.clone().into()); let maybe_id = s.maybe_id.clone(); if let &Some((period, count)) = &s.maybe_periodic { if count > 1 { @@ -291,10 +347,39 @@ decl_module! { } impl Module { + /// Migrate storage format from V1 to V2. + /// Return true if migration is performed. + pub fn migrate_v1_to_t2() -> bool { + if StorageVersion::get() == Releases::V1 { + StorageVersion::put(Releases::V2); + + Agenda::::translate::< + Vec::Call, T::BlockNumber>>>, _ + >(|_, agenda| Some( + agenda + .into_iter() + .map(|schedule| schedule.map(|schedule| ScheduledV2 { + maybe_id: schedule.maybe_id, + priority: schedule.priority, + call: schedule.call, + maybe_periodic: schedule.maybe_periodic, + origin: system::RawOrigin::Root.into(), + _phantom: Default::default(), + })) + .collect::>() + )); + + true + } else { + false + } + } + fn do_schedule( when: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, + origin: T::PalletsOrigin, call: ::Call ) -> Result, DispatchError> { if when <= frame_system::Module::::block_number() { @@ -306,7 +391,9 @@ impl Module { .filter(|p| p.1 > 1 && !p.0.is_zero()) // Remove one from the number of repetitions since we will schedule one now. 
.map(|(p, c)| (p, c - 1)); - let s = Some(Scheduled { maybe_id: None, priority, call, maybe_periodic }); + let s = Some(Scheduled { + maybe_id: None, priority, call, maybe_periodic, origin, _phantom: PhantomData::::default(), + }); Agenda::::append(when, s); let index = Agenda::::decode_len(when).unwrap_or(1) as u32 - 1; Self::deposit_event(RawEvent::Scheduled(when, index)); @@ -314,8 +401,25 @@ impl Module { Ok((when, index)) } - fn do_cancel((when, index): TaskAddress) -> Result<(), DispatchError> { - if let Some(s) = Agenda::::mutate(when, |agenda| agenda.get_mut(index as usize).and_then(Option::take)) { + fn do_cancel( + origin: Option, + (when, index): TaskAddress + ) -> Result<(), DispatchError> { + let scheduled = Agenda::::try_mutate( + when, + |agenda| { + agenda.get_mut(index as usize) + .map_or(Ok(None), |s| -> Result>, DispatchError> { + if let (Some(ref o), Some(ref s)) = (origin, s.borrow()) { + if *o != s.origin { + return Err(BadOrigin.into()); + } + }; + Ok(s.take()) + }) + }, + )?; + if let Some(s) = scheduled { if let Some(id) = s.maybe_id { Lookup::::remove(id); } @@ -331,6 +435,7 @@ impl Module { when: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, + origin: T::PalletsOrigin, call: ::Call, ) -> Result, DispatchError> { // ensure id it is unique @@ -348,7 +453,9 @@ impl Module { // Remove one from the number of repetitions since we will schedule one now. .map(|(p, c)| (p, c - 1)); - let s = Scheduled { maybe_id: Some(id.clone()), priority, call, maybe_periodic }; + let s = Scheduled { + maybe_id: Some(id.clone()), priority, call, maybe_periodic, origin, _phantom: Default::default() + }; Agenda::::append(when, Some(s)); let index = Agenda::::decode_len(when).unwrap_or(1) as u32 - 1; let address = (when, index); @@ -358,36 +465,49 @@ impl Module { Ok(address) } - fn do_cancel_named(id: Vec) -> Result<(), DispatchError> { - if let Some((when, index)) = Lookup::::take(id) { - let i = index as usize; - Agenda::::mutate(when, |agenda| if let Some(s) = agenda.get_mut(i) { *s = None }); - Self::deposit_event(RawEvent::Canceled(when, index)); - Ok(()) - } else { - Err(Error::::FailedToCancel)? - } + fn do_cancel_named(origin: Option, id: Vec) -> DispatchResult { + Lookup::::try_mutate_exists(id, |lookup| -> DispatchResult { + if let Some((when, index)) = lookup.take() { + let i = index as usize; + Agenda::::try_mutate(when, |agenda| -> DispatchResult { + if let Some(s) = agenda.get_mut(i) { + if let (Some(ref o), Some(ref s)) = (origin, s.borrow()) { + if *o != s.origin { + return Err(BadOrigin.into()); + } + } + *s = None; + } + Ok(()) + })?; + Self::deposit_event(RawEvent::Canceled(when, index)); + Ok(()) + } else { + Err(Error::::FailedToCancel)? 
+ } + }) } } -impl schedule::Anon::Call> for Module { +impl schedule::Anon::Call, T::PalletsOrigin> for Module { type Address = TaskAddress; fn schedule( when: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, + origin: T::PalletsOrigin, call: ::Call ) -> Result { - Self::do_schedule(when, maybe_periodic, priority, call) + Self::do_schedule(when, maybe_periodic, priority, origin, call) } fn cancel((when, index): Self::Address) -> Result<(), ()> { - Self::do_cancel((when, index)).map_err(|_| ()) + Self::do_cancel(None, (when, index)).map_err(|_| ()) } } -impl schedule::Named::Call> for Module { +impl schedule::Named::Call, T::PalletsOrigin> for Module { type Address = TaskAddress; fn schedule_named( @@ -395,13 +515,14 @@ impl schedule::Named::Call> for Module when: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, + origin: T::PalletsOrigin, call: ::Call, ) -> Result { - Self::do_schedule_named(id, when, maybe_periodic, priority, call).map_err(|_| ()) + Self::do_schedule_named(id, when, maybe_periodic, priority, origin, call).map_err(|_| ()) } fn cancel_named(id: Vec) -> Result<(), ()> { - Self::do_cancel_named(id).map_err(|_| ()) + Self::do_cancel_named(None, id).map_err(|_| ()) } } @@ -410,8 +531,10 @@ mod tests { use super::*; use frame_support::{ - impl_outer_event, impl_outer_origin, impl_outer_dispatch, parameter_types, assert_ok, - assert_err, traits::{OnInitialize, OnFinalize, Filter}, weights::constants::RocksDbWeight, + impl_outer_event, impl_outer_origin, impl_outer_dispatch, parameter_types, assert_ok, ord_parameter_types, + assert_noop, assert_err, Hashable, + traits::{OnInitialize, OnFinalize, Filter}, + weights::constants::RocksDbWeight, }; use sp_core::H256; // The testing primitives are very useful for avoiding having to work with signatures @@ -421,41 +544,48 @@ mod tests { testing::Header, traits::{BlakeTwo256, IdentityLookup}, }; + use frame_system::{EnsureOneOf, EnsureRoot, EnsureSignedBy}; use crate as scheduler; mod logger { use super::*; use std::cell::RefCell; - use frame_system::ensure_root; thread_local! { - static LOG: RefCell> = RefCell::new(Vec::new()); + static LOG: RefCell> = RefCell::new(Vec::new()); } - pub fn log() -> Vec { + pub fn log() -> Vec<(OriginCaller, u32)> { LOG.with(|log| log.borrow().clone()) } pub trait Trait: system::Trait { type Event: From + Into<::Event>; } - decl_storage! { - trait Store for Module as Logger { - } - } decl_event! { pub enum Event { Logged(u32, Weight), } } decl_module! { - pub struct Module for enum Call where origin: ::Origin { + pub struct Module for enum Call + where + origin: ::Origin, + ::Origin: OriginTrait + { fn deposit_event() = default; #[weight = *weight] fn log(origin, i: u32, weight: Weight) { - ensure_root(origin)?; Self::deposit_event(Event::Logged(i, weight)); LOG.with(|log| { - log.borrow_mut().push(i); + log.borrow_mut().push((origin.caller().clone(), i)); + }) + } + + #[weight = *weight] + fn log_without_filter(origin, i: u32, weight: Weight) { + Self::deposit_event(Event::Logged(i, weight)); + LOG.with(|log| { + log.borrow_mut().push((origin.caller().clone(), i)); }) } } @@ -485,7 +615,7 @@ mod tests { pub struct BaseFilter; impl Filter for BaseFilter { fn filter(call: &Call) -> bool { - !matches!(call, Call::Logger(_)) + !matches!(call, Call::Logger(logger::Call::log(_, _))) } } @@ -532,11 +662,17 @@ mod tests { parameter_types! { pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * MaximumBlockWeight::get(); } + ord_parameter_types! 
{ + pub const One: u64 = 1; + } + impl Trait for Test { type Event = (); type Origin = Origin; + type PalletsOrigin = OriginCaller; type Call = Call; type MaximumWeight = MaximumSchedulerWeight; + type ScheduleOrigin = EnsureOneOf, EnsureSignedBy>; } type System = system::Module; type Logger = logger::Module; @@ -557,18 +693,22 @@ mod tests { } } + fn root() -> OriginCaller { + system::RawOrigin::Root.into() + } + #[test] fn basic_scheduling_works() { new_test_ext().execute_with(|| { let call = Call::Logger(logger::Call::log(42, 1000)); assert!(!::BaseCallFilter::filter(&call)); - let _ = Scheduler::do_schedule(4, None, 127, call); + let _ = Scheduler::do_schedule(4, None, 127, root(), call); run_to_block(3); assert!(logger::log().is_empty()); run_to_block(4); - assert_eq!(logger::log(), vec![42u32]); + assert_eq!(logger::log(), vec![(root(), 42u32)]); run_to_block(100); - assert_eq!(logger::log(), vec![42u32]); + assert_eq!(logger::log(), vec![(root(), 42u32)]); }); } @@ -576,21 +716,23 @@ mod tests { fn periodic_scheduling_works() { new_test_ext().execute_with(|| { // at #4, every 3 blocks, 3 times. - let _ = Scheduler::do_schedule(4, Some((3, 3)), 127, Call::Logger(logger::Call::log(42, 1000))); + let _ = Scheduler::do_schedule( + 4, Some((3, 3)), 127, root(), Call::Logger(logger::Call::log(42, 1000)) + ); run_to_block(3); assert!(logger::log().is_empty()); run_to_block(4); - assert_eq!(logger::log(), vec![42u32]); + assert_eq!(logger::log(), vec![(root(), 42u32)]); run_to_block(6); - assert_eq!(logger::log(), vec![42u32]); + assert_eq!(logger::log(), vec![(root(), 42u32)]); run_to_block(7); - assert_eq!(logger::log(), vec![42u32, 42u32]); + assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); run_to_block(9); - assert_eq!(logger::log(), vec![42u32, 42u32]); + assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); run_to_block(10); - assert_eq!(logger::log(), vec![42u32, 42u32, 42u32]); + assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32), (root(), 42u32)]); run_to_block(100); - assert_eq!(logger::log(), vec![42u32, 42u32, 42u32]); + assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32), (root(), 42u32)]); }); } @@ -598,12 +740,16 @@ mod tests { fn cancel_named_scheduling_works_with_normal_cancel() { new_test_ext().execute_with(|| { // at #4. - Scheduler::do_schedule_named(1u32.encode(), 4, None, 127, Call::Logger(logger::Call::log(69, 1000))).unwrap(); - let i = Scheduler::do_schedule(4, None, 127, Call::Logger(logger::Call::log(42, 1000))).unwrap(); + Scheduler::do_schedule_named( + 1u32.encode(), 4, None, 127, root(), Call::Logger(logger::Call::log(69, 1000)) + ).unwrap(); + let i = Scheduler::do_schedule( + 4, None, 127, root(), Call::Logger(logger::Call::log(42, 1000)) + ).unwrap(); run_to_block(3); assert!(logger::log().is_empty()); - assert_ok!(Scheduler::do_cancel_named(1u32.encode())); - assert_ok!(Scheduler::do_cancel(i)); + assert_ok!(Scheduler::do_cancel_named(None, 1u32.encode())); + assert_ok!(Scheduler::do_cancel(None, i)); run_to_block(100); assert!(logger::log().is_empty()); }); @@ -613,53 +759,71 @@ mod tests { fn cancel_named_periodic_scheduling_works() { new_test_ext().execute_with(|| { // at #4, every 3 blocks, 3 times. - Scheduler::do_schedule_named(1u32.encode(), 4, Some((3, 3)), 127, Call::Logger(logger::Call::log(42, 1000))).unwrap(); + Scheduler::do_schedule_named( + 1u32.encode(), 4, Some((3, 3)), 127, root(), Call::Logger(logger::Call::log(42, 1000)) + ).unwrap(); // same id results in error. 
- assert!(Scheduler::do_schedule_named(1u32.encode(), 4, None, 127, Call::Logger(logger::Call::log(69, 1000))).is_err()); + assert!(Scheduler::do_schedule_named( + 1u32.encode(), 4, None, 127, root(), Call::Logger(logger::Call::log(69, 1000)) + ).is_err()); // different id is ok. - Scheduler::do_schedule_named(2u32.encode(), 8, None, 127, Call::Logger(logger::Call::log(69, 1000))).unwrap(); + Scheduler::do_schedule_named( + 2u32.encode(), 8, None, 127, root(), Call::Logger(logger::Call::log(69, 1000)) + ).unwrap(); run_to_block(3); assert!(logger::log().is_empty()); run_to_block(4); - assert_eq!(logger::log(), vec![42u32]); + assert_eq!(logger::log(), vec![(root(), 42u32)]); run_to_block(6); - assert_ok!(Scheduler::do_cancel_named(1u32.encode())); + assert_ok!(Scheduler::do_cancel_named(None, 1u32.encode())); run_to_block(100); - assert_eq!(logger::log(), vec![42u32, 69u32]); + assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 69u32)]); }); } #[test] fn scheduler_respects_weight_limits() { new_test_ext().execute_with(|| { - let _ = Scheduler::do_schedule(4, None, 127, Call::Logger(logger::Call::log(42, MaximumSchedulerWeight::get() / 2))); - let _ = Scheduler::do_schedule(4, None, 127, Call::Logger(logger::Call::log(69, MaximumSchedulerWeight::get() / 2))); + let _ = Scheduler::do_schedule( + 4, None, 127, root(), Call::Logger(logger::Call::log(42, MaximumSchedulerWeight::get() / 2)) + ); + let _ = Scheduler::do_schedule( + 4, None, 127, root(), Call::Logger(logger::Call::log(69, MaximumSchedulerWeight::get() / 2)) + ); // 69 and 42 do not fit together run_to_block(4); - assert_eq!(logger::log(), vec![42u32]); + assert_eq!(logger::log(), vec![(root(), 42u32)]); run_to_block(5); - assert_eq!(logger::log(), vec![42u32, 69u32]); + assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 69u32)]); }); } #[test] fn scheduler_respects_hard_deadlines_more() { new_test_ext().execute_with(|| { - let _ = Scheduler::do_schedule(4, None, 0, Call::Logger(logger::Call::log(42, MaximumSchedulerWeight::get() / 2))); - let _ = Scheduler::do_schedule(4, None, 0, Call::Logger(logger::Call::log(69, MaximumSchedulerWeight::get() / 2))); + let _ = Scheduler::do_schedule( + 4, None, 0, root(), Call::Logger(logger::Call::log(42, MaximumSchedulerWeight::get() / 2)) + ); + let _ = Scheduler::do_schedule( + 4, None, 0, root(), Call::Logger(logger::Call::log(69, MaximumSchedulerWeight::get() / 2)) + ); // With base weights, 69 and 42 should not fit together, but do because of hard deadlines run_to_block(4); - assert_eq!(logger::log(), vec![42u32, 69u32]); + assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 69u32)]); }); } #[test] fn scheduler_respects_priority_ordering() { new_test_ext().execute_with(|| { - let _ = Scheduler::do_schedule(4, None, 1, Call::Logger(logger::Call::log(42, MaximumSchedulerWeight::get() / 2))); - let _ = Scheduler::do_schedule(4, None, 0, Call::Logger(logger::Call::log(69, MaximumSchedulerWeight::get() / 2))); + let _ = Scheduler::do_schedule( + 4, None, 1, root(), Call::Logger(logger::Call::log(42, MaximumSchedulerWeight::get() / 2)) + ); + let _ = Scheduler::do_schedule( + 4, None, 0, root(), Call::Logger(logger::Call::log(69, MaximumSchedulerWeight::get() / 2)) + ); run_to_block(4); - assert_eq!(logger::log(), vec![69u32, 42u32]); + assert_eq!(logger::log(), vec![(root(), 69u32), (root(), 42u32)]); }); } @@ -667,30 +831,21 @@ mod tests { fn scheduler_respects_priority_ordering_with_soft_deadlines() { new_test_ext().execute_with(|| { let _ = Scheduler::do_schedule( - 4, - 
None, - 255, - Call::Logger(logger::Call::log(42, MaximumSchedulerWeight::get() / 3)), + 4, None, 255, root(), Call::Logger(logger::Call::log(42, MaximumSchedulerWeight::get() / 3)) ); let _ = Scheduler::do_schedule( - 4, - None, - 127, - Call::Logger(logger::Call::log(69, MaximumSchedulerWeight::get() / 2)), + 4, None, 127, root(), Call::Logger(logger::Call::log(69, MaximumSchedulerWeight::get() / 2)) ); let _ = Scheduler::do_schedule( - 4, - None, - 126, - Call::Logger(logger::Call::log(2600, MaximumSchedulerWeight::get() / 2)), + 4, None, 126, root(), Call::Logger(logger::Call::log(2600, MaximumSchedulerWeight::get() / 2)) ); // 2600 does not fit with 69 or 42, but has higher priority, so will go through run_to_block(4); - assert_eq!(logger::log(), vec![2600u32]); + assert_eq!(logger::log(), vec![(root(), 2600u32)]); // 69 and 42 fit together run_to_block(5); - assert_eq!(logger::log(), vec![2600u32, 69u32, 42u32]); + assert_eq!(logger::log(), vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32)]); }); } @@ -703,47 +858,45 @@ mod tests { let periodic_multiplier = ::DbWeight::get().reads_writes(1, 1); // Named - assert_ok!(Scheduler::do_schedule_named(1u32.encode(), 1, None, 255, Call::Logger(logger::Call::log(3, MaximumSchedulerWeight::get() / 3)))); + assert_ok!( + Scheduler::do_schedule_named( + 1u32.encode(), 1, None, 255, root(), + Call::Logger(logger::Call::log(3, MaximumSchedulerWeight::get() / 3)) + ) + ); // Anon Periodic let _ = Scheduler::do_schedule( - 1, - Some((1000, 3)), - 128, - Call::Logger(logger::Call::log(42, MaximumSchedulerWeight::get() / 3)), + 1, Some((1000, 3)), 128, root(), Call::Logger(logger::Call::log(42, MaximumSchedulerWeight::get() / 3)) ); // Anon let _ = Scheduler::do_schedule( - 1, - None, - 127, - Call::Logger(logger::Call::log(69, MaximumSchedulerWeight::get() / 2)), + 1, None, 127, root(), Call::Logger(logger::Call::log(69, MaximumSchedulerWeight::get() / 2)) ); // Named Periodic assert_ok!(Scheduler::do_schedule_named( - 2u32.encode(), - 1, - Some((1000, 3)), - 126, - Call::Logger(logger::Call::log(2600, MaximumSchedulerWeight::get() / 2)), - )); + 2u32.encode(), 1, Some((1000, 3)), 126, root(), + Call::Logger(logger::Call::log(2600, MaximumSchedulerWeight::get() / 2))) + ); // Will include the named periodic only let actual_weight = Scheduler::on_initialize(1); let call_weight = MaximumSchedulerWeight::get() / 2; - assert_eq!(actual_weight, call_weight + base_weight + base_multiplier + named_multiplier + periodic_multiplier); - assert_eq!(logger::log(), vec![2600u32]); + assert_eq!( + actual_weight, call_weight + base_weight + base_multiplier + named_multiplier + periodic_multiplier + ); + assert_eq!(logger::log(), vec![(root(), 2600u32)]); // Will include anon and anon periodic let actual_weight = Scheduler::on_initialize(2); let call_weight = MaximumSchedulerWeight::get() / 2 + MaximumSchedulerWeight::get() / 3; assert_eq!(actual_weight, call_weight + base_weight + base_multiplier * 2 + periodic_multiplier); - assert_eq!(logger::log(), vec![2600u32, 69u32, 42u32]); + assert_eq!(logger::log(), vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32)]); // Will include named only let actual_weight = Scheduler::on_initialize(3); let call_weight = MaximumSchedulerWeight::get() / 3; assert_eq!(actual_weight, call_weight + base_weight + base_multiplier + named_multiplier); - assert_eq!(logger::log(), vec![2600u32, 69u32, 42u32, 3u32]); + assert_eq!(logger::log(), vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32), (root(), 3u32)]); // Will 
contain none let actual_weight = Scheduler::on_initialize(4); @@ -794,4 +947,169 @@ mod tests { ); }); } + + #[test] + fn should_use_orign() { + new_test_ext().execute_with(|| { + let call = Box::new(Call::Logger(logger::Call::log(69, 1000))); + let call2 = Box::new(Call::Logger(logger::Call::log(42, 1000))); + assert_ok!( + Scheduler::schedule_named(system::RawOrigin::Signed(1).into(), 1u32.encode(), 4, None, 127, call) + ); + assert_ok!(Scheduler::schedule(system::RawOrigin::Signed(1).into(), 4, None, 127, call2)); + run_to_block(3); + // Scheduled calls are in the agenda. + assert_eq!(Agenda::::get(4).len(), 2); + assert!(logger::log().is_empty()); + assert_ok!(Scheduler::cancel_named(system::RawOrigin::Signed(1).into(), 1u32.encode())); + assert_ok!(Scheduler::cancel(system::RawOrigin::Signed(1).into(), 4, 1)); + // Scheduled calls are made NONE, so should not effect state + run_to_block(100); + assert!(logger::log().is_empty()); + }); + } + + #[test] + fn should_check_orign() { + new_test_ext().execute_with(|| { + let call = Box::new(Call::Logger(logger::Call::log(69, 1000))); + let call2 = Box::new(Call::Logger(logger::Call::log(42, 1000))); + assert_noop!( + Scheduler::schedule_named(system::RawOrigin::Signed(2).into(), 1u32.encode(), 4, None, 127, call), + BadOrigin + ); + assert_noop!(Scheduler::schedule(system::RawOrigin::Signed(2).into(), 4, None, 127, call2), BadOrigin); + }); + } + + #[test] + fn should_check_orign_for_cancel() { + new_test_ext().execute_with(|| { + let call = Box::new(Call::Logger(logger::Call::log_without_filter(69, 1000))); + let call2 = Box::new(Call::Logger(logger::Call::log_without_filter(42, 1000))); + assert_ok!( + Scheduler::schedule_named(system::RawOrigin::Signed(1).into(), 1u32.encode(), 4, None, 127, call) + ); + assert_ok!(Scheduler::schedule(system::RawOrigin::Signed(1).into(), 4, None, 127, call2)); + run_to_block(3); + // Scheduled calls are in the agenda. 
+ assert_eq!(Agenda::::get(4).len(), 2); + assert!(logger::log().is_empty()); + assert_noop!(Scheduler::cancel_named(system::RawOrigin::Signed(2).into(), 1u32.encode()), BadOrigin); + assert_noop!(Scheduler::cancel(system::RawOrigin::Signed(2).into(), 4, 1), BadOrigin); + assert_noop!(Scheduler::cancel_named(system::RawOrigin::Root.into(), 1u32.encode()), BadOrigin); + assert_noop!(Scheduler::cancel(system::RawOrigin::Root.into(), 4, 1), BadOrigin); + run_to_block(5); + assert_eq!( + logger::log(), + vec![(system::RawOrigin::Signed(1).into(), 69u32), (system::RawOrigin::Signed(1).into(), 42u32)] + ); + }); + } + + #[test] + fn migration_to_v2_works() { + use substrate_test_utils::assert_eq_uvec; + + new_test_ext().execute_with(|| { + for i in 0..3u64 { + let k = i.twox_64_concat(); + let old = vec![ + Some(ScheduledV1 { + maybe_id: None, + priority: i as u8 + 10, + call: Call::Logger(logger::Call::log(96, 100)), + maybe_periodic: None, + }), + None, + Some(ScheduledV1 { + maybe_id: Some(b"test".to_vec()), + priority: 123, + call: Call::Logger(logger::Call::log(69, 1000)), + maybe_periodic: Some((456u64, 10)), + }), + ]; + frame_support::migration::put_storage_value( + b"Scheduler", + b"Agenda", + &k, + old, + ); + } + + assert_eq!(StorageVersion::get(), Releases::V1); + + assert!(Scheduler::migrate_v1_to_t2()); + + assert_eq_uvec!(Agenda::::iter().collect::>(), vec![ + ( + 0, + vec![ + Some(ScheduledV2 { + maybe_id: None, + priority: 10, + call: Call::Logger(logger::Call::log(96, 100)), + maybe_periodic: None, + origin: root(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledV2 { + maybe_id: Some(b"test".to_vec()), + priority: 123, + call: Call::Logger(logger::Call::log(69, 1000)), + maybe_periodic: Some((456u64, 10)), + origin: root(), + _phantom: PhantomData::::default(), + }), + ]), + ( + 1, + vec![ + Some(ScheduledV2 { + maybe_id: None, + priority: 11, + call: Call::Logger(logger::Call::log(96, 100)), + maybe_periodic: None, + origin: root(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledV2 { + maybe_id: Some(b"test".to_vec()), + priority: 123, + call: Call::Logger(logger::Call::log(69, 1000)), + maybe_periodic: Some((456u64, 10)), + origin: root(), + _phantom: PhantomData::::default(), + }), + ] + ), + ( + 2, + vec![ + Some(ScheduledV2 { + maybe_id: None, + priority: 12, + call: Call::Logger(logger::Call::log(96, 100)), + maybe_periodic: None, + origin: root(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledV2 { + maybe_id: Some(b"test".to_vec()), + priority: 123, + call: Call::Logger(logger::Call::log(69, 1000)), + maybe_periodic: Some((456u64, 10)), + origin: root(), + _phantom: PhantomData::::default(), + }), + ] + ) + ]); + + assert_eq!(StorageVersion::get(), Releases::V2); + }); + } } diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index d9a3561802..56aaed0836 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -2310,6 +2310,8 @@ mod tests { } pub mod system { + use codec::{Encode, Decode}; + pub trait Trait { type AccountId; type Call; @@ -2317,7 +2319,7 @@ mod tests { type Origin: crate::traits::OriginTrait; } - #[derive(Clone, PartialEq, Eq, Debug)] + #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] pub enum RawOrigin { Root, Signed(AccountId), diff --git a/frame/support/src/metadata.rs b/frame/support/src/metadata.rs index d6ec9a7373..dca365ff8c 100644 --- a/frame/support/src/metadata.rs +++ b/frame/support/src/metadata.rs @@ -316,7 +316,7 @@ mod tests 
{ } ); - #[derive(Clone, PartialEq, Eq, Debug)] + #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] pub enum RawOrigin { Root, Signed(AccountId), diff --git a/frame/support/src/origin.rs b/frame/support/src/origin.rs index 77fe86cc55..ba9af6c982 100644 --- a/frame/support/src/origin.rs +++ b/frame/support/src/origin.rs @@ -222,10 +222,14 @@ macro_rules! impl_outer_origin { fn filter_call(&self, call: &Self::Call) -> bool { (self.filter)(call) } + + fn caller(&self) -> &Self::PalletsOrigin { + &self.caller + } } $crate::paste::item! { - #[derive(Clone, PartialEq, Eq, $crate::RuntimeDebug)] + #[derive(Clone, PartialEq, Eq, $crate::RuntimeDebug, $crate::codec::Encode, $crate::codec::Decode)] $(#[$attr])* #[allow(non_camel_case_types)] pub enum $caller_name { @@ -255,13 +259,25 @@ macro_rules! impl_outer_origin { } } + impl From<$system::Origin<$runtime>> for $caller_name { + fn from(x: $system::Origin<$runtime>) -> Self { + $caller_name::system(x) + } + } impl From<$system::Origin<$runtime>> for $name { /// Convert to runtime origin: /// * root origin is built with no filter /// * others use `frame-system::Trait::BaseCallFilter` fn from(x: $system::Origin<$runtime>) -> Self { + let o: $caller_name = x.into(); + o.into() + } + } + + impl From<$caller_name> for $name { + fn from(x: $caller_name) -> Self { let mut o = $name { - caller: $caller_name::system(x), + caller: x, filter: $crate::sp_std::rc::Rc::new(Box::new(|_| true)), }; @@ -273,6 +289,7 @@ macro_rules! impl_outer_origin { o } } + impl Into<$crate::sp_std::result::Result<$system::Origin<$runtime>, $name>> for $name { /// NOTE: converting to pallet origin loses the origin filter information. fn into(self) -> $crate::sp_std::result::Result<$system::Origin<$runtime>, Self> { @@ -290,17 +307,20 @@ macro_rules! impl_outer_origin { <$system::Origin<$runtime>>::from(x).into() } } + $( $crate::paste::item! { + impl From<$module::Origin < $( $generic )? $(, $module::$generic_instance )? > > for $caller_name { + fn from(x: $module::Origin < $( $generic )? $(, $module::$generic_instance )? >) -> Self { + $caller_name::[< $module $( _ $generic_instance )? >](x) + } + } + impl From<$module::Origin < $( $generic )? $(, $module::$generic_instance )? > > for $name { /// Convert to runtime origin using `frame-system::Trait::BaseCallFilter`. fn from(x: $module::Origin < $( $generic )? $(, $module::$generic_instance )? >) -> Self { - let mut o = $name { - caller: $caller_name::[< $module $( _ $generic_instance )? >](x), - filter: $crate::sp_std::rc::Rc::new(Box::new(|_| true)), - }; - $crate::traits::OriginTrait::reset_filter(&mut o); - o + let x: $caller_name = x.into(); + x.into() } } impl Into< @@ -328,15 +348,18 @@ macro_rules! 
impl_outer_origin { #[cfg(test)] mod tests { + use codec::{Encode, Decode}; use crate::traits::{Filter, OriginTrait}; mod system { + use super::*; + pub trait Trait { type AccountId; type Call; type BaseCallFilter; } - #[derive(Clone, PartialEq, Eq, Debug)] + #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] pub enum RawOrigin { Root, Signed(AccountId), @@ -356,18 +379,22 @@ mod tests { } mod origin_without_generic { - #[derive(Clone, PartialEq, Eq, Debug)] + use super::*; + + #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] pub struct Origin; } mod origin_with_generic { - #[derive(Clone, PartialEq, Eq, Debug)] + use super::*; + + #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] pub struct Origin { t: T } } - #[derive(Clone, PartialEq, Eq, Debug)] + #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] pub struct TestRuntime; pub struct BaseCallFilter; diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index f7e7710b32..e0b2f256f0 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -1500,7 +1500,7 @@ pub mod schedule { pub const LOWEST_PRIORITY: Priority = 255; /// A type that can be used as a scheduler. - pub trait Anon { + pub trait Anon { /// An address which can be used for removing a scheduled task. type Address: Codec + Clone + Eq + EncodeLike + Debug; @@ -1513,6 +1513,7 @@ pub mod schedule { when: BlockNumber, maybe_periodic: Option>, priority: Priority, + origin: Origin, call: Call ) -> Result; @@ -1530,7 +1531,7 @@ pub mod schedule { } /// A type that can be used as a scheduler. - pub trait Named { + pub trait Named { /// An address which can be used for removing a scheduled task. type Address: Codec + Clone + Eq + EncodeLike + sp_std::fmt::Debug; @@ -1542,6 +1543,7 @@ pub mod schedule { when: BlockNumber, maybe_periodic: Option>, priority: Priority, + origin: Origin, call: Call ) -> Result; @@ -1605,6 +1607,9 @@ pub trait OriginTrait: Sized { /// Filter the call, if false then call is filtered out. fn filter_call(&self, call: &Self::Call) -> bool; + + /// Get the caller. + fn caller(&self) -> &Self::PalletsOrigin; } /// Trait to be used when types are exactly same. diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index 920554346f..dde2e0ca9f 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -17,6 +17,7 @@ #![recursion_limit="128"] +use codec::{Codec, EncodeLike, Encode, Decode}; use sp_runtime::{generic, BuildStorage, traits::{BlakeTwo256, Block as _, Verify}}; use frame_support::{ Parameter, traits::Get, parameter_types, @@ -44,7 +45,7 @@ mod module1 { type Event: From> + Into<::Event>; type Origin: From>; type SomeParameter: Get; - type GenericType: Default + Clone + codec::Codec + codec::EncodeLike; + type GenericType: Default + Clone + Codec + EncodeLike; } frame_support::decl_module! 
{ @@ -87,7 +88,7 @@ mod module1 { } } - #[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug)] + #[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug, Encode, Decode)] pub enum Origin, I> where T::BlockNumber: From { Members(u32), _Phantom(std::marker::PhantomData<(T, I)>), @@ -148,7 +149,7 @@ mod module2 { } } - #[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug)] + #[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug, Encode, Decode)] pub enum Origin, I=DefaultInstance> { Members(u32), _Phantom(std::marker::PhantomData<(T, I)>), diff --git a/frame/support/test/tests/system.rs b/frame/support/test/tests/system.rs index 0d6a22fd1a..8ca2e97789 100644 --- a/frame/support/test/tests/system.rs +++ b/frame/support/test/tests/system.rs @@ -57,7 +57,7 @@ frame_support::decl_error! { } /// Origin for the system module. -#[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug)] +#[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug, Encode, Decode)] pub enum RawOrigin { Root, Signed(AccountId), diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index dc103b204d..3536d6fc71 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -301,7 +301,7 @@ pub struct EventRecord { } /// Origin for the System module. -#[derive(PartialEq, Eq, Clone, RuntimeDebug)] +#[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode)] pub enum RawOrigin { /// The system itself ordained this dispatch to happen: this is the highest privilege level. Root, -- GitLab From b3fdde6582657be3afc56f4dd2b946c762c6ba0f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 2 Jul 2020 15:17:14 +0200 Subject: [PATCH 115/144] Move `create_inherents` into the block-builder (#6553) * Move `create_inherents` into the block-builder This moves the `create_inherents` call into the block-builder. This has the advantage that `create_inherents` will be able to reuse the same context that will be used when applying the extrinsics and we also save one call to `on_initialize`. To make sure that `create_inherents` does not modify any state, we execute it in a transaction that is rolled-back after doing the runtime call. 
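In practice the consumer side of this change looks roughly like the sketch below; `client`, `parent_id`, `inherent_digests` and `inherent_data` stand in for whatever the surrounding proposer code already has in scope.

    // Build a block: inherents are now requested from the block builder itself,
    // so they are created with the same block-construction context that later
    // applies them, and `inherent_extrinsics` runs inside a transaction that is
    // rolled back afterwards (no stray state changes).
    let mut block_builder = client.new_block_at(&parent_id, inherent_digests, RecordProof::No)?;

    for inherent in block_builder.create_inherents(inherent_data)? {
        block_builder.push(inherent)?;
    }

    let block = block_builder.build()?.block;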
* Feedback and build fix * Update primitives/runtime/src/lib.rs Co-authored-by: Sergei Shulepov * Update client/block-builder/src/lib.rs Co-authored-by: Sergei Shulepov --- Cargo.lock | 1 + .../basic-authorship/src/basic_authorship.rs | 11 +----- client/block-builder/Cargo.toml | 1 + client/block-builder/src/lib.rs | 39 ++++++++++++++++--- frame/support/src/storage/mod.rs | 9 +---- .../api/proc-macro/src/impl_runtime_apis.rs | 22 +++++------ .../proc-macro/src/mock_impl_runtime_apis.rs | 8 ++-- primitives/api/src/lib.rs | 18 ++++----- primitives/runtime/src/lib.rs | 17 ++++++++ 9 files changed, 78 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 540ddaa4a1..9ea9b89057 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5994,6 +5994,7 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core", + "sp-inherents", "sp-runtime", "sp-state-machine", "sp-trie", diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 383d0ea6fc..7343b13c04 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -26,7 +26,6 @@ use codec::Decode; use sp_consensus::{evaluation, Proposal, RecordProof}; use sp_inherents::InherentData; use log::{error, info, debug, trace, warn}; -use sp_core::ExecutionContext; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, Hash as HashT, Header as HeaderT, DigestFor, BlakeTwo256}, @@ -200,15 +199,7 @@ impl Proposer record_proof, )?; - // We don't check the API versions any further here since the dispatch compatibility - // check should be enough. - for inherent in self.client.runtime_api() - .inherent_extrinsics_with_context( - &self.parent_id, - ExecutionContext::BlockConstruction, - inherent_data - )? - { + for inherent in block_builder.create_inherents(inherent_data)? 
{ match block_builder.push(inherent) { Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => warn!("⚠️ Dropping non-mandatory inherent from overweight block."), diff --git a/client/block-builder/Cargo.toml b/client/block-builder/Cargo.toml index 1e733355f7..a56ff61cd0 100644 --- a/client/block-builder/Cargo.toml +++ b/client/block-builder/Cargo.toml @@ -20,6 +20,7 @@ sp-consensus = { version = "0.8.0-rc4", path = "../../primitives/consensus/commo sp-blockchain = { version = "2.0.0-rc4", path = "../../primitives/blockchain" } sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } sp-block-builder = { version = "2.0.0-rc4", path = "../../primitives/block-builder" } +sp-inherents = { version = "2.0.0-rc4", path = "../../primitives/inherents" } sc-client-api = { version = "2.0.0-rc4", path = "../api" } codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index af40b33662..904667b1af 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -34,7 +34,10 @@ use sp_runtime::{ }; use sp_blockchain::{ApplyExtrinsicFailed, Error}; use sp_core::ExecutionContext; -use sp_api::{Core, ApiExt, ApiErrorFor, ApiRef, ProvideRuntimeApi, StorageChanges, StorageProof}; +use sp_api::{ + Core, ApiExt, ApiErrorFor, ApiRef, ProvideRuntimeApi, StorageChanges, StorageProof, + TransactionOutcome, +}; use sp_consensus::RecordProof; pub use sp_block_builder::BlockBuilder as BlockBuilderApi; @@ -156,17 +159,22 @@ where let block_id = &self.block_id; let extrinsics = &mut self.extrinsics; - self.api.map_api_result(|api| { + self.api.execute_in_transaction(|api| { match api.apply_extrinsic_with_context( block_id, ExecutionContext::BlockConstruction, xt.clone(), - )? { - Ok(_) => { + ) { + Ok(Ok(_)) => { extrinsics.push(xt); - Ok(()) + TransactionOutcome::Commit(Ok(())) } - Err(tx_validity) => Err(ApplyExtrinsicFailed::Validity(tx_validity).into()), + Ok(Err(tx_validity)) => { + TransactionOutcome::Rollback( + Err(ApplyExtrinsicFailed::Validity(tx_validity).into()), + ) + }, + Err(e) => TransactionOutcome::Rollback(Err(e)), } }) } @@ -212,6 +220,25 @@ where proof, }) } + + /// Create the inherents for the block. + /// + /// Returns the inherents created by the runtime or an error if something failed. + pub fn create_inherents( + &mut self, + inherent_data: sp_inherents::InherentData, + ) -> Result, ApiErrorFor> { + let block_id = self.block_id; + self.api.execute_in_transaction(move |api| { + // `create_inherents` should not change any state, to ensure this we always rollback + // the transaction. + TransactionOutcome::Rollback(api.inherent_extrinsics_with_context( + &block_id, + ExecutionContext::BlockConstruction, + inherent_data + )) + }) + } } #[cfg(test)] diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs index c2d7ceef0f..b8b08c5dc0 100644 --- a/frame/support/src/storage/mod.rs +++ b/frame/support/src/storage/mod.rs @@ -21,6 +21,7 @@ use sp_std::{prelude::*, marker::PhantomData}; use codec::{FullCodec, FullEncode, Encode, EncodeLike, Decode}; use crate::hash::{Twox128, StorageHasher}; use sp_runtime::generic::{Digest, DigestItem}; +pub use sp_runtime::TransactionOutcome; pub mod unhashed; pub mod hashed; @@ -29,14 +30,6 @@ pub mod child; pub mod generator; pub mod migration; -/// Describes whether a storage transaction should be committed or rolled back. -pub enum TransactionOutcome { - /// Transaction should be committed. 
- Commit(T), - /// Transaction should be rolled back. - Rollback(T), -} - /// Execute the supplied function in a new storage transaction. /// /// All changes to storage performed by the supplied function are discarded if the returned diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index a4c35dcf42..97b159e6f0 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -253,18 +253,18 @@ fn generate_runtime_api_base_structures() -> Result { { type StateBackend = C::StateBackend; - fn map_api_result std::result::Result, R, E>( + fn execute_in_transaction #crate_::TransactionOutcome, R>( &self, - map_call: F, - ) -> std::result::Result where Self: Sized { + call: F, + ) -> R where Self: Sized { self.changes.borrow_mut().start_transaction(); *self.commit_on_success.borrow_mut() = false; - let res = map_call(self); + let res = call(self); *self.commit_on_success.borrow_mut() = true; - self.commit_on_ok(&res); + self.commit_or_rollback(matches!(res, #crate_::TransactionOutcome::Commit(_))); - res + res.into_inner() } fn has_api( @@ -380,21 +380,21 @@ fn generate_runtime_api_base_structures() -> Result { &self.recorder, ); - self.commit_on_ok(&res); + self.commit_or_rollback(res.is_ok()); res } - fn commit_on_ok(&self, res: &std::result::Result) { + fn commit_or_rollback(&self, commit: bool) { let proof = "\ We only close a transaction when we opened one ourself. Other parts of the runtime that make use of transactions (state-machine) also balance their transactions. The runtime cannot close client initiated transactions. qed"; if *self.commit_on_success.borrow() { - if res.is_err() { - self.changes.borrow_mut().rollback_transaction().expect(proof); - } else { + if commit { self.changes.borrow_mut().commit_transaction().expect(proof); + } else { + self.changes.borrow_mut().rollback_transaction().expect(proof); } } } diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index 028ef57939..0e8f18e3e6 100644 --- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -73,11 +73,11 @@ fn implement_common_api_traits( impl #crate_::ApiExt<#block_type> for #self_ty { type StateBackend = #crate_::InMemoryBackend<#crate_::HashFor<#block_type>>; - fn map_api_result std::result::Result, R, E>( + fn execute_in_transaction #crate_::TransactionOutcome, R>( &self, - map_call: F, - ) -> std::result::Result where Self: Sized { - map_call(self) + call: F, + ) -> R where Self: Sized { + call(self).into_inner() } fn has_api( diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 0aaf72e2d2..bad6c03058 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -58,7 +58,7 @@ pub use sp_runtime::{ Block as BlockT, GetNodeBlockType, GetRuntimeBlockType, HashFor, NumberFor, Header as HeaderT, Hash as HashT, }, - generic::BlockId, transaction_validity::TransactionValidity, RuntimeString, + generic::BlockId, transaction_validity::TransactionValidity, RuntimeString, TransactionOutcome, }; #[doc(hidden)] pub use sp_core::{offchain, ExecutionContext}; @@ -356,15 +356,15 @@ pub trait ApiExt: ApiErrorExt { /// The state backend that is used to store the block states. type StateBackend: StateBackend>; - /// The given closure will be called with api instance. Inside the closure any api call is - /// allowed. 
After doing the api call, the closure is allowed to map the `Result` to a - /// different `Result` type. This can be important, as the internal data structure that keeps - /// track of modifications to the storage, discards changes when the `Result` is an `Err`. - /// On `Ok`, the structure commits the changes to an internal buffer. - fn map_api_result result::Result, R, E>( + /// Execute the given closure inside a new transaction. + /// + /// Depending on the outcome of the closure, the transaction is committed or rolled-back. + /// + /// The internal result of the closure is returned afterwards. + fn execute_in_transaction TransactionOutcome, R>( &self, - map_call: F, - ) -> result::Result where Self: Sized; + call: F, + ) -> R where Self: Sized; /// Checks if the given api is implemented and versions match. fn has_api( diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index b27cb0c633..02031a2df9 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -801,6 +801,23 @@ impl Drop for SignatureBatching { } } +/// Describes on what should happen with a storage transaction. +pub enum TransactionOutcome { + /// Commit the transaction. + Commit(R), + /// Rollback the transaction. + Rollback(R), +} + +impl TransactionOutcome { + /// Convert into the inner type. + pub fn into_inner(self) -> R { + match self { + Self::Commit(r) => r, + Self::Rollback(r) => r, + } + } +} #[cfg(test)] mod tests { -- GitLab From cf2367f6d9a75feafe78f621f13ca60c75da9fc5 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Thu, 2 Jul 2020 15:18:08 +0200 Subject: [PATCH 116/144] client/network: Remove unused Result returned by NetworkWorker (#6552) --- client/network/src/service.rs | 6 +++--- client/network/test/src/lib.rs | 6 +++--- client/service/src/lib.rs | 4 +--- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 2ef6b7bc21..c669c809a1 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -62,7 +62,7 @@ use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnbound use std::{ borrow::{Borrow, Cow}, collections::HashSet, - fs, io, + fs, marker::PhantomData, num:: NonZeroUsize, pin::Pin, @@ -1111,7 +1111,7 @@ impl Metrics { } impl Future for NetworkWorker { - type Output = Result<(), io::Error>; + type Output = (); fn poll(mut self: Pin<&mut Self>, cx: &mut std::task::Context) -> Poll { let this = &mut *self; @@ -1138,7 +1138,7 @@ impl Future for NetworkWorker { // Process the next message coming from the `NetworkService`. let msg = match this.from_worker.poll_next_unpin(cx) { Poll::Ready(Some(msg)) => msg, - Poll::Ready(None) => return Poll::Ready(Ok(())), + Poll::Ready(None) => return Poll::Ready(()), Poll::Pending => break, }; diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 2896c4e3e1..d0f1d4752b 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -861,13 +861,13 @@ pub trait TestNetFactory: Sized { ); } - /// Polls the testnet. Processes all the pending actions and returns `NotReady`. + /// Polls the testnet. Processes all the pending actions. 
fn poll(&mut self, cx: &mut FutureContext) { self.mut_peers(|peers| { for peer in peers { trace!(target: "sync", "-- Polling {}", peer.id()); - if let Poll::Ready(res) = Pin::new(&mut peer.network).poll(cx) { - res.unwrap(); + if let Poll::Ready(()) = peer.network.poll_unpin(cx) { + panic!("NetworkWorker has terminated unexpectedly.") } trace!(target: "sync", "-- Polling complete {}", peer.id()); diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 1d41490956..978b77974f 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -307,9 +307,7 @@ fn build_network_future< }); // Main network polling. - if let Poll::Ready(Ok(())) = Pin::new(&mut network).poll(cx).map_err(|err| { - warn!(target: "service", "Error in network: {:?}", err); - }) { + if let Poll::Ready(()) = network.poll_unpin(cx) { return Poll::Ready(()); } -- GitLab From facc6741400a21fa3b5a3eedfe444f30ab5df8b0 Mon Sep 17 00:00:00 2001 From: Cecile Tonglet Date: Fri, 3 Jul 2020 11:49:42 +0200 Subject: [PATCH 117/144] New testing helpers (#6555) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Initial commit Forked at: 8ef1ac0ee13d2a72cc1c391d4624dfaaafe641e8 Parent branch: origin/master * Add send_transaction to RpcHandlers * Extension trait for RpcHandlers * Revert "Add send_transaction to RpcHandlers" This reverts commit 03c89e13d404bae3f3123387dd50f026061bca82. * Add an extension trait for BlockchainEvents * Update test-utils/client/src/lib.rs Co-authored-by: Bastian Köcher * Update test-utils/client/src/lib.rs * fix * deps fix Co-authored-by: Bastian Köcher --- Cargo.lock | 2 + test-utils/client/Cargo.toml | 2 + test-utils/client/src/lib.rs | 87 ++++++++++++++++++++++++++++++++++-- 3 files changed, 88 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9ea9b89057..b5bbcd6954 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8309,8 +8309,10 @@ dependencies = [ name = "substrate-test-client" version = "2.0.0-rc4" dependencies = [ + "futures 0.1.29", "futures 0.3.5", "hash-db", + "hex", "parity-scale-codec", "sc-client-api", "sc-client-db", diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index a9d8590f02..e9036bc77a 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -20,7 +20,9 @@ sc-executor = { version = "0.8.0-rc4", path = "../../client/executor" } sc-consensus = { version = "0.8.0-rc4", path = "../../client/consensus/common" } sc-service = { version = "0.8.0-rc4", default-features = false, features = ["test-helpers"], path = "../../client/service" } futures = "0.3.4" +futures01 = { package = "futures", version = "0.1.29" } hash-db = "0.15.2" +hex = "0.4" sp-keyring = { version = "2.0.0-rc4", path = "../../primitives/keyring" } codec = { package = "parity-scale-codec", version = "1.3.1" } sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index 2ab9e4066d..fef9acd9d2 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -36,14 +36,17 @@ pub use sp_keyring::{ pub use sp_core::{traits::BareCryptoStorePtr, tasks::executor as tasks_executor}; pub use sp_runtime::{Storage, StorageChild}; pub use sp_state_machine::ExecutionStrategy; -pub use sc_service::client; +pub use sc_service::{RpcHandlers, RpcSession, client}; pub use self::client_ext::{ClientExt, ClientBlockImportExt}; +use std::pin::Pin; use std::sync::Arc; -use std::collections::HashMap; +use 
std::collections::{HashSet, HashMap}; +use futures::{future::{Future, FutureExt}, stream::StreamExt}; use sp_core::storage::ChildInfo; -use sp_runtime::traits::{Block as BlockT, BlakeTwo256}; +use sp_runtime::{OpaqueExtrinsic, codec::Encode, traits::{Block as BlockT, BlakeTwo256}}; use sc_service::client::{LocalCallExecutor, ClientConfig}; +use sc_client_api::BlockchainEvents; /// Test client light database backend. pub type LightBackend = sc_light::Backend< @@ -255,3 +258,81 @@ impl TestClientBuilder< self.build_with_executor(executor) } } + +/// An extension trait for `RpcHandlers`. +pub trait RpcHandlersExt { + /// Send a transaction through the RpcHandlers. + fn send_transaction( + &self, + extrinsic: OpaqueExtrinsic, + ) -> Pin, + RpcSession, + futures01::sync::mpsc::Receiver, + ), + > + Send>>; +} + +impl RpcHandlersExt for RpcHandlers { + fn send_transaction( + &self, + extrinsic: OpaqueExtrinsic, + ) -> Pin, + RpcSession, + futures01::sync::mpsc::Receiver, + ), + > + Send>> { + let (tx, rx) = futures01::sync::mpsc::channel(0); + let mem = RpcSession::new(tx.into()); + Box::pin(self + .rpc_query( + &mem, + &format!( + r#"{{ + "jsonrpc": "2.0", + "method": "author_submitExtrinsic", + "params": ["0x{}"], + "id": 0 + }}"#, + hex::encode(extrinsic.encode()) + ), + ) + .map(move |res| (res, mem, rx))) + } +} + +/// An extension trait for `BlockchainEvents`. +pub trait BlockchainEventsExt +where + C: BlockchainEvents, + B: BlockT, +{ + /// Wait for `count` blocks to be imported in the node and then exit. This function will not return if no blocks + /// are ever created, thus you should restrict the maximum amount of time of the test execution. + fn wait_for_blocks(&self, count: usize) -> Pin + Send>>; +} + +impl BlockchainEventsExt for C +where + C: BlockchainEvents, + B: BlockT, +{ + fn wait_for_blocks(&self, count: usize) -> Pin + Send>> { + assert!(count > 0, "'count' argument must be greater than 0"); + + let mut import_notification_stream = self.import_notification_stream(); + let mut blocks = HashSet::new(); + + Box::pin(async move { + while let Some(notification) = import_notification_stream.next().await { + blocks.insert(notification.hash); + if blocks.len() == count { + break; + } + } + }) + } +} -- GitLab From 7e3cf944cf87a22036cc26f03bd859855b7ed3f1 Mon Sep 17 00:00:00 2001 From: Cecile Tonglet Date: Fri, 3 Jul 2020 17:29:08 +0200 Subject: [PATCH 118/144] Remove polkadot companion detection from branch name (#6565) * Initial commit Forked at: facc6741400a21fa3b5a3eedfe444f30ab5df8b0 Parent branch: origin/master * Remove polkadot companion detection from branch name Even though it was nice it was also error prone as there were no indication whatsoever on the PR that a polkadot companion branch exists. 
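A short usage sketch for the `wait_for_blocks` helper added by the test-utils patch above (illustrative only, not part of the series). `client` is assumed to be a test client implementing `BlockchainEvents<Block>`:

    // Block the test until three blocks have been imported. As the helper's doc
    // comment warns, the future never resolves if no blocks are produced, so the
    // surrounding test should bound its own execution time.
    futures::executor::block_on(client.wait_for_blocks(3));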
--- .maintain/gitlab/check_polkadot_companion_build.sh | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/.maintain/gitlab/check_polkadot_companion_build.sh b/.maintain/gitlab/check_polkadot_companion_build.sh index 26ee73ef71..e68ce45b3f 100755 --- a/.maintain/gitlab/check_polkadot_companion_build.sh +++ b/.maintain/gitlab/check_polkadot_companion_build.sh @@ -87,15 +87,7 @@ then git checkout pr/${pr_companion} git merge origin/master else - pr_ref="$(grep -Po '"ref"\s*:\s*"\K(?!master)[^"]*' "${pr_data_file}")" - if git fetch origin "$pr_ref":branch/"$pr_ref" 2>/dev/null - then - boldprint "companion branch detected: $pr_ref" - git checkout branch/"$pr_ref" - git merge origin/master - else - boldprint "no companion branch found - building polkadot:master" - fi + boldprint "no companion branch found - building polkadot:master" fi rm -f "${pr_data_file}" else -- GitLab From 14f3516727c080bb0f6b5a9700a74c6f2ed1f2cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Fri, 3 Jul 2020 18:07:46 +0100 Subject: [PATCH 119/144] primitives: use generic Header in testing runtime (#6561) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * primitives: use generic Header in testing runtime * frame: remove unused imports * Remove warning Co-authored-by: Bastian Köcher --- client/db/src/cache/list_cache.rs | 5 +-- client/db/src/cache/list_entry.rs | 6 ++- frame/finality-tracker/src/lib.rs | 5 ++- frame/grandpa/src/mock.rs | 4 +- frame/grandpa/src/tests.rs | 2 +- primitives/runtime/src/testing.rs | 66 +++---------------------------- 6 files changed, 18 insertions(+), 70 deletions(-) diff --git a/client/db/src/cache/list_cache.rs b/client/db/src/cache/list_cache.rs index 0856350fb0..15ad339b1f 100644 --- a/client/db/src/cache/list_cache.rs +++ b/client/db/src/cache/list_cache.rs @@ -860,16 +860,15 @@ fn read_forks>( } #[cfg(test)] -pub mod tests { +mod tests { use substrate_test_runtime_client::runtime::H256; use sp_runtime::testing::{Header, Block as RawBlock, ExtrinsicWrapper}; - use sp_runtime::traits::Header as HeaderT; use crate::cache::list_storage::tests::{DummyStorage, FaultyStorage, DummyTransaction}; use super::*; type Block = RawBlock>; - pub fn test_id(number: u64) -> ComplexBlockId { + fn test_id(number: u64) -> ComplexBlockId { ComplexBlockId::new(H256::from_low_u64_be(number), number) } diff --git a/client/db/src/cache/list_entry.rs b/client/db/src/cache/list_entry.rs index 565a62cff4..d14fab9274 100644 --- a/client/db/src/cache/list_entry.rs +++ b/client/db/src/cache/list_entry.rs @@ -117,10 +117,14 @@ impl StorageEntry { #[cfg(test)] mod tests { - use crate::cache::list_cache::tests::test_id; use crate::cache::list_storage::tests::{DummyStorage, FaultyStorage}; + use substrate_test_runtime_client::runtime::{H256, Block}; use super::*; + fn test_id(number: u64) -> ComplexBlockId { + ComplexBlockId::new(H256::from_low_u64_be(number), number) + } + #[test] fn entry_try_update_works() { // when trying to update with None value diff --git a/frame/finality-tracker/src/lib.rs b/frame/finality-tracker/src/lib.rs index e5ed9574e5..aa692e65a8 100644 --- a/frame/finality-tracker/src/lib.rs +++ b/frame/finality-tracker/src/lib.rs @@ -209,8 +209,9 @@ mod tests { use sp_io::TestExternalities; use sp_core::H256; use sp_runtime::{ - testing::Header, Perbill, - traits::{BlakeTwo256, IdentityLookup, Header as HeaderT}, + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + 
Perbill, }; use frame_support::{ assert_ok, impl_outer_origin, parameter_types, diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index 0f3122c860..7da32c5958 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -40,8 +40,8 @@ use sp_runtime::{ impl_opaque_keys, testing::{Header, TestXt, UintAuthorityId}, traits::{ - Convert, Extrinsic as ExtrinsicT, Header as _, IdentityLookup, OpaqueKeys, - SaturatedConversion, SignedExtension, + Convert, Extrinsic as ExtrinsicT, IdentityLookup, OpaqueKeys, SaturatedConversion, + SignedExtension, }, transaction_validity::TransactionValidityError, DigestItem, Perbill, diff --git a/frame/grandpa/src/tests.rs b/frame/grandpa/src/tests.rs index 2337e00e8d..5f901f2276 100644 --- a/frame/grandpa/src/tests.rs +++ b/frame/grandpa/src/tests.rs @@ -30,7 +30,7 @@ use frame_support::{ use frame_system::{EventRecord, Phase}; use sp_core::H256; use sp_keyring::Ed25519Keyring; -use sp_runtime::{testing::Digest, traits::Header}; +use sp_runtime::testing::Digest; #[test] fn authorities_change_logged() { diff --git a/primitives/runtime/src/testing.rs b/primitives/runtime/src/testing.rs index 1b826ace99..eefb36ae82 100644 --- a/primitives/runtime/src/testing.rs +++ b/primitives/runtime/src/testing.rs @@ -160,77 +160,21 @@ pub type DigestItem = generic::DigestItem; pub type Digest = generic::Digest; /// Block Header -#[derive(PartialEq, Eq, Clone, Serialize, Debug, Encode, Decode, Default, parity_util_mem::MallocSizeOf)] -#[serde(rename_all = "camelCase")] -#[serde(deny_unknown_fields)] -pub struct Header { - /// Parent hash - pub parent_hash: H256, - /// Block Number - pub number: u64, - /// Post-execution state trie root - pub state_root: H256, - /// Merkle root of block's extrinsics - pub extrinsics_root: H256, - /// Digest items - pub digest: Digest, -} - -impl traits::Header for Header { - type Number = u64; - type Hashing = BlakeTwo256; - type Hash = H256; - - fn number(&self) -> &Self::Number { &self.number } - fn set_number(&mut self, num: Self::Number) { self.number = num } - - fn extrinsics_root(&self) -> &Self::Hash { &self.extrinsics_root } - fn set_extrinsics_root(&mut self, root: Self::Hash) { self.extrinsics_root = root } - - fn state_root(&self) -> &Self::Hash { &self.state_root } - fn set_state_root(&mut self, root: Self::Hash) { self.state_root = root } - - fn parent_hash(&self) -> &Self::Hash { &self.parent_hash } - fn set_parent_hash(&mut self, hash: Self::Hash) { self.parent_hash = hash } - - fn digest(&self) -> &Digest { &self.digest } - fn digest_mut(&mut self) -> &mut Digest { &mut self.digest } - - fn new( - number: Self::Number, - extrinsics_root: Self::Hash, - state_root: Self::Hash, - parent_hash: Self::Hash, - digest: Digest, - ) -> Self { - Header { - number, - extrinsics_root, - state_root, - parent_hash, - digest, - } - } -} +pub type Header = generic::Header; impl Header { /// A new header with the given number and default hash for all other fields. pub fn new_from_number(number: ::Number) -> Self { Self { number, - ..Default::default() + extrinsics_root: Default::default(), + state_root: Default::default(), + parent_hash: Default::default(), + digest: Default::default(), } } } -impl<'a> Deserialize<'a> for Header { - fn deserialize>(de: D) -> Result { - let r = >::deserialize(de)?; - Decode::decode(&mut &r[..]) - .map_err(|e| DeError::custom(format!("Invalid value passed into decode: {}", e.what()))) - } -} - /// An opaque extrinsic wrapper type. 
#[derive(PartialEq, Eq, Clone, Debug, Encode, Decode, parity_util_mem::MallocSizeOf)] pub struct ExtrinsicWrapper(Xt); -- GitLab From 1e3a282ec18a99a1e5e699b50c726c284d3d88cb Mon Sep 17 00:00:00 2001 From: Cecile Tonglet Date: Fri, 3 Jul 2020 19:16:32 +0200 Subject: [PATCH 120/144] Remove polkadot companion detection from branch name (#6568) * Initial commit Forked at: 7e3cf944cf87a22036cc26f03bd859855b7ed3f1 Parent branch: origin/master * Remove polkadot companion detection from branch name Even though it was nice it was also error prone as there were no indication whatsoever on the PR that a polkadot companion branch exists. --- docs/CONTRIBUTING.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/CONTRIBUTING.adoc b/docs/CONTRIBUTING.adoc index ec747d6693..1d82a43921 100644 --- a/docs/CONTRIBUTING.adoc +++ b/docs/CONTRIBUTING.adoc @@ -69,7 +69,7 @@ To create a Polkadot companion PR: . Pull latest Polkadot master (or clone it, if you haven't yet). . Override your local cargo config to point to your local substrate (pointing to your WIP branch): place `paths = ["path/to/substrate"]` in `~/.cargo/config`. . Make the changes required and build polkadot locally. -. Submit all this as a PR against the Polkadot Repo. Link to your Polkadot PR in the _description_ of your Substrate PR as "polkadot companion: [URL]" OR use the same name for your Polkdadot branch as the Substrate branch. +. Submit all this as a PR against the Polkadot Repo. Link to your Polkadot PR in the _description_ of your Substrate PR as "polkadot companion: [URL]" . Now you should see that the `check_polkadot` CI job will build your Substrate PR agains the mentioned Polkadot branch in your PR description. . Wait for reviews on both . Once both PRs have been green lit, they can both be merged 🍻. -- GitLab From 65ab6f2e9c3281460c3316dc97e618c5275a4a05 Mon Sep 17 00:00:00 2001 From: Gavin Wood Date: Fri, 3 Jul 2020 19:29:35 +0200 Subject: [PATCH 121/144] Make the encoded-Call Vec explicitly so in metadata (#6566) --- frame/multisig/src/lib.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs index bcea34f9b3..388981cb8f 100644 --- a/frame/multisig/src/lib.rs +++ b/frame/multisig/src/lib.rs @@ -61,6 +61,8 @@ mod tests; mod benchmarking; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +/// Just a bunch of bytes, but they should decode to a valid `Call`. +pub type OpaqueCall = Vec; /// Configuration trait. pub trait Trait: frame_system::Trait { @@ -122,7 +124,7 @@ decl_storage! { hasher(twox_64_concat) T::AccountId, hasher(blake2_128_concat) [u8; 32] => Option, T::AccountId>>; - pub Calls: map hasher(identity) [u8; 32] => Option<(Vec, T::AccountId, BalanceOf)>; + pub Calls: map hasher(identity) [u8; 32] => Option<(OpaqueCall, T::AccountId, BalanceOf)>; } } @@ -224,7 +226,7 @@ mod weight_of { } enum CallOrHash { - Call(Vec, bool), + Call(OpaqueCall, bool), Hash([u8; 32]), } @@ -357,7 +359,7 @@ decl_module! { threshold: u16, other_signatories: Vec, maybe_timepoint: Option>, - call: Vec, + call: OpaqueCall, store_call: bool, max_weight: Weight, ) -> DispatchResultWithPostInfo { @@ -630,9 +632,12 @@ impl Module { /// We store `data` here because storing `call` would result in needing another `.encode`. /// /// Returns a `bool` indicating whether the data did end up being stored. 
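To illustrate the `OpaqueCall` alias introduced in this multisig patch (a hedged sketch, not part of the diff): callers still pass the SCALE-encoded call bytes, the alias only makes the encoding explicit in the generated metadata. `call` is assumed to be a valid runtime `Call`, and the remaining arguments are placeholders:

    // `OpaqueCall` is just `Vec<u8>` under a name that signals "decodes to a `Call`".
    let encoded: OpaqueCall = call.encode();
    Multisig::as_multi(
        Origin::signed(who),
        2,                  // threshold
        other_signatories,  // the remaining approvers
        None,               // no timepoint for the first approval
        encoded,
        false,              // do not store the call on-chain
        max_weight,
    )?;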
- fn store_call_and_reserve(who: T::AccountId, hash: &[u8; 32], data: Vec, other_deposit: BalanceOf) - -> DispatchResult - { + fn store_call_and_reserve( + who: T::AccountId, + hash: &[u8; 32], + data: OpaqueCall, + other_deposit: BalanceOf, + ) -> DispatchResult { ensure!(!Calls::::contains_key(hash), Error::::AlreadyStored); let deposit = other_deposit + T::DepositBase::get() + T::DepositFactor::get() * BalanceOf::::from(((data.len() + 31) / 32) as u32); -- GitLab From 6189c9f8237b57686527a13edef2fdbb6f73a019 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Fri, 3 Jul 2020 20:07:11 +0100 Subject: [PATCH 122/144] ci: fix merge on polkadot companion job (#6574) --- .maintain/gitlab/check_polkadot_companion_build.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.maintain/gitlab/check_polkadot_companion_build.sh b/.maintain/gitlab/check_polkadot_companion_build.sh index e68ce45b3f..7a1943e0bd 100755 --- a/.maintain/gitlab/check_polkadot_companion_build.sh +++ b/.maintain/gitlab/check_polkadot_companion_build.sh @@ -50,7 +50,9 @@ SUBSTRATE_PATH=$(pwd) git merge origin/master # Clone the current Polkadot master branch into ./polkadot. -git clone --depth 1 https://github.com/paritytech/polkadot.git +# NOTE: we need to pull enough commits to be able to find a common +# ancestor for successfully performing merges below. +git clone --depth 20 https://github.com/paritytech/polkadot.git cd polkadot -- GitLab From e42d046d49872dc23296d44a3bd840351fdb4546 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sat, 4 Jul 2020 11:57:50 +0200 Subject: [PATCH 123/144] pallet-evm: return Ok(()) when EVM execution fails (#6493) * pallet-evm: return Ok(()) when EVM execution fails * Bump spec version * Init test module * Add fail_call_return_ok test * Fix tests and use full match pattern Co-authored-by: Gav Wood --- frame/evm/src/lib.rs | 73 +++++++++++------- frame/evm/src/tests.rs | 169 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 213 insertions(+), 29 deletions(-) create mode 100644 frame/evm/src/tests.rs diff --git a/frame/evm/src/lib.rs b/frame/evm/src/lib.rs index eebdc66b38..f7aa51e9ff 100644 --- a/frame/evm/src/lib.rs +++ b/frame/evm/src/lib.rs @@ -21,6 +21,7 @@ #![cfg_attr(not(feature = "std"), no_std)] mod backend; +mod tests; pub use crate::backend::{Account, Log, Vicinity, Backend}; @@ -144,7 +145,7 @@ pub trait Trait: frame_system::Trait + pallet_timestamp::Trait { /// Precompiles associated with this EVM engine. type Precompiles: Precompiles; /// Chain ID of EVM. - type ChainId: Get; + type ChainId: Get; /// EVM config used in the module. fn config() -> &'static Config { @@ -201,6 +202,12 @@ decl_event! { Log(Log), /// A contract has been created at given address. Created(H160), + /// A contract was attempted to be created, but the execution failed. + CreatedFailed(H160), + /// A contract has been executed successfully with states applied. + Executed(H160), + /// A contract has been executed with errors. States are reverted with only gas fees applied. + ExecutedFailed(H160), /// A deposit has been made at a given address. BalanceDeposit(AccountId, H160, U256), /// A withdrawal has been made from a given address. @@ -220,12 +227,6 @@ decl_error! { WithdrawFailed, /// Gas price is too low. GasPriceTooLow, - /// Call failed - ExitReasonFailed, - /// Call reverted - ExitReasonRevert, - /// Call returned VM fatal error - ExitReasonFatal, /// Nonce is invalid InvalidNonce, } @@ -300,7 +301,7 @@ decl_module! 
{ let sender = ensure_signed(origin)?; let source = T::ConvertAccountId::convert_account_id(&sender); - Self::execute_call( + match Self::execute_call( source, target, input, @@ -308,7 +309,16 @@ decl_module! { gas_limit, gas_price, nonce, - ).map_err(Into::into) + )? { + ExitReason::Succeed(_) => { + Module::::deposit_event(Event::::Executed(target)); + }, + ExitReason::Error(_) | ExitReason::Revert(_) | ExitReason::Fatal(_) => { + Module::::deposit_event(Event::::ExecutedFailed(target)); + }, + } + + Ok(()) } /// Issue an EVM create operation. This is similar to a contract creation transaction in @@ -327,16 +337,22 @@ decl_module! { let sender = ensure_signed(origin)?; let source = T::ConvertAccountId::convert_account_id(&sender); - let create_address = Self::execute_create( + match Self::execute_create( source, init, value, gas_limit, gas_price, nonce - )?; + )? { + (create_address, ExitReason::Succeed(_)) => { + Module::::deposit_event(Event::::Created(create_address)); + }, + (create_address, _) => { + Module::::deposit_event(Event::::CreatedFailed(create_address)); + }, + } - Module::::deposit_event(Event::::Created(create_address)); Ok(()) } @@ -356,7 +372,7 @@ decl_module! { let sender = ensure_signed(origin)?; let source = T::ConvertAccountId::convert_account_id(&sender); - let create_address = Self::execute_create2( + match Self::execute_create2( source, init, salt, @@ -364,9 +380,15 @@ decl_module! { gas_limit, gas_price, nonce - )?; + )? { + (create_address, ExitReason::Succeed(_)) => { + Module::::deposit_event(Event::::Created(create_address)); + }, + (create_address, _) => { + Module::::deposit_event(Event::::CreatedFailed(create_address)); + }, + } - Module::::deposit_event(Event::::Created(create_address)); Ok(()) } } @@ -413,7 +435,7 @@ impl Module { gas_limit: u32, gas_price: U256, nonce: Option - ) -> Result> { + ) -> Result<(H160, ExitReason), Error> { Self::execute_evm( source, value, @@ -442,7 +464,7 @@ impl Module { gas_limit: u32, gas_price: U256, nonce: Option - ) -> Result> { + ) -> Result<(H160, ExitReason), Error> { let code_hash = H256::from_slice(Keccak256::digest(&init).as_slice()); Self::execute_evm( source, @@ -473,8 +495,8 @@ impl Module { gas_limit: u32, gas_price: U256, nonce: Option, - ) -> Result<(), Error> { - Self::execute_evm( + ) -> Result> { + Ok(Self::execute_evm( source, value, gas_limit, @@ -487,7 +509,7 @@ impl Module { input, gas_limit as usize, )), - ) + )?.1) } /// Execute an EVM operation. 
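Because the dispatchables above now return `Ok(())` even when EVM execution fails, callers that need the outcome are expected to read the new `Executed`/`ExecutedFailed` events, or to use the pallet-level helpers, which keep exposing the `ExitReason`. A hedged sketch (not part of the patch), assuming the usual placeholder arguments are in scope:

    // `execute_call` still reports the EVM outcome to Rust callers; only the
    // extrinsic-level dispatch result stopped carrying it.
    match pallet_evm::Module::<Runtime>::execute_call(
        source, target, input, value, gas_limit, gas_price, nonce,
    )? {
        ExitReason::Succeed(_) => { /* state changes were applied */ }
        // Reverts, errors and fatal traps keep the gas fee but roll state back.
        ExitReason::Error(_) | ExitReason::Revert(_) | ExitReason::Fatal(_) => {
            /* react to the failed execution */
        }
    }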
@@ -498,7 +520,7 @@ impl Module { gas_price: U256, nonce: Option, f: F, - ) -> Result> where + ) -> Result<(R, ExitReason), Error> where F: FnOnce(&mut StackExecutor>) -> (R, ExitReason), { let vicinity = Vicinity { @@ -527,19 +549,12 @@ impl Module { let (retv, reason) = f(&mut executor); - let ret = match reason { - ExitReason::Succeed(_) => Ok(retv), - ExitReason::Error(_) => Err(Error::::ExitReasonFailed), - ExitReason::Revert(_) => Err(Error::::ExitReasonRevert), - ExitReason::Fatal(_) => Err(Error::::ExitReasonFatal), - }; - let actual_fee = executor.fee(gas_price); executor.deposit(source, total_fee.saturating_sub(actual_fee)); let (values, logs) = executor.deconstruct(); backend.apply(values, logs, true); - ret + Ok((retv, reason)) } } diff --git a/frame/evm/src/tests.rs b/frame/evm/src/tests.rs new file mode 100644 index 0000000000..b1f65e10e1 --- /dev/null +++ b/frame/evm/src/tests.rs @@ -0,0 +1,169 @@ +#![cfg(test)] + +use super::*; + +use std::{str::FromStr, collections::BTreeMap}; +use frame_support::{ + assert_ok, impl_outer_origin, parameter_types, impl_outer_dispatch, +}; +use sp_core::H256; +// The testing primitives are very useful for avoiding having to work with signatures +// or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. +use sp_runtime::{ + Perbill, + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; + +impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} +} + +impl_outer_dispatch! { + pub enum OuterCall for Test where origin: Origin { + self::EVM, + } +} + +// For testing the pallet, we construct most of a mock runtime. This means +// first constructing a configuration type (`Test`) which `impl`s each of the +// configuration traits of pallets we want to use. +#[derive(Clone, Eq, PartialEq)] +pub struct Test; +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); +} +impl frame_system::Trait for Test { + type BaseCallFilter = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Call = OuterCall; + type Hashing = BlakeTwo256; + type AccountId = H256; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type BlockExecutionWeight = (); + type ExtrinsicBaseWeight = (); + type MaximumExtrinsicWeight = MaximumBlockWeight; + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); +} + +parameter_types! { + pub const ExistentialDeposit: u64 = 1; +} +impl pallet_balances::Trait for Test { + type Balance = u64; + type DustRemoval = (); + type Event = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; +} + +parameter_types! { + pub const MinimumPeriod: u64 = 1000; +} +impl pallet_timestamp::Trait for Test { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = MinimumPeriod; +} + +/// Fixed gas price of `0`. +pub struct FixedGasPrice; +impl FeeCalculator for FixedGasPrice { + fn min_gas_price() -> U256 { + // Gas price is always one token per gas. + 0.into() + } +} +parameter_types! 
{ + pub const EVMModuleId: ModuleId = ModuleId(*b"py/evmpa"); +} +impl Trait for Test { + type ChainId = SystemChainId; + type ModuleId = EVMModuleId; + type FeeCalculator = FixedGasPrice; + type ConvertAccountId = HashTruncateConvertAccountId; + type Currency = Balances; + type Event = Event; + type Precompiles = (); +} + +type System = frame_system::Module; +type Balances = pallet_balances::Module; +type EVM = Module; + +// This function basically just builds a genesis storage key/value store according to +// our desired mockup. +pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + + let mut accounts = BTreeMap::new(); + accounts.insert( + H160::from_str("1000000000000000000000000000000000000001").unwrap(), + GenesisAccount { + nonce: U256::from(1), + balance: U256::from(1000000), + storage: Default::default(), + code: vec![ + 0x00, // STOP + ], + } + ); + accounts.insert( + H160::from_str("1000000000000000000000000000000000000002").unwrap(), + GenesisAccount { + nonce: U256::from(1), + balance: U256::from(1000000), + storage: Default::default(), + code: vec![ + 0xff, // INVALID + ], + } + ); + + // We use default for brevity, but you can configure as desired if needed. + pallet_balances::GenesisConfig::::default().assimilate_storage(&mut t).unwrap(); + GenesisConfig { accounts }.assimilate_storage(&mut t).unwrap(); + t.into() +} + +#[test] +fn fail_call_return_ok() { + new_test_ext().execute_with(|| { + assert_ok!(EVM::call( + Origin::signed(H256::default()), + H160::from_str("1000000000000000000000000000000000000001").unwrap(), + Vec::new(), + U256::default(), + 1000000, + U256::default(), + None, + )); + + assert_ok!(EVM::call( + Origin::signed(H256::default()), + H160::from_str("1000000000000000000000000000000000000002").unwrap(), + Vec::new(), + U256::default(), + 1000000, + U256::default(), + None, + )); + }); +} -- GitLab From 18334ee173f0ec4b62d2ca05c5b0c1f0b05b1b17 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Sat, 4 Jul 2020 11:18:13 +0100 Subject: [PATCH 124/144] babe: report equivocations (#6362) * slots: create primitives crate for consensus slots * offences: add method to check if an offence is unknown * babe: initial equivocation reporting implementation * babe: organize imports * babe: working equivocation reporting * babe: add slot number to equivocation proof * session: move duplicate traits to session primitives * babe: move equivocation stuff to its own file * offences: fix test * session: don't have primitives depend on frame_support * babe: use opaque type for key owner proof * babe: cleanup client equivocation reporting * babe: cleanup equivocation code in pallet * babe: allow sending signed equivocation reports * node: fix compilation * fix test compilation * babe: return bool on check_equivocation_proof * babe: add test for equivocation reporting * babe: add more tests * babe: add test for validate unsigned * babe: take slot number in generate_key_ownership_proof API * babe: add benchmark for equivocation proof checking * session: add benchmark for membership proof checking * offences: fix babe benchmark * babe: add weights based on benchmark results * babe: adjust weights after benchmarking on reference hardware * babe: reorder checks in check_and_report_equivocation --- Cargo.lock | 19 + bin/node/cli/src/service.rs | 10 +- bin/node/runtime/Cargo.toml | 1 + bin/node/runtime/src/lib.rs | 41 ++- 
client/consensus/aura/src/lib.rs | 4 +- client/consensus/babe/src/lib.rs | 133 +++++-- client/consensus/babe/src/tests.rs | 12 +- client/consensus/slots/Cargo.toml | 1 + client/consensus/slots/src/aux_schema.rs | 35 +- frame/babe/Cargo.toml | 44 ++- frame/babe/src/benchmarking.rs | 108 ++++++ frame/babe/src/equivocation.rs | 271 +++++++++++++++ frame/babe/src/lib.rs | 251 ++++++++++---- frame/babe/src/mock.rs | 337 ++++++++++++++++-- frame/babe/src/tests.rs | 420 ++++++++++++++++++++++- frame/grandpa/src/equivocation.rs | 36 +- frame/grandpa/src/lib.rs | 7 +- frame/im-online/src/mock.rs | 8 +- frame/offences/benchmarking/src/lib.rs | 19 +- frame/offences/src/lib.rs | 9 + frame/offences/src/tests.rs | 71 ++++ frame/session/benchmarking/Cargo.toml | 3 + frame/session/benchmarking/src/lib.rs | 106 +++++- frame/staking/src/lib.rs | 4 + frame/support/src/lib.rs | 7 +- primitives/consensus/babe/Cargo.toml | 6 +- primitives/consensus/babe/src/digests.rs | 22 +- primitives/consensus/babe/src/lib.rs | 137 +++++++- primitives/consensus/slots/Cargo.toml | 23 ++ primitives/consensus/slots/src/lib.rs | 41 +++ primitives/core/src/lib.rs | 5 + primitives/session/src/lib.rs | 42 +++ primitives/staking/src/offence.rs | 13 +- test-utils/runtime/src/lib.rs | 52 ++- 34 files changed, 2027 insertions(+), 271 deletions(-) create mode 100644 frame/babe/src/benchmarking.rs create mode 100644 frame/babe/src/equivocation.rs create mode 100644 primitives/consensus/slots/Cargo.toml create mode 100644 primitives/consensus/slots/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index b5bbcd6954..58ea4e1077 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4009,9 +4009,15 @@ dependencies = [ name = "pallet-babe" version = "2.0.0-rc4" dependencies = [ + "frame-benchmarking", "frame-support", "frame-system", + "pallet-authorship", + "pallet-balances", + "pallet-offences", "pallet-session", + "pallet-staking", + "pallet-staking-reward-curve", "pallet-timestamp", "parity-scale-codec", "serde", @@ -4022,6 +4028,7 @@ dependencies = [ "sp-inherents", "sp-io", "sp-runtime", + "sp-session", "sp-staking", "sp-std", "sp-timestamp", @@ -4554,10 +4561,12 @@ dependencies = [ "pallet-staking-reward-curve", "pallet-timestamp", "parity-scale-codec", + "rand 0.7.3", "serde", "sp-core", "sp-io", "sp-runtime", + "sp-session", "sp-std", ] @@ -6343,6 +6352,7 @@ dependencies = [ "sp-application-crypto", "sp-blockchain", "sp-consensus", + "sp-consensus-slots", "sp-core", "sp-inherents", "sp-runtime", @@ -7568,6 +7578,7 @@ dependencies = [ "sp-api", "sp-application-crypto", "sp-consensus", + "sp-consensus-slots", "sp-consensus-vrf", "sp-core", "sp-inherents", @@ -7587,6 +7598,14 @@ dependencies = [ "sp-std", ] +[[package]] +name = "sp-consensus-slots" +version = "0.8.0-rc4" +dependencies = [ + "parity-scale-codec", + "sp-runtime", +] + [[package]] name = "sp-consensus-vrf" version = "0.8.0-rc4" diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 9707e3d8ca..632092cdaa 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -83,7 +83,7 @@ macro_rules! new_full_start { let (grandpa_block_import, grandpa_link) = grandpa::block_import( client.clone(), &(client.clone() as Arc<_>), - select_chain, + select_chain.clone(), )?; let justification_import = grandpa_block_import.clone(); @@ -99,6 +99,7 @@ macro_rules! 
new_full_start { Some(Box::new(justification_import)), None, client, + select_chain, inherent_data_providers.clone(), spawn_task_handle, prometheus_registry, @@ -367,14 +368,18 @@ pub fn new_light_base(config: Configuration) -> Result<( client, backend, fetcher, - _select_chain, + mut select_chain, _tx_pool, spawn_task_handle, registry, | { + let select_chain = select_chain.take() + .ok_or_else(|| sc_service::Error::SelectChainRequired)?; + let fetch_checker = fetcher .map(|fetcher| fetcher.checker().clone()) .ok_or_else(|| "Trying to start light import queue without active fetch checker")?; + let grandpa_block_import = grandpa::light_block_import( client.clone(), backend, @@ -398,6 +403,7 @@ pub fn new_light_base(config: Configuration) -> Result<( None, Some(Box::new(finality_proof_import)), client.clone(), + select_chain, inherent_data_providers.clone(), spawn_task_handle, registry, diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 568b1afb5e..1d29a592c4 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -149,6 +149,7 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "pallet-babe/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-collective/runtime-benchmarks", "pallet-democracy/runtime-benchmarks", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 7bec203f8c..70d001d62c 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -270,6 +270,21 @@ impl pallet_babe::Trait for Runtime { type EpochDuration = EpochDuration; type ExpectedBlockTime = ExpectedBlockTime; type EpochChangeTrigger = pallet_babe::ExternalTrigger; + + type KeyOwnerProofSystem = Historical; + + type KeyOwnerProof = >::Proof; + + type KeyOwnerIdentification = >::IdentificationTuple; + + type HandleEquivocation = + pallet_babe::EquivocationHandler; } parameter_types! { @@ -808,7 +823,7 @@ construct_runtime!( { System: frame_system::{Module, Call, Config, Storage, Event}, Utility: pallet_utility::{Module, Call, Event}, - Babe: pallet_babe::{Module, Call, Storage, Config, Inherent(Timestamp)}, + Babe: pallet_babe::{Module, Call, Storage, Config, Inherent(Timestamp), ValidateUnsigned}, Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, Authorship: pallet_authorship::{Module, Call, Storage, Inherent}, Indices: pallet_indices::{Module, Call, Storage, Config, Event}, @@ -985,6 +1000,29 @@ impl_runtime_apis! { fn current_epoch_start() -> sp_consensus_babe::SlotNumber { Babe::current_epoch_start() } + + fn generate_key_ownership_proof( + _slot_number: sp_consensus_babe::SlotNumber, + authority_id: sp_consensus_babe::AuthorityId, + ) -> Option { + use codec::Encode; + + Historical::prove((sp_consensus_babe::KEY_TYPE, authority_id)) + .map(|p| p.encode()) + .map(sp_consensus_babe::OpaqueKeyOwnershipProof::new) + } + + fn submit_report_equivocation_unsigned_extrinsic( + equivocation_proof: sp_consensus_babe::EquivocationProof<::Header>, + key_owner_proof: sp_consensus_babe::OpaqueKeyOwnershipProof, + ) -> Option<()> { + let key_owner_proof = key_owner_proof.decode()?; + + Babe::submit_unsigned_equivocation_report( + equivocation_proof, + key_owner_proof, + ) + } } impl sp_authority_discovery::AuthorityDiscoveryApi for Runtime { @@ -1099,6 +1137,7 @@ impl_runtime_apis! 
{ let mut batches = Vec::::new(); let params = (&pallet, &benchmark, &lowest_range_values, &highest_range_values, &steps, repeat, &whitelist); + add_benchmark!(params, batches, b"babe", Babe); add_benchmark!(params, batches, b"balances", Balances); add_benchmark!(params, batches, b"collective", Council); add_benchmark!(params, batches, b"democracy", Democracy); diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 8b30720d0b..19bc3bae6c 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -479,8 +479,8 @@ fn check_header( info!( "Slot author is equivocating at slot {} with headers {:?} and {:?}", slot_num, - equivocation_proof.fst_header().hash(), - equivocation_proof.snd_header().hash(), + equivocation_proof.first_header.hash(), + equivocation_proof.second_header.hash(), ); } diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 961b0382c5..af684499ce 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -720,27 +720,29 @@ impl BabeLink { } /// A verifier for Babe blocks. -pub struct BabeVerifier { +pub struct BabeVerifier { client: Arc, + select_chain: SelectChain, inherent_data_providers: sp_inherents::InherentDataProviders, config: Config, epoch_changes: SharedEpochChanges, time_source: TimeSource, } -impl BabeVerifier - where - Block: BlockT, - Client: HeaderBackend + HeaderMetadata + ProvideRuntimeApi, - Client::Api: BlockBuilderApi, +impl BabeVerifier +where + Block: BlockT, + Client: AuxStore + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, + Client::Api: BlockBuilderApi + + BabeApi, + SelectChain: sp_consensus::SelectChain, { fn check_inherents( &self, block: Block, block_id: BlockId, inherent_data: InherentData, - ) -> Result<(), Error> - { + ) -> Result<(), Error> { let inherent_res = self.client.runtime_api().check_inherents( &block_id, block, @@ -757,13 +759,95 @@ impl BabeVerifier Ok(()) } } + + fn check_and_report_equivocation( + &self, + slot_now: SlotNumber, + slot: SlotNumber, + header: &Block::Header, + author: &AuthorityId, + origin: &BlockOrigin, + ) -> Result<(), Error> { + // don't report any equivocations during initial sync + // as they are most likely stale. + if *origin == BlockOrigin::NetworkInitialSync { + return Ok(()); + } + + // check if authorship of this header is an equivocation and return a proof if so. + let equivocation_proof = + match check_equivocation(&*self.client, slot_now, slot, header, author) + .map_err(Error::Client)? + { + Some(proof) => proof, + None => return Ok(()), + }; + + info!( + "Slot author {:?} is equivocating at slot {} with headers {:?} and {:?}", + author, + slot, + equivocation_proof.first_header.hash(), + equivocation_proof.second_header.hash(), + ); + + // get the best block on which we will build and send the equivocation report. + let best_id = self + .select_chain + .best_chain() + .map(|h| BlockId::Hash(h.hash())) + .map_err(|e| Error::Client(e.into()))?; + + // generate a key ownership proof. we start by trying to generate the + // key owernship proof at the parent of the equivocating header, this + // will make sure that proof generation is successful since it happens + // during the on-going session (i.e. session keys are available in the + // state to be able to generate the proof). this might fail if the + // equivocation happens on the first block of the session, in which case + // its parent would be on the previous session. 
if generation on the + // parent header fails we try with best block as well. + let generate_key_owner_proof = |block_id: &BlockId| { + self.client + .runtime_api() + .generate_key_ownership_proof(block_id, slot, equivocation_proof.offender.clone()) + .map_err(Error::Client) + }; + + let parent_id = BlockId::Hash(*header.parent_hash()); + let key_owner_proof = match generate_key_owner_proof(&parent_id)? { + Some(proof) => proof, + None => match generate_key_owner_proof(&best_id)? { + Some(proof) => proof, + None => { + debug!(target: "babe", "Equivocation offender is not part of the authority set."); + return Ok(()); + } + }, + }; + + // submit equivocation report at best block. + self.client + .runtime_api() + .submit_report_equivocation_unsigned_extrinsic( + &best_id, + equivocation_proof, + key_owner_proof, + ) + .map_err(Error::Client)?; + + info!(target: "babe", "Submitted equivocation report for author {:?}", author); + + Ok(()) + } } -impl Verifier for BabeVerifier where +impl Verifier for BabeVerifier +where Block: BlockT, Client: HeaderMetadata + HeaderBackend + ProvideRuntimeApi - + Send + Sync + AuxStore + ProvideCache, + + Send + Sync + AuxStore + ProvideCache, Client::Api: BlockBuilderApi + BabeApi, + SelectChain: sp_consensus::SelectChain, { fn verify( &mut self, @@ -824,28 +908,18 @@ impl Verifier for BabeVerifier where CheckedHeader::Checked(pre_header, verified_info) => { let babe_pre_digest = verified_info.pre_digest.as_babe_pre_digest() .expect("check_header always returns a pre-digest digest item; qed"); - let slot_number = babe_pre_digest.slot_number(); - let author = verified_info.author; - // the header is valid but let's check if there was something else already - // proposed at the same slot by the given author - if let Some(equivocation_proof) = check_equivocation( - &*self.client, + // proposed at the same slot by the given author. if there was, we will + // report the equivocation to the runtime. + self.check_and_report_equivocation( slot_now, - babe_pre_digest.slot_number(), + slot_number, &header, - &author, - ).map_err(|e| e.to_string())? { - info!( - "Slot author {:?} is equivocating at slot {} with headers {:?} and {:?}", - author, - babe_pre_digest.slot_number(), - equivocation_proof.fst_header().hash(), - equivocation_proof.snd_header().hash(), - ); - } + &verified_info.author, + &origin, + )?; // if the body is passed through, we need to use the runtime // to check that the internally-set timestamp in the inherents @@ -1284,12 +1358,13 @@ pub fn block_import( /// /// The block import object provided must be the `BabeBlockImport` or a wrapper /// of it, otherwise crucial import logic will be omitted. 
-pub fn import_queue( +pub fn import_queue( babe_link: BabeLink, block_import: Inner, justification_import: Option>, finality_proof_import: Option>, client: Arc, + select_chain: SelectChain, inherent_data_providers: InherentDataProviders, spawner: &impl sp_core::traits::SpawnNamed, registry: Option<&Registry>, @@ -1299,11 +1374,13 @@ pub fn import_queue( Client: ProvideRuntimeApi + ProvideCache + Send + Sync + AuxStore + 'static, Client: HeaderBackend + HeaderMetadata, Client::Api: BlockBuilderApi + BabeApi + ApiExt, + SelectChain: sp_consensus::SelectChain + 'static, { register_babe_inherent_data_provider(&inherent_data_providers, babe_link.config.slot_duration)?; let verifier = BabeVerifier { client, + select_chain, inherent_data_providers, config: babe_link.config, epoch_changes: babe_link.epoch_changes, diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 1caed18c17..958d7845ed 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -214,8 +214,13 @@ pub struct BabeTestNet { type TestHeader = ::Header; type TestExtrinsic = ::Extrinsic; +type TestSelectChain = substrate_test_runtime_client::LongestChain< + substrate_test_runtime_client::Backend, + TestBlock, +>; + pub struct TestVerifier { - inner: BabeVerifier, + inner: BabeVerifier, mutator: Mutator, } @@ -297,15 +302,20 @@ impl TestNetFactory for BabeTestNet { ) -> Self::Verifier { + use substrate_test_runtime_client::DefaultTestClientBuilderExt; + let client = client.as_full().expect("only full clients are used in test"); trace!(target: "babe", "Creating a verifier"); // ensure block import and verifier are linked correctly. let data = maybe_link.as_ref().expect("babe link always provided to verifier instantiation"); + let (_, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); + TestVerifier { inner: BabeVerifier { client: client.clone(), + select_chain: longest_chain, inherent_data_providers: data.inherent_data_providers.clone(), config: data.link.config.clone(), epoch_changes: data.link.epoch_changes.clone(), diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index 80eb83cca5..39a4a9d473 100644 --- a/client/consensus/slots/Cargo.toml +++ b/client/consensus/slots/Cargo.toml @@ -18,6 +18,7 @@ sc-client-api = { version = "2.0.0-rc4", path = "../../api" } sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } sp-application-crypto = { version = "2.0.0-rc4", path = "../../../primitives/application-crypto" } sp-blockchain = { version = "2.0.0-rc4", path = "../../../primitives/blockchain" } +sp-consensus-slots = { version = "0.8.0-rc4", path = "../../../primitives/consensus/slots" } sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } sp-state-machine = { version = "0.8.0-rc4", path = "../../../primitives/state-machine" } sp-api = { version = "2.0.0-rc4", path = "../../../primitives/api" } diff --git a/client/consensus/slots/src/aux_schema.rs b/client/consensus/slots/src/aux_schema.rs index d54190ca07..1f1fe37068 100644 --- a/client/consensus/slots/src/aux_schema.rs +++ b/client/consensus/slots/src/aux_schema.rs @@ -19,6 +19,7 @@ use codec::{Encode, Decode}; use sc_client_api::backend::AuxStore; use sp_blockchain::{Result as ClientResult, Error as ClientError}; +use sp_consensus_slots::EquivocationProof; use sp_runtime::traits::Header; const SLOT_HEADER_MAP_KEY: &[u8] = b"slot_header_map"; @@ -44,31 +45,6 @@ fn load_decode(backend: &C, key: &[u8]) -> ClientResult> } } -/// 
Represents an equivocation proof. -#[derive(Debug, Clone)] -pub struct EquivocationProof { - slot: u64, - fst_header: H, - snd_header: H, -} - -impl EquivocationProof { - /// Get the slot number where the equivocation happened. - pub fn slot(&self) -> u64 { - self.slot - } - - /// Get the first header involved in the equivocation. - pub fn fst_header(&self) -> &H { - &self.fst_header - } - - /// Get the second header involved in the equivocation. - pub fn snd_header(&self) -> &H { - &self.snd_header - } -} - /// Checks if the header is an equivocation and returns the proof in that case. /// /// Note: it detects equivocations only when slot_now - slot <= MAX_SLOT_CAPACITY. @@ -78,7 +54,7 @@ pub fn check_equivocation( slot: u64, header: &H, signer: &P, -) -> ClientResult>> +) -> ClientResult>> where H: Header, C: AuxStore, @@ -114,9 +90,10 @@ pub fn check_equivocation( // 2) with different hash if header.hash() != prev_header.hash() { return Ok(Some(EquivocationProof { - slot, // 3) and mentioning the same slot. - fst_header: prev_header.clone(), - snd_header: header.clone(), + slot_number: slot, + offender: signer.clone(), + first_header: prev_header.clone(), + second_header: header.clone(), })); } else { // We don't need to continue in case of duplicated header, diff --git a/frame/babe/Cargo.toml b/frame/babe/Cargo.toml index 845acce5f2..e29965ee46 100644 --- a/frame/babe/Cargo.toml +++ b/frame/babe/Cargo.toml @@ -13,40 +13,52 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -serde = { version = "1.0.101", optional = true } -sp-inherents = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/inherents" } -sp-application-crypto = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/application-crypto" } -sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/staking" } +frame-benchmarking = { version = "2.0.0-rc4", default-features = false, path = "../benchmarking", optional = true } frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } -pallet-timestamp = { version = "2.0.0-rc4", default-features = false, path = "../timestamp" } -sp-timestamp = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/timestamp" } +pallet-authorship = { version = "2.0.0-rc4", default-features = false, path = "../authorship" } pallet-session = { version = "2.0.0-rc4", default-features = false, path = "../session" } +pallet-timestamp = { version = "2.0.0-rc4", default-features = false, path = "../timestamp" } +serde = { version = "1.0.101", optional = true } +sp-application-crypto = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/application-crypto" } sp-consensus-babe = { version = "0.8.0-rc4", default-features = false, path = "../../primitives/consensus/babe" } sp-consensus-vrf = { version = "0.8.0-rc4", default-features = false, path = "../../primitives/consensus/vrf" } +sp-inherents = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/inherents" } sp-io = { version = "2.0.0-rc4", default-features = false, path = 
"../../primitives/io" } +sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/runtime" } +sp-session = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/session" } +sp-staking = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/staking" } +sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } +sp-timestamp = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/timestamp" } [dev-dependencies] +frame-benchmarking = { version = "2.0.0-rc4", path = "../benchmarking" } +pallet-balances = { version = "2.0.0-rc4", path = "../balances" } +pallet-offences = { version = "2.0.0-rc4", path = "../offences" } +pallet-staking = { version = "2.0.0-rc4", path = "../staking" } +pallet-staking-reward-curve = { version = "2.0.0-rc4", path = "../staking/reward-curve" } sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } [features] default = ["std"] std = [ "codec/std", - "serde", - "sp-std/std", - "sp-application-crypto/std", + "frame-benchmarking/std", "frame-support/std", - "sp-runtime/std", - "sp-staking/std", "frame-system/std", + "pallet-authorship/std", + "pallet-session/std", "pallet-timestamp/std", - "sp-timestamp/std", - "sp-inherents/std", + "serde", + "sp-application-crypto/std", "sp-consensus-babe/std", "sp-consensus-vrf/std", - "pallet-session/std", + "sp-inherents/std", "sp-io/std", + "sp-runtime/std", + "sp-session/std", + "sp-staking/std", + "sp-std/std", + "sp-timestamp/std", ] +runtime-benchmarks = ["frame-benchmarking"] diff --git a/frame/babe/src/benchmarking.rs b/frame/babe/src/benchmarking.rs new file mode 100644 index 0000000000..e168c1b93b --- /dev/null +++ b/frame/babe/src/benchmarking.rs @@ -0,0 +1,108 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarks for the BABE Pallet. + +#![cfg_attr(not(feature = "std"), no_std)] + +use super::*; +use frame_benchmarking::benchmarks; + +type Header = sp_runtime::generic::Header; + +benchmarks! { + _ { } + + check_equivocation_proof { + let x in 0 .. 1; + + // NOTE: generated with the test below `test_generate_equivocation_report_blob`. + // the output is not deterministic since keys are generated randomly (and therefore + // signature content changes). it should not affect the benchmark. + // with the current benchmark setup it is not possible to generate this programatically + // from the benchmark setup. 
+ const EQUIVOCATION_PROOF_BLOB: [u8; 416] = [ + 222, 241, 46, 66, 243, 228, 135, 233, 177, 64, 149, 170, 141, 92, 193, 106, 51, 73, 31, + 27, 80, 218, 220, 248, 129, 29, 20, 128, 243, 250, 134, 39, 11, 0, 0, 0, 0, 0, 0, 0, + 158, 4, 7, 240, 67, 153, 134, 190, 251, 196, 229, 95, 136, 165, 234, 228, 255, 18, 2, + 187, 76, 125, 108, 50, 67, 33, 196, 108, 38, 115, 179, 86, 40, 36, 27, 5, 105, 58, 228, + 94, 198, 65, 212, 218, 213, 61, 170, 21, 51, 249, 182, 121, 101, 91, 204, 25, 31, 87, + 219, 208, 43, 119, 211, 185, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 6, 66, 65, 66, 69, 52, 2, 0, 0, 0, 0, 11, + 0, 0, 0, 0, 0, 0, 0, 5, 66, 65, 66, 69, 1, 1, 188, 192, 217, 91, 138, 78, 217, 80, 8, + 29, 140, 55, 242, 210, 170, 184, 73, 98, 135, 212, 236, 209, 115, 52, 200, 79, 175, + 172, 242, 161, 199, 47, 236, 93, 101, 95, 43, 34, 141, 16, 247, 220, 33, 59, 31, 197, + 27, 7, 196, 62, 12, 238, 236, 124, 136, 191, 29, 36, 22, 238, 242, 202, 57, 139, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 40, 23, 175, 153, 83, 6, 33, 65, 123, 51, 80, 223, 126, 186, 226, 225, 240, 105, 28, + 169, 9, 54, 11, 138, 46, 194, 201, 250, 48, 242, 125, 117, 116, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 6, 66, 65, + 66, 69, 52, 2, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 5, 66, 65, 66, 69, 1, 1, 142, 12, + 124, 11, 167, 227, 103, 88, 78, 23, 228, 33, 96, 41, 207, 183, 227, 189, 114, 70, 254, + 30, 128, 243, 233, 83, 214, 45, 74, 182, 120, 119, 64, 243, 219, 119, 63, 240, 205, + 123, 231, 82, 205, 174, 143, 70, 2, 86, 182, 20, 16, 141, 145, 91, 116, 195, 58, 223, + 175, 145, 255, 7, 121, 133 + ]; + + let equivocation_proof1: sp_consensus_babe::EquivocationProof
<Header> =
+			Decode::decode(&mut &EQUIVOCATION_PROOF_BLOB[..]).unwrap();
+
+		let equivocation_proof2 = equivocation_proof1.clone();
+	}: {
+		sp_consensus_babe::check_equivocation_proof::<Header>
<Header>(equivocation_proof1);
+	} verify {
+		assert!(sp_consensus_babe::check_equivocation_proof::<Header>
(equivocation_proof2)); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::*; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext(3).execute_with(|| { + assert_ok!(test_benchmark_check_equivocation_proof::()); + }) + } + + #[test] + fn test_generate_equivocation_report_blob() { + let (pairs, mut ext) = new_test_ext_with_pairs(3); + + let offending_authority_index = 0; + let offending_authority_pair = &pairs[0]; + + ext.execute_with(|| { + start_era(1); + + let equivocation_proof = generate_equivocation_proof( + offending_authority_index, + offending_authority_pair, + CurrentSlot::get() + 1, + ); + + println!("equivocation_proof: {:?}", equivocation_proof); + println!( + "equivocation_proof.encode(): {:?}", + equivocation_proof.encode() + ); + }); + } +} diff --git a/frame/babe/src/equivocation.rs b/frame/babe/src/equivocation.rs new file mode 100644 index 0000000000..322dff92f2 --- /dev/null +++ b/frame/babe/src/equivocation.rs @@ -0,0 +1,271 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! +//! An opt-in utility module for reporting equivocations. +//! +//! This module defines an offence type for BABE equivocations +//! and some utility traits to wire together: +//! - a system for reporting offences; +//! - a system for submitting unsigned transactions; +//! - a way to get the current block author; +//! +//! These can be used in an offchain context in order to submit equivocation +//! reporting extrinsics (from the client that's import BABE blocks). +//! And in a runtime context, so that the BABE pallet can validate the +//! equivocation proofs in the extrinsic and report the offences. +//! +//! IMPORTANT: +//! When using this module for enabling equivocation reporting it is required +//! that the `ValidateUnsigned` for the BABE pallet is used in the runtime +//! definition. +//! + +use frame_support::{debug, traits::KeyOwnerProofSystem}; +use sp_consensus_babe::{EquivocationProof, SlotNumber}; +use sp_runtime::transaction_validity::{ + InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, + TransactionValidityError, ValidTransaction, +}; +use sp_runtime::{DispatchResult, Perbill}; +use sp_staking::{ + offence::{Kind, Offence, OffenceError, ReportOffence}, + SessionIndex, +}; +use sp_std::prelude::*; + +use crate::{Call, Module, Trait}; + +/// A trait with utility methods for handling equivocation reports in BABE. +/// The trait provides methods for reporting an offence triggered by a valid +/// equivocation report, checking the current block author (to declare as the +/// reporter), and also for creating and submitting equivocation report +/// extrinsics (useful only in offchain context). +pub trait HandleEquivocation { + /// Report an offence proved by the given reporters. 
+ fn report_offence( + reporters: Vec, + offence: BabeEquivocationOffence, + ) -> Result<(), OffenceError>; + + /// Returns true if all of the offenders at the given time slot have already been reported. + fn is_known_offence(offenders: &[T::KeyOwnerIdentification], time_slot: &SlotNumber) -> bool; + + /// Create and dispatch an equivocation report extrinsic. + fn submit_unsigned_equivocation_report( + equivocation_proof: EquivocationProof, + key_owner_proof: T::KeyOwnerProof, + ) -> DispatchResult; + + /// Fetch the current block author id, if defined. + fn block_author() -> Option; +} + +impl HandleEquivocation for () { + fn report_offence( + _reporters: Vec, + _offence: BabeEquivocationOffence, + ) -> Result<(), OffenceError> { + Ok(()) + } + + fn is_known_offence(_offenders: &[T::KeyOwnerIdentification], _time_slot: &SlotNumber) -> bool { + true + } + + fn submit_unsigned_equivocation_report( + _equivocation_proof: EquivocationProof, + _key_owner_proof: T::KeyOwnerProof, + ) -> DispatchResult { + Ok(()) + } + + fn block_author() -> Option { + None + } +} + +/// Generic equivocation handler. This type implements `HandleEquivocation` +/// using existing subsystems that are part of frame (type bounds described +/// below) and will dispatch to them directly, it's only purpose is to wire all +/// subsystems together. +pub struct EquivocationHandler { + _phantom: sp_std::marker::PhantomData<(I, R)>, +} + +impl Default for EquivocationHandler { + fn default() -> Self { + Self { + _phantom: Default::default(), + } + } +} + +impl HandleEquivocation for EquivocationHandler +where + // We use the authorship pallet to fetch the current block author and use + // `offchain::SendTransactionTypes` for unsigned extrinsic creation and + // submission. + T: Trait + pallet_authorship::Trait + frame_system::offchain::SendTransactionTypes>, + // A system for reporting offences after valid equivocation reports are + // processed. + R: ReportOffence< + T::AccountId, + T::KeyOwnerIdentification, + BabeEquivocationOffence, + >, +{ + fn report_offence( + reporters: Vec, + offence: BabeEquivocationOffence, + ) -> Result<(), OffenceError> { + R::report_offence(reporters, offence) + } + + fn is_known_offence(offenders: &[T::KeyOwnerIdentification], time_slot: &SlotNumber) -> bool { + R::is_known_offence(offenders, time_slot) + } + + fn submit_unsigned_equivocation_report( + equivocation_proof: EquivocationProof, + key_owner_proof: T::KeyOwnerProof, + ) -> DispatchResult { + use frame_system::offchain::SubmitTransaction; + + let call = Call::report_equivocation_unsigned(equivocation_proof, key_owner_proof); + + match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { + Ok(()) => debug::info!("Submitted BABE equivocation report."), + Err(e) => debug::error!("Error submitting equivocation report: {:?}", e), + } + + Ok(()) + } + + fn block_author() -> Option { + Some(>::author()) + } +} + +/// A `ValidateUnsigned` implementation that restricts calls to `report_equivocation_unsigned` +/// to local calls (i.e. extrinsics generated on this node) or that already in a block. This +/// guarantees that only block authors can include unsigned equivocation reports. 
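Before the `ValidateUnsigned` implementation below, it may help to see how a runtime would wire these subsystems together. This is only a sketch that mirrors the mock runtime later in this patch: `Runtime`, `Historical` and `Offences` are assumed names for the concrete runtime, its session-historical pallet instance and its offences pallet instance, and imports plus `parameter_types!` definitions are elided.

impl pallet_babe::Trait for Runtime {
	type EpochDuration = EpochDuration;
	type ExpectedBlockTime = ExpectedBlockTime;
	type EpochChangeTrigger = pallet_babe::ExternalTrigger;

	// Key ownership proofs are generated and checked by the
	// session-historical pallet.
	type KeyOwnerProofSystem = Historical;
	type KeyOwnerProof = <Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(
		KeyTypeId,
		pallet_babe::AuthorityId,
	)>>::Proof;
	type KeyOwnerIdentification = <Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(
		KeyTypeId,
		pallet_babe::AuthorityId,
	)>>::IdentificationTuple;

	// Setting this to `()` would disable reporting; `EquivocationHandler`
	// forwards validated reports to the offences pallet and submits the
	// unsigned report extrinsics from an offchain context.
	type HandleEquivocation =
		pallet_babe::EquivocationHandler<Self::KeyOwnerIdentification, Offences>;
}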
+impl frame_support::unsigned::ValidateUnsigned for Module { + type Call = Call; + fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { + if let Call::report_equivocation_unsigned(equivocation_proof, _) = call { + // discard equivocation report not coming from the local node + match source { + TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ } + _ => { + debug::warn!( + target: "babe", + "rejecting unsigned report equivocation transaction because it is not local/in-block." + ); + + return InvalidTransaction::Call.into(); + } + } + + ValidTransaction::with_tag_prefix("BabeEquivocation") + // We assign the maximum priority for any equivocation report. + .priority(TransactionPriority::max_value()) + // Only one equivocation report for the same offender at the same slot. + .and_provides(( + equivocation_proof.offender.clone(), + equivocation_proof.slot_number, + )) + // We don't propagate this. This can never be included on a remote node. + .propagate(false) + .build() + } else { + InvalidTransaction::Call.into() + } + } + + fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { + if let Call::report_equivocation_unsigned(equivocation_proof, key_owner_proof) = call { + // check the membership proof to extract the offender's id + let key = ( + sp_consensus_babe::KEY_TYPE, + equivocation_proof.offender.clone(), + ); + + let offender = T::KeyOwnerProofSystem::check_proof(key, key_owner_proof.clone()) + .ok_or(InvalidTransaction::BadProof)?; + + // check if the offence has already been reported, + // and if so then we can discard the report. + let is_known_offence = T::HandleEquivocation::is_known_offence( + &[offender], + &equivocation_proof.slot_number, + ); + + if is_known_offence { + Err(InvalidTransaction::Stale.into()) + } else { + Ok(()) + } + } else { + Err(InvalidTransaction::Call.into()) + } + } +} + +/// A BABE equivocation offence report. +/// +/// When a validator released two or more blocks at the same slot. +pub struct BabeEquivocationOffence { + /// A babe slot number in which this incident happened. + pub slot: SlotNumber, + /// The session index in which the incident happened. + pub session_index: SessionIndex, + /// The size of the validator set at the time of the offence. + pub validator_set_count: u32, + /// The authority that produced the equivocation. 
+ pub offender: FullIdentification, +} + +impl Offence + for BabeEquivocationOffence +{ + const ID: Kind = *b"babe:equivocatio"; + type TimeSlot = SlotNumber; + + fn offenders(&self) -> Vec { + vec![self.offender.clone()] + } + + fn session_index(&self) -> SessionIndex { + self.session_index + } + + fn validator_set_count(&self) -> u32 { + self.validator_set_count + } + + fn time_slot(&self) -> Self::TimeSlot { + self.slot + } + + fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill { + // the formula is min((3k / n)^2, 1) + let x = Perbill::from_rational_approximation(3 * offenders_count, validator_set_count); + // _ ^ 2 + x.square() + } +} diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index 9142173932..f80ac18643 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -21,37 +21,44 @@ #![cfg_attr(not(feature = "std"), no_std)] #![warn(unused_must_use, unsafe_code, unused_variables, unused_must_use)] -use pallet_timestamp; - -use sp_std::{result, prelude::*}; +use codec::{Decode, Encode}; use frame_support::{ - decl_storage, decl_module, traits::{FindAuthor, Get, Randomness as RandomnessT}, + decl_error, decl_module, decl_storage, + traits::{FindAuthor, Get, KeyOwnerProofSystem, Randomness as RandomnessT}, weights::Weight, + Parameter, }; -use sp_timestamp::OnTimestampSet; -use sp_runtime::{generic::DigestItem, ConsensusEngineId, Perbill}; -use sp_runtime::traits::{IsMember, SaturatedConversion, Saturating, Hash, One}; -use sp_staking::{ - SessionIndex, - offence::{Offence, Kind}, -}; +use frame_system::{ensure_none, ensure_signed}; use sp_application_crypto::Public; +use sp_runtime::{ + generic::DigestItem, + traits::{Hash, IsMember, One, SaturatedConversion, Saturating}, + ConsensusEngineId, KeyTypeId, +}; +use sp_session::{GetSessionNumber, GetValidatorCount}; +use sp_std::{prelude::*, result}; +use sp_timestamp::OnTimestampSet; -use codec::{Encode, Decode}; -use sp_inherents::{InherentIdentifier, InherentData, ProvideInherent, MakeFatalError}; use sp_consensus_babe::{ - BABE_ENGINE_ID, ConsensusLog, BabeAuthorityWeight, SlotNumber, - inherents::{INHERENT_IDENTIFIER, BabeInherentData}, - digests::{NextEpochDescriptor, NextConfigDescriptor, PreDigest}, + digests::{NextConfigDescriptor, NextEpochDescriptor, PreDigest}, + inherents::{BabeInherentData, INHERENT_IDENTIFIER}, + BabeAuthorityWeight, ConsensusLog, EquivocationProof, SlotNumber, BABE_ENGINE_ID, }; use sp_consensus_vrf::schnorrkel; -pub use sp_consensus_babe::{AuthorityId, VRF_OUTPUT_LENGTH, RANDOMNESS_LENGTH, PUBLIC_KEY_LENGTH}; +use sp_inherents::{InherentData, InherentIdentifier, MakeFatalError, ProvideInherent}; -#[cfg(all(feature = "std", test))] -mod tests; +pub use sp_consensus_babe::{AuthorityId, PUBLIC_KEY_LENGTH, RANDOMNESS_LENGTH, VRF_OUTPUT_LENGTH}; +mod equivocation; + +#[cfg(any(feature = "runtime-benchmarks", test))] +mod benchmarking; #[cfg(all(feature = "std", test))] mod mock; +#[cfg(all(feature = "std", test))] +mod tests; + +pub use equivocation::{BabeEquivocationOffence, EquivocationHandler, HandleEquivocation}; pub trait Trait: pallet_timestamp::Trait { /// The amount of time, in slots, that each epoch should last. @@ -70,6 +77,30 @@ pub trait Trait: pallet_timestamp::Trait { /// Typically, the `ExternalTrigger` type should be used. An internal trigger should only be used /// when no other module is responsible for changing authority set. type EpochChangeTrigger: EpochChangeTrigger; + + /// The proof of key ownership, used for validating equivocation reports. 
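Since the `slash_fraction` formula above (min((3k / n)^2, 1)) is easy to misread, here is a small worked example, illustrative only and not part of the pallet: with one equivocating validator in a set of fifty, the fraction is (3 * 1 / 50)^2 = 0.36% of the offender's exposure, and it saturates at 100% once 3k >= n.

use sp_runtime::Perbill;

fn example_slash_fraction() -> Perbill {
	let offenders_count = 1u32;      // k
	let validator_set_count = 50u32; // n
	// 3k / n = 6% (saturates at 100% when 3k >= n)
	let x = Perbill::from_rational_approximation(3 * offenders_count, validator_set_count);
	// (3k / n)^2 = 0.36%
	x.square()
}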
+ /// The proof must include the session index and validator count of the + /// session at which the equivocation occurred. + type KeyOwnerProof: Parameter + GetSessionNumber + GetValidatorCount; + + /// The identification of a key owner, used when reporting equivocations. + type KeyOwnerIdentification: Parameter; + + /// A system for proving ownership of keys, i.e. that a given key was part + /// of a validator set, needed for validating equivocation reports. + type KeyOwnerProofSystem: KeyOwnerProofSystem< + (KeyTypeId, AuthorityId), + Proof = Self::KeyOwnerProof, + IdentificationTuple = Self::KeyOwnerIdentification, + >; + + /// The equivocation handling subsystem, defines methods to report an + /// offence (after the equivocation has been validated) and for submitting a + /// transaction to report an equivocation (from an offchain context). + /// NOTE: when enabling equivocation handling (i.e. this type isn't set to + /// `()`) you must use this pallet's `ValidateUnsigned` in the runtime + /// definition. + type HandleEquivocation: HandleEquivocation; } /// Trigger an epoch change, if any should take place. @@ -106,6 +137,17 @@ const UNDER_CONSTRUCTION_SEGMENT_LENGTH: usize = 256; type MaybeRandomness = Option; +decl_error! { + pub enum Error for Module { + /// An equivocation proof provided as part of an equivocation report is invalid. + InvalidEquivocationProof, + /// A key ownership proof provided as part of an equivocation report is invalid. + InvalidKeyOwnershipProof, + /// A given equivocation report is valid but already previously reported. + DuplicateOffenceReport, + } +} + decl_storage! { trait Store for Module as Babe { /// Current epoch index. @@ -208,6 +250,69 @@ decl_module! { // remove temporary "environment" entry from storage Lateness::::kill(); } + + /// Report authority equivocation/misbehavior. This method will verify + /// the equivocation proof and validate the given key ownership proof + /// against the extracted offender. If both are valid, the offence will + /// be reported. + #[weight = weight::weight_for_report_equivocation::()] + fn report_equivocation( + origin, + equivocation_proof: EquivocationProof, + key_owner_proof: T::KeyOwnerProof, + ) { + let reporter = ensure_signed(origin)?; + + Self::do_report_equivocation( + Some(reporter), + equivocation_proof, + key_owner_proof, + )?; + } + + /// Report authority equivocation/misbehavior. This method will verify + /// the equivocation proof and validate the given key ownership proof + /// against the extracted offender. If both are valid, the offence will + /// be reported. + /// This extrinsic must be called unsigned and it is expected that only + /// block authors will call it (validated in `ValidateUnsigned`), as such + /// if the block author is defined it will be defined as the equivocation + /// reporter. 
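As a usage sketch for the signed `report_equivocation` call above: `reporter`, `offender_pair` and `equivocation_proof` are placeholder names, and the snippet is assumed to run inside a test externality like the ones added further down in this patch.

// Build the key ownership proof for the offender's BABE key via the
// session-historical pallet, then submit the report from a signed origin.
let key = (sp_consensus_babe::KEY_TYPE, &offender_pair.public());
let key_owner_proof = Historical::prove(key).unwrap();

Babe::report_equivocation(
	Origin::signed(reporter),
	equivocation_proof,
	key_owner_proof,
).unwrap();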
+ #[weight = weight::weight_for_report_equivocation::()] + fn report_equivocation_unsigned( + origin, + equivocation_proof: EquivocationProof, + key_owner_proof: T::KeyOwnerProof, + ) { + ensure_none(origin)?; + + Self::do_report_equivocation( + T::HandleEquivocation::block_author(), + equivocation_proof, + key_owner_proof, + )?; + } + } +} + +mod weight { + use frame_support::{ + traits::Get, + weights::{constants::WEIGHT_PER_MICROS, Weight}, + }; + + pub fn weight_for_report_equivocation() -> Weight { + // checking membership proof + (35 * WEIGHT_PER_MICROS) + .saturating_add(T::DbWeight::get().reads(5)) + // check equivocation proof + .saturating_add(110 * WEIGHT_PER_MICROS) + // report offence + .saturating_add(110 * WEIGHT_PER_MICROS) + // worst case we are considering is that the given offender + // is backed by 200 nominators + .saturating_add(T::DbWeight::get().reads(14 + 3 * 200)) + .saturating_add(T::DbWeight::get().writes(10 + 3 * 200)) } } @@ -274,51 +379,6 @@ impl pallet_session::ShouldEndSession for Module { } } -/// A BABE equivocation offence report. -/// -/// When a validator released two or more blocks at the same slot. -pub struct BabeEquivocationOffence { - /// A babe slot number in which this incident happened. - pub slot: u64, - /// The session index in which the incident happened. - pub session_index: SessionIndex, - /// The size of the validator set at the time of the offence. - pub validator_set_count: u32, - /// The authority that produced the equivocation. - pub offender: FullIdentification, -} - -impl Offence for BabeEquivocationOffence { - const ID: Kind = *b"babe:equivocatio"; - type TimeSlot = u64; - - fn offenders(&self) -> Vec { - vec![self.offender.clone()] - } - - fn session_index(&self) -> SessionIndex { - self.session_index - } - - fn validator_set_count(&self) -> u32 { - self.validator_set_count - } - - fn time_slot(&self) -> Self::TimeSlot { - self.slot - } - - fn slash_fraction( - offenders_count: u32, - validator_set_count: u32, - ) -> Perbill { - // the formula is min((3k / n)^2, 1) - let x = Perbill::from_rational_approximation(3 * offenders_count, validator_set_count); - // _ ^ 2 - x.square() - } -} - impl Module { /// Determine the BABE slot duration based on the Timestamp module configuration. pub fn slot_duration() -> T::Moment { @@ -561,6 +621,69 @@ impl Module { Authorities::put(authorities); } } + + fn do_report_equivocation( + reporter: Option, + equivocation_proof: EquivocationProof, + key_owner_proof: T::KeyOwnerProof, + ) -> Result<(), Error> { + let offender = equivocation_proof.offender.clone(); + let slot_number = equivocation_proof.slot_number; + + // validate the equivocation proof + if !sp_consensus_babe::check_equivocation_proof(equivocation_proof) { + return Err(Error::InvalidEquivocationProof.into()); + } + + let validator_set_count = key_owner_proof.validator_count(); + let session_index = key_owner_proof.session(); + + let epoch_index = (slot_number.saturating_sub(GenesisSlot::get()) / T::EpochDuration::get()) + .saturated_into::(); + + // check that the slot number is consistent with the session index + // in the key ownership proof (i.e. 
slot is for that epoch) + if epoch_index != session_index { + return Err(Error::InvalidKeyOwnershipProof.into()); + } + + // check the membership proof and extract the offender's id + let key = (sp_consensus_babe::KEY_TYPE, offender); + let offender = T::KeyOwnerProofSystem::check_proof(key, key_owner_proof) + .ok_or(Error::InvalidKeyOwnershipProof)?; + + let offence = BabeEquivocationOffence { + slot: slot_number, + validator_set_count, + offender, + session_index, + }; + + let reporters = match reporter { + Some(id) => vec![id], + None => vec![], + }; + + T::HandleEquivocation::report_offence(reporters, offence) + .map_err(|_| Error::DuplicateOffenceReport)?; + + Ok(()) + } + + /// Submits an extrinsic to report an equivocation. This method will create + /// an unsigned extrinsic with a call to `report_equivocation_unsigned` and + /// will push the transaction to the pool. Only useful in an offchain + /// context. + pub fn submit_unsigned_equivocation_report( + equivocation_proof: EquivocationProof, + key_owner_proof: T::KeyOwnerProof, + ) -> Option<()> { + T::HandleEquivocation::submit_unsigned_equivocation_report( + equivocation_proof, + key_owner_proof, + ) + .ok() + } } impl OnTimestampSet for Module { diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index b977ea9044..c398aaeb85 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -18,27 +18,37 @@ //! Test utilities use codec::Encode; -use super::{Trait, Module, GenesisConfig, CurrentSlot}; +use super::{Trait, Module, CurrentSlot}; use sp_runtime::{ Perbill, impl_opaque_keys, - testing::{Header, UintAuthorityId, Digest, DigestItem}, - traits::IdentityLookup, + curve::PiecewiseLinear, + testing::{Digest, DigestItem, Header, TestXt,}, + traits::{Convert, Header as _, IdentityLookup, OpaqueKeys, SaturatedConversion}, }; use frame_system::InitKind; use frame_support::{ - impl_outer_origin, parameter_types, StorageValue, - traits::OnInitialize, + impl_outer_dispatch, impl_outer_origin, parameter_types, StorageValue, + traits::{KeyOwnerProofSystem, OnInitialize}, weights::Weight, }; use sp_io; -use sp_core::{H256, U256, crypto::Pair}; -use sp_consensus_babe::AuthorityPair; +use sp_core::{H256, U256, crypto::{KeyTypeId, Pair}}; +use sp_consensus_babe::{AuthorityId, AuthorityPair, SlotNumber}; use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; +use sp_staking::SessionIndex; +use pallet_staking::EraIndex; impl_outer_origin!{ pub enum Origin for Test where system = frame_system {} } +impl_outer_dispatch! { + pub enum Call for Test where origin: Origin { + babe::Babe, + staking::Staking, + } +} + type DummyValidatorId = u64; // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. @@ -50,7 +60,6 @@ parameter_types! 
{ pub const MaximumBlockWeight: Weight = 1024; pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::one(); - pub const MinimumPeriod: u64 = 1; pub const EpochDuration: u64 = 3; pub const ExpectedBlockTime: u64 = 1; pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(16); @@ -61,7 +70,7 @@ impl frame_system::Trait for Test { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = (); + type Call = Call; type Hash = H256; type Version = (); type Hashing = sp_runtime::traits::BlakeTwo256; @@ -78,27 +87,55 @@ impl frame_system::Trait for Test { type AvailableBlockRatio = AvailableBlockRatio; type MaximumBlockLength = MaximumBlockLength; type ModuleToIndex = (); - type AccountData = (); + type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); } +impl frame_system::offchain::SendTransactionTypes for Test +where + Call: From, +{ + type OverarchingCall = Call; + type Extrinsic = TestXt; +} + impl_opaque_keys! { pub struct MockSessionKeys { - pub dummy: UintAuthorityId, + pub babe_authority: super::Module, } } impl pallet_session::Trait for Test { type Event = (); type ValidatorId = ::AccountId; + type ValidatorIdOf = pallet_staking::StashOf; type ShouldEndSession = Babe; - type SessionHandler = (Babe,); - type SessionManager = (); - type ValidatorIdOf = (); + type NextSessionRotation = Babe; + type SessionManager = pallet_session::historical::NoteHistoricalRoot; + type SessionHandler = ::KeyTypeIdProviders; type Keys = MockSessionKeys; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; - type NextSessionRotation = Babe; +} + +impl pallet_session::historical::Trait for Test { + type FullIdentification = pallet_staking::Exposure; + type FullIdentificationOf = pallet_staking::ExposureOf; +} + +parameter_types! { + pub const UncleGenerations: u64 = 0; +} + +impl pallet_authorship::Trait for Test { + type FindAuthor = pallet_session::FindAccountFromAuthorIndex; + type UncleGenerations = UncleGenerations; + type FilterUncle = (); + type EventHandler = (); +} + +parameter_types! { + pub const MinimumPeriod: u64 = 1; } impl pallet_timestamp::Trait for Test { @@ -107,33 +144,142 @@ impl pallet_timestamp::Trait for Test { type MinimumPeriod = MinimumPeriod; } +parameter_types! { + pub const ExistentialDeposit: u128 = 1; +} + +impl pallet_balances::Trait for Test { + type Balance = u128; + type DustRemoval = (); + type Event = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; +} + +pallet_staking_reward_curve::build! { + const REWARD_CURVE: PiecewiseLinear<'static> = curve!( + min_inflation: 0_025_000u64, + max_inflation: 0_100_000, + ideal_stake: 0_500_000, + falloff: 0_050_000, + max_piece_count: 40, + test_precision: 0_005_000, + ); +} + +parameter_types! 
{ + pub const SessionsPerEra: SessionIndex = 3; + pub const BondingDuration: EraIndex = 3; + pub const SlashDeferDuration: EraIndex = 0; + pub const AttestationPeriod: u64 = 100; + pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; + pub const MaxNominatorRewardedPerValidator: u32 = 64; + pub const ElectionLookahead: u64 = 0; + pub const StakingUnsignedPriority: u64 = u64::max_value() / 2; +} + +pub struct CurrencyToVoteHandler; + +impl Convert for CurrencyToVoteHandler { + fn convert(x: u128) -> u128 { + x + } +} + +impl Convert for CurrencyToVoteHandler { + fn convert(x: u128) -> u64 { + x.saturated_into() + } +} + +impl pallet_staking::Trait for Test { + type RewardRemainder = (); + type CurrencyToVote = CurrencyToVoteHandler; + type Event = (); + type Currency = Balances; + type Slash = (); + type Reward = (); + type SessionsPerEra = SessionsPerEra; + type BondingDuration = BondingDuration; + type SlashDeferDuration = SlashDeferDuration; + type SlashCancelOrigin = frame_system::EnsureRoot; + type SessionInterface = Self; + type UnixTime = pallet_timestamp::Module; + type RewardCurve = RewardCurve; + type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; + type NextNewSession = Session; + type ElectionLookahead = ElectionLookahead; + type Call = Call; + type UnsignedPriority = StakingUnsignedPriority; + type MaxIterations = (); + type MinSolutionScoreBump = (); +} + +parameter_types! { + pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); +} + +impl pallet_offences::Trait for Test { + type Event = (); + type IdentificationTuple = pallet_session::historical::IdentificationTuple; + type OnOffenceHandler = Staking; + type WeightSoftLimit = OffencesWeightSoftLimit; +} + impl Trait for Test { type EpochDuration = EpochDuration; type ExpectedBlockTime = ExpectedBlockTime; type EpochChangeTrigger = crate::ExternalTrigger; -} -pub fn new_test_ext(authorities_len: usize) -> (Vec, sp_io::TestExternalities) { - let pairs = (0..authorities_len).map(|i| { - AuthorityPair::from_seed(&U256::from(i).into()) - }).collect::>(); + type KeyOwnerProofSystem = Historical; + + type KeyOwnerProof = + >::Proof; + + type KeyOwnerIdentification = >::IdentificationTuple; - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - GenesisConfig { - authorities: pairs.iter().map(|a| (a.public(), 1)).collect(), - }.assimilate_storage::(&mut t).unwrap(); - (pairs, t.into()) + type HandleEquivocation = super::EquivocationHandler; } +pub type Balances = pallet_balances::Module; +pub type Historical = pallet_session::historical::Module; +pub type Offences = pallet_offences::Module; +pub type Session = pallet_session::Module; +pub type Staking = pallet_staking::Module; +pub type System = frame_system::Module; +pub type Timestamp = pallet_timestamp::Module; +pub type Babe = Module; + pub fn go_to_block(n: u64, s: u64) { + use frame_support::traits::OnFinalize; + + System::on_finalize(System::block_number()); + Session::on_finalize(System::block_number()); + Staking::on_finalize(System::block_number()); + + let parent_hash = if System::block_number() > 1 { + let hdr = System::finalize(); + hdr.hash() + } else { + System::parent_hash() + }; + let pre_digest = make_secondary_plain_pre_digest(0, s); - System::initialize(&n, &Default::default(), &Default::default(), &pre_digest, InitKind::Full); + + System::initialize(&n, &parent_hash, &Default::default(), &pre_digest, InitKind::Full); System::set_block_number(n); + 
Timestamp::set_timestamp(n); + if s > 1 { CurrentSlot::put(s); } - // includes a call into `Babe::do_initialize`. + + System::on_initialize(n); Session::on_initialize(n); + Staking::on_initialize(n); } /// Slots will grow accordingly to blocks @@ -145,6 +291,19 @@ pub fn progress_to_block(n: u64) { } } +/// Progress to the first block at the given session +pub fn start_session(session_index: SessionIndex) { + let missing = (session_index - Session::current_index()) * 3; + progress_to_block(System::block_number() + missing as u64 + 1); + assert_eq!(Session::current_index(), session_index); +} + +/// Progress to the first block at the given era +pub fn start_era(era_index: EraIndex) { + start_session((era_index * 3).into()); + assert_eq!(Staking::current_era(), Some(era_index)); +} + pub fn make_pre_digest( authority_index: sp_consensus_babe::AuthorityIndex, slot_number: sp_consensus_babe::SlotNumber, @@ -177,6 +336,124 @@ pub fn make_secondary_plain_pre_digest( Digest { logs: vec![log] } } -pub type System = frame_system::Module; -pub type Babe = Module; -pub type Session = pallet_session::Module; +pub fn new_test_ext(authorities_len: usize) -> sp_io::TestExternalities { + new_test_ext_with_pairs(authorities_len).1 +} + +pub fn new_test_ext_with_pairs(authorities_len: usize) -> (Vec, sp_io::TestExternalities) { + let pairs = (0..authorities_len).map(|i| { + AuthorityPair::from_seed(&U256::from(i).into()) + }).collect::>(); + + let public = pairs.iter().map(|p| p.public()).collect(); + + (pairs, new_test_ext_raw_authorities(public)) +} + +pub fn new_test_ext_raw_authorities(authorities: Vec) -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + + // stashes are the index. + let session_keys: Vec<_> = authorities + .iter() + .enumerate() + .map(|(i, k)| { + ( + i as u64, + i as u64, + MockSessionKeys { + babe_authority: AuthorityId::from(k.clone()), + }, + ) + }) + .collect(); + + // controllers are the index + 1000 + let stakers: Vec<_> = (0..authorities.len()) + .map(|i| { + ( + i as u64, + i as u64 + 1000, + 10_000, + pallet_staking::StakerStatus::::Validator, + ) + }) + .collect(); + + let balances: Vec<_> = (0..authorities.len()) + .map(|i| (i as u64, 10_000_000)) + .collect(); + + // NOTE: this will initialize the babe authorities + // through OneSessionHandler::on_genesis_session + pallet_session::GenesisConfig:: { keys: session_keys } + .assimilate_storage(&mut t) + .unwrap(); + + pallet_balances::GenesisConfig:: { balances } + .assimilate_storage(&mut t) + .unwrap(); + + let staking_config = pallet_staking::GenesisConfig:: { + stakers, + validator_count: 8, + force_era: pallet_staking::Forcing::ForceNew, + minimum_validator_count: 0, + invulnerables: vec![], + ..Default::default() + }; + + staking_config.assimilate_storage(&mut t).unwrap(); + + t.into() +} + +/// Creates an equivocation at the current block, by generating two headers. +pub fn generate_equivocation_proof( + offender_authority_index: u32, + offender_authority_pair: &AuthorityPair, + slot_number: SlotNumber, +) -> sp_consensus_babe::EquivocationProof
{ + use sp_consensus_babe::digests::CompatibleDigestItem; + + let current_block = System::block_number(); + let current_slot = CurrentSlot::get(); + + let make_header = || { + let parent_hash = System::parent_hash(); + let pre_digest = make_secondary_plain_pre_digest(offender_authority_index, slot_number); + System::initialize(¤t_block, &parent_hash, &Default::default(), &pre_digest, InitKind::Full); + System::set_block_number(current_block); + Timestamp::set_timestamp(current_block); + System::finalize() + }; + + // sign the header prehash and sign it, adding it to the block as the seal + // digest item + let seal_header = |header: &mut Header| { + let prehash = header.hash(); + let seal = ::babe_seal( + offender_authority_pair.sign(prehash.as_ref()), + ); + header.digest_mut().push(seal); + }; + + // generate two headers at the current block + let mut h1 = make_header(); + let mut h2 = make_header(); + + seal_header(&mut h1); + seal_header(&mut h2); + + // restore previous runtime state + go_to_block(current_block, current_slot); + + sp_consensus_babe::EquivocationProof { + slot_number, + offender: offender_authority_pair.public(), + first_header: h1, + second_header: h2, + } +} diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index ecb3639fc5..bdd6748c3b 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -17,13 +17,16 @@ //! Consensus extension module tests for BABE consensus. -use super::*; +use super::{Call, *}; +use frame_support::{ + assert_err, assert_ok, + traits::{Currency, OnFinalize}, +}; use mock::*; -use frame_support::traits::OnFinalize; use pallet_session::ShouldEndSession; -use sp_core::crypto::IsWrappedBy; use sp_consensus_babe::AllowedSlots; use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; +use sp_core::crypto::{IsWrappedBy, Pair}; const EMPTY_RANDOMNESS: [u8; 32] = [ 74, 25, 49, 128, 53, 97, 244, 49, @@ -40,14 +43,14 @@ fn empty_randomness_is_correct() { #[test] fn initial_values() { - new_test_ext(4).1.execute_with(|| { + new_test_ext(4).execute_with(|| { assert_eq!(Babe::authorities().len(), 4) }) } #[test] fn check_module() { - new_test_ext(4).1.execute_with(|| { + new_test_ext(4).execute_with(|| { assert!(!Babe::should_end_session(0), "Genesis does not change sessions"); assert!(!Babe::should_end_session(200000), "BABE does not include the block number in epoch calculations"); @@ -56,7 +59,7 @@ fn check_module() { #[test] fn first_block_epoch_zero_start() { - let (pairs, mut ext) = new_test_ext(4); + let (pairs, mut ext) = new_test_ext_with_pairs(4); ext.execute_with(|| { let genesis_slot = 100; @@ -124,7 +127,7 @@ fn first_block_epoch_zero_start() { #[test] fn authority_index() { - new_test_ext(4).1.execute_with(|| { + new_test_ext(4).execute_with(|| { assert_eq!( Babe::find_author((&[(BABE_ENGINE_ID, &[][..])]).into_iter().cloned()), None, "Trivially invalid authorities are ignored") @@ -133,7 +136,7 @@ fn authority_index() { #[test] fn can_predict_next_epoch_change() { - new_test_ext(0).1.execute_with(|| { + new_test_ext(1).execute_with(|| { assert_eq!(::EpochDuration::get(), 3); // this sets the genesis slot to 6; go_to_block(1, 6); @@ -154,7 +157,7 @@ fn can_predict_next_epoch_change() { #[test] fn can_enact_next_config() { - new_test_ext(0).1.execute_with(|| { + new_test_ext(1).execute_with(|| { assert_eq!(::EpochDuration::get(), 3); // this sets the genesis slot to 6; go_to_block(1, 6); @@ -183,3 +186,402 @@ fn can_enact_next_config() { assert_eq!(header.digest.logs[2], consensus_digest.clone()) }); } + +#[test] +fn 
report_equivocation_current_session_works() { + let (pairs, mut ext) = new_test_ext_with_pairs(3); + + ext.execute_with(|| { + start_era(1); + + let authorities = Babe::authorities(); + let validators = Session::validators(); + + // make sure that all authorities have the same balance + for validator in &validators { + assert_eq!(Balances::total_balance(validator), 10_000_000); + assert_eq!(Staking::slashable_balance_of(validator), 10_000); + + assert_eq!( + Staking::eras_stakers(1, validator), + pallet_staking::Exposure { + total: 10_000, + own: 10_000, + others: vec![], + }, + ); + } + + // we will use the validator at index 0 as the offending authority + let offending_validator_index = 0; + let offending_validator_id = Session::validators()[offending_validator_index]; + let offending_authority_pair = pairs + .into_iter() + .find(|p| p.public() == authorities[offending_validator_index].0) + .unwrap(); + + // generate an equivocation proof. it creates two headers at the given + // slot with different block hashes and signed by the given key + let equivocation_proof = generate_equivocation_proof( + offending_validator_index as u32, + &offending_authority_pair, + CurrentSlot::get(), + ); + + // create the key ownership proof + let key = ( + sp_consensus_babe::KEY_TYPE, + &offending_authority_pair.public(), + ); + let key_owner_proof = Historical::prove(key).unwrap(); + + // report the equivocation + Babe::report_equivocation_unsigned(Origin::none(), equivocation_proof, key_owner_proof) + .unwrap(); + + // start a new era so that the results of the offence report + // are applied at era end + start_era(2); + + // check that the balance of offending validator is slashed 100%. + assert_eq!( + Balances::total_balance(&offending_validator_id), + 10_000_000 - 10_000 + ); + assert_eq!(Staking::slashable_balance_of(&offending_validator_id), 0); + assert_eq!( + Staking::eras_stakers(2, offending_validator_id), + pallet_staking::Exposure { + total: 0, + own: 0, + others: vec![], + }, + ); + + // check that the balances of all other validators are left intact. 
+ for validator in &validators { + if *validator == offending_validator_id { + continue; + } + + assert_eq!(Balances::total_balance(validator), 10_000_000); + assert_eq!(Staking::slashable_balance_of(validator), 10_000); + assert_eq!( + Staking::eras_stakers(2, validator), + pallet_staking::Exposure { + total: 10_000, + own: 10_000, + others: vec![], + }, + ); + } + }) +} + +#[test] +fn report_equivocation_old_session_works() { + let (pairs, mut ext) = new_test_ext_with_pairs(3); + + ext.execute_with(|| { + start_era(1); + + let authorities = Babe::authorities(); + + // we will use the validator at index 0 as the offending authority + let offending_validator_index = 0; + let offending_validator_id = Session::validators()[offending_validator_index]; + let offending_authority_pair = pairs + .into_iter() + .find(|p| p.public() == authorities[offending_validator_index].0) + .unwrap(); + + // generate an equivocation proof at the current slot + let equivocation_proof = generate_equivocation_proof( + offending_validator_index as u32, + &offending_authority_pair, + CurrentSlot::get(), + ); + + // create the key ownership proof + let key = ( + sp_consensus_babe::KEY_TYPE, + &offending_authority_pair.public(), + ); + let key_owner_proof = Historical::prove(key).unwrap(); + + // start a new era and report the equivocation + // from the previous era + start_era(2); + + // check the balance of the offending validator + assert_eq!(Balances::total_balance(&offending_validator_id), 10_000_000); + assert_eq!( + Staking::slashable_balance_of(&offending_validator_id), + 10_000 + ); + + // report the equivocation + Babe::report_equivocation_unsigned(Origin::none(), equivocation_proof, key_owner_proof) + .unwrap(); + + // start a new era so that the results of the offence report + // are applied at era end + start_era(3); + + // check that the balance of offending validator is slashed 100%. 
+ assert_eq!( + Balances::total_balance(&offending_validator_id), + 10_000_000 - 10_000 + ); + assert_eq!(Staking::slashable_balance_of(&offending_validator_id), 0); + assert_eq!( + Staking::eras_stakers(3, offending_validator_id), + pallet_staking::Exposure { + total: 0, + own: 0, + others: vec![], + }, + ); + }) +} + +#[test] +fn report_equivocation_invalid_key_owner_proof() { + let (pairs, mut ext) = new_test_ext_with_pairs(3); + + ext.execute_with(|| { + start_era(1); + + let authorities = Babe::authorities(); + + // we will use the validator at index 0 as the offending authority + let offending_validator_index = 0; + let offending_authority_pair = pairs + .into_iter() + .find(|p| p.public() == authorities[offending_validator_index].0) + .unwrap(); + + // generate an equivocation proof at the current slot + let equivocation_proof = generate_equivocation_proof( + offending_validator_index as u32, + &offending_authority_pair, + CurrentSlot::get(), + ); + + // create the key ownership proof + let key = ( + sp_consensus_babe::KEY_TYPE, + &offending_authority_pair.public(), + ); + let mut key_owner_proof = Historical::prove(key).unwrap(); + + // we change the session index in the key ownership proof + // which should make it invalid + key_owner_proof.session = 0; + assert_err!( + Babe::report_equivocation_unsigned( + Origin::none(), + equivocation_proof.clone(), + key_owner_proof + ), + Error::::InvalidKeyOwnershipProof, + ); + + // it should fail as well if we create a key owner proof + // for a different authority than the offender + let key = (sp_consensus_babe::KEY_TYPE, &authorities[1].0); + let key_owner_proof = Historical::prove(key).unwrap(); + + // we need to progress to a new era to make sure that the key + // ownership proof is properly checked, otherwise since the state + // is still available the historical module will just check + // against current session data. + start_era(2); + + assert_err!( + Babe::report_equivocation_unsigned(Origin::none(), equivocation_proof, key_owner_proof), + Error::::InvalidKeyOwnershipProof, + ); + }) +} + +#[test] +fn report_equivocation_invalid_equivocation_proof() { + use sp_runtime::traits::Header; + + let (pairs, mut ext) = new_test_ext_with_pairs(3); + + ext.execute_with(|| { + start_era(1); + + let authorities = Babe::authorities(); + + // we will use the validator at index 0 as the offending authority + let offending_validator_index = 0; + let offending_authority_pair = pairs + .into_iter() + .find(|p| p.public() == authorities[offending_validator_index].0) + .unwrap(); + + // create the key ownership proof + let key = ( + sp_consensus_babe::KEY_TYPE, + &offending_authority_pair.public(), + ); + let key_owner_proof = Historical::prove(key).unwrap(); + + let assert_invalid_equivocation = |equivocation_proof| { + assert_err!( + Babe::report_equivocation_unsigned( + Origin::none(), + equivocation_proof, + key_owner_proof.clone(), + ), + Error::::InvalidEquivocationProof, + ) + }; + + // both headers have the same hash, no equivocation. 
+ let mut equivocation_proof = generate_equivocation_proof( + offending_validator_index as u32, + &offending_authority_pair, + CurrentSlot::get(), + ); + equivocation_proof.second_header = equivocation_proof.first_header.clone(); + assert_invalid_equivocation(equivocation_proof); + + // missing preruntime digest from one header + let mut equivocation_proof = generate_equivocation_proof( + offending_validator_index as u32, + &offending_authority_pair, + CurrentSlot::get(), + ); + equivocation_proof.first_header.digest_mut().logs.remove(0); + assert_invalid_equivocation(equivocation_proof); + + // missing seal from one header + let mut equivocation_proof = generate_equivocation_proof( + offending_validator_index as u32, + &offending_authority_pair, + CurrentSlot::get(), + ); + equivocation_proof.first_header.digest_mut().logs.remove(1); + assert_invalid_equivocation(equivocation_proof); + + // invalid slot number in proof compared to runtime digest + let mut equivocation_proof = generate_equivocation_proof( + offending_validator_index as u32, + &offending_authority_pair, + CurrentSlot::get(), + ); + equivocation_proof.slot_number = 0; + assert_invalid_equivocation(equivocation_proof.clone()); + + // different slot numbers in headers + let h1 = equivocation_proof.first_header; + let mut equivocation_proof = generate_equivocation_proof( + offending_validator_index as u32, + &offending_authority_pair, + CurrentSlot::get() + 1, + ); + + // use the header from the previous equivocation generated + // at the previous slot + equivocation_proof.first_header = h1.clone(); + + assert_invalid_equivocation(equivocation_proof.clone()); + + // invalid seal signature + let mut equivocation_proof = generate_equivocation_proof( + offending_validator_index as u32, + &offending_authority_pair, + CurrentSlot::get() + 1, + ); + + // replace the seal digest with the digest from the + // previous header at the previous slot + equivocation_proof.first_header.digest_mut().pop(); + equivocation_proof + .first_header + .digest_mut() + .push(h1.digest().logs().last().unwrap().clone()); + + assert_invalid_equivocation(equivocation_proof.clone()); + }) +} + +#[test] +fn report_equivocation_validate_unsigned_prevents_duplicates() { + use sp_runtime::transaction_validity::{ + InvalidTransaction, TransactionLongevity, TransactionPriority, TransactionSource, + TransactionValidity, ValidTransaction, + }; + + let (pairs, mut ext) = new_test_ext_with_pairs(3); + + ext.execute_with(|| { + start_era(1); + + let authorities = Babe::authorities(); + + // generate and report an equivocation for the validator at index 0 + let offending_validator_index = 0; + let offending_authority_pair = pairs + .into_iter() + .find(|p| p.public() == authorities[offending_validator_index].0) + .unwrap(); + + let equivocation_proof = generate_equivocation_proof( + offending_validator_index as u32, + &offending_authority_pair, + CurrentSlot::get(), + ); + + let key = ( + sp_consensus_babe::KEY_TYPE, + &offending_authority_pair.public(), + ); + let key_owner_proof = Historical::prove(key).unwrap(); + + let inner = + Call::report_equivocation_unsigned(equivocation_proof.clone(), key_owner_proof.clone()); + + // only local/inblock reports are allowed + assert_eq!( + ::validate_unsigned( + TransactionSource::External, + &inner, + ), + InvalidTransaction::Call.into(), + ); + + // the transaction is valid when passed as local + let tx_tag = (offending_authority_pair.public(), CurrentSlot::get()); + assert_eq!( + ::validate_unsigned( + 
TransactionSource::Local, + &inner, + ), + TransactionValidity::Ok(ValidTransaction { + priority: TransactionPriority::max_value(), + requires: vec![], + provides: vec![("BabeEquivocation", tx_tag).encode()], + longevity: TransactionLongevity::max_value(), + propagate: false, + }) + ); + + // the pre dispatch checks should also pass + assert_ok!(::pre_dispatch(&inner)); + + // we submit the report + Babe::report_equivocation_unsigned(Origin::none(), equivocation_proof, key_owner_proof) + .unwrap(); + + // the report should now be considered stale and the transaction is invalid + assert_err!( + ::pre_dispatch(&inner), + InvalidTransaction::Stale, + ); + }); +} diff --git a/frame/grandpa/src/equivocation.rs b/frame/grandpa/src/equivocation.rs index 1cc1620125..d028f3c174 100644 --- a/frame/grandpa/src/equivocation.rs +++ b/frame/grandpa/src/equivocation.rs @@ -50,6 +50,7 @@ use sp_runtime::{ }, DispatchResult, Perbill, }; +use sp_session::GetSessionNumber; use sp_staking::{ offence::{Kind, Offence, OffenceError, ReportOffence}, SessionIndex, @@ -376,38 +377,3 @@ impl Offence x.square() } } - -/// A trait to get a session number the `MembershipProof` belongs to. -pub trait GetSessionNumber { - fn session(&self) -> SessionIndex; -} - -/// A trait to get the validator count at the session the `MembershipProof` -/// belongs to. -pub trait GetValidatorCount { - fn validator_count(&self) -> sp_session::ValidatorCount; -} - -impl GetSessionNumber for frame_support::Void { - fn session(&self) -> SessionIndex { - Default::default() - } -} - -impl GetValidatorCount for frame_support::Void { - fn validator_count(&self) -> sp_session::ValidatorCount { - Default::default() - } -} - -impl GetSessionNumber for sp_session::MembershipProof { - fn session(&self) -> SessionIndex { - self.session - } -} - -impl GetValidatorCount for sp_session::MembershipProof { - fn validator_count(&self) -> sp_session::ValidatorCount { - self.validator_count - } -} diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index 3432c11020..3b3e595ad1 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -49,15 +49,18 @@ use sp_runtime::{ traits::Zero, DispatchResult, KeyTypeId, }; +use sp_session::{GetSessionNumber, GetValidatorCount}; use sp_staking::SessionIndex; mod equivocation; +#[cfg(all(feature = "std", test))] mod mock; +#[cfg(all(feature = "std", test))] mod tests; pub use equivocation::{ - EquivocationHandler, GetSessionNumber, GetValidatorCount, GrandpaEquivocationOffence, - GrandpaOffence, GrandpaTimeSlot, HandleEquivocation, ValidateEquivocationReport, + EquivocationHandler, GrandpaEquivocationOffence, GrandpaOffence, GrandpaTimeSlot, + HandleEquivocation, ValidateEquivocationReport, }; pub trait Trait: frame_system::Trait { diff --git a/frame/im-online/src/mock.rs b/frame/im-online/src/mock.rs index d313646b28..3bc1f4d3f3 100644 --- a/frame/im-online/src/mock.rs +++ b/frame/im-online/src/mock.rs @@ -86,10 +86,16 @@ impl ReportOffence for OffenceHandler { OFFENCES.with(|l| l.borrow_mut().push((reporters, offence))); Ok(()) } + + fn is_known_offence(_offenders: &[IdentificationTuple], _time_slot: &SessionIndex) -> bool { + false + } } pub fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); t.into() } diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index 1d726aedbb..d0cc1bce22 100644 --- 
a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -323,21 +323,16 @@ benchmarks! { } report_offence_babe { - let r in 1 .. MAX_REPORTERS; let n in 0 .. MAX_NOMINATORS.min(MAX_NOMINATIONS as u32); - let o = 1; - // Make r reporters - let mut reporters = vec![]; - for i in 0 .. r { - let reporter = account("reporter", i, SEED); - reporters.push(reporter); - } + // for babe equivocation reports the number of reporters + // and offenders is always 1 + let reporters = vec![account("reporter", 1, SEED)]; // make sure reporters actually get rewarded Staking::::set_slash_reward_fraction(Perbill::one()); - let (mut offenders, raw_offenders) = make_offenders::(o, n)?; + let (mut offenders, raw_offenders) = make_offenders::(1, n)?; let keys = ImOnline::::keys(); let offence = BabeEquivocationOffence { @@ -357,9 +352,9 @@ benchmarks! { assert_eq!( System::::event_count(), 0 + 1 // offence - + 2 * r // reporter (reward + endowment) - + o // offenders slashed - + o * n // nominators slashed + + 2 // reporter (reward + endowment) + + 1 // offenders slashed + + n // nominators slashed ); } diff --git a/frame/offences/src/lib.rs b/frame/offences/src/lib.rs index a42f09697e..267e6e14c9 100644 --- a/frame/offences/src/lib.rs +++ b/frame/offences/src/lib.rs @@ -185,6 +185,15 @@ where Ok(()) } + + fn is_known_offence(offenders: &[T::IdentificationTuple], time_slot: &O::TimeSlot) -> bool { + let any_unknown = offenders.iter().any(|offender| { + let report_id = Self::report_id::(time_slot, offender); + !>::contains_key(&report_id) + }); + + !any_unknown + } } impl Module { diff --git a/frame/offences/src/tests.rs b/frame/offences/src/tests.rs index 0fb6620b7d..ca9f46a198 100644 --- a/frame/offences/src/tests.rs +++ b/frame/offences/src/tests.rs @@ -174,6 +174,77 @@ fn doesnt_deposit_event_for_dups() { }); } +#[test] +fn reports_if_an_offence_is_dup() { + type TestOffence = Offence; + + new_test_ext().execute_with(|| { + let time_slot = 42; + assert_eq!(offence_reports(KIND, time_slot), vec![]); + + let offence = |time_slot, offenders| TestOffence { + validator_set_count: 5, + time_slot, + offenders, + }; + + let mut test_offence = offence(time_slot, vec![0]); + + // the report for authority 0 at time slot 42 should not be a known + // offence + assert!( + !>::is_known_offence( + &test_offence.offenders, + &test_offence.time_slot + ) + ); + + // we report an offence for authority 0 at time slot 42 + Offences::report_offence(vec![], test_offence.clone()).unwrap(); + + // the same report should be a known offence now + assert!( + >::is_known_offence( + &test_offence.offenders, + &test_offence.time_slot + ) + ); + + // and reporting it again should yield a duplicate report error + assert_eq!( + Offences::report_offence(vec![], test_offence.clone()), + Err(OffenceError::DuplicateReport) + ); + + // after adding a new offender to the offence report + test_offence.offenders.push(1); + + // it should not be a known offence anymore + assert!( + !>::is_known_offence( + &test_offence.offenders, + &test_offence.time_slot + ) + ); + + // and reporting it again should work without any error + assert_eq!( + Offences::report_offence(vec![], test_offence.clone()), + Ok(()) + ); + + // creating a new offence for the same authorities on the next slot + // should be considered a new offence and thefore not known + let test_offence_next_slot = offence(time_slot + 1, vec![0, 1]); + assert!( + !>::is_known_offence( + &test_offence_next_slot.offenders, + &test_offence_next_slot.time_slot + ) + ); 
+ }); +} + #[test] fn should_properly_count_offences() { // We report two different authorities for the same issue. Ultimately, the 1st authority diff --git a/frame/session/benchmarking/Cargo.toml b/frame/session/benchmarking/Cargo.toml index 391b80237e..1c2dbf7291 100644 --- a/frame/session/benchmarking/Cargo.toml +++ b/frame/session/benchmarking/Cargo.toml @@ -13,12 +13,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/std" } +sp-session = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/session" } sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../../primitives/runtime" } frame-system = { version = "2.0.0-rc4", default-features = false, path = "../../system" } frame-benchmarking = { version = "2.0.0-rc4", default-features = false, path = "../../benchmarking" } frame-support = { version = "2.0.0-rc4", default-features = false, path = "../../support" } pallet-staking = { version = "2.0.0-rc4", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" } pallet-session = { version = "2.0.0-rc4", default-features = false, path = "../../session" } +rand = { version = "0.7.2", default-features = false } [dev-dependencies] serde = { version = "1.0.101" } @@ -33,6 +35,7 @@ pallet-balances = { version = "2.0.0-rc4", path = "../../balances" } default = ["std"] std = [ "sp-std/std", + "sp-session/std", "sp-runtime/std", "frame-system/std", "frame-benchmarking/std", diff --git a/frame/session/benchmarking/src/lib.rs b/frame/session/benchmarking/src/lib.rs index 0df4dcfbd9..cc47189335 100644 --- a/frame/session/benchmarking/src/lib.rs +++ b/frame/session/benchmarking/src/lib.rs @@ -25,20 +25,30 @@ mod mock; use sp_std::prelude::*; use sp_std::vec; -use frame_system::RawOrigin; use frame_benchmarking::benchmarks; - -use pallet_session::*; -use pallet_session::Module as Session; - +use frame_support::{ + codec::Decode, + storage::StorageValue, + traits::{KeyOwnerProofSystem, OnInitialize}, +}; +use frame_system::RawOrigin; +use pallet_session::{historical::Module as Historical, Module as Session, *}; use pallet_staking::{ + benchmarking::create_validator_with_nominators, testing_utils::create_validators, MAX_NOMINATIONS, - benchmarking::create_validator_with_nominators, }; +use sp_runtime::traits::{One, StaticLookup}; + +const MAX_VALIDATORS: u32 = 1000; pub struct Module(pallet_session::Module); +pub trait Trait: pallet_session::Trait + pallet_session::historical::Trait + pallet_staking::Trait {} -pub trait Trait: pallet_session::Trait + pallet_staking::Trait {} +impl OnInitialize for Module { + fn on_initialize(n: T::BlockNumber) -> frame_support::weights::Weight { + pallet_session::Module::::on_initialize(n) + } +} benchmarks! { _ { } @@ -59,6 +69,88 @@ benchmarks! { let proof: Vec = vec![0,1,2,3]; Session::::set_keys(RawOrigin::Signed(v_controller.clone()).into(), keys, proof)?; }: _(RawOrigin::Signed(v_controller)) + + check_membership_proof_current_session { + let n in 2 .. MAX_VALIDATORS as u32; + + let (key, key_owner_proof1) = check_membership_proof_setup::(n); + let key_owner_proof2 = key_owner_proof1.clone(); + }: { + Historical::::check_proof(key, key_owner_proof1); + } + verify { + assert!(Historical::::check_proof(key, key_owner_proof2).is_some()); + } + + check_membership_proof_historical_session { + let n in 2 .. 
MAX_VALIDATORS as u32; + + let (key, key_owner_proof1) = check_membership_proof_setup::(n); + + // skip to the next session so that the session is historical + // and the membership merkle proof must be checked. + Session::::rotate_session(); + + let key_owner_proof2 = key_owner_proof1.clone(); + }: { + Historical::::check_proof(key, key_owner_proof1); + } + verify { + assert!(Historical::::check_proof(key, key_owner_proof2).is_some()); + } +} + +/// Sets up the benchmark for checking a membership proof. It creates the given +/// number of validators, sets random session keys and then creates a membership +/// proof for the first authority and returns its key and the proof. +fn check_membership_proof_setup( + n: u32, +) -> ( + (sp_runtime::KeyTypeId, &'static [u8; 32]), + sp_session::MembershipProof, +) { + pallet_staking::ValidatorCount::put(n); + + // create validators and set random session keys + for (n, who) in create_validators::(n, 1000) + .unwrap() + .into_iter() + .enumerate() + { + use rand::RngCore; + use rand::SeedableRng; + + let validator = T::Lookup::lookup(who).unwrap(); + let controller = pallet_staking::Module::::bonded(validator).unwrap(); + + let keys = { + let mut keys = [0u8; 128]; + + // we keep the keys for the first validator as 0x00000... + if n > 0 { + let mut rng = rand::rngs::StdRng::seed_from_u64(n as u64); + rng.fill_bytes(&mut keys); + } + + keys + }; + + let keys: T::Keys = Decode::decode(&mut &keys[..]).unwrap(); + let proof: Vec = vec![]; + + Session::::set_keys(RawOrigin::Signed(controller).into(), keys, proof).unwrap(); + } + + Module::::on_initialize(T::BlockNumber::one()); + + // skip sessions until the new validator set is enacted + while Session::::validators().len() < n as usize { + Session::::rotate_session(); + } + + let key = (sp_runtime::KeyTypeId(*b"babe"), &[0u8; 32]); + + (key, Historical::::prove(key).unwrap()) } #[cfg(test)] diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 2a6e5b1a2d..641761c7d0 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -3355,6 +3355,10 @@ impl ReportOffence Ok(()) } } + + fn is_known_offence(offenders: &[Offender], time_slot: &O::TimeSlot) -> bool { + R::is_known_offence(offenders, time_slot) + } } #[allow(deprecated)] diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 196bddbdf5..06a8ce856d 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -30,11 +30,11 @@ pub use sp_tracing; #[cfg(feature = "std")] pub use serde; +pub use sp_core::Void; #[doc(hidden)] pub use sp_std; #[doc(hidden)] pub use codec; -use codec::{Decode, Encode}; #[cfg(feature = "std")] #[doc(hidden)] pub use once_cell; @@ -364,11 +364,6 @@ macro_rules! assert_ok { } } -/// The void type - it cannot exist. -// Oh rust, you crack me up... 
-#[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug)] -pub enum Void {} - #[cfg(feature = "std")] #[doc(hidden)] pub use serde::{Serialize, Deserialize}; diff --git a/primitives/consensus/babe/Cargo.toml b/primitives/consensus/babe/Cargo.toml index 3649230468..8199bad6be 100644 --- a/primitives/consensus/babe/Cargo.toml +++ b/primitives/consensus/babe/Cargo.toml @@ -17,9 +17,10 @@ codec = { package = "parity-scale-codec", version = "1.3.1", default-features = merlin = { version = "2.0", default-features = false } sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../std" } sp-api = { version = "2.0.0-rc4", default-features = false, path = "../../api" } -sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../core" } sp-consensus = { version = "0.8.0-rc4", optional = true, path = "../common" } +sp-consensus-slots = { version = "0.8.0-rc4", default-features = false, path = "../slots" } sp-consensus-vrf = { version = "0.8.0-rc4", path = "../vrf", default-features = false } +sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../core" } sp-inherents = { version = "2.0.0-rc4", default-features = false, path = "../../inherents" } sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../runtime" } sp-timestamp = { version = "2.0.0-rc4", default-features = false, path = "../../timestamp" } @@ -27,14 +28,15 @@ sp-timestamp = { version = "2.0.0-rc4", default-features = false, path = "../../ [features] default = ["std"] std = [ - "sp-core/std", "sp-application-crypto/std", "codec/std", "merlin/std", "sp-std/std", "sp-api/std", "sp-consensus", + "sp-consensus-slots/std", "sp-consensus-vrf/std", + "sp-core/std", "sp-inherents/std", "sp-runtime/std", "sp-timestamp/std", diff --git a/primitives/consensus/babe/src/digests.rs b/primitives/consensus/babe/src/digests.rs index 4b625abe9f..a680ca0656 100644 --- a/primitives/consensus/babe/src/digests.rs +++ b/primitives/consensus/babe/src/digests.rs @@ -17,18 +17,14 @@ //! Private implementation details of BABE digests. -#[cfg(feature = "std")] -use super::{BABE_ENGINE_ID, AuthoritySignature}; -use super::{AuthorityId, AuthorityIndex, SlotNumber, BabeAuthorityWeight, BabeEpochConfiguration, AllowedSlots}; -#[cfg(feature = "std")] -use sp_runtime::{DigestItem, generic::OpaqueDigestItemId}; -#[cfg(feature = "std")] -use std::fmt::Debug; -use codec::{Decode, Encode}; -#[cfg(feature = "std")] -use codec::Codec; +use super::{ + AllowedSlots, AuthorityId, AuthorityIndex, AuthoritySignature, BabeAuthorityWeight, + BabeEpochConfiguration, SlotNumber, BABE_ENGINE_ID, +}; +use codec::{Codec, Decode, Encode}; use sp_std::vec::Vec; -use sp_runtime::RuntimeDebug; +use sp_runtime::{generic::OpaqueDigestItemId, DigestItem, RuntimeDebug}; + use sp_consensus_vrf::schnorrkel::{Randomness, VRFOutput, VRFProof}; /// Raw BABE primary slot assignment pre-digest. @@ -151,7 +147,6 @@ impl From for BabeEpochConfiguration { } /// A digest item which is usable with BABE consensus. -#[cfg(feature = "std")] pub trait CompatibleDigestItem: Sized { /// Construct a digest item which contains a BABE pre-digest. 
fn babe_pre_digest(seal: PreDigest) -> Self; @@ -172,9 +167,8 @@ pub trait CompatibleDigestItem: Sized { fn as_next_config_descriptor(&self) -> Option; } -#[cfg(feature = "std")] impl CompatibleDigestItem for DigestItem where - Hash: Debug + Send + Sync + Eq + Clone + Codec + 'static + Hash: Send + Sync + Eq + Clone + Codec + 'static { fn babe_pre_digest(digest: PreDigest) -> Self { DigestItem::PreRuntime(BABE_ENGINE_ID, digest.encode()) diff --git a/primitives/consensus/babe/src/lib.rs b/primitives/consensus/babe/src/lib.rs index 10d4aa5ae5..54f05d7bc5 100644 --- a/primitives/consensus/babe/src/lib.rs +++ b/primitives/consensus/babe/src/lib.rs @@ -23,17 +23,21 @@ pub mod digests; pub mod inherents; +pub use merlin::Transcript; pub use sp_consensus_vrf::schnorrkel::{ - Randomness, VRF_PROOF_LENGTH, VRF_OUTPUT_LENGTH, RANDOMNESS_LENGTH + Randomness, RANDOMNESS_LENGTH, VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH, }; -pub use merlin::Transcript; -use codec::{Encode, Decode}; -use sp_std::vec::Vec; -use sp_runtime::{ConsensusEngineId, RuntimeDebug}; +use codec::{Decode, Encode}; #[cfg(feature = "std")] use sp_core::vrf::{VRFTranscriptData, VRFTranscriptValue}; -use crate::digests::{NextEpochDescriptor, NextConfigDescriptor}; +use sp_runtime::{traits::Header, ConsensusEngineId, RuntimeDebug}; +use sp_std::vec::Vec; + +use crate::digests::{NextConfigDescriptor, NextEpochDescriptor}; + +/// Key type for BABE module. +pub const KEY_TYPE: sp_core::crypto::KeyTypeId = sp_application_crypto::key_types::BABE; mod app { use sp_application_crypto::{app_crypto, key_types::BABE, sr25519}; @@ -73,7 +77,10 @@ pub const MEDIAN_ALGORITHM_CARDINALITY: usize = 1200; // arbitrary suggestion by pub type AuthorityIndex = u32; /// A slot number. -pub type SlotNumber = u64; +pub use sp_consensus_slots::SlotNumber; + +/// An equivocation proof for multiple block authorships on the same slot (i.e. double vote). +pub type EquivocationProof = sp_consensus_slots::EquivocationProof; /// The weight of an authority. // NOTE: we use a unique name for the weight to avoid conflicts with other @@ -256,6 +263,93 @@ pub struct BabeEpochConfiguration { pub allowed_slots: AllowedSlots, } +/// Verifies the equivocation proof by making sure that: both headers have +/// different hashes, are targetting the same slot, and have valid signatures by +/// the same authority. +pub fn check_equivocation_proof(proof: EquivocationProof) -> bool +where + H: Header, +{ + use digests::*; + use sp_application_crypto::RuntimeAppPublic; + + let find_pre_digest = |header: &H| { + header + .digest() + .logs() + .iter() + .find_map(|log| log.as_babe_pre_digest()) + }; + + let verify_seal_signature = |mut header: H, offender: &AuthorityId| { + let seal = header.digest_mut().pop()?.as_babe_seal()?; + let pre_hash = header.hash(); + + if !offender.verify(&pre_hash.as_ref(), &seal) { + return None; + } + + Some(()) + }; + + let verify_proof = || { + // we must have different headers for the equivocation to be valid + if proof.first_header.hash() == proof.second_header.hash() { + return None; + } + + let first_pre_digest = find_pre_digest(&proof.first_header)?; + let second_pre_digest = find_pre_digest(&proof.second_header)?; + + // both headers must be targetting the same slot and it must + // be the same as the one in the proof. 
+ if proof.slot_number != first_pre_digest.slot_number() || + first_pre_digest.slot_number() != second_pre_digest.slot_number() + { + return None; + } + + // both headers must have been authored by the same authority + if first_pre_digest.authority_index() != second_pre_digest.authority_index() { + return None; + } + + // we finally verify that the expected authority has signed both headers and + // that the signature is valid. + verify_seal_signature(proof.first_header, &proof.offender)?; + verify_seal_signature(proof.second_header, &proof.offender)?; + + Some(()) + }; + + // NOTE: we isolate the verification code into an helper function that + // returns `Option<()>` so that we can use `?` to deal with any intermediate + // errors and discard the proof as invalid. + verify_proof().is_some() +} + +/// An opaque type used to represent the key ownership proof at the runtime API +/// boundary. The inner value is an encoded representation of the actual key +/// ownership proof which will be parameterized when defining the runtime. At +/// the runtime API boundary this type is unknown and as such we keep this +/// opaque representation, implementors of the runtime API will have to make +/// sure that all usages of `OpaqueKeyOwnershipProof` refer to the same type. +#[derive(Decode, Encode, PartialEq)] +pub struct OpaqueKeyOwnershipProof(Vec); +impl OpaqueKeyOwnershipProof { + /// Create a new `OpaqueKeyOwnershipProof` using the given encoded + /// representation. + pub fn new(inner: Vec) -> OpaqueKeyOwnershipProof { + OpaqueKeyOwnershipProof(inner) + } + + /// Try to decode this `OpaqueKeyOwnershipProof` into the given concrete key + /// ownership proof type. + pub fn decode(self) -> Option { + Decode::decode(&mut &self.0[..]).ok() + } +} + sp_api::decl_runtime_apis! { /// API necessary for block authorship with BABE. #[api_version(2)] @@ -269,5 +363,34 @@ sp_api::decl_runtime_apis! { /// Returns the slot number that started the current epoch. fn current_epoch_start() -> SlotNumber; + + /// Generates a proof of key ownership for the given authority in the + /// current epoch. An example usage of this module is coupled with the + /// session historical module to prove that a given authority key is + /// tied to a given staking identity during a specific session. Proofs + /// of key ownership are necessary for submitting equivocation reports. + /// NOTE: even though the API takes a `slot_number` as parameter the current + /// implementations ignores this parameter and instead relies on this + /// method being called at the correct block height, i.e. any point at + /// which the epoch for the given slot is live on-chain. Future + /// implementations will instead use indexed data through an offchain + /// worker, not requiring older states to be available. + fn generate_key_ownership_proof( + slot_number: SlotNumber, + authority_id: AuthorityId, + ) -> Option; + + /// Submits an unsigned extrinsic to report an equivocation. The caller + /// must provide the equivocation proof and a key ownership proof + /// (should be obtained using `generate_key_ownership_proof`). The + /// extrinsic will be unsigned and should only be accepted for local + /// authorship (not to be broadcast to the network). This method returns + /// `None` when creation of the extrinsic fails, e.g. if equivocation + /// reporting is disabled for the given runtime (i.e. this method is + /// hardcoded to return `None`). Only useful in an offchain context. 
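A quick way to see the helper above in action, outside of this diff: `check_equivocation_proof` must reject any proof whose two headers are identical, regardless of signatures. A minimal sketch of that property, assuming the test `Header` from `sp_runtime::testing` and a zero-padded authority id as stand-ins (not part of the patch):

use sp_consensus_babe::{check_equivocation_proof, AuthorityId, EquivocationProof};
use sp_core::crypto::Public;
use sp_runtime::testing::Header;

fn main() {
    // Build a "proof" from the same header twice; a real proof needs two
    // distinct, correctly sealed BABE headers authored by the same authority.
    let header = Header::new_from_number(1);
    let proof = EquivocationProof {
        offender: AuthorityId::from_slice(&[0u8; 32]),
        slot_number: 1,
        first_header: header.clone(),
        second_header: header,
    };
    // Identical headers can never constitute an equivocation, so this is rejected.
    assert!(!check_equivocation_proof(proof));
}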
+ fn submit_report_equivocation_unsigned_extrinsic( + equivocation_proof: EquivocationProof, + key_owner_proof: OpaqueKeyOwnershipProof, + ) -> Option<()>; } } diff --git a/primitives/consensus/slots/Cargo.toml b/primitives/consensus/slots/Cargo.toml new file mode 100644 index 0000000000..f8435495d9 --- /dev/null +++ b/primitives/consensus/slots/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "sp-consensus-slots" +version = "0.8.0-rc4" +authors = ["Parity Technologies "] +description = "Primitives for slots-based consensus" +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-runtime = { version = "2.0.0-rc2", default-features = false, path = "../../runtime" } + +[features] +default = ["std"] +std = [ + "codec/std", + "sp-runtime/std", +] diff --git a/primitives/consensus/slots/src/lib.rs b/primitives/consensus/slots/src/lib.rs new file mode 100644 index 0000000000..f898cf9da6 --- /dev/null +++ b/primitives/consensus/slots/src/lib.rs @@ -0,0 +1,41 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Primitives for slots-based consensus engines. + +#![cfg_attr(not(feature = "std"), no_std)] + +use codec::{Decode, Encode}; + +/// A slot number. +pub type SlotNumber = u64; + +/// Represents an equivocation proof. An equivocation happens when a validator +/// produces more than one block on the same slot. The proof of equivocation +/// are the given distinct headers that were signed by the validator and which +/// include the slot number. +#[derive(Clone, Debug, Decode, Encode, PartialEq)] +pub struct EquivocationProof { + /// Returns the authority id of the equivocator. + pub offender: Id, + /// The slot number at which the equivocation happened. + pub slot_number: SlotNumber, + /// The first header involved in the equivocation. + pub first_header: Header, + /// The second header involved in the equivocation. + pub second_header: Header, +} diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index 1038c887e2..27f59f4fba 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -333,6 +333,11 @@ pub fn to_substrate_wasm_fn_return_value(value: &impl Encode) -> u64 { res } +/// The void type - it cannot exist. +// Oh rust, you crack me up... +#[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug)] +pub enum Void {} + /// Macro for creating `Maybe*` marker traits. 
/// /// Such a maybe-marker trait requires the given bound when `feature = std` and doesn't require diff --git a/primitives/session/src/lib.rs b/primitives/session/src/lib.rs index 477100687e..38a852dafd 100644 --- a/primitives/session/src/lib.rs +++ b/primitives/session/src/lib.rs @@ -64,6 +64,48 @@ pub struct MembershipProof { pub validator_count: ValidatorCount, } +/// A utility trait to get a session number. This is implemented for +/// `MembershipProof` below to fetch the session number the given session +/// membership proof is for. It is useful when we need to deal with key owner +/// proofs generically (i.e. just typing against the `KeyOwnerProofSystem` +/// trait) but still restrict their capabilities. +pub trait GetSessionNumber { + fn session(&self) -> SessionIndex; +} + +/// A utility trait to get the validator count of a given session. This is +/// implemented for `MembershipProof` below and fetches the number of validators +/// in the session the membership proof is for. It is useful when we need to +/// deal with key owner proofs generically (i.e. just typing against the +/// `KeyOwnerProofSystem` trait) but still restrict their capabilities. +pub trait GetValidatorCount { + fn validator_count(&self) -> ValidatorCount; +} + +impl GetSessionNumber for sp_core::Void { + fn session(&self) -> SessionIndex { + Default::default() + } +} + +impl GetValidatorCount for sp_core::Void { + fn validator_count(&self) -> ValidatorCount { + Default::default() + } +} + +impl GetSessionNumber for MembershipProof { + fn session(&self) -> SessionIndex { + self.session + } +} + +impl GetValidatorCount for MembershipProof { + fn validator_count(&self) -> ValidatorCount { + self.validator_count + } +} + /// Generate the initial session keys with the given seeds, at the given block and store them in /// the client's keystore. #[cfg(feature = "std")] diff --git a/primitives/staking/src/offence.rs b/primitives/staking/src/offence.rs index e6536b5709..650a17e789 100644 --- a/primitives/staking/src/offence.rs +++ b/primitives/staking/src/offence.rs @@ -117,10 +117,21 @@ impl sp_runtime::traits::Printable for OffenceError { pub trait ReportOffence> { /// Report an `offence` and reward given `reporters`. fn report_offence(reporters: Vec, offence: O) -> Result<(), OffenceError>; + + /// Returns true iff all of the given offenders have been previously reported + /// at the given time slot. This function is useful to prevent the sending of + /// duplicate offence reports. + fn is_known_offence(offenders: &[Offender], time_slot: &O::TimeSlot) -> bool; } impl> ReportOffence for () { - fn report_offence(_reporters: Vec, _offence: O) -> Result<(), OffenceError> { Ok(()) } + fn report_offence(_reporters: Vec, _offence: O) -> Result<(), OffenceError> { + Ok(()) + } + + fn is_known_offence(_offenders: &[Offender], _time_slot: &O::TimeSlot) -> bool { + true + } } /// A trait to take action on an offence. 
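Before moving on to the runtime changes, it may help to spell out how the new `is_known_offence` method above is meant to be used. A minimal sketch, not part of the diff, of a caller that checks for duplicates before reporting; the `report_once` helper and its exact bounds are illustrative only:

use sp_staking::offence::{Offence, OffenceError, ReportOffence};

/// Report `offence` unless every offender in it is already known for its time slot.
fn report_once<Reporter, Offender, O, R>(
    reporter: Reporter,
    offence: O,
) -> Result<(), OffenceError>
where
    O: Offence<Offender>,
    R: ReportOffence<Reporter, Offender, O>,
{
    // Skip the duplicate work up front: if all offenders were already reported
    // for this slot, surface the same error the reporting system would.
    if R::is_known_offence(&offence.offenders(), &offence.time_slot()) {
        return Err(OffenceError::DuplicateReport);
    }
    R::report_offence(vec![reporter], offence)
}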
diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 06054c1240..2b94828e25 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -26,7 +26,7 @@ pub mod system; use sp_std::{prelude::*, marker::PhantomData}; use codec::{Encode, Decode, Input, Error}; -use sp_core::{OpaqueMetadata, RuntimeDebug, ChangesTrieConfiguration}; +use sp_core::{offchain::KeyTypeId, ChangesTrieConfiguration, OpaqueMetadata, RuntimeDebug}; use sp_application_crypto::{ed25519, sr25519, ecdsa, RuntimeAppPublic}; use trie_db::{TrieMut, Trie}; use sp_trie::PrefixedMemoryDB; @@ -49,7 +49,11 @@ use sp_version::RuntimeVersion; pub use sp_core::hash::H256; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; -use frame_support::{impl_outer_origin, parameter_types, weights::{Weight, RuntimeDbWeight}}; +use frame_support::{ + impl_outer_origin, parameter_types, + traits::KeyOwnerProofSystem, + weights::{RuntimeDbWeight, Weight}, +}; use sp_inherents::{CheckInherentsResult, InherentData}; use cfg_if::cfg_if; @@ -462,6 +466,18 @@ impl pallet_babe::Trait for Runtime { // are manually adding the digests. normally in this situation you'd use // pallet_babe::SameAuthoritiesForever. type EpochChangeTrigger = pallet_babe::ExternalTrigger; + + type KeyOwnerProofSystem = (); + + type KeyOwnerProof = + >::Proof; + + type KeyOwnerIdentification = >::IdentificationTuple; + + type HandleEquivocation = (); } /// Adds one to the given input and returns the final result. @@ -690,6 +706,22 @@ cfg_if! { fn current_epoch_start() -> SlotNumber { >::current_epoch_start() } + + fn submit_report_equivocation_unsigned_extrinsic( + _equivocation_proof: sp_consensus_babe::EquivocationProof< + ::Header, + >, + _key_owner_proof: sp_consensus_babe::OpaqueKeyOwnershipProof, + ) -> Option<()> { + None + } + + fn generate_key_ownership_proof( + _slot_number: sp_consensus_babe::SlotNumber, + _authority_id: sp_consensus_babe::AuthorityId, + ) -> Option { + None + } } impl sp_offchain::OffchainWorkerApi for Runtime { @@ -916,6 +948,22 @@ cfg_if! { fn current_epoch_start() -> SlotNumber { >::current_epoch_start() } + + fn submit_report_equivocation_unsigned_extrinsic( + _equivocation_proof: sp_consensus_babe::EquivocationProof< + ::Header, + >, + _key_owner_proof: sp_consensus_babe::OpaqueKeyOwnershipProof, + ) -> Option<()> { + None + } + + fn generate_key_ownership_proof( + _slot_number: sp_consensus_babe::SlotNumber, + _authority_id: sp_consensus_babe::AuthorityId, + ) -> Option { + None + } } impl sp_offchain::OffchainWorkerApi for Runtime { -- GitLab From b851b755dbf338c69d3bae8c1215d8e7d8010b77 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Mon, 6 Jul 2020 11:34:24 +0200 Subject: [PATCH 125/144] Benchmarks Writer CLI (#6567) * initial mockup * add and wipe * track writes * start to add to pipeline * return all reads/writes * Log reads and writes from bench db * causes panic * Allow multiple commits * commit before ending benchmark * doesn't work??? 
* fix * Update lib.rs * switch to struct for `BenchmarkResults` * add to output * fix test * line width * @kianenigma review * Add Whitelist to DB Tracking in Benchmarks Pipeline (#6405) * hardcoded whitelist * Add whitelist to pipeline * Remove whitelist pipeline from CLI, add to runtime * clean-up unused db initialized whitelist * Add regression analysis to DB Tracking (#6475) * Add selector * add tests * debug formatter for easy formula * initial idea * use all benchmarks * broken * working without trait * Make work for multiple pallets * Fix merge issues * writer appends to file * implement () for balances weight trait * update name of trait * Weights to WeightInfo * auto trait writer * Heap pages are configurable * clean out runtime changes * more clean up * Fix string generation * Update comments * Update bin/node/runtime/src/lib.rs Co-authored-by: arkpar --- Cargo.lock | 1 + bin/node/runtime/src/lib.rs | 39 ++-- frame/benchmarking/src/analysis.rs | 10 +- frame/benchmarking/src/lib.rs | 35 +++- utils/frame/benchmarking-cli/Cargo.toml | 1 + utils/frame/benchmarking-cli/src/command.rs | 12 +- utils/frame/benchmarking-cli/src/lib.rs | 13 ++ utils/frame/benchmarking-cli/src/writer.rs | 191 ++++++++++++++++++++ 8 files changed, 266 insertions(+), 36 deletions(-) create mode 100644 utils/frame/benchmarking-cli/src/writer.rs diff --git a/Cargo.lock b/Cargo.lock index 58ea4e1077..a77c7b2a40 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1450,6 +1450,7 @@ dependencies = [ name = "frame-benchmarking-cli" version = "2.0.0-rc4" dependencies = [ + "Inflector", "frame-benchmarking", "parity-scale-codec", "sc-cli", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 70d001d62c..baf5f12b2f 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1118,7 +1118,6 @@ impl_runtime_apis! { let whitelist: Vec> = vec![ // Block Number - // frame_system::Number::::hashed_key().to_vec(), hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec(), // Total Issuance hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec(), @@ -1137,25 +1136,25 @@ impl_runtime_apis! 
{ let mut batches = Vec::::new(); let params = (&pallet, &benchmark, &lowest_range_values, &highest_range_values, &steps, repeat, &whitelist); - add_benchmark!(params, batches, b"babe", Babe); - add_benchmark!(params, batches, b"balances", Balances); - add_benchmark!(params, batches, b"collective", Council); - add_benchmark!(params, batches, b"democracy", Democracy); - add_benchmark!(params, batches, b"elections", Elections); - add_benchmark!(params, batches, b"identity", Identity); - add_benchmark!(params, batches, b"im-online", ImOnline); - add_benchmark!(params, batches, b"indices", Indices); - add_benchmark!(params, batches, b"multisig", Multisig); - add_benchmark!(params, batches, b"offences", OffencesBench::); - add_benchmark!(params, batches, b"proxy", Proxy); - add_benchmark!(params, batches, b"scheduler", Scheduler); - add_benchmark!(params, batches, b"session", SessionBench::); - add_benchmark!(params, batches, b"staking", Staking); - add_benchmark!(params, batches, b"system", SystemBench::); - add_benchmark!(params, batches, b"timestamp", Timestamp); - add_benchmark!(params, batches, b"treasury", Treasury); - add_benchmark!(params, batches, b"utility", Utility); - add_benchmark!(params, batches, b"vesting", Vesting); + add_benchmark!(params, batches, pallet_babe, Babe); + add_benchmark!(params, batches, pallet_balances, Balances); + add_benchmark!(params, batches, pallet_collective, Council); + add_benchmark!(params, batches, pallet_democracy, Democracy); + add_benchmark!(params, batches, pallet_elections_phragmen, Elections); + add_benchmark!(params, batches, pallet_identity, Identity); + add_benchmark!(params, batches, pallet_im_online, ImOnline); + add_benchmark!(params, batches, pallet_indices, Indices); + add_benchmark!(params, batches, pallet_multisig, Multisig); + add_benchmark!(params, batches, pallet_offences, OffencesBench::); + add_benchmark!(params, batches, pallet_proxy, Proxy); + add_benchmark!(params, batches, pallet_scheduler, Scheduler); + add_benchmark!(params, batches, pallet_session, SessionBench::); + add_benchmark!(params, batches, pallet_staking, Staking); + add_benchmark!(params, batches, frame_system, SystemBench::); + add_benchmark!(params, batches, pallet_timestamp, Timestamp); + add_benchmark!(params, batches, pallet_treasury, Treasury); + add_benchmark!(params, batches, pallet_utility, Utility); + add_benchmark!(params, batches, pallet_vesting, Vesting); if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } Ok(batches) diff --git a/frame/benchmarking/src/analysis.rs b/frame/benchmarking/src/analysis.rs index 621f3a2941..c17e206c34 100644 --- a/frame/benchmarking/src/analysis.rs +++ b/frame/benchmarking/src/analysis.rs @@ -22,11 +22,11 @@ use linregress::{FormulaRegressionBuilder, RegressionDataBuilder, RegressionMode use crate::BenchmarkResults; pub struct Analysis { - base: u128, - slopes: Vec, - names: Vec, - value_dists: Option, u128, u128)>>, - model: Option, + pub base: u128, + pub slopes: Vec, + pub names: Vec, + pub value_dists: Option, u128, u128)>>, + pub model: Option, } pub enum BenchmarkSelector { diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index 7a7848305a..532cb273c9 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -1158,31 +1158,46 @@ macro_rules! 
impl_benchmark_test { /// First create an object that holds in the input parameters for the benchmark: /// /// ```ignore -/// let params = (&pallet, &benchmark, &lowest_range_values, &highest_range_values, &steps, repeat); +/// let params = (&pallet, &benchmark, &lowest_range_values, &highest_range_values, &steps, repeat, &whitelist); /// ``` /// +/// The `whitelist` is a `Vec>` of storage keys that you would like to skip for DB tracking. For example: +/// +/// ```ignore +/// let whitelist: Vec> = vec![ +/// // Block Number +/// hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec(), +/// // Total Issuance +/// hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec(), +/// // Execution Phase +/// hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec(), +/// // Event Count +/// hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec(), +/// ]; +/// /// Then define a mutable local variable to hold your `BenchmarkBatch` object: /// /// ```ignore /// let mut batches = Vec::::new(); /// ```` /// -/// Then add the pallets you want to benchmark to this object, including the string -/// you want to use target a particular pallet: +/// Then add the pallets you want to benchmark to this object, using their crate name and generated +/// module struct: /// /// ```ignore -/// add_benchmark!(params, batches, b"balances", Balances); -/// add_benchmark!(params, batches, b"identity", Identity); -/// add_benchmark!(params, batches, b"session", SessionBench::); +/// add_benchmark!(params, batches, pallet_balances, Balances); +/// add_benchmark!(params, batches, pallet_session, SessionBench::); +/// add_benchmark!(params, batches, frame_system, SystemBench::); /// ... /// ``` /// /// At the end of `dispatch_benchmark`, you should return this batches object. #[macro_export] macro_rules! add_benchmark { - ( $params:ident, $batches:ident, $name:literal, $( $location:tt )* ) => ( + ( $params:ident, $batches:ident, $name:ident, $( $location:tt )* ) => ( + let name_string = stringify!($name).as_bytes(); let (pallet, benchmark, lowest_range_values, highest_range_values, steps, repeat, whitelist) = $params; - if &pallet[..] == &$name[..] || &pallet[..] == &b"*"[..] { + if &pallet[..] == &name_string[..] || &pallet[..] == &b"*"[..] { if &pallet[..] == &b"*"[..] || &benchmark[..] == &b"*"[..] { for benchmark in $( $location )*::benchmarks().into_iter() { $batches.push($crate::BenchmarkBatch { @@ -1194,7 +1209,7 @@ macro_rules! add_benchmark { repeat, whitelist, )?, - pallet: $name.to_vec(), + pallet: name_string.to_vec(), benchmark: benchmark.to_vec(), }); } @@ -1208,7 +1223,7 @@ macro_rules! 
add_benchmark { repeat, whitelist, )?, - pallet: $name.to_vec(), + pallet: name_string.to_vec(), benchmark: benchmark.clone(), }); } diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index 003b4d9c05..db620c86ca 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -12,6 +12,7 @@ description = "CLI for benchmarking FRAME" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +Inflector = "0.11.4" frame-benchmarking = { version = "2.0.0-rc4", path = "../../../frame/benchmarking" } sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } sc-service = { version = "0.8.0-rc4", default-features = false, path = "../../../client/service" } diff --git a/utils/frame/benchmarking-cli/src/command.rs b/utils/frame/benchmarking-cli/src/command.rs index 7f55672885..09b246e476 100644 --- a/utils/frame/benchmarking-cli/src/command.rs +++ b/utils/frame/benchmarking-cli/src/command.rs @@ -55,7 +55,7 @@ impl BenchmarkCmd { let state = BenchmarkingState::::new(genesis_storage, cache_size)?; let executor = NativeExecutor::::new( wasm_method, - None, // heap pages + self.heap_pages, 2, // The runtime instances cache size. ); @@ -89,6 +89,16 @@ impl BenchmarkCmd { let results = , String> as Decode>::decode(&mut &result[..]) .map_err(|e| format!("Failed to decode benchmark results: {:?}", e))?; + if self.output { + if self.weight_trait { + let mut file = crate::writer::open_file("traits.rs")?; + crate::writer::write_trait(&mut file, results.clone())?; + } else { + let mut file = crate::writer::open_file("benchmarks.rs")?; + crate::writer::write_results(&mut file, results.clone())?; + } + } + match results { Ok(batches) => for batch in batches.into_iter() { // Print benchmark metadata diff --git a/utils/frame/benchmarking-cli/src/lib.rs b/utils/frame/benchmarking-cli/src/lib.rs index 149b971577..8a53c9fd8b 100644 --- a/utils/frame/benchmarking-cli/src/lib.rs +++ b/utils/frame/benchmarking-cli/src/lib.rs @@ -16,6 +16,7 @@ // limitations under the License. mod command; +mod writer; use sc_cli::{ExecutionStrategy, WasmExecutionMethod}; use std::fmt::Debug; @@ -59,6 +60,18 @@ pub struct BenchmarkCmd { #[structopt(long)] pub no_min_squares: bool, + /// Output the benchmarks to a Rust file. + #[structopt(long)] + pub output: bool, + + /// Output the trait definition to a Rust file. + #[structopt(long)] + pub weight_trait: bool, + + /// Set the heap pages while running benchmarks. + #[structopt(long)] + pub heap_pages: Option, + #[allow(missing_docs)] #[structopt(flatten)] pub shared_params: sc_cli::SharedParams, diff --git a/utils/frame/benchmarking-cli/src/writer.rs b/utils/frame/benchmarking-cli/src/writer.rs new file mode 100644 index 0000000000..bd411b536a --- /dev/null +++ b/utils/frame/benchmarking-cli/src/writer.rs @@ -0,0 +1,191 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Outputs benchmark results to Rust files that can be ingested by the runtime. + +use std::fs::{File, OpenOptions}; +use std::io::prelude::*; +use frame_benchmarking::{BenchmarkBatch, BenchmarkSelector, Analysis}; +use inflector::Inflector; + +pub fn open_file(path: &str) -> Result { + OpenOptions::new() + .create(true) + .write(true) + .append(true) + .open(path) +} + +pub fn write_trait(file: &mut File, batches: Result, String>) -> Result<(), std::io::Error> { + let batches = batches.unwrap(); + + let mut current_pallet = Vec::::new(); + + batches.iter().for_each(|batch| { + + let pallet_string = String::from_utf8(batch.pallet.clone()).unwrap(); + let benchmark_string = String::from_utf8(batch.benchmark.clone()).unwrap(); + + // only create new trait definitions when we go to a new pallet + if batch.pallet != current_pallet { + if !current_pallet.is_empty() { + // close trait + write!(file, "}}\n").unwrap(); + } + + // trait wrapper + write!(file, "// {}\n", pallet_string).unwrap(); + write!(file, "pub trait WeightInfo {{\n").unwrap(); + + current_pallet = batch.pallet.clone() + } + + // function name + write!(file, " fn {}(", benchmark_string).unwrap(); + + // params + let components = &batch.results[0].components; + for component in components { + write!(file, "{:?}: u32, ", component.0).unwrap(); + } + // return value + write!(file, ") -> Weight;\n").unwrap(); + }); + + // final close trait + write!(file, "}}\n").unwrap(); + + // Reset + current_pallet = Vec::::new(); + + batches.iter().for_each(|batch| { + + let benchmark_string = String::from_utf8(batch.benchmark.clone()).unwrap(); + + // only create new trait definitions when we go to a new pallet + if batch.pallet != current_pallet { + if !current_pallet.is_empty() { + // close trait + write!(file, "}}\n").unwrap(); + } + + // impl trait + write!(file, "\n").unwrap(); + write!(file, "impl WeightInfo for () {{\n").unwrap(); + + current_pallet = batch.pallet.clone() + } + + // function name + write!(file, " fn {}(", benchmark_string).unwrap(); + + // params + let components = &batch.results[0].components; + for component in components { + write!(file, "_{:?}: u32, ", component.0).unwrap(); + } + // return value + write!(file, ") -> Weight {{ 1_000_000_000 }}\n").unwrap(); + }); + + // final close trait + write!(file, "}}\n").unwrap(); + + Ok(()) +} + +pub fn write_results(file: &mut File, batches: Result, String>) -> Result<(), std::io::Error> { + let batches = batches.unwrap(); + + let mut current_pallet = Vec::::new(); + + // general imports + write!(file, "use frame_support::weights::{{Weight, constants::RocksDbWeight as DbWeight}};\n").unwrap(); + + batches.iter().for_each(|batch| { + + let pallet_string = String::from_utf8(batch.pallet.clone()).unwrap(); + let benchmark_string = String::from_utf8(batch.benchmark.clone()).unwrap(); + + // only create new trait definitions when we go to a new pallet + if batch.pallet != current_pallet { + if !current_pallet.is_empty() { + // close trait + write!(file, "}}\n").unwrap(); + } + + // struct for weights + write!(file, "pub struct WeightFor{};\n", + pallet_string.to_pascal_case(), + ).unwrap(); + + // trait wrapper + write!(file, "impl {}::WeightInfo for WeightFor{} {{\n", + pallet_string, + pallet_string.to_pascal_case(), + ).unwrap(); + + current_pallet = batch.pallet.clone() + } + + // function name + write!(file, " fn {}(", benchmark_string).unwrap(); + + // params + let 
components = &batch.results[0].components; + for component in components { + write!(file, "{:?}: u32, ", component.0).unwrap(); + } + // return value + write!(file, ") -> Weight {{\n").unwrap(); + + let extrinsic_time = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::ExtrinsicTime).unwrap(); + write!(file, " ({} as Weight)\n", extrinsic_time.base.saturating_mul(1000)).unwrap(); + extrinsic_time.slopes.iter().zip(extrinsic_time.names.iter()).for_each(|(slope, name)| { + write!(file, " .saturating_add(({} as Weight).saturating_mul({} as Weight))\n", + slope.saturating_mul(1000), + name, + ).unwrap(); + }); + + let reads = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Reads).unwrap(); + write!(file, " .saturating_add(DbWeight::get().reads({} as Weight))\n", reads.base).unwrap(); + reads.slopes.iter().zip(reads.names.iter()).for_each(|(slope, name)| { + write!(file, " .saturating_add(DbWeight::get().reads(({} as Weight).saturating_mul({} as Weight)))\n", + slope, + name, + ).unwrap(); + }); + + let writes = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Writes).unwrap(); + write!(file, " .saturating_add(DbWeight::get().writes({} as Weight))\n", writes.base).unwrap(); + writes.slopes.iter().zip(writes.names.iter()).for_each(|(slope, name)| { + write!(file, " .saturating_add(DbWeight::get().writes(({} as Weight).saturating_mul({} as Weight)))\n", + slope, + name, + ).unwrap(); + }); + + // close function + write!(file, " }}\n").unwrap(); + }); + + // final close trait + write!(file, "}}\n").unwrap(); + + Ok(()) +} -- GitLab From c5368a1f1e1ee285ce40dc0d9198abbf3425cca9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 6 Jul 2020 12:29:17 +0200 Subject: [PATCH 126/144] Don't require module name in inherents (#6576) * Start * Cleanup `construct_runtime!` * Add tests * Fix after merge * Update the docs --- Cargo.lock | 31 +++ bin/node-template/runtime/src/lib.rs | 2 +- bin/node/runtime/src/lib.rs | 2 +- frame/example/src/lib.rs | 2 +- frame/grandpa/src/equivocation.rs | 2 +- frame/proxy/src/lib.rs | 2 +- frame/staking/src/lib.rs | 2 +- frame/support/Cargo.toml | 1 + .../procedural/src/construct_runtime/mod.rs | 21 +- .../procedural/src/construct_runtime/parse.rs | 30 --- frame/support/procedural/src/lib.rs | 6 +- frame/support/src/dispatch.rs | 136 ++++++------- frame/support/src/inherent.rs | 184 +++++++++++++++--- .../params_in_invalid_module.rs | 14 -- .../params_in_invalid_module.stderr | 5 - 15 files changed, 279 insertions(+), 161 deletions(-) delete mode 100644 frame/support/test/tests/construct_runtime_ui/params_in_invalid_module.rs delete mode 100644 frame/support/test/tests/construct_runtime_ui/params_in_invalid_module.stderr diff --git a/Cargo.lock b/Cargo.lock index a77c7b2a40..c64da43d44 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1247,6 +1247,33 @@ dependencies = [ "libc", ] +[[package]] +name = "ethbloom" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71a6567e6fd35589fea0c63b94b4cf2e55573e413901bdbe60ab15cf0e25e5df" +dependencies = [ + "crunchy", + "fixed-hash", + "impl-rlp", + "impl-serde 0.3.0", + "tiny-keccak 2.0.2", +] + +[[package]] +name = "ethereum-types" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "473aecff686bd8e7b9db0165cbbb53562376b39bf35b427f0c60446a9e1634b0" +dependencies = [ + "ethbloom", + "fixed-hash", + "impl-rlp", + "impl-serde 0.3.0", + "primitive-types", + "uint", +] + [[package]] name = "evm" 
version = "0.16.1" @@ -1506,6 +1533,7 @@ dependencies = [ "log", "once_cell", "parity-scale-codec", + "parity-util-mem", "paste", "pretty_assertions", "serde", @@ -4915,7 +4943,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c6e2583649a3ca84894d1d71da249abcfda54d5aca24733d72ca10d0f02361c" dependencies = [ "cfg-if", + "ethereum-types", + "hashbrown", "impl-trait-for-tuples", + "lru", "parity-util-mem-derive", "parking_lot 0.10.2", "primitive-types", diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index c58c478d92..30571b7e0b 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -268,7 +268,7 @@ construct_runtime!( System: system::{Module, Call, Config, Storage, Event}, RandomnessCollectiveFlip: randomness_collective_flip::{Module, Call, Storage}, Timestamp: timestamp::{Module, Call, Storage, Inherent}, - Aura: aura::{Module, Config, Inherent(Timestamp)}, + Aura: aura::{Module, Config, Inherent}, Grandpa: grandpa::{Module, Call, Storage, Config, Event}, Balances: balances::{Module, Call, Storage, Config, Event}, TransactionPayment: transaction_payment::{Module, Storage}, diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index baf5f12b2f..07b0a82e48 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -823,7 +823,7 @@ construct_runtime!( { System: frame_system::{Module, Call, Config, Storage, Event}, Utility: pallet_utility::{Module, Call, Event}, - Babe: pallet_babe::{Module, Call, Storage, Config, Inherent(Timestamp), ValidateUnsigned}, + Babe: pallet_babe::{Module, Call, Storage, Config, Inherent, ValidateUnsigned}, Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, Authorship: pallet_authorship::{Module, Call, Storage, Inherent}, Indices: pallet_indices::{Module, Call, Storage, Config, Event}, diff --git a/frame/example/src/lib.rs b/frame/example/src/lib.rs index c8799cb62c..00e0d78d9b 100644 --- a/frame/example/src/lib.rs +++ b/frame/example/src/lib.rs @@ -611,7 +611,7 @@ impl sp_std::fmt::Debug for WatchDummy { impl SignedExtension for WatchDummy where - ::Call: IsSubType, T>, + ::Call: IsSubType>, { const IDENTIFIER: &'static str = "WatchDummy"; type AccountId = T::AccountId; diff --git a/frame/grandpa/src/equivocation.rs b/frame/grandpa/src/equivocation.rs index d028f3c174..9ac1c12128 100644 --- a/frame/grandpa/src/equivocation.rs +++ b/frame/grandpa/src/equivocation.rs @@ -100,7 +100,7 @@ impl From for TransactionValidityError { impl SignedExtension for ValidateEquivocationReport where - ::Call: IsSubType, T>, + ::Call: IsSubType>, { const IDENTIFIER: &'static str = "ValidateEquivocationReport"; type AccountId = T::AccountId; diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index bd56ad3f0f..fb72fa8953 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -60,7 +60,7 @@ pub trait Trait: frame_system::Trait { /// The overarching call type. type Call: Parameter + Dispatchable - + GetDispatchInfo + From> + IsSubType, Self> + + GetDispatchInfo + From> + IsSubType> + IsType<::Call>; /// The currency mechanism. diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 641761c7d0..a59214410d 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -894,7 +894,7 @@ pub trait Trait: frame_system::Trait + SendTransactionTypes> { type ElectionLookahead: Get; /// The overarching call type. 
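For context on the `IsSubType` change that follows (one type parameter instead of two): after this patch an outer call type simply implements `IsSubType<InnerCall>` for each pallet call it wraps, exactly like the hand-written impls added to the inherent tests further down. A stand-alone sketch with made-up enums, not part of the diff:

use frame_support::dispatch::IsSubType;

#[derive(Clone, PartialEq, Eq, Debug)]
enum BalancesCall {
    Transfer,
}

#[derive(Clone, PartialEq, Eq, Debug)]
enum OuterCall {
    Balances(BalancesCall),
    Other,
}

impl IsSubType<BalancesCall> for OuterCall {
    // Expose the wrapped pallet call, if this is the matching variant.
    fn is_sub_type(&self) -> Option<&BalancesCall> {
        match self {
            Self::Balances(call) => Some(call),
            _ => None,
        }
    }
}

fn main() {
    let call = OuterCall::Balances(BalancesCall::Transfer);
    assert_eq!(call.is_sub_type(), Some(&BalancesCall::Transfer));
    assert_eq!(IsSubType::<BalancesCall>::is_sub_type(&OuterCall::Other), None);
}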
- type Call: Dispatchable + From> + IsSubType, Self> + Clone; + type Call: Dispatchable + From> + IsSubType> + Clone; /// Maximum number of balancing iterations to run in the offchain submission. /// diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index 596faf2639..2cfe1619a9 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -34,6 +34,7 @@ smallvec = "1.4.0" [dev-dependencies] pretty_assertions = "0.6.1" frame-system = { version = "2.0.0-rc4", path = "../system" } +parity-util-mem = { version = "0.6.1", features = ["primitive-types"] } [features] default = ["std"] diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index cac7549062..569413cbbc 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -87,7 +87,12 @@ fn construct_runtime_parsed(definition: RuntimeDefinition) -> Result( ) -> TokenStream2 { let modules_tokens = module_declarations.filter_map(|module_declaration| { let maybe_config_part = module_declaration.find_part("Inherent"); - maybe_config_part.map(|config_part| { - let arg = config_part - .args - .as_ref() - .and_then(|parens| parens.content.inner.iter().next()) - .unwrap_or(&module_declaration.name); + maybe_config_part.map(|_| { let name = &module_declaration.name; - quote!(#name : #arg,) + quote!(#name,) }) }); quote!( #scrate::impl_outer_inherent!( - impl Inherents where Block = #block, UncheckedExtrinsic = #unchecked_extrinsic { + impl Inherents where + Block = #block, + UncheckedExtrinsic = #unchecked_extrinsic + { #(#modules_tokens)* } ); diff --git a/frame/support/procedural/src/construct_runtime/parse.rs b/frame/support/procedural/src/construct_runtime/parse.rs index 92a71687cc..c8481480ba 100644 --- a/frame/support/procedural/src/construct_runtime/parse.rs +++ b/frame/support/procedural/src/construct_runtime/parse.rs @@ -279,18 +279,6 @@ impl ModulePartKeyword { Ident::new(self.name(), self.span()) } - /// Returns `true` if this module part allows to have an argument. - /// - /// For example `Inherent(Timestamp)`. - fn allows_arg(&self) -> bool { - Self::all_allow_arg().iter().any(|n| *n == self.name()) - } - - /// Returns the names of all module parts that allow to have an argument. - fn all_allow_arg() -> &'static [&'static str] { - &["Inherent"] - } - /// Returns `true` if this module part is allowed to have generic arguments. fn allows_generic(&self) -> bool { Self::all_generic_arg().iter().any(|n| *n == self.name()) @@ -321,7 +309,6 @@ impl Spanned for ModulePartKeyword { pub struct ModulePart { pub keyword: ModulePartKeyword, pub generics: syn::Generics, - pub args: Option>>, } impl Parse for ModulePart { @@ -339,27 +326,10 @@ impl Parse for ModulePart { ); return Err(syn::Error::new(keyword.span(), msg)); } - let args = if input.peek(token::Paren) { - if !keyword.allows_arg() { - let syn::group::Parens { token: parens, .. } = syn::group::parse_parens(input)?; - let valid_names = ModulePart::format_names(ModulePartKeyword::all_allow_arg()); - let msg = format!( - "`{}` is not allowed to have arguments in parens. \ - Only the following modules are allowed to have arguments in parens: {}.", - keyword.name(), - valid_names, - ); - return Err(syn::Error::new(parens.span, msg)); - } - Some(input.parse()?) 
- } else { - None - }; Ok(Self { keyword, generics, - args, }) } } diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index df5665ec48..57c6080a90 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -277,10 +277,8 @@ pub fn decl_storage(input: TokenStream) -> TokenStream { /// - `Event` or `Event` (if the event is generic) /// - `Origin` or `Origin` (if the origin is generic) /// - `Config` or `Config` (if the config is generic) -/// - `Inherent ( $(CALL),* )` - If the module provides/can check inherents. The optional parameter -/// is for modules that use a `Call` from a different module as -/// inherent. -/// - `ValidateUnsigned` - If the module validates unsigned extrinsics. +/// - `Inherent` - If the module provides/can check inherents. +/// - `ValidateUnsigned` - If the module validates unsigned extrinsics. /// /// # Note /// diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index 56aaed0836..dc305357e7 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -54,7 +54,7 @@ pub trait Callable { // dirty hack to work around serde_derive issue // https://github.com/rust-lang/rust/issues/51331 -pub type CallableCallFor = >::Call; +pub type CallableCallFor = >::Call; /// A type that can be used as a parameter in a dispatchable function. /// @@ -1848,8 +1848,8 @@ macro_rules! decl_module { } } -pub trait IsSubType, R> { - fn is_sub_type(&self) -> Option<&CallableCallFor>; +pub trait IsSubType { + fn is_sub_type(&self) -> Option<&T>; } /// Implement a meta-dispatch module to dispatch to other dispatchers. @@ -1948,7 +1948,7 @@ macro_rules! impl_outer_dispatch { } $( - impl $crate::dispatch::IsSubType<$camelcase, $runtime> for $call_type { + impl $crate::dispatch::IsSubType<$crate::dispatch::CallableCallFor<$camelcase, $runtime>> for $call_type { #[allow(unreachable_patterns)] fn is_sub_type(&self) -> Option<&$crate::dispatch::CallableCallFor<$camelcase, $runtime>> { match *self { @@ -2372,72 +2372,72 @@ mod tests { } const EXPECTED_METADATA: &'static [FunctionMetadata] = &[ - FunctionMetadata { - name: DecodeDifferent::Encode("aux_0"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[ - " Hi, this is a comment." 
- ]) - }, - FunctionMetadata { - name: DecodeDifferent::Encode("aux_1"), - arguments: DecodeDifferent::Encode(&[ - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data"), - ty: DecodeDifferent::Encode("Compact") - } - ]), - documentation: DecodeDifferent::Encode(&[]), - }, - FunctionMetadata { - name: DecodeDifferent::Encode("aux_2"), - arguments: DecodeDifferent::Encode(&[ - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data"), - ty: DecodeDifferent::Encode("i32"), - }, - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data2"), - ty: DecodeDifferent::Encode("String"), - } - ]), - documentation: DecodeDifferent::Encode(&[]), - }, - FunctionMetadata { - name: DecodeDifferent::Encode("aux_3"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }, - FunctionMetadata { - name: DecodeDifferent::Encode("aux_4"), - arguments: DecodeDifferent::Encode(&[ - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data"), - ty: DecodeDifferent::Encode("i32"), - } - ]), - documentation: DecodeDifferent::Encode(&[]), - }, - FunctionMetadata { - name: DecodeDifferent::Encode("aux_5"), - arguments: DecodeDifferent::Encode(&[ - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data"), - ty: DecodeDifferent::Encode("i32"), - }, - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data2"), - ty: DecodeDifferent::Encode("Compact") - } - ]), - documentation: DecodeDifferent::Encode(&[]), + FunctionMetadata { + name: DecodeDifferent::Encode("aux_0"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[ + " Hi, this is a comment." + ]) + }, + FunctionMetadata { + name: DecodeDifferent::Encode("aux_1"), + arguments: DecodeDifferent::Encode(&[ + FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data"), + ty: DecodeDifferent::Encode("Compact") + } + ]), + documentation: DecodeDifferent::Encode(&[]), + }, + FunctionMetadata { + name: DecodeDifferent::Encode("aux_2"), + arguments: DecodeDifferent::Encode(&[ + FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data"), + ty: DecodeDifferent::Encode("i32"), }, - FunctionMetadata { - name: DecodeDifferent::Encode("operational"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), + FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data2"), + ty: DecodeDifferent::Encode("String"), + } + ]), + documentation: DecodeDifferent::Encode(&[]), + }, + FunctionMetadata { + name: DecodeDifferent::Encode("aux_3"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }, + FunctionMetadata { + name: DecodeDifferent::Encode("aux_4"), + arguments: DecodeDifferent::Encode(&[ + FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data"), + ty: DecodeDifferent::Encode("i32"), + } + ]), + documentation: DecodeDifferent::Encode(&[]), + }, + FunctionMetadata { + name: DecodeDifferent::Encode("aux_5"), + arguments: DecodeDifferent::Encode(&[ + FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data"), + ty: DecodeDifferent::Encode("i32"), }, - ]; + FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data2"), + ty: DecodeDifferent::Encode("Compact") + } + ]), + documentation: DecodeDifferent::Encode(&[]), + }, + FunctionMetadata { + name: DecodeDifferent::Encode("operational"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }, + ]; pub struct TraitImpl {} diff --git 
a/frame/support/src/inherent.rs b/frame/support/src/inherent.rs index 8bc99db9e2..e9b0c22692 100644 --- a/frame/support/src/inherent.rs +++ b/frame/support/src/inherent.rs @@ -31,19 +31,20 @@ pub use sp_inherents::{InherentData, ProvideInherent, CheckInherentsResult, IsFa /// ```nocompile /// impl_outer_inherent! { /// impl Inherents where Block = Block, UncheckedExtrinsic = UncheckedExtrinsic { -/// timestamp: Timestamp, -/// consensus: Consensus, -/// /// Aura module using the `Timestamp` call. -/// aura: Timestamp, +/// timestamp, +/// consensus, +/// aura, /// } /// } /// ``` #[macro_export] macro_rules! impl_outer_inherent { ( - impl Inherents where Block = $block:ident, UncheckedExtrinsic = $uncheckedextrinsic:ident + impl Inherents where + Block = $block:ident, + UncheckedExtrinsic = $uncheckedextrinsic:ident { - $( $module:ident: $call:ident, )* + $( $module:ident, )* } ) => { trait InherentDataExt { @@ -55,15 +56,14 @@ macro_rules! impl_outer_inherent { impl InherentDataExt for $crate::inherent::InherentData { fn create_extrinsics(&self) -> $crate::inherent::Vec<<$block as $crate::inherent::BlockT>::Extrinsic> { - use $crate::inherent::ProvideInherent; - use $crate::inherent::Extrinsic; + use $crate::inherent::{ProvideInherent, Extrinsic}; let mut inherents = Vec::new(); $( if let Some(inherent) = $module::create_inherent(self) { inherents.push($uncheckedextrinsic::new( - Call::$call(inherent), + inherent.into(), None, ).expect("Runtime UncheckedExtrinsic is not Opaque, so it has to return `Some`; qed")); } @@ -74,6 +74,7 @@ macro_rules! impl_outer_inherent { fn check_extrinsics(&self, block: &$block) -> $crate::inherent::CheckInherentsResult { use $crate::inherent::{ProvideInherent, IsFatalError}; + use $crate::dispatch::IsSubType; let mut result = $crate::inherent::CheckInherentsResult::new(); for xt in block.extrinsics() { @@ -81,21 +82,18 @@ macro_rules! impl_outer_inherent { break } - $( - match xt.function { - Call::$call(ref call) => { - if let Err(e) = $module::check_inherent(call, self) { - result.put_error( - $module::INHERENT_IDENTIFIER, &e - ).expect("There is only one fatal error; qed"); - if e.is_fatal_error() { - return result - } + $({ + if let Some(call) = IsSubType::<_>::is_sub_type(&xt.function) { + if let Err(e) = $module::check_inherent(call, self) { + result.put_error( + $module::INHERENT_IDENTIFIER, &e + ).expect("There is only one fatal error; qed"); + if e.is_fatal_error() { + return result } } - _ => {}, } - )* + })* } $( @@ -106,10 +104,10 @@ macro_rules! impl_outer_inherent { return false } - match xt.function { - Call::$call(_) => true, - _ => false, - } + let call: Option<&<$module as ProvideInherent>::Call> = + xt.function.is_sub_type(); + + call.is_some() }); if !found { @@ -138,3 +136,139 @@ macro_rules! 
impl_outer_inherent { } }; } + +#[cfg(test)] +mod tests { + use super::*; + use sp_runtime::{traits, testing::{Header, self}}; + use crate::dispatch::IsSubType; + + #[derive(codec::Encode, codec::Decode, Clone, PartialEq, Eq, Debug, serde::Serialize)] + enum Call { + Test(CallTest), + Test2(CallTest2), + } + + impl From for Call { + fn from(call: CallTest) -> Self { + Self::Test(call) + } + } + + impl From for Call { + fn from(call: CallTest2) -> Self { + Self::Test2(call) + } + } + + impl IsSubType for Call { + fn is_sub_type(&self) -> Option<&CallTest> { + match self { + Self::Test(test) => Some(test), + _ => None, + } + } + } + + impl IsSubType for Call { + fn is_sub_type(&self) -> Option<&CallTest2> { + match self { + Self::Test2(test) => Some(test), + _ => None, + } + } + } + + #[derive(codec::Encode, codec::Decode, Clone, PartialEq, Eq, Debug, serde::Serialize)] + enum CallTest { + Something, + SomethingElse, + } + + #[derive(codec::Encode, codec::Decode, Clone, PartialEq, Eq, Debug, serde::Serialize)] + enum CallTest2 { + Something, + } + + struct ModuleTest; + impl ProvideInherent for ModuleTest { + type Call = CallTest; + type Error = sp_inherents::MakeFatalError<()>; + const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = *b"test1235"; + + fn create_inherent(_: &InherentData) -> Option { + Some(CallTest::Something) + } + + fn check_inherent(call: &Self::Call, _: &InherentData) -> Result<(), Self::Error> { + match call { + CallTest::Something => Ok(()), + CallTest::SomethingElse => Err(().into()), + } + } + } + + struct ModuleTest2; + impl ProvideInherent for ModuleTest2 { + type Call = CallTest2; + type Error = sp_inherents::MakeFatalError<()>; + const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = *b"test1234"; + + fn create_inherent(_: &InherentData) -> Option { + Some(CallTest2::Something) + } + } + + type Block = testing::Block; + + #[derive(codec::Encode, codec::Decode, Clone, PartialEq, Eq, Debug, serde::Serialize)] + struct Extrinsic { + function: Call, + } + + impl traits::Extrinsic for Extrinsic { + type Call = Call; + type SignaturePayload = (); + + fn new(function: Call, _: Option<()>) -> Option { + Some(Self { function }) + } + } + + parity_util_mem::malloc_size_of_is_0!(Extrinsic); + + impl_outer_inherent! { + impl Inherents where Block = Block, UncheckedExtrinsic = Extrinsic { + ModuleTest, + ModuleTest2, + } + } + + #[test] + fn create_inherents_works() { + let inherents = InherentData::new().create_extrinsics(); + + let expected = vec![ + Extrinsic { function: Call::Test(CallTest::Something) }, + Extrinsic { function: Call::Test2(CallTest2::Something) }, + ]; + assert_eq!(expected, inherents); + } + + #[test] + fn check_inherents_works() { + let block = Block::new( + Header::new_from_number(1), + vec![Extrinsic { function: Call::Test(CallTest::Something) }], + ); + + assert!(InherentData::new().check_extrinsics(&block).ok()); + + let block = Block::new( + Header::new_from_number(1), + vec![Extrinsic { function: Call::Test(CallTest::SomethingElse) }], + ); + + assert!(InherentData::new().check_extrinsics(&block).fatal_error()); + } +} diff --git a/frame/support/test/tests/construct_runtime_ui/params_in_invalid_module.rs b/frame/support/test/tests/construct_runtime_ui/params_in_invalid_module.rs deleted file mode 100644 index 9c752a2f39..0000000000 --- a/frame/support/test/tests/construct_runtime_ui/params_in_invalid_module.rs +++ /dev/null @@ -1,14 +0,0 @@ -use frame_support::construct_runtime; - -construct_runtime! 
{ - pub enum Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic - { - System: system::{Module}, - Balance: balances::::{Call(toto), Origin}, - } -} - -fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/params_in_invalid_module.stderr b/frame/support/test/tests/construct_runtime_ui/params_in_invalid_module.stderr deleted file mode 100644 index 58f35720e3..0000000000 --- a/frame/support/test/tests/construct_runtime_ui/params_in_invalid_module.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: `Call` is not allowed to have arguments in parens. Only the following modules are allowed to have arguments in parens: `Inherent`. - --> $DIR/params_in_invalid_module.rs:10:40 - | -10 | Balance: balances::::{Call(toto), Origin}, - | ^^^^^^ -- GitLab From 0ab1c4f945c8cfb8687c34c926ffd1761d124eba Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Mon, 6 Jul 2020 12:51:01 +0200 Subject: [PATCH 127/144] Derive `RuntimeDebug` for `Runtime` (#6581) --- frame/support/procedural/src/construct_runtime/mod.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index 569413cbbc..3aca3f8de8 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -99,8 +99,7 @@ fn construct_runtime_parsed(definition: RuntimeDefinition) -> Result Date: Tue, 7 Jul 2020 11:41:55 +0200 Subject: [PATCH 128/144] Fix UI tests (#6589) --- Cargo.lock | 44 ++++++++++++++-------------- frame/support/Cargo.toml | 2 +- frame/transaction-payment/Cargo.toml | 2 +- primitives/state-machine/Cargo.toml | 2 +- 4 files changed, 25 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c64da43d44..8862553173 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -779,7 +779,7 @@ dependencies = [ "log", "regalloc", "serde", - "smallvec 1.4.0", + "smallvec 1.4.1", "target-lexicon", "thiserror", ] @@ -817,7 +817,7 @@ checksum = "e45f82e3446dd1ebb8c2c2f6a6b0e6cd6cd52965c7e5f7b1b35e9a9ace31ccde" dependencies = [ "cranelift-codegen", "log", - "smallvec 1.4.0", + "smallvec 1.4.1", "target-lexicon", ] @@ -1537,7 +1537,7 @@ dependencies = [ "paste", "pretty_assertions", "serde", - "smallvec 1.4.0", + "smallvec 1.4.1", "sp-arithmetic", "sp-core", "sp-inherents", @@ -1951,7 +1951,7 @@ dependencies = [ "byteorder", "fallible-iterator", "indexmap", - "smallvec 1.4.0", + "smallvec 1.4.1", "stable_deref_trait", ] @@ -2595,7 +2595,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e763b2a9b500ba47948061d1e8bc3b5f03a8a1f067dbcf822a4d2c84d2b54a3a" dependencies = [ "parity-util-mem", - "smallvec 1.4.0", + "smallvec 1.4.1", ] [[package]] @@ -2624,7 +2624,7 @@ dependencies = [ "parking_lot 0.10.2", "regex", "rocksdb", - "smallvec 1.4.0", + "smallvec 1.4.1", ] [[package]] @@ -2724,7 +2724,7 @@ dependencies = [ "parity-multiaddr 0.9.1", "parking_lot 0.10.2", "pin-project", - "smallvec 1.4.0", + "smallvec 1.4.1", "wasm-timer", ] @@ -2755,7 +2755,7 @@ dependencies = [ "ring", "rw-stream-sink", "sha2", - "smallvec 1.4.0", + "smallvec 1.4.1", "thiserror", "unsigned-varint 0.4.0", "void", @@ -2795,7 +2795,7 @@ dependencies = [ "log", "prost", "prost-build", - "smallvec 1.4.0", + "smallvec 1.4.1", "wasm-timer", ] @@ -2819,7 +2819,7 @@ dependencies = [ "prost-build", "rand 0.7.3", "sha2", - "smallvec 1.4.0", + "smallvec 1.4.1", "uint", "unsigned-varint 0.4.0", "void", @@ -2843,7 +2843,7 @@ 
dependencies = [ "log", "net2", "rand 0.7.3", - "smallvec 1.4.0", + "smallvec 1.4.1", "void", "wasm-timer", ] @@ -2940,7 +2940,7 @@ dependencies = [ "libp2p-core", "log", "rand 0.7.3", - "smallvec 1.4.0", + "smallvec 1.4.1", "void", "wasm-timer", ] @@ -3317,7 +3317,7 @@ dependencies = [ "futures 0.3.5", "log", "pin-project", - "smallvec 1.4.0", + "smallvec 1.4.1", "unsigned-varint 0.4.0", ] @@ -4731,7 +4731,7 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "parity-scale-codec", "serde", - "smallvec 1.4.0", + "smallvec 1.4.1", "sp-core", "sp-io", "sp-runtime", @@ -4950,7 +4950,7 @@ dependencies = [ "parity-util-mem-derive", "parking_lot 0.10.2", "primitive-types", - "smallvec 1.4.0", + "smallvec 1.4.1", "winapi 0.3.8", ] @@ -5026,7 +5026,7 @@ dependencies = [ "cloudabi", "libc", "redox_syscall", - "smallvec 1.4.0", + "smallvec 1.4.1", "winapi 0.3.8", ] @@ -5736,7 +5736,7 @@ checksum = "b27b256b41986ac5141b37b8bbba85d314fbf546c182eb255af6720e07e4f804" dependencies = [ "log", "rustc-hash", - "smallvec 1.4.0", + "smallvec 1.4.1", ] [[package]] @@ -7354,9 +7354,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7cb5678e1615754284ec264d9bb5b4c27d2018577fd90ac0ceb578591ed5ee4" +checksum = "3757cb9d89161a2f24e1cf78efa0c1fcff485d18e3f55e0aa3480824ddaa0f3f" [[package]] name = "snow" @@ -7998,7 +7998,7 @@ dependencies = [ "parking_lot 0.10.2", "pretty_assertions", "rand 0.7.3", - "smallvec 1.4.0", + "smallvec 1.4.1", "sp-core", "sp-externalities", "sp-panic-handler", @@ -9175,7 +9175,7 @@ dependencies = [ "hashbrown", "log", "rustc-hex", - "smallvec 1.4.0", + "smallvec 1.4.1", ] [[package]] @@ -9279,7 +9279,7 @@ version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5479532badd04e128284890390c1e876ef7a993d0570b3597ae43dfa1d59afa4" dependencies = [ - "smallvec 1.4.0", + "smallvec 1.4.1", ] [[package]] diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index 2cfe1619a9..14a1d1b022 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -29,7 +29,7 @@ once_cell = { version = "1", default-features = false, optional = true } sp-state-machine = { version = "0.8.0-rc4", optional = true, path = "../../primitives/state-machine" } bitmask = { version = "0.5.0", default-features = false } impl-trait-for-tuples = "0.1.3" -smallvec = "1.4.0" +smallvec = "1.4.1" [dev-dependencies] pretty_assertions = "0.6.1" diff --git a/frame/transaction-payment/Cargo.toml b/frame/transaction-payment/Cargo.toml index c1409c2675..cc26af45d7 100644 --- a/frame/transaction-payment/Cargo.toml +++ b/frame/transaction-payment/Cargo.toml @@ -19,7 +19,7 @@ sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../../pr frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-rc4", default-features = false, path = "./rpc/runtime-api" } -smallvec = "1.4.0" +smallvec = "1.4.1" [dev-dependencies] sp-io = { version = "2.0.0-rc4", path = "../../primitives/io" } diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index 2545f52760..96eeb2839a 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -26,7 +26,7 @@ num-traits = "0.2.8" rand = "0.7.2" sp-externalities = { 
version = "0.8.0-rc4", path = "../externalities" } itertools = "0.9" -smallvec = "1.4" +smallvec = "1.4.1" [dev-dependencies] hex-literal = "0.2.1" -- GitLab From ffc5797a674f5891526f4a9de0bd8eb40ad9a75d Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 7 Jul 2020 12:32:30 +0200 Subject: [PATCH 129/144] Use async/await in build_network_future (#6533) * Use async/await in build_network_future * Address concerns * Fix test --- Cargo.lock | 1 - client/informant/Cargo.toml | 3 +- client/informant/src/lib.rs | 5 +- client/service/src/builder.rs | 12 +- client/service/src/lib.rs | 246 +++++++++++++++------------ primitives/utils/src/status_sinks.rs | 152 ++++++++++++----- 6 files changed, 253 insertions(+), 166 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8862553173..707b6a66a2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6564,7 +6564,6 @@ dependencies = [ "futures 0.3.5", "log", "parity-util-mem", - "parking_lot 0.10.2", "sc-client-api", "sc-network", "sp-blockchain", diff --git a/client/informant/Cargo.toml b/client/informant/Cargo.toml index d2df78537d..98c72f5deb 100644 --- a/client/informant/Cargo.toml +++ b/client/informant/Cargo.toml @@ -16,11 +16,10 @@ ansi_term = "0.12.1" futures = "0.3.4" log = "0.4.8" parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } -wasm-timer = "0.2" sc-client-api = { version = "2.0.0-rc4", path = "../api" } sc-network = { version = "0.8.0-rc4", path = "../network" } sp-blockchain = { version = "2.0.0-rc4", path = "../../primitives/blockchain" } sp-runtime = { version = "2.0.0-rc4", path = "../../primitives/runtime" } sp-utils = { version = "2.0.0-rc2", path = "../../primitives/utils" } sp-transaction-pool = { version = "2.0.0-rc2", path = "../../primitives/transaction-pool" } -parking_lot = "0.10.2" +wasm-timer = "0.2" diff --git a/client/informant/src/lib.rs b/client/informant/src/lib.rs index d56afcf335..3daf29a9f7 100644 --- a/client/informant/src/lib.rs +++ b/client/informant/src/lib.rs @@ -29,7 +29,6 @@ use sp_runtime::traits::{Block as BlockT, Header}; use sp_transaction_pool::TransactionPool; use sp_utils::{status_sinks, mpsc::tracing_unbounded}; use std::{fmt::Display, sync::Arc, time::Duration, collections::VecDeque}; -use parking_lot::Mutex; mod display; @@ -82,7 +81,7 @@ impl TransactionPoolAndMaybeMallogSizeOf for /// Builds the informant and returns a `Future` that drives the informant. 
pub fn build( client: Arc, - network_status_sinks: Arc, NetworkState)>>>, + network_status_sinks: Arc, NetworkState)>>, pool: Arc, format: OutputFormat, ) -> impl futures::Future @@ -94,7 +93,7 @@ where let client_1 = client.clone(); let (network_status_sink, network_status_stream) = tracing_unbounded("mpsc_network_status"); - network_status_sinks.lock().push(Duration::from_millis(5000), network_status_sink); + network_status_sinks.push(Duration::from_millis(5000), network_status_sink); let display_notifications = network_status_stream .for_each(move |(net_status, _)| { diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 16b41e135a..1585298d98 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -1157,7 +1157,7 @@ async fn telemetry_periodic_send( client: Arc, transaction_pool: Arc, mut metrics_service: MetricsService, - network_status_sinks: Arc, NetworkState)>>> + network_status_sinks: Arc, NetworkState)>> ) where TBl: BlockT, @@ -1165,7 +1165,7 @@ async fn telemetry_periodic_send( TExPool: MaintainedTransactionPool::Hash>, { let (state_tx, state_rx) = tracing_unbounded::<(NetworkStatus<_>, NetworkState)>("mpsc_netstat1"); - network_status_sinks.lock().push(std::time::Duration::from_millis(5000), state_tx); + network_status_sinks.push(std::time::Duration::from_millis(5000), state_tx); state_rx.for_each(move |(net_status, _)| { let info = client.usage_info(); metrics_service.tick( @@ -1178,11 +1178,11 @@ async fn telemetry_periodic_send( } async fn telemetry_periodic_network_state( - network_status_sinks: Arc, NetworkState)>>> + network_status_sinks: Arc, NetworkState)>> ) { // Periodically send the network state to the telemetry. let (netstat_tx, netstat_rx) = tracing_unbounded::<(NetworkStatus<_>, NetworkState)>("mpsc_netstat2"); - network_status_sinks.lock().push(std::time::Duration::from_secs(30), netstat_tx); + network_status_sinks.push(std::time::Duration::from_secs(30), netstat_tx); netstat_rx.for_each(move |(_, network_state)| { telemetry!( SUBSTRATE_INFO; @@ -1347,7 +1347,7 @@ fn build_network( ) -> Result< ( Arc::Hash>>, - Arc, NetworkState)>>>, + Arc, NetworkState)>>, Pin + Send>> ), Error @@ -1407,7 +1407,7 @@ fn build_network( let has_bootnodes = !network_params.network_config.boot_nodes.is_empty(); let network_mut = sc_network::NetworkWorker::new(network_params)?; let network = network_mut.service().clone(); - let network_status_sinks = Arc::new(Mutex::new(status_sinks::StatusSinks::new())); + let network_status_sinks = Arc::new(status_sinks::StatusSinks::new()); let future = build_network_future( config.role.clone(), diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 978b77974f..2c09591fc7 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -20,7 +20,7 @@ //! Manages communication between them. #![warn(missing_docs)] -#![recursion_limit="128"] +#![recursion_limit = "1024"] pub mod config; pub mod chain_ops; @@ -42,7 +42,7 @@ use wasm_timer::Instant; use std::task::Poll; use parking_lot::Mutex; -use futures::{Future, FutureExt, Stream, StreamExt, compat::*}; +use futures::{Future, FutureExt, Stream, StreamExt, stream, compat::*}; use sc_network::{NetworkStatus, network_state::NetworkState, PeerId}; use log::{log, warn, debug, error, Level}; use codec::{Encode, Decode}; @@ -118,12 +118,12 @@ impl RpcHandlers { /// Sinks to propagate network status updates. /// For each element, every time the `Interval` fires we push an element on the sender. 
pub struct NetworkStatusSinks( - Arc, NetworkState)>>>, + Arc, NetworkState)>>, ); impl NetworkStatusSinks { fn new( - sinks: Arc, NetworkState)>>> + sinks: Arc, NetworkState)>> ) -> Self { Self(sinks) } @@ -132,7 +132,7 @@ impl NetworkStatusSinks { pub fn network_status(&self, interval: Duration) -> TracingUnboundedReceiver<(NetworkStatus, NetworkState)> { let (sink, stream) = tracing_unbounded("mpsc_network_status"); - self.0.lock().push(interval, sink); + self.0.push(interval, sink); stream } } @@ -181,7 +181,7 @@ pub struct ServiceComponents, TSc, TExPool, /// Builds a never-ending future that continuously polls the network. /// /// The `status_sink` contain a list of senders to send a periodic network status to. -fn build_network_future< +async fn build_network_future< B: BlockT, C: BlockchainEvents, H: sc_network::ExHashT @@ -189,126 +189,150 @@ fn build_network_future< role: Role, mut network: sc_network::NetworkWorker, client: Arc, - status_sinks: Arc, NetworkState)>>>, + status_sinks: Arc, NetworkState)>>, mut rpc_rx: TracingUnboundedReceiver>, should_have_peers: bool, announce_imported_blocks: bool, -) -> impl Future { +) { let mut imported_blocks_stream = client.import_notification_stream().fuse(); - let mut finality_notification_stream = client.finality_notification_stream().fuse(); - futures::future::poll_fn(move |cx| { - let before_polling = Instant::now(); + // Stream of finalized blocks reported by the client. + let mut finality_notification_stream = { + let mut finality_notification_stream = client.finality_notification_stream().fuse(); - // We poll `imported_blocks_stream`. - while let Poll::Ready(Some(notification)) = Pin::new(&mut imported_blocks_stream).poll_next(cx) { - if announce_imported_blocks { - network.service().announce_block(notification.hash, Vec::new()); + // We tweak the `Stream` in order to merge together multiple items if they happen to be + // ready. This way, we only get the latest finalized block. + stream::poll_fn(move |cx| { + let mut last = None; + while let Poll::Ready(Some(item)) = Pin::new(&mut finality_notification_stream).poll_next(cx) { + last = Some(item); } - - if let sp_consensus::BlockOrigin::Own = notification.origin { - network.service().own_block_imported( - notification.hash, - notification.header.number().clone(), - ); + if let Some(last) = last { + Poll::Ready(Some(last)) + } else { + Poll::Pending } - } + }).fuse() + }; - // We poll `finality_notification_stream`, but we only take the last event. - let mut last = None; - while let Poll::Ready(Some(item)) = Pin::new(&mut finality_notification_stream).poll_next(cx) { - last = Some(item); - } - if let Some(notification) = last { - network.on_block_finalized(notification.hash, notification.header); - } + loop { + let before_polling = Instant::now(); - // Poll the RPC requests and answer them. 
- while let Poll::Ready(Some(request)) = Pin::new(&mut rpc_rx).poll_next(cx) { - match request { - sc_rpc::system::Request::Health(sender) => { - let _ = sender.send(sc_rpc::system::Health { - peers: network.peers_debug_info().len(), - is_syncing: network.service().is_major_syncing(), - should_have_peers, - }); - }, - sc_rpc::system::Request::LocalPeerId(sender) => { - let _ = sender.send(network.local_peer_id().to_base58()); - }, - sc_rpc::system::Request::LocalListenAddresses(sender) => { - let peer_id = network.local_peer_id().clone().into(); - let p2p_proto_suffix = sc_network::multiaddr::Protocol::P2p(peer_id); - let addresses = network.listen_addresses() - .map(|addr| addr.clone().with(p2p_proto_suffix.clone()).to_string()) - .collect(); - let _ = sender.send(addresses); - }, - sc_rpc::system::Request::Peers(sender) => { - let _ = sender.send(network.peers_debug_info().into_iter().map(|(peer_id, p)| - sc_rpc::system::PeerInfo { - peer_id: peer_id.to_base58(), - roles: format!("{:?}", p.roles), - protocol_version: p.protocol_version, - best_hash: p.best_hash, - best_number: p.best_number, - } - ).collect()); + futures::select!{ + // List of blocks that the client has imported. + notification = imported_blocks_stream.next() => { + let notification = match notification { + Some(n) => n, + // If this stream is shut down, that means the client has shut down, and the + // most appropriate thing to do for the network future is to shut down too. + None => return, + }; + + if announce_imported_blocks { + network.service().announce_block(notification.hash, Vec::new()); } - sc_rpc::system::Request::NetworkState(sender) => { - if let Some(network_state) = serde_json::to_value(&network.network_state()).ok() { - let _ = sender.send(network_state); - } - } - sc_rpc::system::Request::NetworkAddReservedPeer(peer_addr, sender) => { - let x = network.add_reserved_peer(peer_addr) - .map_err(sc_rpc::system::error::Error::MalformattedPeerArg); - let _ = sender.send(x); + + if let sp_consensus::BlockOrigin::Own = notification.origin { + network.service().own_block_imported( + notification.hash, + notification.header.number().clone(), + ); } - sc_rpc::system::Request::NetworkRemoveReservedPeer(peer_id, sender) => { - let _ = match peer_id.parse::() { - Ok(peer_id) => { - network.remove_reserved_peer(peer_id); - sender.send(Ok(())) + } + + // List of blocks that the client has finalized. + notification = finality_notification_stream.select_next_some() => { + network.on_block_finalized(notification.hash, notification.header); + } + + // Answer incoming RPC requests. 
+ request = rpc_rx.select_next_some() => { + match request { + sc_rpc::system::Request::Health(sender) => { + let _ = sender.send(sc_rpc::system::Health { + peers: network.peers_debug_info().len(), + is_syncing: network.service().is_major_syncing(), + should_have_peers, + }); + }, + sc_rpc::system::Request::LocalPeerId(sender) => { + let _ = sender.send(network.local_peer_id().to_base58()); + }, + sc_rpc::system::Request::LocalListenAddresses(sender) => { + let peer_id = network.local_peer_id().clone().into(); + let p2p_proto_suffix = sc_network::multiaddr::Protocol::P2p(peer_id); + let addresses = network.listen_addresses() + .map(|addr| addr.clone().with(p2p_proto_suffix.clone()).to_string()) + .collect(); + let _ = sender.send(addresses); + }, + sc_rpc::system::Request::Peers(sender) => { + let _ = sender.send(network.peers_debug_info().into_iter().map(|(peer_id, p)| + sc_rpc::system::PeerInfo { + peer_id: peer_id.to_base58(), + roles: format!("{:?}", p.roles), + protocol_version: p.protocol_version, + best_hash: p.best_hash, + best_number: p.best_number, + } + ).collect()); + } + sc_rpc::system::Request::NetworkState(sender) => { + if let Some(network_state) = serde_json::to_value(&network.network_state()).ok() { + let _ = sender.send(network_state); } - Err(e) => sender.send(Err(sc_rpc::system::error::Error::MalformattedPeerArg( - e.to_string(), - ))), - }; - } - sc_rpc::system::Request::NodeRoles(sender) => { - use sc_rpc::system::NodeRole; + } + sc_rpc::system::Request::NetworkAddReservedPeer(peer_addr, sender) => { + let x = network.add_reserved_peer(peer_addr) + .map_err(sc_rpc::system::error::Error::MalformattedPeerArg); + let _ = sender.send(x); + } + sc_rpc::system::Request::NetworkRemoveReservedPeer(peer_id, sender) => { + let _ = match peer_id.parse::() { + Ok(peer_id) => { + network.remove_reserved_peer(peer_id); + sender.send(Ok(())) + } + Err(e) => sender.send(Err(sc_rpc::system::error::Error::MalformattedPeerArg( + e.to_string(), + ))), + }; + } + sc_rpc::system::Request::NodeRoles(sender) => { + use sc_rpc::system::NodeRole; - let node_role = match role { - Role::Authority { .. } => NodeRole::Authority, - Role::Light => NodeRole::LightClient, - Role::Full => NodeRole::Full, - Role::Sentry { .. } => NodeRole::Sentry, - }; + let node_role = match role { + Role::Authority { .. } => NodeRole::Authority, + Role::Light => NodeRole::LightClient, + Role::Full => NodeRole::Full, + Role::Sentry { .. } => NodeRole::Sentry, + }; - let _ = sender.send(vec![node_role]); + let _ = sender.send(vec![node_role]); + } } - }; - } + } - // Interval report for the external API. - status_sinks.lock().poll(cx, || { - let status = NetworkStatus { - sync_state: network.sync_state(), - best_seen_block: network.best_seen_block(), - num_sync_peers: network.num_sync_peers(), - num_connected_peers: network.num_connected_peers(), - num_active_peers: network.num_active_peers(), - average_download_per_sec: network.average_download_per_sec(), - average_upload_per_sec: network.average_upload_per_sec(), - }; - let state = network.network_state(); - (status, state) - }); - - // Main network polling. - if let Poll::Ready(()) = network.poll_unpin(cx) { - return Poll::Ready(()); + // The network worker has done something. Nothing special to do, but could be + // used in the future to perform actions in response of things that happened on + // the network. + _ = (&mut network).fuse() => {} + + // At a regular interval, we send the state of the network on what is called + // the "status sinks". 
+ ready_sink = status_sinks.next().fuse() => { + let status = NetworkStatus { + sync_state: network.sync_state(), + best_seen_block: network.best_seen_block(), + num_sync_peers: network.num_sync_peers(), + num_connected_peers: network.num_connected_peers(), + num_active_peers: network.num_active_peers(), + average_download_per_sec: network.average_download_per_sec(), + average_upload_per_sec: network.average_upload_per_sec(), + }; + let state = network.network_state(); + ready_sink.send((status, state)); + } } // Now some diagnostic for performances. @@ -319,9 +343,7 @@ fn build_network_future< "⚠️ Polling the network future took {:?}", polling_dur ); - - Poll::Pending - }) + } } #[cfg(not(target_os = "unknown"))] diff --git a/primitives/utils/src/status_sinks.rs b/primitives/utils/src/status_sinks.rs index 47bccebb96..65a560af4e 100644 --- a/primitives/utils/src/status_sinks.rs +++ b/primitives/utils/src/status_sinks.rs @@ -14,19 +14,27 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use futures::{Stream, stream::futures_unordered::FuturesUnordered}; -use std::time::Duration; -use std::pin::Pin; -use std::task::{Poll, Context}; +use crate::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use futures::{prelude::*, lock::Mutex}; use futures_timer::Delay; -use crate::mpsc::TracingUnboundedSender; +use std::{pin::Pin, task::{Poll, Context}, time::Duration}; /// Holds a list of `UnboundedSender`s, each associated with a certain time period. Every time the /// period elapses, we push an element on the sender. /// /// Senders are removed only when they are closed. pub struct StatusSinks { - entries: FuturesUnordered>, + /// Should only be locked by `next`. + inner: Mutex>, + /// Sending side of `Inner::entries_rx`. + entries_tx: TracingUnboundedSender>, +} + +struct Inner { + /// The actual entries of the list. + entries: stream::FuturesUnordered>, + /// Receives new entries and puts them in `entries`. + entries_rx: TracingUnboundedReceiver>, } struct YieldAfter { @@ -38,56 +46,114 @@ struct YieldAfter { impl StatusSinks { /// Builds a new empty collection. pub fn new() -> StatusSinks { + let (entries_tx, entries_rx) = tracing_unbounded("status-sinks-entries"); + StatusSinks { - entries: FuturesUnordered::new(), + inner: Mutex::new(Inner { + entries: stream::FuturesUnordered::new(), + entries_rx, + }), + entries_tx, } } /// Adds a sender to the collection. /// /// The `interval` is the time period between two pushes on the sender. - pub fn push(&mut self, interval: Duration, sender: TracingUnboundedSender) { - self.entries.push(YieldAfter { + pub fn push(&self, interval: Duration, sender: TracingUnboundedSender) { + let _ = self.entries_tx.unbounded_send(YieldAfter { delay: Delay::new(interval), interval, sender: Some(sender), - }) + }); } - /// Processes all the senders. If any sender is ready, calls the `status_grab` function and - /// pushes what it returns to the sender. + /// Waits until one of the sinks is ready, then returns an object that can be used to send + /// an element on said sink. /// - /// This function doesn't return anything, but it should be treated as if it implicitly - /// returns `Poll::Pending`. In particular, it should be called again when the task - /// is waken up. - /// - /// # Panic - /// - /// Panics if not called within the context of a task. 
- pub fn poll(&mut self, cx: &mut Context, mut status_grab: impl FnMut() -> T) { + /// If the object isn't used to send an element, the slot is skipped. + pub async fn next(&self) -> ReadySinkEvent<'_, T> { + // This is only ever locked by `next`, which means that one `next` at a time can run. + let mut inner = self.inner.lock().await; + let inner = &mut *inner; + loop { - match Pin::new(&mut self.entries).poll_next(cx) { - Poll::Ready(Some((sender, interval))) => { - let status = status_grab(); - if sender.unbounded_send(status).is_ok() { - self.entries.push(YieldAfter { - // Note that since there's a small delay between the moment a task is - // waken up and the moment it is polled, the period is actually not - // `interval` but `interval + `. We ignore this problem in - // practice. - delay: Delay::new(interval), - interval, - sender: Some(sender), - }); + // Future that produces the next ready entry in `entries`, or doesn't produce anything if + // the list is empty. + let next_ready_entry = { + let entries = &mut inner.entries; + async move { + if let Some(v) = entries.next().await { + v + } else { + loop { + futures::pending!() + } + } + } + }; + + futures::select!{ + new_entry = inner.entries_rx.next() => { + if let Some(new_entry) = new_entry { + inner.entries.push(new_entry); + } + }, + (sender, interval) = next_ready_entry.fuse() => { + return ReadySinkEvent { + sinks: self, + sender: Some(sender), + interval, } } - Poll::Ready(None) | - Poll::Pending => break, } } } } +/// One of the sinks is ready. +#[must_use] +pub struct ReadySinkEvent<'a, T> { + sinks: &'a StatusSinks, + sender: Option>, + interval: Duration, +} + +impl<'a, T> ReadySinkEvent<'a, T> { + /// Sends an element on the sender. + pub fn send(mut self, element: T) { + if let Some(sender) = self.sender.take() { + if sender.unbounded_send(element).is_ok() { + let _ = self.sinks.entries_tx.unbounded_send(YieldAfter { + // Note that since there's a small delay between the moment a task is + // woken up and the moment it is polled, the period is actually not + // `interval` but `interval + `. We ignore this problem in + // practice. + delay: Delay::new(self.interval), + interval: self.interval, + sender: Some(sender), + }); + } + } + } +} + +impl<'a, T> Drop for ReadySinkEvent<'a, T> { + fn drop(&mut self) { + if let Some(sender) = self.sender.take() { + if sender.is_closed() { + return; + } + + let _ = self.sinks.entries_tx.unbounded_send(YieldAfter { + delay: Delay::new(self.interval), + interval: self.interval, + sender: Some(sender), + }); + } + } +} + impl futures::Future for YieldAfter { type Output = (TracingUnboundedSender, Duration); @@ -107,28 +173,30 @@ impl futures::Future for YieldAfter { #[cfg(test)] mod tests { + use crate::mpsc::tracing_unbounded; use super::StatusSinks; use futures::prelude::*; - use crate::mpsc::tracing_unbounded; use std::time::Duration; - use std::task::Poll; #[test] fn works() { // We're not testing that the `StatusSink` properly enforces an order in the intervals, as // this easily causes test failures on busy CPUs. 
- let mut status_sinks = StatusSinks::new(); + let status_sinks = StatusSinks::new(); - let (tx, rx) = tracing_unbounded("status_sink_test"); + let (tx, rx) = tracing_unbounded("test"); status_sinks.push(Duration::from_millis(100), tx); let mut val_order = 5; futures::executor::block_on(futures::future::select( - futures::future::poll_fn(move |cx| { - status_sinks.poll(cx, || { val_order += 1; val_order }); - Poll::<()>::Pending + Box::pin(async move { + loop { + let ev = status_sinks.next().await; + val_order += 1; + ev.send(val_order); + } }), Box::pin(async { let items: Vec = rx.take(3).collect().await; -- GitLab From 4e0a1b1a2a9922e1356d5c9bd8e35fb24a823fc1 Mon Sep 17 00:00:00 2001 From: Shaopeng Wang Date: Wed, 8 Jul 2020 11:06:21 +1200 Subject: [PATCH 130/144] decl_module! macro: use 'frame_system' instead of `system` as default ident (#6500) * Use frame_system as default ident. * Remove unused 'frame_system' to 'system' renaming. * Fix construct_runtime_ui tests. * Rename system to frame_system in sudo/utility pallet test. * Bump runtime impl_version. * Update formatting. --- bin/node-template/pallets/template/src/lib.rs | 8 ++++---- bin/node/runtime/src/lib.rs | 2 +- frame/assets/src/lib.rs | 4 ++-- frame/contracts/src/lib.rs | 2 +- frame/elections-phragmen/src/lib.rs | 5 ++--- frame/elections/src/lib.rs | 2 +- frame/evm/src/lib.rs | 2 +- frame/example/src/lib.rs | 2 +- frame/generic-asset/src/lib.rs | 2 +- frame/grandpa/src/lib.rs | 2 +- frame/identity/src/lib.rs | 2 +- frame/im-online/src/lib.rs | 4 ++-- frame/membership/src/lib.rs | 2 +- frame/nicks/src/lib.rs | 2 +- frame/offences/src/lib.rs | 1 - frame/scored-pool/src/lib.rs | 4 ++-- frame/session/src/lib.rs | 8 ++++---- frame/staking/src/lib.rs | 2 +- frame/sudo/src/lib.rs | 4 ++-- frame/sudo/src/mock.rs | 10 +++++----- .../support/procedural/src/construct_runtime/mod.rs | 2 +- frame/support/src/dispatch.rs | 12 ++++++------ frame/support/src/error.rs | 2 +- frame/support/src/weights.rs | 12 ++++++------ .../missing_system_module.stderr | 2 +- frame/support/test/tests/instance.rs | 4 +++- frame/timestamp/src/lib.rs | 2 +- frame/utility/src/lib.rs | 2 +- frame/utility/src/tests.rs | 4 ++-- frame/vesting/src/lib.rs | 2 +- 30 files changed, 57 insertions(+), 57 deletions(-) diff --git a/bin/node-template/pallets/template/src/lib.rs b/bin/node-template/pallets/template/src/lib.rs index 1f82857c43..7514a46c1c 100644 --- a/bin/node-template/pallets/template/src/lib.rs +++ b/bin/node-template/pallets/template/src/lib.rs @@ -10,7 +10,7 @@ /// https://github.com/paritytech/substrate/blob/master/frame/example/src/lib.rs use frame_support::{decl_module, decl_storage, decl_event, decl_error, dispatch}; -use frame_system::{self as system, ensure_signed}; +use frame_system::ensure_signed; #[cfg(test)] mod mock; @@ -19,11 +19,11 @@ mod mock; mod tests; /// The pallet's configuration trait. -pub trait Trait: system::Trait { +pub trait Trait: frame_system::Trait { // Add other types and constants required to configure this pallet. /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; } // This pallet's storage items. @@ -41,7 +41,7 @@ decl_storage! { // The pallet's events decl_event!( - pub enum Event where AccountId = ::AccountId { + pub enum Event where AccountId = ::AccountId { /// Just a dummy event. 
/// Event `Something` is declared with a parameter of the type `u32` and `AccountId` /// To emit this event, we call the deposit function, from our runtime functions diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 07b0a82e48..85c3aef41c 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -98,7 +98,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. spec_version: 255, - impl_version: 0, + impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, }; diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index 159546ccb3..1445c53082 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -87,7 +87,7 @@ //! ```rust,ignore //! use pallet_assets as assets; //! use frame_support::{decl_module, dispatch, ensure}; -//! use frame_system::{self as system, ensure_signed}; +//! use frame_system::ensure_signed; //! //! pub trait Trait: assets::Trait { } //! @@ -135,7 +135,7 @@ use frame_support::{Parameter, decl_module, decl_event, decl_storage, decl_error, ensure}; use sp_runtime::traits::{Member, AtLeast32Bit, AtLeast32BitUnsigned, Zero, StaticLookup}; -use frame_system::{self as system, ensure_signed}; +use frame_system::ensure_signed; use sp_runtime::traits::One; /// The module configuration trait. diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 4db77a078e..182c6cd330 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -112,7 +112,7 @@ use frame_support::{ dispatch::{DispatchResult, DispatchResultWithPostInfo}, traits::{OnUnbalanced, Currency, Get, Time, Randomness}, }; -use frame_system::{self as system, ensure_signed, ensure_root}; +use frame_system::{ensure_signed, ensure_root}; use pallet_contracts_primitives::{RentProjection, ContractAccessError}; use frame_support::weights::Weight; diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 9436a15d5c..63824dbf9c 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -101,7 +101,7 @@ use frame_support::{ } }; use sp_npos_elections::{build_support_map, ExtendedBalance, VoteWeight, ElectionResult}; -use frame_system::{self as system, ensure_signed, ensure_root}; +use frame_system::{ensure_signed, ensure_root}; mod benchmarking; @@ -1060,7 +1060,6 @@ mod tests { traits::{BlakeTwo256, IdentityLookup, Block as BlockT}, }; use crate as elections_phragmen; - use frame_system as system; parameter_types! 
{ pub const BlockHashCount: u64 = 250; @@ -1225,7 +1224,7 @@ mod tests { NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module, Call, Event}, + System: frame_system::{Module, Call, Event}, Balances: pallet_balances::{Module, Call, Event, Config}, Elections: elections_phragmen::{Module, Call, Event, Config}, } diff --git a/frame/elections/src/lib.rs b/frame/elections/src/lib.rs index 171a2dbb8b..c9d3c5455f 100644 --- a/frame/elections/src/lib.rs +++ b/frame/elections/src/lib.rs @@ -38,7 +38,7 @@ use frame_support::{ } }; use codec::{Encode, Decode}; -use frame_system::{self as system, ensure_signed, ensure_root}; +use frame_system::{ensure_signed, ensure_root}; mod mock; mod tests; diff --git a/frame/evm/src/lib.rs b/frame/evm/src/lib.rs index f7aa51e9ff..bb08592ecd 100644 --- a/frame/evm/src/lib.rs +++ b/frame/evm/src/lib.rs @@ -33,7 +33,7 @@ use serde::{Serialize, Deserialize}; use frame_support::{ensure, decl_module, decl_storage, decl_event, decl_error}; use frame_support::weights::Weight; use frame_support::traits::{Currency, WithdrawReason, ExistenceRequirement, Get}; -use frame_system::{self as system, ensure_signed}; +use frame_system::ensure_signed; use sp_runtime::ModuleId; use sp_core::{U256, H256, H160, Hasher}; use sp_runtime::{ diff --git a/frame/example/src/lib.rs b/frame/example/src/lib.rs index 00e0d78d9b..65e2e494d1 100644 --- a/frame/example/src/lib.rs +++ b/frame/example/src/lib.rs @@ -260,7 +260,7 @@ use frame_support::{ weights::{DispatchClass, ClassifyDispatch, WeighData, Weight, PaysFee, Pays}, }; use sp_std::prelude::*; -use frame_system::{self as system, ensure_signed, ensure_root}; +use frame_system::{ensure_signed, ensure_root}; use codec::{Encode, Decode}; use sp_runtime::{ traits::{ diff --git a/frame/generic-asset/src/lib.rs b/frame/generic-asset/src/lib.rs index 7d24f89d70..0f3d9fec74 100644 --- a/frame/generic-asset/src/lib.rs +++ b/frame/generic-asset/src/lib.rs @@ -171,7 +171,7 @@ use frame_support::{ }, Parameter, StorageMap, }; -use frame_system::{self as system, ensure_signed, ensure_root}; +use frame_system::{ensure_signed, ensure_root}; mod mock; mod tests; diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index 3b3e595ad1..91d783cb1a 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -43,7 +43,7 @@ use frame_support::{ decl_error, decl_event, decl_module, decl_storage, storage, traits::KeyOwnerProofSystem, Parameter, }; -use frame_system::{self as system, ensure_signed, DigestOf}; +use frame_system::{ensure_signed, DigestOf}; use sp_runtime::{ generic::{DigestItem, OpaqueDigestItemId}, traits::Zero, diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index 19b23a644d..b4c161aabb 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -78,7 +78,7 @@ use frame_support::{ traits::{Currency, ReservableCurrency, OnUnbalanced, Get, BalanceStatus, EnsureOrigin}, weights::Weight, }; -use frame_system::{self as system, ensure_signed}; +use frame_system::ensure_signed; mod benchmarking; diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index ddbbb52bd2..a755b5d2d1 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -44,7 +44,7 @@ //! //! ``` //! use frame_support::{decl_module, dispatch}; -//! use frame_system::{self as system, ensure_signed}; +//! use frame_system::ensure_signed; //! use pallet_im_online::{self as im_online}; //! //! 
pub trait Trait: im_online::Trait {} @@ -97,7 +97,7 @@ use frame_support::{ traits::Get, weights::Weight, }; -use frame_system::{self as system, ensure_none}; +use frame_system::ensure_none; use frame_system::offchain::{ SendTransactionTypes, SubmitTransaction, diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index c8563b52f8..bf6c7ec486 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -28,7 +28,7 @@ use frame_support::{ decl_module, decl_storage, decl_event, decl_error, traits::{ChangeMembers, InitializeMembers, EnsureOrigin, Contains}, }; -use frame_system::{self as system, ensure_signed}; +use frame_system::ensure_signed; pub trait Trait: frame_system::Trait { /// The overarching event type. diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index 93c6081941..27a0dedd7e 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -47,7 +47,7 @@ use frame_support::{ decl_module, decl_event, decl_storage, ensure, decl_error, traits::{Currency, EnsureOrigin, ReservableCurrency, OnUnbalanced, Get}, }; -use frame_system::{self as system, ensure_signed}; +use frame_system::ensure_signed; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; diff --git a/frame/offences/src/lib.rs b/frame/offences/src/lib.rs index 267e6e14c9..5899c22fb0 100644 --- a/frame/offences/src/lib.rs +++ b/frame/offences/src/lib.rs @@ -37,7 +37,6 @@ use sp_staking::{ offence::{Offence, ReportOffence, Kind, OnOffenceHandler, OffenceDetails, OffenceError}, }; use codec::{Encode, Decode}; -use frame_system as system; /// A binary blob which represents a SCALE codec-encoded `O::TimeSlot`. type OpaqueTimeSlot = Vec; diff --git a/frame/scored-pool/src/lib.rs b/frame/scored-pool/src/lib.rs index 81ee92aeb4..35c36b0319 100644 --- a/frame/scored-pool/src/lib.rs +++ b/frame/scored-pool/src/lib.rs @@ -55,7 +55,7 @@ //! //! ``` //! use frame_support::{decl_module, dispatch}; -//! use frame_system::{self as system, ensure_signed}; +//! use frame_system::ensure_signed; //! use pallet_scored_pool::{self as scored_pool}; //! //! pub trait Trait: scored_pool::Trait {} @@ -100,7 +100,7 @@ use frame_support::{ traits::{EnsureOrigin, ChangeMembers, InitializeMembers, Currency, Get, ReservableCurrency}, weights::Weight, }; -use frame_system::{self as system, ensure_root, ensure_signed}; +use frame_system::{ensure_root, ensure_signed}; use sp_runtime::traits::{AtLeast32Bit, MaybeSerializeDeserialize, Zero, StaticLookup}; type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index 6f5630adf9..0cd77af7c8 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -113,7 +113,7 @@ use frame_support::{ dispatch::{self, DispatchResult, DispatchError}, weights::Weight, }; -use frame_system::{self as system, ensure_signed}; +use frame_system::ensure_signed; #[cfg(test)] mod mock; @@ -434,7 +434,7 @@ decl_storage! 
{ for (account, val, keys) in config.keys.iter().cloned() { >::inner_set_keys(&val, keys) .expect("genesis config must not contain duplicates; qed"); - system::Module::::inc_ref(&account); + frame_system::Module::::inc_ref(&account); } let initial_validators_0 = T::SessionManager::new_session(0) @@ -692,7 +692,7 @@ impl Module { let old_keys = Self::inner_set_keys(&who, keys)?; if old_keys.is_none() { - system::Module::::inc_ref(&account); + frame_system::Module::::inc_ref(&account); } Ok(()) @@ -740,7 +740,7 @@ impl Module { let key_data = old_keys.get_raw(*id); Self::clear_key_owner(*id, key_data); } - system::Module::::dec_ref(&account); + frame_system::Module::::dec_ref(&account); Ok(()) } diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index a59214410d..1049096887 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -151,7 +151,7 @@ //! //! ``` //! use frame_support::{decl_module, dispatch}; -//! use frame_system::{self as system, ensure_signed}; +//! use frame_system::ensure_signed; //! use pallet_staking::{self as staking}; //! //! pub trait Trait: staking::Trait {} diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index 233e75e869..cf55d9e67f 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -53,7 +53,7 @@ //! //! ``` //! use frame_support::{decl_module, dispatch}; -//! use frame_system::{self as system, ensure_root}; +//! use frame_system::ensure_root; //! //! pub trait Trait: frame_system::Trait {} //! @@ -94,7 +94,7 @@ use frame_support::{ Parameter, decl_module, decl_event, decl_storage, decl_error, ensure, }; use frame_support::{weights::{Weight, GetDispatchInfo}, traits::UnfilteredDispatchable}; -use frame_system::{self as system, ensure_signed}; +use frame_system::ensure_signed; #[cfg(test)] mod mock; diff --git a/frame/sudo/src/mock.rs b/frame/sudo/src/mock.rs index 3bf67f581b..74612fa879 100644 --- a/frame/sudo/src/mock.rs +++ b/frame/sudo/src/mock.rs @@ -35,8 +35,8 @@ pub mod logger { use super::*; use frame_system::ensure_root; - pub trait Trait: system::Trait { - type Event: From> + Into<::Event>; + pub trait Trait: frame_system::Trait { + type Event: From> + Into<::Event>; } decl_storage! { @@ -54,7 +54,7 @@ pub mod logger { } decl_module! { - pub struct Module for enum Call where origin: ::Origin { + pub struct Module for enum Call where origin: ::Origin { fn deposit_event() = default; #[weight = *weight] @@ -87,7 +87,7 @@ mod test_events { impl_outer_event! { pub enum TestEvent for Test { - system, + frame_system, sudo, logger, } @@ -161,7 +161,7 @@ impl Trait for Test { // Assign back to type variables in order to make dispatched calls of these modules later. pub type Sudo = Module; pub type Logger = logger::Module; -pub type System = system::Module; +pub type System = frame_system::Module; // New types for dispatchable functions. 
pub type SudoCall = sudo::Call; diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index 3aca3f8de8..57827b0673 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -59,7 +59,7 @@ fn construct_runtime_parsed(definition: RuntimeDefinition) -> Result},`", + Please add this line: `System: frame_system::{Module, Call, Storage, Config, Event},`", )) } }; diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index dc305357e7..810c67e039 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -70,7 +70,7 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # #[macro_use] /// # extern crate frame_support; /// # use frame_support::dispatch; -/// # use frame_system::{self as system, Trait, ensure_signed}; +/// # use frame_system::{Trait, ensure_signed}; /// decl_module! { /// pub struct Module for enum Call where origin: T::Origin { /// @@ -112,7 +112,7 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # #[macro_use] /// # extern crate frame_support; /// # use frame_support::dispatch; -/// # use frame_system::{self as system, Trait, ensure_signed}; +/// # use frame_system::{Trait, ensure_signed}; /// decl_module! { /// pub struct Module for enum Call where origin: T::Origin { /// #[weight = 0] @@ -147,7 +147,7 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # #[macro_use] /// # extern crate frame_support; /// # use frame_support::dispatch::{DispatchResultWithPostInfo, WithPostDispatchInfo}; -/// # use frame_system::{self as system, Trait, ensure_signed}; +/// # use frame_system::{Trait, ensure_signed}; /// decl_module! { /// pub struct Module for enum Call where origin: T::Origin { /// #[weight = 1_000_000] @@ -175,7 +175,7 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # #[macro_use] /// # extern crate frame_support; /// # use frame_support::dispatch; -/// # use frame_system::{self as system, Trait, ensure_signed, ensure_root}; +/// # use frame_system::{Trait, ensure_signed, ensure_root}; /// decl_module! { /// pub struct Module for enum Call where origin: T::Origin { /// #[weight = 0] @@ -292,7 +292,7 @@ macro_rules! decl_module { pub struct $mod_type< $trait_instance: $trait_name $(, I: $instantiable $(= $module_default_instance)?)? > - for enum $call_type where origin: $origin_type, system = system + for enum $call_type where origin: $origin_type, system = frame_system { $( $where_ty: $where_bound ),* } {} {} @@ -2339,7 +2339,7 @@ mod tests { } decl_module! { - pub struct Module for enum Call where origin: T::Origin, T::AccountId: From { + pub struct Module for enum Call where origin: T::Origin, system = system, T::AccountId: From { /// Hi, this is a comment. #[weight = 0] fn aux_0(_origin) -> DispatchResult { unreachable!() } diff --git a/frame/support/src/error.rs b/frame/support/src/error.rs index 456ef3c461..d758ad52e7 100644 --- a/frame/support/src/error.rs +++ b/frame/support/src/error.rs @@ -47,7 +47,7 @@ pub use frame_metadata::{ModuleErrorMetadata, ErrorMetadata, DecodeDifferent}; /// } /// } /// -/// # use frame_system::{self as system, Trait}; +/// # use frame_system::Trait; /// /// // You need to register the error type in `decl_module!` as well to make the error /// // exported in the metadata. 
diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index f614bc4706..595e84333b 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -39,7 +39,7 @@ //! `Yes`**. //! //! ``` -//! # use frame_system::{self as system, Trait}; +//! # use frame_system::Trait; //! frame_support::decl_module! { //! pub struct Module for enum Call where origin: T::Origin { //! #[weight = 1000] @@ -52,7 +52,7 @@ //! 2.1 Define weight and class, **in which case `PaysFee` would be `Yes`**. //! //! ``` -//! # use frame_system::{self as system, Trait}; +//! # use frame_system::Trait; //! # use frame_support::weights::DispatchClass; //! frame_support::decl_module! { //! pub struct Module for enum Call where origin: T::Origin { @@ -66,7 +66,7 @@ //! 2.2 Define weight and `PaysFee`, **in which case `ClassifyDispatch` would be `Normal`**. //! //! ``` -//! # use frame_system::{self as system, Trait}; +//! # use frame_system::Trait; //! # use frame_support::weights::Pays; //! frame_support::decl_module! { //! pub struct Module for enum Call where origin: T::Origin { @@ -80,7 +80,7 @@ //! 3. Define all 3 parameters. //! //! ``` -//! # use frame_system::{self as system, Trait}; +//! # use frame_system::Trait; //! # use frame_support::weights::{DispatchClass, Pays}; //! frame_support::decl_module! { //! pub struct Module for enum Call where origin: T::Origin { @@ -100,7 +100,7 @@ //! all 3 are static values, providing a raw tuple is easier. //! //! ``` -//! # use frame_system::{self as system, Trait}; +//! # use frame_system::Trait; //! # use frame_support::weights::{DispatchClass, FunctionOf, Pays}; //! frame_support::decl_module! { //! pub struct Module for enum Call where origin: T::Origin { @@ -589,7 +589,7 @@ pub trait WeightToFeePolynomial { Self::polynomial().iter().fold(Self::Balance::saturated_from(0u32), |mut acc, args| { let w = Self::Balance::saturated_from(*weight).saturating_pow(args.degree.into()); - // The sum could get negative. Therefore we only sum with the accumulator. + // The sum could get negative. Therefore we only sum with the accumulator. // The Perbill Mul implementation is non overflowing. let frac = args.coeff_frac * w; let integer = args.coeff_integer.saturating_mul(w); diff --git a/frame/support/test/tests/construct_runtime_ui/missing_system_module.stderr b/frame/support/test/tests/construct_runtime_ui/missing_system_module.stderr index 442af9c01f..2ebe0721eb 100644 --- a/frame/support/test/tests/construct_runtime_ui/missing_system_module.stderr +++ b/frame/support/test/tests/construct_runtime_ui/missing_system_module.stderr @@ -1,4 +1,4 @@ -error: `System` module declaration is missing. Please add this line: `System: system::{Module, Call, Storage, Config, Event},` +error: `System` module declaration is missing. Please add this line: `System: frame_system::{Module, Call, Storage, Config, Event},` --> $DIR/missing_system_module.rs:8:2 | 8 | { diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index dde2e0ca9f..08389eed3a 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -51,6 +51,7 @@ mod module1 { frame_support::decl_module! { pub struct Module, I: InstantiableThing> for enum Call where origin: ::Origin, + system = system, T::BlockNumber: From { fn offchain_worker() {} @@ -129,7 +130,8 @@ mod module2 { frame_support::decl_module! 
{ pub struct Module, I: Instance=DefaultInstance> for enum Call where - origin: ::Origin + origin: ::Origin, + system = system { fn deposit_event() = default; } diff --git a/frame/timestamp/src/lib.rs b/frame/timestamp/src/lib.rs index 63456100a5..db15166e17 100644 --- a/frame/timestamp/src/lib.rs +++ b/frame/timestamp/src/lib.rs @@ -64,7 +64,7 @@ //! ``` //! use frame_support::{decl_module, dispatch}; //! # use pallet_timestamp as timestamp; -//! use frame_system::{self as system, ensure_signed}; +//! use frame_system::ensure_signed; //! //! pub trait Trait: timestamp::Trait {} //! diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index 47ca4f13e7..ab50cf213b 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -64,7 +64,7 @@ use frame_support::{ traits::{OriginTrait, UnfilteredDispatchable}, weights::{Weight, GetDispatchInfo, DispatchClass}, dispatch::PostDispatchInfo, }; -use frame_system::{self as system, ensure_signed, ensure_root}; +use frame_system::{ensure_signed, ensure_root}; use sp_runtime::{DispatchError, DispatchResult, traits::Dispatchable}; mod tests; diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index 349d748a37..bf04378e54 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -34,7 +34,7 @@ impl_outer_origin! { } impl_outer_event! { pub enum TestEvent for Test { - system, + frame_system, pallet_balances, utility, } @@ -132,7 +132,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { } fn last_event() -> TestEvent { - system::Module::::events().pop().map(|e| e.event).expect("Event expected") + frame_system::Module::::events().pop().map(|e| e.event).expect("Event expected") } fn expect_event>(e: E) { diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index 5e11c8af95..32fa8ce441 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -58,7 +58,7 @@ use frame_support::traits::{ Currency, LockableCurrency, VestingSchedule, WithdrawReason, LockIdentifier, ExistenceRequirement, Get }; -use frame_system::{self as system, ensure_signed, ensure_root}; +use frame_system::{ensure_signed, ensure_root}; mod benchmarking; -- GitLab From 660ca2712a02d7ca6944bb7ee8cd3345b9540eb5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 8 Jul 2020 10:42:56 +0200 Subject: [PATCH 131/144] Upgrade `kvdb-*`, `trie-db` and `memory-db` (#6584) * Upgrade `kvdb-*`, `trie-db` and `memory-db` The updates of `trie-db` and `memory-db` are important, as they fix the non-deterministic build of Polkadot/Substrate. 
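A note on the version-requirement strings these manifest changes rely on (illustrative only, not part of the patch itself): Cargo treats a bare requirement such as `smallvec = "1.4"` as a caret requirement, so a fresh resolution may select any compatible 1.x release at or above 1.4.0, while `"1.4.1"` (as introduced by #6589 above) raises the minimum to that exact patch release; the version actually compiled is whatever Cargo.lock records, which is why these patches update the lockfile alongside the manifests. A minimal sketch of the distinction:

[dependencies]
# before #6589: caret requirement, any 1.x release >= 1.4.0 may be selected
# smallvec = "1.4"
# after #6589: still a caret requirement, but the minimum is now the 1.4.1 patch release
smallvec = "1.4.1"
# a hard pin would be written as `smallvec = "=1.4.1"`; the patches here instead rely on
# Cargo.lock to record the exact versions that end up in the build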
* Change `trie-db` version * Update test-utils/runtime/Cargo.toml Co-authored-by: Andronik Ordian * Update primitives/trie/Cargo.toml Co-authored-by: Andronik Ordian * Update `Cargo.lock` and `trie-bench` * Fix UI tests * Switch to fixed version of memory-db Co-authored-by: Andronik Ordian --- Cargo.lock | 158 ++++++++++++++++------- bin/node/bench/Cargo.toml | 6 +- client/api/Cargo.toml | 2 +- client/cli/Cargo.toml | 2 +- client/db/Cargo.toml | 10 +- client/informant/Cargo.toml | 2 +- client/service/Cargo.toml | 2 +- client/state-db/Cargo.toml | 2 +- client/transaction-pool/Cargo.toml | 2 +- client/transaction-pool/graph/Cargo.toml | 2 +- frame/support/Cargo.toml | 2 +- primitives/core/Cargo.toml | 2 +- primitives/database/Cargo.toml | 2 +- primitives/runtime/Cargo.toml | 2 +- primitives/state-machine/Cargo.toml | 2 +- primitives/test-primitives/Cargo.toml | 2 +- primitives/trie/Cargo.toml | 6 +- test-utils/runtime/Cargo.toml | 6 +- utils/browser/Cargo.toml | 2 +- 19 files changed, 136 insertions(+), 78 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 707b6a66a2..c0c734533d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -93,6 +93,12 @@ dependencies = [ "const-random", ] +[[package]] +name = "ahash" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8fd72866655d1904d6b0997d0b07ba561047d070fbe29de039031c641b61217" + [[package]] name = "aho-corasick" version = "0.7.10" @@ -1533,7 +1539,7 @@ dependencies = [ "log", "once_cell", "parity-scale-codec", - "parity-util-mem", + "parity-util-mem 0.7.0", "paste", "pretty_assertions", "serde", @@ -2062,10 +2068,20 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e6073d0ca812575946eb5f35ff68dbe519907b25c42530389ff946dc84c6ead" dependencies = [ - "ahash", + "ahash 0.2.18", "autocfg 0.1.7", ] +[[package]] +name = "hashbrown" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab9b7860757ce258c89fd48d28b68c41713e597a7b09e793f6c6a6e2ea37c827" +dependencies = [ + "ahash 0.3.8", + "autocfg 1.0.0", +] + [[package]] name = "heck" version = "0.3.1" @@ -2594,7 +2610,17 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e763b2a9b500ba47948061d1e8bc3b5f03a8a1f067dbcf822a4d2c84d2b54a3a" dependencies = [ - "parity-util-mem", + "parity-util-mem 0.6.0", + "smallvec 1.4.1", +] + +[[package]] +name = "kvdb" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0315ef2f688e33844400b31f11c263f2b3dc21d8b9355c6891c5f185fae43f9a" +dependencies = [ + "parity-util-mem 0.7.0", "smallvec 1.4.1", ] @@ -2604,23 +2630,34 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73027d5e228de6f503b5b7335d530404fc26230a6ae3e09b33ec6e45408509a4" dependencies = [ - "kvdb", - "parity-util-mem", + "kvdb 0.6.0", + "parity-util-mem 0.6.0", + "parking_lot 0.10.2", +] + +[[package]] +name = "kvdb-memorydb" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73de822b260a3bdfb889dbbb65bb2d473eee2253973d6fa4a5d149a2a4a7c66e" +dependencies = [ + "kvdb 0.7.0", + "parity-util-mem 0.7.0", "parking_lot 0.10.2", ] [[package]] name = "kvdb-rocksdb" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84384eca250c7ff67877eda5336f28a86586aaee24acb945643590671f6bfce1" +checksum = 
"7c341ef15cfb1f923fa3b5138bfbd2d4813a2c1640b473727a53351c7f0b0fa2" dependencies = [ "fs-swap", - "kvdb", + "kvdb 0.7.0", "log", "num_cpus", "owning_ref", - "parity-util-mem", + "parity-util-mem 0.7.0", "parking_lot 0.10.2", "regex", "rocksdb", @@ -2629,16 +2666,16 @@ dependencies = [ [[package]] name = "kvdb-web" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c7f36acb1841d4c701d30ae1f2cfd242e805991443f75f6935479ed3de64903" +checksum = "2701a1369d6ea4f1b9f606db46e5e2a4a8e47f22530a07823d653f85ab1f6c34" dependencies = [ "futures 0.3.5", "js-sys", - "kvdb", - "kvdb-memorydb", + "kvdb 0.7.0", + "kvdb-memorydb 0.7.0", "log", - "parity-util-mem", + "parity-util-mem 0.7.0", "send_wrapper 0.3.0", "wasm-bindgen", "web-sys", @@ -3107,7 +3144,16 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0609345ddee5badacf857d4f547e0e5a2e987db77085c24cd887f73573a04237" dependencies = [ - "hashbrown", + "hashbrown 0.6.3", +] + +[[package]] +name = "lru" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35c456c123957de3a220cd03786e0d86aa542a88b46029973b542f426da6ef34" +dependencies = [ + "hashbrown 0.6.3", ] [[package]] @@ -3167,14 +3213,13 @@ dependencies = [ [[package]] name = "memory-db" -version = "0.21.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb2999ff7a65d5a1d72172f6d51fa2ea03024b51aee709ba5ff81c3c629a2410" +checksum = "0777fbb396f666701d939e9b3876c18ada6b3581257d88631f2590bc366d8ebe" dependencies = [ - "ahash", "hash-db", - "hashbrown", - "parity-util-mem", + "hashbrown 0.8.0", + "parity-util-mem 0.7.0", ] [[package]] @@ -3394,7 +3439,7 @@ dependencies = [ "futures 0.3.5", "hash-db", "hex", - "kvdb", + "kvdb 0.7.0", "kvdb-rocksdb", "lazy_static", "log", @@ -3402,7 +3447,7 @@ dependencies = [ "node-runtime", "node-testing", "parity-db", - "parity-util-mem", + "parity-util-mem 0.7.0", "rand 0.7.3", "sc-basic-authorship", "sc-cli", @@ -4938,15 +4983,28 @@ dependencies = [ [[package]] name = "parity-util-mem" -version = "0.6.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6e2583649a3ca84894d1d71da249abcfda54d5aca24733d72ca10d0f02361c" +checksum = "6e42755f26e5ea21a6a819d9e63cbd70713e9867a2b767ec2cc65ca7659532c5" +dependencies = [ + "cfg-if", + "impl-trait-for-tuples", + "parity-util-mem-derive", + "parking_lot 0.10.2", + "winapi 0.3.8", +] + +[[package]] +name = "parity-util-mem" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "297ff91fa36aec49ce183484b102f6b75b46776822bd81525bfc4cc9b0dd0f5c" dependencies = [ "cfg-if", "ethereum-types", - "hashbrown", + "hashbrown 0.8.0", "impl-trait-for-tuples", - "lru", + "lru 0.5.3", "parity-util-mem-derive", "parking_lot 0.10.2", "primitive-types", @@ -6082,7 +6140,7 @@ dependencies = [ "log", "names", "nix", - "parity-util-mem", + "parity-util-mem 0.7.0", "regex", "rpassword", "sc-client-api", @@ -6117,8 +6175,8 @@ dependencies = [ "futures 0.3.5", "hash-db", "hex-literal", - "kvdb", - "kvdb-memorydb", + "kvdb 0.7.0", + "kvdb-memorydb 0.6.0", "lazy_static", "log", "parity-scale-codec", @@ -6153,14 +6211,14 @@ dependencies = [ "blake2-rfc", "env_logger 0.7.1", "hash-db", - "kvdb", - "kvdb-memorydb", + "kvdb 0.7.0", + "kvdb-memorydb 0.7.0", "kvdb-rocksdb", "linked-hash-map", "log", "parity-db", "parity-scale-codec", - "parity-util-mem", + 
"parity-util-mem 0.7.0", "parking_lot 0.10.2", "quickcheck", "sc-client-api", @@ -6563,7 +6621,7 @@ dependencies = [ "ansi_term 0.12.1", "futures 0.3.5", "log", - "parity-util-mem", + "parity-util-mem 0.7.0", "sc-client-api", "sc-network", "sp-blockchain", @@ -6631,7 +6689,7 @@ dependencies = [ "linked-hash-map", "linked_hash_set", "log", - "lru", + "lru 0.4.3", "nohash-hasher", "parity-scale-codec", "parking_lot 0.10.2", @@ -6676,7 +6734,7 @@ dependencies = [ "futures-timer 3.0.2", "libp2p", "log", - "lru", + "lru 0.4.3", "quickcheck", "rand 0.7.3", "sc-network", @@ -6872,7 +6930,7 @@ dependencies = [ "netstat2", "parity-multiaddr 0.7.3", "parity-scale-codec", - "parity-util-mem", + "parity-util-mem 0.7.0", "parking_lot 0.10.2", "pin-project", "procfs", @@ -6964,7 +7022,7 @@ dependencies = [ "env_logger 0.7.1", "log", "parity-scale-codec", - "parity-util-mem", + "parity-util-mem 0.7.0", "parity-util-mem-derive", "parking_lot 0.10.2", "sc-client-api", @@ -7019,7 +7077,7 @@ dependencies = [ "linked-hash-map", "log", "parity-scale-codec", - "parity-util-mem", + "parity-util-mem 0.7.0", "parking_lot 0.10.2", "serde", "sp-blockchain", @@ -7043,7 +7101,7 @@ dependencies = [ "intervalier", "log", "parity-scale-codec", - "parity-util-mem", + "parity-util-mem 0.7.0", "parking_lot 0.10.2", "sc-block-builder", "sc-client-api", @@ -7546,7 +7604,7 @@ version = "2.0.0-rc4" dependencies = [ "derive_more", "log", - "lru", + "lru 0.4.3", "parity-scale-codec", "parking_lot 0.10.2", "sp-block-builder", @@ -7670,7 +7728,7 @@ dependencies = [ "merlin", "num-traits 0.2.11", "parity-scale-codec", - "parity-util-mem", + "parity-util-mem 0.7.0", "parking_lot 0.10.2", "pretty_assertions", "primitive-types", @@ -7700,7 +7758,7 @@ dependencies = [ name = "sp-database" version = "2.0.0-rc4" dependencies = [ - "kvdb", + "kvdb 0.7.0", "parking_lot 0.10.2", ] @@ -7859,7 +7917,7 @@ dependencies = [ "impl-trait-for-tuples", "log", "parity-scale-codec", - "parity-util-mem", + "parity-util-mem 0.7.0", "paste", "rand 0.7.3", "serde", @@ -8027,7 +8085,7 @@ name = "sp-test-primitives" version = "2.0.0-rc4" dependencies = [ "parity-scale-codec", - "parity-util-mem", + "parity-util-mem 0.7.0", "serde", "sp-application-crypto", "sp-core", @@ -8392,7 +8450,7 @@ dependencies = [ "pallet-babe", "pallet-timestamp", "parity-scale-codec", - "parity-util-mem", + "parity-util-mem 0.7.0", "sc-block-builder", "sc-executor", "sc-service", @@ -9150,9 +9208,9 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" -version = "0.22.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed8419971832eb3333dc26066e50943a20e0934efeb451b3df5ee94f7f7323ab" +checksum = "24987a413863acfa081fb75051d0c2824cd4c450e2f0a7e03dca93ac989775fc" dependencies = [ "criterion 0.2.11", "hash-db", @@ -9166,12 +9224,12 @@ dependencies = [ [[package]] name = "trie-db" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb230c24c741993b04cfccbabb45acff6f6480c5f00d3ed8794ea43db3a9d727" +checksum = "39f1a9a9252d38c5337cf0c5392988821a5cf1b2103245016968f2ab41de9e38" dependencies = [ "hash-db", - "hashbrown", + "hashbrown 0.8.0", "log", "rustc-hex", "smallvec 1.4.1", diff --git a/bin/node/bench/Cargo.toml b/bin/node/bench/Cargo.toml index 07db27a1f1..0778909fa9 100644 --- a/bin/node/bench/Cargo.toml +++ b/bin/node/bench/Cargo.toml @@ -21,8 +21,8 @@ serde = "1.0.101" serde_json = "1.0.41" structopt = "0.3" 
derive_more = "0.99.2" -kvdb = "0.6" -kvdb-rocksdb = "0.8" +kvdb = "0.7" +kvdb-rocksdb = "0.9" sp-trie = { version = "2.0.0-rc4", path = "../../../primitives/trie" } sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } sp-consensus = { version = "0.8.0-rc4", path = "../../../primitives/consensus/common" } @@ -37,6 +37,6 @@ fs_extra = "1" hex = "0.4.0" rand = { version = "0.7.2", features = ["small_rng"] } lazy_static = "1.4.0" -parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } parity-db = { version = "0.1.2" } futures = "0.3.1" diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml index a32623ffdb..8147d62034 100644 --- a/client/api/Cargo.toml +++ b/client/api/Cargo.toml @@ -25,7 +25,7 @@ sp-blockchain = { version = "2.0.0-rc4", path = "../../primitives/blockchain" } hex-literal = { version = "0.2.1" } sp-inherents = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/inherents" } sp-keyring = { version = "2.0.0-rc4", path = "../../primitives/keyring" } -kvdb = "0.6.0" +kvdb = "0.7.0" log = { version = "0.4.8" } parking_lot = "0.10.0" lazy_static = "1.4.0" diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 6ebf2f9bf8..3bf480f0b1 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -42,7 +42,7 @@ names = "0.11.0" structopt = "0.3.8" sc-tracing = { version = "2.0.0-rc4", path = "../tracing" } chrono = "0.4.10" -parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } serde = "1.0.111" [target.'cfg(not(target_os = "unknown"))'.dependencies] diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 42cc60617a..9eb9dd8914 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -14,12 +14,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] parking_lot = "0.10.0" log = "0.4.8" -kvdb = "0.6.0" -kvdb-rocksdb = { version = "0.8", optional = true } -kvdb-memorydb = "0.6.0" +kvdb = "0.7.0" +kvdb-rocksdb = { version = "0.9", optional = true } +kvdb-memorydb = "0.7.0" linked-hash-map = "0.5.2" hash-db = "0.15.2" -parity-util-mem = { version = "0.6.1", default-features = false, features = ["std"] } +parity-util-mem = { version = "0.7.0", default-features = false, features = ["std"] } codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } blake2-rfc = "0.2.18" @@ -41,7 +41,7 @@ sp-keyring = { version = "2.0.0-rc4", path = "../../primitives/keyring" } substrate-test-runtime-client = { version = "2.0.0-rc4", path = "../../test-utils/runtime/client" } env_logger = "0.7.0" quickcheck = "0.9" -kvdb-rocksdb = "0.8" +kvdb-rocksdb = "0.9" tempfile = "3" [features] diff --git a/client/informant/Cargo.toml b/client/informant/Cargo.toml index 98c72f5deb..74ce14cbbc 100644 --- a/client/informant/Cargo.toml +++ b/client/informant/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] ansi_term = "0.12.1" futures = "0.3.4" log = "0.4.8" -parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } sc-client-api = { version = "2.0.0-rc4", path = "../api" } sc-network = { version = "0.8.0-rc4", path = "../network" } sp-blockchain = { version = "2.0.0-rc4", path = 
"../../primitives/blockchain" } diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index f63d3f183d..c55c1cc57a 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -74,7 +74,7 @@ parity-multiaddr = { package = "parity-multiaddr", version = "0.7.3" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" , version = "0.8.0-rc4"} sc-tracing = { version = "2.0.0-rc4", path = "../tracing" } tracing = "0.1.10" -parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } [target.'cfg(all(any(unix, windows), not(target_os = "android")))'.dependencies] diff --git a/client/state-db/Cargo.toml b/client/state-db/Cargo.toml index 7cc8d41e76..0fc30cda9f 100644 --- a/client/state-db/Cargo.toml +++ b/client/state-db/Cargo.toml @@ -17,7 +17,7 @@ log = "0.4.8" sc-client-api = { version = "2.0.0-rc4", path = "../api" } sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } -parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } parity-util-mem-derive = "0.1.0" [dev-dependencies] diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml index bd271d8ba1..290b63cc8c 100644 --- a/client/transaction-pool/Cargo.toml +++ b/client/transaction-pool/Cargo.toml @@ -18,7 +18,7 @@ futures = { version = "0.3.1", features = ["compat"] } futures-diagnose = "1.0" intervalier = "0.4.0" log = "0.4.8" -parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } parking_lot = "0.10.0" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc4"} sc-client-api = { version = "2.0.0-rc4", path = "../api" } diff --git a/client/transaction-pool/graph/Cargo.toml b/client/transaction-pool/graph/Cargo.toml index 0a30b3a4c9..ecce54505d 100644 --- a/client/transaction-pool/graph/Cargo.toml +++ b/client/transaction-pool/graph/Cargo.toml @@ -23,7 +23,7 @@ sp-utils = { version = "2.0.0-rc4", path = "../../../primitives/utils" } sp-core = { version = "2.0.0-rc4", path = "../../../primitives/core" } sp-runtime = { version = "2.0.0-rc4", path = "../../../primitives/runtime" } sp-transaction-pool = { version = "2.0.0-rc4", path = "../../../primitives/transaction-pool" } -parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } linked-hash-map = "0.5.2" [dev-dependencies] diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index 14a1d1b022..eaeaf96a3e 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -34,7 +34,7 @@ smallvec = "1.4.1" [dev-dependencies] pretty_assertions = "0.6.1" frame-system = { version = "2.0.0-rc4", path = "../system" } -parity-util-mem = { version = "0.6.1", features = ["primitive-types"] } +parity-util-mem = { version = "0.7.0", features = ["primitive-types"] } [features] default = ["std"] diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 
6a7568a626..9b9a8b270e 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -37,7 +37,7 @@ parking_lot = { version = "0.10.0", optional = true } sp-debug-derive = { version = "2.0.0-rc4", path = "../debug-derive" } sp-externalities = { version = "0.8.0-rc4", optional = true, path = "../externalities" } sp-storage = { version = "2.0.0-rc4", default-features = false, path = "../storage" } -parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } futures = { version = "0.3.1", optional = true } # full crypto diff --git a/primitives/database/Cargo.toml b/primitives/database/Cargo.toml index 41ced29a57..6d35e69125 100644 --- a/primitives/database/Cargo.toml +++ b/primitives/database/Cargo.toml @@ -11,4 +11,4 @@ documentation = "https://docs.rs/sp-database" [dependencies] parking_lot = "0.10.0" -kvdb = "0.6.0" +kvdb = "0.7.0" diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index 9bc972646f..ecbaa7ba58 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -26,7 +26,7 @@ paste = "0.1.6" rand = { version = "0.7.2", optional = true } impl-trait-for-tuples = "0.1.3" sp-inherents = { version = "2.0.0-rc4", default-features = false, path = "../inherents" } -parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } hash256-std-hasher = { version = "0.15.2", default-features = false } either = { version = "1.5", default-features = false } diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index 96eeb2839a..3d2eb84464 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] log = "0.4.8" parking_lot = "0.10.0" hash-db = "0.15.2" -trie-db = "0.21.0" +trie-db = "0.22.0" trie-root = "0.16.0" sp-trie = { version = "2.0.0-rc4", path = "../trie" } sp-core = { version = "2.0.0-rc4", path = "../core" } diff --git a/primitives/test-primitives/Cargo.toml b/primitives/test-primitives/Cargo.toml index 8e14aeeb83..04c40422d2 100644 --- a/primitives/test-primitives/Cargo.toml +++ b/primitives/test-primitives/Cargo.toml @@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "1.3.1", default-features = sp-core = { version = "2.0.0-rc4", default-features = false, path = "../core" } serde = { version = "1.0.101", optional = true, features = ["derive"] } sp-runtime = { version = "2.0.0-rc4", default-features = false, path = "../runtime" } -parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } [features] default = [ diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 1ebc974bfb..4807c5ae0a 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -20,13 +20,13 @@ harness = false codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } sp-std = { version = "2.0.0-rc4", default-features = false, path = "../std" } hash-db = { version = "0.15.2", default-features = false } -trie-db = { version = "0.21.0", default-features = false } +trie-db = { version = "0.22.0", default-features = false } trie-root = { version = "0.16.0", 
default-features = false } -memory-db = { version = "0.21.0", default-features = false } +memory-db = { version = "0.24.0", default-features = false } sp-core = { version = "2.0.0-rc4", default-features = false, path = "../core" } [dev-dependencies] -trie-bench = "0.22.0" +trie-bench = "0.24.0" trie-standardmap = "0.15.2" criterion = "0.2.11" hex-literal = "0.2.1" diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 71987da150..f29451dd11 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -21,7 +21,7 @@ codec = { package = "parity-scale-codec", version = "1.3.1", default-features = frame-executive = { version = "2.0.0-rc4", default-features = false, path = "../../frame/executive" } sp-inherents = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/inherents" } sp-keyring = { version = "2.0.0-rc4", optional = true, path = "../../primitives/keyring" } -memory-db = { version = "0.21.0", default-features = false } +memory-db = { version = "0.24.0", default-features = false } sp-offchain = { path = "../../primitives/offchain", default-features = false, version = "2.0.0-rc4"} sp-core = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/core" } sp-std = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/std" } @@ -39,8 +39,8 @@ pallet-timestamp = { version = "2.0.0-rc4", default-features = false, path = ".. sp-finality-grandpa = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/finality-grandpa" } sp-trie = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/trie" } sp-transaction-pool = { version = "2.0.0-rc4", default-features = false, path = "../../primitives/transaction-pool" } -trie-db = { version = "0.21.0", default-features = false } -parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] } +trie-db = { version = "0.22.0", default-features = false } +parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } sc-service = { version = "0.8.0-rc4", default-features = false, optional = true, features = ["test-helpers"], path = "../../client/service" } # 3rd party diff --git a/utils/browser/Cargo.toml b/utils/browser/Cargo.toml index ed02e8e2fa..fc57c82ef0 100644 --- a/utils/browser/Cargo.toml +++ b/utils/browser/Cargo.toml @@ -21,7 +21,7 @@ console_log = "0.1.2" js-sys = "0.3.34" wasm-bindgen = "0.2.57" wasm-bindgen-futures = "0.4.7" -kvdb-web = "0.6" +kvdb-web = "0.7" sp-database = { version = "2.0.0-rc4", path = "../../primitives/database" } sc-informant = { version = "0.8.0-rc4", path = "../../client/informant" } sc-service = { version = "0.8.0-rc4", path = "../../client/service", default-features = false } -- GitLab From 1aef04351c286427f720a811ccf43d0605ef4528 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 8 Jul 2020 11:08:47 +0200 Subject: [PATCH 132/144] client/network: Rename DebugInfoBehaviour to PeerInfoBehaviour (#6556) Information retrieved via `DebugInfoBehaviour` is not only used for debugging purposes, e.g. disconnecting from nodes not responding to pings, using external addresses retrieved via identify, ... In order for the name to reflect the usage of the module, this commit renames it.
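For illustration, a minimal self-contained sketch (hypothetical types and threshold, not the actual `PeerInfoBehaviour` API) of the two non-debugging uses mentioned above, namely ping-based disconnects and identify-based external addresses:

    use std::time::{Duration, Instant};

    /// Hypothetical cache entry standing in for the information kept per peer.
    struct CachedPeerInfo {
        last_successful_ping: Instant,
        listen_addrs: Vec<String>,
    }

    /// Assumed timeout, chosen only for the sake of the example.
    const PING_TIMEOUT: Duration = Duration::from_secs(60);

    /// Disconnect peers that have stopped answering pings.
    fn should_disconnect(info: &CachedPeerInfo) -> bool {
        info.last_successful_ping.elapsed() > PING_TIMEOUT
    }

    /// Addresses learned through identify, e.g. to feed back into discovery.
    fn external_addresses(info: &CachedPeerInfo) -> &[String] {
        &info.listen_addrs
    }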
--- client/network/src/behaviour.rs | 20 +++++++++--------- client/network/src/lib.rs | 2 +- .../src/{debug_info.rs => peer_info.rs} | 21 +++++++++---------- 3 files changed, 21 insertions(+), 22 deletions(-) rename client/network/src/{debug_info.rs => peer_info.rs} (96%) diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index dec8788f3f..596b1d5167 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -16,7 +16,7 @@ use crate::{ config::{ProtocolId, Role}, block_requests, light_client_handler, finality_requests, - debug_info, discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut}, + peer_info, discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut}, protocol::{message::{self, Roles}, CustomMessageOutcome, Protocol}, Event, ObservedRole, DhtEvent, ExHashT, }; @@ -39,7 +39,7 @@ pub struct Behaviour { substrate: Protocol, /// Periodically pings and identifies the nodes we are connected to, and store information in a /// cache. - debug_info: debug_info::DebugInfoBehaviour, + peer_info: peer_info::PeerInfoBehaviour, /// Discovers nodes of the network. discovery: DiscoveryBehaviour, /// Block request handling. @@ -113,7 +113,7 @@ impl Behaviour { ) -> Self { Behaviour { substrate, - debug_info: debug_info::DebugInfoBehaviour::new(user_agent, local_public_key), + peer_info: peer_info::PeerInfoBehaviour::new(user_agent, local_public_key), discovery: disco_config.finish(), block_requests, finality_proof_requests, @@ -153,8 +153,8 @@ impl Behaviour { /// Returns `None` if we don't know anything about this node. Always returns `Some` for nodes /// we're connected to, meaning that if `None` is returned then we're not connected to that /// node. - pub fn node(&self, peer_id: &PeerId) -> Option { - self.debug_info.node(peer_id) + pub fn node(&self, peer_id: &PeerId) -> Option { + self.peer_info.node(peer_id) } /// Registers a new notifications protocol. @@ -355,10 +355,10 @@ impl NetworkBehaviourEventProcess NetworkBehaviourEventProcess +impl NetworkBehaviourEventProcess for Behaviour { - fn inject_event(&mut self, event: debug_info::DebugInfoEvent) { - let debug_info::DebugInfoEvent::Identified { peer_id, mut info } = event; + fn inject_event(&mut self, event: peer_info::PeerInfoEvent) { + let peer_info::PeerInfoEvent::Identified { peer_id, mut info } = event; if info.listen_addrs.len() > 30 { debug!(target: "sub-libp2p", "Node {:?} has reported more than 30 addresses; \ it is identified by {:?} and {:?}", peer_id, info.protocol_version, @@ -380,8 +380,8 @@ impl NetworkBehaviourEventProcess DiscoveryOut::UnroutablePeer(_peer_id) => { // Obtaining and reporting listen addresses for unroutable peers back // to Kademlia is handled by the `Identify` protocol, part of the - // `DebugInfoBehaviour`. See the `NetworkBehaviourEventProcess` - // implementation for `DebugInfoEvent`. + // `PeerInfoBehaviour`. See the `NetworkBehaviourEventProcess` + // implementation for `PeerInfoEvent`. 
} DiscoveryOut::Discovered(peer_id) => { self.substrate.add_discovered_nodes(iter::once(peer_id)); diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 6106616d99..b8e5d7582b 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -245,7 +245,7 @@ mod behaviour; mod block_requests; mod chain; -mod debug_info; +mod peer_info; mod discovery; mod finality_requests; mod light_client_handler; diff --git a/client/network/src/debug_info.rs b/client/network/src/peer_info.rs similarity index 96% rename from client/network/src/debug_info.rs rename to client/network/src/peer_info.rs index a11262caa5..e69ad2b17e 100644 --- a/client/network/src/debug_info.rs +++ b/client/network/src/peer_info.rs @@ -38,9 +38,8 @@ const CACHE_EXPIRE: Duration = Duration::from_secs(10 * 60); /// Interval at which we perform garbage collection on the node info. const GARBAGE_COLLECT_INTERVAL: Duration = Duration::from_secs(2 * 60); -/// Implementation of `NetworkBehaviour` that holds information about nodes in cache for diagnostic -/// purposes. -pub struct DebugInfoBehaviour { +/// Implementation of `NetworkBehaviour` that holds information about peers in cache. +pub struct PeerInfoBehaviour { /// Periodically ping nodes, and close the connection if it's unresponsive. ping: Ping, /// Periodically identifies the remote and responds to incoming requests. @@ -78,8 +77,8 @@ impl NodeInfo { } } -impl DebugInfoBehaviour { - /// Builds a new `DebugInfoBehaviour`. +impl PeerInfoBehaviour { + /// Builds a new `PeerInfoBehaviour`. pub fn new( user_agent: String, local_public_key: PublicKey, @@ -89,7 +88,7 @@ impl DebugInfoBehaviour { Identify::new(proto_version, user_agent, local_public_key) }; - DebugInfoBehaviour { + PeerInfoBehaviour { ping: Ping::new(PingConfig::new()), identify, nodes_info: FnvHashMap::default(), @@ -154,8 +153,8 @@ impl<'a> Node<'a> { /// Event that can be emitted by the behaviour. #[derive(Debug)] -pub enum DebugInfoEvent { - /// We have obtained debug information from a peer, including the addresses it is listening +pub enum PeerInfoEvent { + /// We have obtained identity information from a peer, including the addresses it is listening /// on. Identified { /// Id of the peer that has been identified. @@ -165,12 +164,12 @@ pub enum DebugInfoEvent { }, } -impl NetworkBehaviour for DebugInfoBehaviour { +impl NetworkBehaviour for PeerInfoBehaviour { type ProtocolsHandler = IntoProtocolsHandlerSelect< ::ProtocolsHandler, ::ProtocolsHandler >; - type OutEvent = DebugInfoEvent; + type OutEvent = PeerInfoEvent; fn new_handler(&mut self) -> Self::ProtocolsHandler { IntoProtocolsHandler::select(self.ping.new_handler(), self.identify.new_handler()) @@ -317,7 +316,7 @@ impl NetworkBehaviour for DebugInfoBehaviour { match event { IdentifyEvent::Received { peer_id, info, .. 
} => { self.handle_identify_report(&peer_id, &info); - let event = DebugInfoEvent::Identified { peer_id, info }; + let event = PeerInfoEvent::Identified { peer_id, info }; return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)); } IdentifyEvent::Error { peer_id, error } => -- GitLab From 802a0d0b0ade796a3b2d4663212518315923fe8a Mon Sep 17 00:00:00 2001 From: pscott <30843220+pscott@users.noreply.github.com> Date: Wed, 8 Jul 2020 12:11:09 +0200 Subject: [PATCH 133/144] Add log rotation (#6564) * Use flexi_logger; Add log rotation * Add default rotation; Add FlexiLogger error * Fix compilation error * Remove logging to stdout if it's not a tty * Fix formatting Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Remove needless debug statement * Default to unlimited size for log rotation * Add more comments about log-age option * Remove unused variable * Fix typo in comment Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- Cargo.lock | 22 +- bin/node/bench/src/main.rs | 5 +- client/cli/Cargo.toml | 2 +- client/cli/src/config.rs | 13 +- client/cli/src/error.rs | 4 + client/cli/src/lib.rs | 81 +------- client/cli/src/logger.rs | 271 +++++++++++++++++++++++++ client/cli/src/params/shared_params.rs | 10 + 8 files changed, 324 insertions(+), 84 deletions(-) create mode 100644 client/cli/src/logger.rs diff --git a/Cargo.lock b/Cargo.lock index c0c734533d..49c68c667e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1450,6 +1450,20 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "flexi_logger" +version = "0.15.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33897654c23a50cebab45e18356f69fb771c9949a6928344fb1f01ffccc7c5f3" +dependencies = [ + "chrono", + "glob 0.3.0", + "log", + "regex", + "thiserror", + "yansi", +] + [[package]] name = "fnv" version = "1.0.6" @@ -6133,8 +6147,8 @@ dependencies = [ "atty", "chrono", "derive_more", - "env_logger 0.7.1", "fdlimit", + "flexi_logger", "futures 0.3.5", "lazy_static", "log", @@ -9878,6 +9892,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "yansi" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" + [[package]] name = "zeroize" version = "1.1.0" diff --git a/bin/node/bench/src/main.rs b/bin/node/bench/src/main.rs index 1182024711..941176f277 100644 --- a/bin/node/bench/src/main.rs +++ b/bin/node/bench/src/main.rs @@ -63,6 +63,9 @@ struct Opt { #[structopt(long)] transactions: Option, + #[structopt(flatten)] + log_rotation_opt: sc_cli::LogRotationOpt, + /// Mode /// /// "regular" for regular benchmark @@ -77,7 +80,7 @@ fn main() { let opt = Opt::from_args(); if !opt.json { - sc_cli::init_logger(""); + sc_cli::init_logger("", &opt.log_rotation_opt).expect("init_logger should not fail."); } let mut import_benchmarks = Vec::new(); diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 3bf480f0b1..4523769e73 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" -env_logger = "0.7.0" +flexi_logger = "0.15.7" log = "0.4.8" atty = "0.2.13" regex = "1.3.1" diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index fa3f09116c..35fc95cb60 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -21,9 +21,10 @@ use crate::arg_enums::Database; use crate::error::Result; use crate::{ - init_logger, 
DatabaseParams, ImportParams, KeystoreParams, NetworkParams, NodeKeyParams, + DatabaseParams, ImportParams, KeystoreParams, NetworkParams, NodeKeyParams, OffchainWorkerParams, PruningParams, SharedParams, SubstrateCli, }; +use crate::logger::{LogRotationOpt, init_logger}; use names::{Generator, Name}; use sc_client_api::execution_extensions::ExecutionStrategies; use sc_service::config::{ @@ -488,6 +489,13 @@ pub trait CliConfiguration: Sized { Ok(self.shared_params().log_filters().join(",")) } + /// Get the log directory for logging. + /// + /// By default this is retrieved from `SharedParams`. + fn log_rotation_opt(&self) -> Result<&LogRotationOpt> { + Ok(self.shared_params().log_rotation_opt()) + } + /// Initialize substrate. This must be done only once. /// /// This method: @@ -497,11 +505,12 @@ pub trait CliConfiguration: Sized { /// 3. Initialize the logger fn init(&self) -> Result<()> { let logger_pattern = self.log_filters()?; + let log_rotation_opt = self.log_rotation_opt()?; sp_panic_handler::set(&C::support_url(), &C::impl_version()); fdlimit::raise_fd_limit(); - init_logger(&logger_pattern); + init_logger(&logger_pattern, log_rotation_opt)?; Ok(()) } diff --git a/client/cli/src/error.rs b/client/cli/src/error.rs index f091354be1..f29b59ed12 100644 --- a/client/cli/src/error.rs +++ b/client/cli/src/error.rs @@ -17,6 +17,7 @@ // along with this program. If not, see . //! Initialization errors. +use flexi_logger::FlexiLoggerError; /// Result type alias for the CLI. pub type Result = std::result::Result; @@ -32,6 +33,8 @@ pub enum Error { Service(sc_service::Error), /// Client error Client(sp_blockchain::Error), + /// Flexi Logger error + FlexiLogger(FlexiLoggerError), /// Input error #[from(ignore)] Input(String), @@ -65,6 +68,7 @@ impl std::error::Error for Error { Error::Cli(ref err) => Some(err), Error::Service(ref err) => Some(err), Error::Client(ref err) => Some(err), + Error::FlexiLogger(ref err) => Some(err), Error::Input(_) => None, Error::InvalidListenMultiaddress => None, Error::Other(_) => None, diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index c7f48d2721..a06e48626f 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -27,15 +27,13 @@ mod config; mod error; mod params; mod runner; +mod logger; pub use arg_enums::*; pub use commands::*; pub use config::*; pub use error::*; -use lazy_static::lazy_static; -use log::info; pub use params::*; -use regex::Regex; pub use runner::*; use sc_service::{Configuration, TaskExecutor}; pub use sc_service::{ChainSpec, Role}; @@ -46,6 +44,7 @@ use structopt::{ clap::{self, AppSettings}, StructOpt, }; +pub use crate::logger::{init_logger, LogRotationOpt}; /// Substrate client CLI /// @@ -227,79 +226,3 @@ pub trait SubstrateCli: Sized { /// Native runtime version. fn native_runtime_version(chain_spec: &Box) -> &'static RuntimeVersion; } - -/// Initialize the logger -pub fn init_logger(pattern: &str) { - use ansi_term::Colour; - - let mut builder = env_logger::Builder::new(); - // Disable info logging by default for some modules: - builder.filter(Some("ws"), log::LevelFilter::Off); - builder.filter(Some("yamux"), log::LevelFilter::Off); - builder.filter(Some("hyper"), log::LevelFilter::Warn); - builder.filter(Some("cranelift_wasm"), log::LevelFilter::Warn); - // Always log the special target `sc_tracing`, overrides global level - builder.filter(Some("sc_tracing"), log::LevelFilter::Info); - // Enable info for others. 
- builder.filter(None, log::LevelFilter::Info); - - if let Ok(lvl) = std::env::var("RUST_LOG") { - builder.parse_filters(&lvl); - } - - builder.parse_filters(pattern); - let isatty = atty::is(atty::Stream::Stderr); - let enable_color = isatty; - - builder.format(move |buf, record| { - let now = time::now(); - let timestamp = - time::strftime("%Y-%m-%d %H:%M:%S", &now).expect("Error formatting log timestamp"); - - let mut output = if log::max_level() <= log::LevelFilter::Info { - format!( - "{} {}", - Colour::Black.bold().paint(timestamp), - record.args(), - ) - } else { - let name = ::std::thread::current() - .name() - .map_or_else(Default::default, |x| { - format!("{}", Colour::Blue.bold().paint(x)) - }); - let millis = (now.tm_nsec as f32 / 1000000.0).floor() as usize; - let timestamp = format!("{}.{:03}", timestamp, millis); - format!( - "{} {} {} {} {}", - Colour::Black.bold().paint(timestamp), - name, - record.level(), - record.target(), - record.args() - ) - }; - - if !isatty && record.level() <= log::Level::Info && atty::is(atty::Stream::Stdout) { - // duplicate INFO/WARN output to console - println!("{}", output); - } - - if !enable_color { - output = kill_color(output.as_ref()); - } - - writeln!(buf, "{}", output) - }); - - if builder.try_init().is_err() { - info!("💬 Not registering Substrate logger, as there is already a global logger registered!"); - } -} - -fn kill_color(s: &str) -> String { - lazy_static! { - static ref RE: Regex = Regex::new("\x1b\\[[^m]+m").expect("Error initializing color regex"); - } - RE.replace_all(s, "").to_string() -} diff --git a/client/cli/src/logger.rs b/client/cli/src/logger.rs new file mode 100644 index 0000000000..2422f0ec60 --- /dev/null +++ b/client/cli/src/logger.rs @@ -0,0 +1,271 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use ansi_term::Colour; +use flexi_logger::{ + DeferredNow, Duplicate, LogSpecBuilder, + LogSpecification, LogTarget, Logger, Criterion, Naming, Cleanup, Age, +}; +use lazy_static::lazy_static; +use regex::Regex; +use std::path::PathBuf; +use structopt::{ + StructOpt, +}; +use crate::error::{Error, Result}; + +type IoResult = std::result::Result<(), std::io::Error>; + +/// Default size used for rotation. Basically unlimited. +const DEFAULT_ROTATION_SIZE: u64 = u64::MAX; + +/// Options for log rotation. +#[derive(Debug, StructOpt)] +pub struct LogRotationOpt { + /// Specify the path of the directory which will contain the log files. + /// Defaults to never rotating logs. + #[structopt(long, parse(from_os_str))] + log_directory: Option, + + /// Rotate the log file when the local clock has started a new day/hour/minute/second + /// since the current file has been created. 
+ #[structopt(long, + conflicts_with("log-size"), + possible_values(&["day", "hour", "minute", "second"]), + parse(from_str = age_from_str)) + ] + log_age: Option, + + /// Rotate the log file when it exceeds this size (in bytes). + #[structopt(long, conflicts_with("log-age"))] + log_size: Option, +} + +/// Utility for parsing an Age from a &str. +fn age_from_str(s: &str) -> Age { + match s { + "day" => Age::Day, + "hour" => Age::Hour, + "minute" => Age::Minute, + "second" => Age::Second, + _ => unreachable!(), + } +} + +/// Format used when writing to a tty. Colors the output. +fn colored_fmt( + w: &mut dyn std::io::Write, + _now: &mut DeferredNow, + record: &log::Record, +) -> IoResult { + let now = time::now(); + let timestamp = + time::strftime("%Y-%m-%d %H:%M:%S", &now).expect("Error formatting log timestamp"); + + let output = if log::max_level() <= log::LevelFilter::Info { + format!( + "{} {}", + Colour::Black.bold().paint(timestamp), + record.args(), + ) + } else { + let name = ::std::thread::current() + .name() + .map_or_else(Default::default, |x| { + format!("{}", Colour::Blue.bold().paint(x)) + }); + let millis = (now.tm_nsec as f32 / 1000000.0).floor() as usize; + let timestamp = format!("{}.{:03}", timestamp, millis); + format!( + "{} {} {} {} {}", + Colour::Black.bold().paint(timestamp), + name, + record.level(), + record.target(), + record.args() + ) + }; + + write!(w, "{}", output) +} + +/// Format used when logging to files. Does not add any colors. +fn file_fmt( + w: &mut dyn std::io::Write, + _now: &mut DeferredNow, + record: &log::Record, +) -> IoResult { + let now = time::now(); + let timestamp = + time::strftime("%Y-%m-%d %H:%M:%S", &now).expect("Error formatting log timestamp"); + + let output = if log::max_level() <= log::LevelFilter::Info { + format!("{} {}", timestamp, record.args(),) + } else { + let name = std::thread::current() + .name() + .map_or_else(Default::default, |x| format!("{}", x)); + let millis = (now.tm_nsec as f32 / 1000000.0).floor() as usize; + let timestamp = format!("{}.{:03}", timestamp, millis); + format!( + "{} {} {} {} {}", + timestamp, + name, + record.level(), + record.target(), + record.args() + ) + }; + + // Required because substrate sometimes sends strings that are colored. + // Doing this ensures no colors are ever printed to files. + let output = kill_color(&output); + + write!(w, "{}", output) +} + +/// Initialize the logger +pub fn init_logger(pattern: &str, log_rotation_opt: &LogRotationOpt) -> Result<()> { + + let mut builder = LogSpecBuilder::new(); + // Disable info logging by default for some modules: + builder.module("ws", log::LevelFilter::Off); + builder.module("yamux", log::LevelFilter::Off); + builder.module("hyper", log::LevelFilter::Warn); + builder.module("cranelift_wasm", log::LevelFilter::Warn); + // Always log the special target `sc_tracing`, overrides global level + builder.module("sc_tracing", log::LevelFilter::Info); + // Enable info for others. + builder.default(log::LevelFilter::Info); + + // Add filters defined by RUST_LOG. + builder.insert_modules_from(LogSpecification::env()?); + + // Add filters passed in as argument. + builder.insert_modules_from(LogSpecification::parse(pattern)?); + + // Build the LogSpec. + let spec = builder.build(); + + // Use timestamps to differentiate logs. + let naming = Naming::Timestamps; + // Never cleanup old logs; let the end-user take care of that. 
+ let cleanup = Cleanup::Never; + + let age = log_rotation_opt.log_age; + let size = log_rotation_opt.log_size; + + // Build a Criterion from the options. + let criterion = match (age, size) { + (Some(a), None) => Criterion::Age(a), + (None, Some(s)) => Criterion::Size(s), + // Default to rotating with a size of `DEFAULT_ROTATION_SIZE`. + (None, None) => Criterion::Size(DEFAULT_ROTATION_SIZE), + _ => return Err(Error::Input("Only one of Age or Size should be defined".into())) + }; + + let isatty_stderr = atty::is(atty::Stream::Stderr); + let isatty_stdout = atty::is(atty::Stream::Stdout); + let logger = Logger::with(spec) + .format(file_fmt) + .format_for_stderr(colored_fmt) + .format_for_stdout(colored_fmt) + .rotate(criterion, naming, cleanup); // Won't get used if log_directory has not been specified. + + + let logger = match (log_rotation_opt.log_directory.as_ref(), isatty_stderr) { + // Only log to stderr using colored format; nothing to file, nothing to stdout. + (None, true) => { + logger.log_target(LogTarget::StdErr) + } + // Log to stderr using file format, log to stdout using colored format. + (None, false) => { + let logger = logger + .log_target(LogTarget::DevNull) + .format_for_stderr(file_fmt) + .duplicate_to_stderr(Duplicate::All); + + // Write to stdout only if it's a tty. + if isatty_stdout { + logger.duplicate_to_stdout(Duplicate::Info) + } else { + logger + } + } + // Log to stderr with colored format, log to file with file format. Nothing to stdout. + (Some(file), true) => { + logger + .log_target(LogTarget::File) + .duplicate_to_stderr(Duplicate::All) + .directory(file) + } + // Log to stderr with file format, log to file with file format, log to stdout with colored format. + (Some(file), false) => { + let logger = logger + .log_target(LogTarget::File) + .format_for_stderr(file_fmt) + .duplicate_to_stderr(Duplicate::All) + .directory(file); + + // Write to stdout only if it's a tty. + if isatty_stdout { + logger.duplicate_to_stdout(Duplicate::Info) + } else { + logger + } + } + }; + + logger.start().map(|_| ()).map_err(|e| e.into()) +} + +fn kill_color(s: &str) -> String { + lazy_static! { + static ref RE: Regex = Regex::new("\x1b\\[[^m]+m").expect("Error initializing color regex"); + } + RE.replace_all(s, "").to_string() +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn logger_default() { + let pattern = ""; + let log_rotation_opt = LogRotationOpt { + log_directory: None, + log_age: None, + log_size: None, + }; + + assert!(init_logger(pattern, &log_rotation_opt).is_ok()); + } + + #[test] + fn logger_conflicting_opt() { + let pattern = ""; + let log_rotation_opt = LogRotationOpt { + log_directory: None, + log_age: Some(Age::Day), + log_size: Some(1337), + }; + + assert!(init_logger(pattern, &log_rotation_opt).is_err()); + } +} diff --git a/client/cli/src/params/shared_params.rs b/client/cli/src/params/shared_params.rs index ad9ab04070..42e6875719 100644 --- a/client/cli/src/params/shared_params.rs +++ b/client/cli/src/params/shared_params.rs @@ -19,8 +19,10 @@ use sc_service::config::BasePath; use std::path::PathBuf; use structopt::StructOpt; +use crate::logger::LogRotationOpt; /// Shared parameters used by all `CoreParams`. +#[allow(missing_docs)] #[derive(Debug, StructOpt)] pub struct SharedParams { /// Specify the chain specification (one of dev, local, or staging). @@ -41,6 +43,9 @@ pub struct SharedParams { /// By default, all targets log `info`. The global log level can be set with -l. 
#[structopt(short = "l", long, value_name = "LOG_PATTERN")] pub log: Vec, + + #[structopt(flatten)] + pub log_rotation_opt: LogRotationOpt, } impl SharedParams { @@ -72,4 +77,9 @@ impl SharedParams { pub fn log_filters(&self) -> &[String] { &self.log } + + /// Get the file rotation options for the logging + pub fn log_rotation_opt(&self) -> &LogRotationOpt { + &self.log_rotation_opt + } } -- GitLab From 83b06a297699e1a3b23bb3c161088064782a6e75 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 8 Jul 2020 13:44:51 +0200 Subject: [PATCH 134/144] Send Status message on all newly-opened legacy substreams (#6593) * Send Status message on all newly-opened legacy substreams * Fix tests --- client/network/src/protocol.rs | 40 +++++---- .../src/protocol/generic_proto/behaviour.rs | 48 ++++------- .../protocol/generic_proto/handler/group.rs | 54 +++++------- .../src/protocol/generic_proto/tests.rs | 10 ++- .../protocol/generic_proto/upgrade/legacy.rs | 85 ++++++++++++------- 5 files changed, 121 insertions(+), 116 deletions(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index ff3748bd55..d037057e50 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -351,6 +351,22 @@ impl BlockAnnouncesHandshake { } } +/// Builds a SCALE-encoded "Status" message to send as handshake for the legacy protocol. +fn build_status_message(protocol_config: &ProtocolConfig, chain: &Arc>) -> Vec { + let info = chain.info(); + let status = message::generic::Status { + version: CURRENT_VERSION, + min_supported_version: MIN_VERSION, + genesis_hash: info.genesis_hash, + roles: protocol_config.roles.into(), + best_number: info.best_number, + best_hash: info.best_hash, + chain_status: Vec::new(), // TODO: find a way to make this backwards-compatible + }; + + Message::::Status(status).encode() +} + /// Fallback mechanism to use to send a notification if no substream is open. #[derive(Debug, Clone, PartialEq, Eq)] enum Fallback { @@ -403,6 +419,7 @@ impl Protocol { local_peer_id, protocol_id.clone(), versions, + build_status_message(&config, &chain), peerset, queue_size_report ); @@ -547,6 +564,11 @@ impl Protocol { pub fn update_chain(&mut self) { let info = self.context_data.chain.info(); self.sync.update_chain_info(&info.best_hash, info.best_number); + self.behaviour.set_legacy_handshake_message(build_status_message(&self.config, &self.context_data.chain)); + self.behaviour.set_notif_protocol_handshake( + &self.block_announces_protocol, + BlockAnnouncesHandshake::build(&self.config, &self.context_data.chain).encode() + ); } /// Inform sync about an own imported block. 
@@ -683,7 +705,6 @@ impl Protocol { pub fn on_peer_connected(&mut self, who: PeerId) { trace!(target: "sync", "Connecting {}", who); self.handshaking_peers.insert(who.clone(), HandshakingPeer { timestamp: Instant::now() }); - self.send_status(who); } /// Called by peer when it is disconnecting @@ -1329,22 +1350,6 @@ impl Protocol { } } - /// Send Status message - fn send_status(&mut self, who: PeerId) { - let info = self.context_data.chain.info(); - let status = message::generic::Status { - version: CURRENT_VERSION, - min_supported_version: MIN_VERSION, - genesis_hash: info.genesis_hash, - roles: self.config.roles, - best_number: info.best_number, - best_hash: info.best_hash, - chain_status: Vec::new(), // TODO: find a way to make this backwards-compatible - }; - - self.send_message(&who, None, GenericMessage::Status(status)) - } - fn on_block_announce( &mut self, who: PeerId, @@ -1498,6 +1503,7 @@ impl Protocol { }); if let Some((best_num, best_hash)) = new_best { self.sync.update_chain_info(&best_hash, best_num); + self.behaviour.set_legacy_handshake_message(build_status_message(&self.config, &self.context_data.chain)); self.behaviour.set_notif_protocol_handshake( &self.block_announces_protocol, BlockAnnouncesHandshake::build(&self.config, &self.context_data.chain).encode() diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index be2451c3f4..48b75b6321 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -30,12 +30,13 @@ use libp2p::swarm::{ PollParameters }; use log::{debug, error, trace, warn}; +use parking_lot::RwLock; use prometheus_endpoint::HistogramVec; use rand::distributions::{Distribution as _, Uniform}; use smallvec::SmallVec; use std::task::{Context, Poll}; use std::{borrow::Cow, cmp, collections::{hash_map::Entry, VecDeque}}; -use std::{error, mem, pin::Pin, str, time::Duration}; +use std::{error, mem, pin::Pin, str, sync::Arc, time::Duration}; use wasm_timer::Instant; /// Network behaviour that handles opening substreams for custom protocols with other peers. @@ -118,7 +119,7 @@ pub struct GenericProto { /// Notification protocols. Entries are only ever added and not removed. /// Contains, for each protocol, the protocol name and the message to send as part of the /// initial handshake. - notif_protocols: Vec<(Cow<'static, [u8]>, Vec)>, + notif_protocols: Vec<(Cow<'static, [u8]>, Arc>>)>, /// Receiver for instructions about who to connect to or disconnect from. peerset: sc_peerset::Peerset, @@ -220,20 +221,6 @@ enum PeerState { } impl PeerState { - /// True if there exists any established connection to the peer. - fn is_connected(&self) -> bool { - match self { - PeerState::Disabled { .. } | - PeerState::DisabledPendingEnable { .. } | - PeerState::Enabled { .. } | - PeerState::PendingRequest { .. } | - PeerState::Requested | - PeerState::Incoming { .. } => true, - PeerState::Poisoned | - PeerState::Banned { .. } => false, - } - } - /// True if there exists an established connection to the peer /// that is open for custom protocol traffic. 
fn is_open(&self) -> bool { @@ -343,10 +330,12 @@ impl GenericProto { local_peer_id: PeerId, protocol: impl Into, versions: &[u8], + handshake_message: Vec, peerset: sc_peerset::Peerset, queue_size_report: Option, ) -> Self { - let legacy_protocol = RegisteredProtocol::new(protocol, versions); + let legacy_handshake_message = Arc::new(RwLock::new(handshake_message)); + let legacy_protocol = RegisteredProtocol::new(protocol, versions, legacy_handshake_message); GenericProto { local_peer_id, @@ -372,7 +361,7 @@ impl GenericProto { protocol_name: impl Into>, handshake_msg: impl Into> ) { - self.notif_protocols.push((protocol_name.into(), handshake_msg.into())); + self.notif_protocols.push((protocol_name.into(), Arc::new(RwLock::new(handshake_msg.into())))); } /// Modifies the handshake of the given notifications protocol. @@ -383,24 +372,17 @@ impl GenericProto { protocol_name: &[u8], handshake_message: impl Into> ) { - let handshake_message = handshake_message.into(); if let Some(protocol) = self.notif_protocols.iter_mut().find(|(name, _)| name == &protocol_name) { - protocol.1 = handshake_message.clone(); - } else { - return; + *protocol.1.write() = handshake_message.into(); } + } - // Send an event to all the peers we're connected to, updating the handshake message. - for (peer_id, _) in self.peers.iter().filter(|(_, state)| state.is_connected()) { - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::UpdateHandshake { - protocol_name: Cow::Owned(protocol_name.to_owned()), - handshake_message: handshake_message.clone(), - }, - }); - } + /// Modifies the handshake of the legacy protocol. + pub fn set_legacy_handshake_message( + &mut self, + handshake_message: impl Into> + ) { + *self.legacy_protocol.handshake_message().write() = handshake_message.into(); } /// Returns the number of discovered nodes that we keep in memory. diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs index 625916a05e..ed3e564223 100644 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -64,8 +64,9 @@ use libp2p::swarm::{ NegotiatedSubstream, }; use log::{debug, error}; +use parking_lot::RwLock; use prometheus_endpoint::HistogramVec; -use std::{borrow::Cow, error, io, str, task::{Context, Poll}}; +use std::{borrow::Cow, error, io, str, sync::Arc, task::{Context, Poll}}; /// Implements the `IntoProtocolsHandler` trait of libp2p. /// @@ -77,10 +78,10 @@ use std::{borrow::Cow, error, io, str, task::{Context, Poll}}; pub struct NotifsHandlerProto { /// Prototypes for handlers for inbound substreams, and the message we respond with in the /// handshake. - in_handlers: Vec<(NotifsInHandlerProto, Vec)>, + in_handlers: Vec<(NotifsInHandlerProto, Arc>>)>, /// Prototypes for handlers for outbound substreams, and the initial handshake message we send. - out_handlers: Vec<(NotifsOutHandlerProto, Vec)>, + out_handlers: Vec<(NotifsOutHandlerProto, Arc>>)>, /// Prototype for handler for backwards-compatibility. legacy: LegacyProtoHandlerProto, @@ -91,10 +92,10 @@ pub struct NotifsHandlerProto { /// See the documentation at the module level for more information. pub struct NotifsHandler { /// Handlers for inbound substreams, and the message we respond with in the handshake. 
- in_handlers: Vec<(NotifsInHandler, Vec)>, + in_handlers: Vec<(NotifsInHandler, Arc>>)>, /// Handlers for outbound substreams, and the initial handshake message we send. - out_handlers: Vec<(NotifsOutHandler, Vec)>, + out_handlers: Vec<(NotifsOutHandler, Arc>>)>, /// Handler for backwards-compatibility. legacy: LegacyProtoHandler, @@ -161,18 +162,6 @@ pub enum NotifsHandlerIn { message: Vec, }, - /// Modifies the handshake message of a notifications protocol. - UpdateHandshake { - /// Name of the protocol for the message. - /// - /// Must match one of the registered protocols. - protocol_name: Cow<'static, [u8]>, - - /// The new handshake message to send if we open a substream or if the remote opens a - /// substream towards us. - handshake_message: Vec, - }, - /// Sends a notifications message. SendNotification { /// Name of the protocol for the message. @@ -253,7 +242,7 @@ impl NotifsHandlerProto { /// messages queue. If passed, it must have one label for the protocol name. pub fn new( legacy: RegisteredProtocol, - list: impl Into, Vec)>>, + list: impl Into, Arc>>)>>, queue_size_report: Option ) -> Self { let list = list.into(); @@ -346,12 +335,17 @@ impl ProtocolsHandler for NotifsHandler { self.enabled = EnabledState::Enabled; self.legacy.inject_event(LegacyProtoHandlerIn::Enable); for (handler, initial_message) in &mut self.out_handlers { + // We create `initial_message` on a separate line to be sure that the lock + // is released as soon as possible. + let initial_message = initial_message.read().clone(); handler.inject_event(NotifsOutHandlerIn::Enable { - initial_message: initial_message.clone(), + initial_message, }); } for num in self.pending_in.drain(..) { - let handshake_message = self.in_handlers[num].1.clone(); + // We create `handshake_message` on a separate line to be sure + // that the lock is released as soon as possible. + let handshake_message = self.in_handlers[num].1.read().clone(); self.in_handlers[num].0 .inject_event(NotifsInHandlerIn::Accept(handshake_message)); } @@ -375,18 +369,6 @@ impl ProtocolsHandler for NotifsHandler { }, NotifsHandlerIn::SendLegacy { message } => self.legacy.inject_event(LegacyProtoHandlerIn::SendCustomMessage { message }), - NotifsHandlerIn::UpdateHandshake { protocol_name, handshake_message } => { - for (handler, current_handshake) in &mut self.in_handlers { - if handler.protocol_name() == &*protocol_name { - *current_handshake = handshake_message.clone(); - } - } - for (handler, current_handshake) in &mut self.out_handlers { - if handler.protocol_name() == &*protocol_name { - *current_handshake = handshake_message.clone(); - } - } - } NotifsHandlerIn::SendNotification { message, encoded_fallback_message, protocol_name } => { for (handler, _) in &mut self.out_handlers { if handler.protocol_name() != &protocol_name[..] { @@ -524,8 +506,12 @@ impl ProtocolsHandler for NotifsHandler { ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::OpenRequest(_)) => match self.enabled { EnabledState::Initial => self.pending_in.push(handler_num), - EnabledState::Enabled => - handler.inject_event(NotifsInHandlerIn::Accept(handshake_message.clone())), + EnabledState::Enabled => { + // We create `handshake_message` on a separate line to be sure + // that the lock is released as soon as possible. 
+ let handshake_message = handshake_message.read().clone(); + handler.inject_event(NotifsInHandlerIn::Accept(handshake_message)) + }, EnabledState::Disabled => handler.inject_event(NotifsInHandlerIn::Refuse), }, diff --git a/client/network/src/protocol/generic_proto/tests.rs b/client/network/src/protocol/generic_proto/tests.rs index de02ac5f34..f932a3a089 100644 --- a/client/network/src/protocol/generic_proto/tests.rs +++ b/client/network/src/protocol/generic_proto/tests.rs @@ -83,7 +83,7 @@ fn build_nodes() -> (Swarm, Swarm) { }); let behaviour = CustomProtoWithAddr { - inner: GenericProto::new(local_peer_id, &b"test"[..], &[1], peerset, None), + inner: GenericProto::new(local_peer_id, &b"test"[..], &[1], vec![], peerset, None), addrs: addrs .iter() .enumerate() @@ -241,6 +241,8 @@ fn two_nodes_transfer_lots_of_packets() { ); } }, + // An empty handshake is being sent after opening. + Some(GenericProtoOut::LegacyMessage { message, .. }) if message.is_empty() => {}, _ => panic!(), } } @@ -251,6 +253,8 @@ fn two_nodes_transfer_lots_of_packets() { loop { match ready!(service2.poll_next_unpin(cx)) { Some(GenericProtoOut::CustomProtocolOpen { .. }) => {}, + // An empty handshake is being sent after opening. + Some(GenericProtoOut::LegacyMessage { message, .. }) if message.is_empty() => {}, Some(GenericProtoOut::LegacyMessage { message, .. }) => { match Message::::decode(&mut &message[..]).unwrap() { Message::::BlockResponse(BlockResponse { id: _, blocks }) => { @@ -312,6 +316,8 @@ fn basic_two_nodes_requests_in_parallel() { service1.send_packet(&peer_id, msg.encode()); } }, + // An empty handshake is being sent after opening. + Some(GenericProtoOut::LegacyMessage { message, .. }) if message.is_empty() => {}, _ => panic!(), } } @@ -321,6 +327,8 @@ fn basic_two_nodes_requests_in_parallel() { loop { match ready!(service2.poll_next_unpin(cx)) { Some(GenericProtoOut::CustomProtocolOpen { .. }) => {}, + // An empty handshake is being sent after opening. + Some(GenericProtoOut::LegacyMessage { message, .. }) if message.is_empty() => {}, Some(GenericProtoOut::LegacyMessage { message, .. }) => { let pos = to_receive.iter().position(|m| m.encode() == message).unwrap(); to_receive.remove(pos); diff --git a/client/network/src/protocol/generic_proto/upgrade/legacy.rs b/client/network/src/protocol/generic_proto/upgrade/legacy.rs index 13560113bb..538532c1af 100644 --- a/client/network/src/protocol/generic_proto/upgrade/legacy.rs +++ b/client/network/src/protocol/generic_proto/upgrade/legacy.rs @@ -21,7 +21,8 @@ use bytes::BytesMut; use futures::prelude::*; use futures_codec::Framed; use libp2p::core::{Endpoint, UpgradeInfo, InboundUpgrade, OutboundUpgrade, upgrade::ProtocolName}; -use std::{collections::VecDeque, io, pin::Pin, vec::IntoIter as VecIntoIter}; +use parking_lot::RwLock; +use std::{collections::VecDeque, io, pin::Pin, sync::Arc, vec::IntoIter as VecIntoIter}; use std::task::{Context, Poll}; use unsigned_varint::codec::UviBytes; @@ -38,12 +39,13 @@ pub struct RegisteredProtocol { /// List of protocol versions that we support. /// Ordered in descending order so that the best comes first. supported_versions: Vec, + /// Handshake to send after the substream is open. + handshake_message: Arc>>, } impl RegisteredProtocol { - /// Creates a new `RegisteredProtocol`. The `custom_data` parameter will be - /// passed inside the `RegisteredProtocolOutput`. - pub fn new(protocol: impl Into, versions: &[u8]) + /// Creates a new `RegisteredProtocol`. 
+ pub fn new(protocol: impl Into, versions: &[u8], handshake_message: Arc>>) -> Self { let protocol = protocol.into(); let mut base_name = b"/substrate/".to_vec(); @@ -58,8 +60,14 @@ impl RegisteredProtocol { tmp.sort_unstable_by(|a, b| b.cmp(&a)); tmp }, + handshake_message, } } + + /// Returns the `Arc` to the handshake message that was passed at initialization. + pub fn handshake_message(&self) -> &Arc>> { + &self.handshake_message + } } impl Clone for RegisteredProtocol { @@ -68,6 +76,7 @@ impl Clone for RegisteredProtocol { id: self.id.clone(), base_name: self.base_name.clone(), supported_versions: self.supported_versions.clone(), + handshake_message: self.handshake_message.clone(), } } } @@ -244,10 +253,10 @@ impl ProtocolName for RegisteredProtocolName { } impl InboundUpgrade for RegisteredProtocol -where TSubstream: AsyncRead + AsyncWrite + Unpin, +where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, { type Output = RegisteredProtocolSubstream; - type Future = future::Ready>; + type Future = Pin> + Send>>; type Error = io::Error; fn upgrade_inbound( @@ -255,26 +264,31 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin, socket: TSubstream, info: Self::Info, ) -> Self::Future { - let framed = { - let mut codec = UviBytes::default(); - codec.set_max_len(16 * 1024 * 1024); // 16 MiB hard limit for packets. - Framed::new(socket, codec) - }; - - future::ok(RegisteredProtocolSubstream { - is_closing: false, - endpoint: Endpoint::Listener, - send_queue: VecDeque::new(), - requires_poll_flush: false, - inner: framed.fuse(), - protocol_version: info.version, - clogged_fuse: false, + Box::pin(async move { + let mut framed = { + let mut codec = UviBytes::default(); + codec.set_max_len(16 * 1024 * 1024); // 16 MiB hard limit for packets. + Framed::new(socket, codec) + }; + + let handshake = BytesMut::from(&self.handshake_message.read()[..]); + framed.send(handshake).await?; + + Ok(RegisteredProtocolSubstream { + is_closing: false, + endpoint: Endpoint::Listener, + send_queue: VecDeque::new(), + requires_poll_flush: false, + inner: framed.fuse(), + protocol_version: info.version, + clogged_fuse: false, + }) }) } } impl OutboundUpgrade for RegisteredProtocol -where TSubstream: AsyncRead + AsyncWrite + Unpin, +where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, { type Output = >::Output; type Future = >::Future; @@ -285,16 +299,25 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin, socket: TSubstream, info: Self::Info, ) -> Self::Future { - let framed = Framed::new(socket, UviBytes::default()); - - future::ok(RegisteredProtocolSubstream { - is_closing: false, - endpoint: Endpoint::Dialer, - send_queue: VecDeque::new(), - requires_poll_flush: false, - inner: framed.fuse(), - protocol_version: info.version, - clogged_fuse: false, + Box::pin(async move { + let mut framed = { + let mut codec = UviBytes::default(); + codec.set_max_len(16 * 1024 * 1024); // 16 MiB hard limit for packets. 
+ Framed::new(socket, codec) + }; + + let handshake = BytesMut::from(&self.handshake_message.read()[..]); + framed.send(handshake).await?; + + Ok(RegisteredProtocolSubstream { + is_closing: false, + endpoint: Endpoint::Dialer, + send_queue: VecDeque::new(), + requires_poll_flush: false, + inner: framed.fuse(), + protocol_version: info.version, + clogged_fuse: false, + }) }) } } -- GitLab From 59ee76a0f342ae0dc1c6a6777d24d7811d9261ef Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 8 Jul 2020 16:00:30 +0200 Subject: [PATCH 135/144] *: Update to libp2p v0.21.1 (#6559) * *Cargo.toml: Update versions * client/network/src/discovery: Adjust to Kademlia API changes * client/network: Adjust to one_shot.rs changes * client/network/discovery: Log address list on trace level * client/network/discovery: Ignore RoutablePeer and PendingRoutablePeer * Commit Cargo.lock * Finish update Co-authored-by: Pierre Krieger --- Cargo.lock | 176 +++++++++++++-------- bin/node/browser-testing/Cargo.toml | 2 +- bin/utils/subkey/Cargo.toml | 2 +- client/authority-discovery/Cargo.toml | 2 +- client/network-gossip/Cargo.toml | 4 +- client/network/Cargo.toml | 6 +- client/network/src/behaviour.rs | 10 +- client/network/src/block_requests.rs | 4 +- client/network/src/discovery.rs | 55 +++---- client/network/src/finality_requests.rs | 2 +- client/network/src/light_client_handler.rs | 2 +- client/network/src/service.rs | 21 +-- client/network/test/Cargo.toml | 2 +- client/peerset/Cargo.toml | 2 +- client/telemetry/Cargo.toml | 2 +- primitives/consensus/common/Cargo.toml | 2 +- utils/browser/Cargo.toml | 2 +- utils/prometheus/Cargo.toml | 3 +- 18 files changed, 171 insertions(+), 128 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 49c68c667e..383feaa0ad 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -225,39 +225,33 @@ checksum = "7deb0a829ca7bcfaf5da70b073a8d128619259a7be8216a355e23f00763059e5" [[package]] name = "async-std" -version = "1.5.0" +version = "1.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "538ecb01eb64eecd772087e5b6f7540cbc917f047727339a472dafed2185b267" +checksum = "00d68a33ebc8b57800847d00787307f84a562224a14db069b0acefe4c2abbf5d" dependencies = [ "async-task", - "broadcaster", - "crossbeam-channel", - "crossbeam-deque", "crossbeam-utils", + "futures-channel", "futures-core", "futures-io", - "futures-timer 2.0.2", + "futures-timer 3.0.2", "kv-log-macro", "log", "memchr", - "mio", - "mio-uds", "num_cpus", "once_cell", "pin-project-lite", "pin-utils", "slab", + "smol", + "wasm-bindgen-futures", ] [[package]] name = "async-task" -version = "1.3.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ac2c016b079e771204030951c366db398864f5026f84a44dafb0ff20f02085d" -dependencies = [ - "libc", - "winapi 0.3.8", -] +checksum = "c17772156ef2829aadc587461c7753af20b7e8db1529bc66855add962a3b35d3" [[package]] name = "async-tls" @@ -465,17 +459,16 @@ dependencies = [ ] [[package]] -name = "broadcaster" -version = "1.0.0" +name = "blocking" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9c972e21e0d055a36cf73e4daae870941fe7a8abcd5ac3396aab9e4c126bd87" +checksum = "9d17efb70ce4421e351d61aafd90c16a20fb5bfe339fcdc32a86816280e62ce0" dependencies = [ "futures-channel", - "futures-core", - "futures-sink", "futures-util", - "parking_lot 0.10.2", - "slab", + "once_cell", + "parking", + "waker-fn", ] [[package]] @@ -552,6 +545,12 @@ version = "1.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "4964518bd3b4a8190e832886cdc0da9794f12e8e6c1613a9e90ff331c4c8724b" +[[package]] +name = "cache-padded" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24508e28c677875c380c20f4d28124fab6f8ed4ef929a1397d7b1a31e92f1005" + [[package]] name = "cargo_metadata" version = "0.10.0" @@ -699,6 +698,15 @@ dependencies = [ "cc", ] +[[package]] +name = "concurrent-queue" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83c06aff61f2d899eb87c379df3cbf7876f14471dcab474e0b6dc90ab96c080" +dependencies = [ + "cache-padded", +] + [[package]] name = "console_error_panic_hook" version = "0.1.6" @@ -935,16 +943,6 @@ dependencies = [ "itertools 0.8.2", ] -[[package]] -name = "crossbeam-channel" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cced8691919c02aac3cb0a1bc2e9b73d89e832bf9a06fc579d4e71b68a2da061" -dependencies = [ - "crossbeam-utils", - "maybe-uninit", -] - [[package]] name = "crossbeam-deque" version = "0.7.3" @@ -1384,6 +1382,12 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" +[[package]] +name = "fastrand" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b90eb1dec02087df472ab9f0db65f27edaa654a746830042688bcc2eaf68090f" + [[package]] name = "fdlimit" version = "0.1.4" @@ -2715,9 +2719,9 @@ checksum = "3576a87f2ba00f6f106fdfcd16db1d698d648a26ad8e0573cad8537c3c362d2a" [[package]] name = "libc" -version = "0.2.69" +version = "0.2.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99e85c08494b21a9054e7fe1374a732aeadaff3980b6990b94bfd3a70f690005" +checksum = "9457b06509d27052635f90d6466700c65095fdf75409b3fbdd903e988b886f49" [[package]] name = "libflate" @@ -2749,9 +2753,9 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] name = "libp2p" -version = "0.20.1" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db81113df355dea9dddfcb01cd867555298dca29d915f25d1b1a0aad2e29338b" +checksum = "6d743d03fab397cde23925a17cb87b35b25994f44ab8c6a9e46a7e953ec739cd" dependencies = [ "bytes 0.5.4", "futures 0.3.5", @@ -2781,9 +2785,9 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.19.2" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a0387b930c3d4c2533dc4893c1e0394185ddcc019846121b1b27491e45a2c08" +checksum = "11ca8d5a64a5d19b45e00e8f24afda6b8e1b605fb25ad7bcf62a42ecf19d7ff3" dependencies = [ "asn1_der", "bs58", @@ -2815,9 +2819,9 @@ dependencies = [ [[package]] name = "libp2p-core-derive" -version = "0.19.1" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f09548626b737ed64080fde595e06ce1117795b8b9fc4d2629fa36561c583171" +checksum = "4b2f4f7b4e596450a0b62a46669caaebab9686c68b3c386053182ab41d761f66" dependencies = [ "quote 1.0.6", "syn 1.0.33", @@ -2825,9 +2829,9 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cc186d9a941fd0207cf8f08ef225a735e2d7296258f570155e525f6ee732f87" +checksum = "f751924b6b98e350005e0b87a822beb246792a3fb878c684e088f866158120ac" dependencies = [ "futures 0.3.5", 
"libp2p-core", @@ -2836,9 +2840,9 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.19.2" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f76075b170d908bae616f550ade410d9d27c013fa69042551dbfc757c7c094" +checksum = "912c00a7bf67e0e765daf0cc37e08f675ea26aba3d6d1fbfaee81f19a4c23049" dependencies = [ "futures 0.3.5", "libp2p-core", @@ -2852,9 +2856,9 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.20.1" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7c819a5425b2eb3416d67e9c868c5c1e922b6658655e06b9eeafaa41304b876" +checksum = "44ed3a4c8111c570ab2bffb30c6353178d7603ce3787e3c5f2493c8d3d16d1f0" dependencies = [ "arrayvec 0.5.1", "bytes 0.5.4", @@ -2879,9 +2883,9 @@ dependencies = [ [[package]] name = "libp2p-mdns" -version = "0.19.2" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f55b2d4b80986e5bf158270ab23268ec0e7f644ece5436fbaabc5155472f357" +checksum = "cd004c668160fd922f7268b2cd1e4550ff69165d9c744e9eb5770086eb753d02" dependencies = [ "async-std", "data-encoding", @@ -2901,9 +2905,9 @@ dependencies = [ [[package]] name = "libp2p-mplex" -version = "0.19.2" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be7d913a4cd57de2013257ec73f07d77bfce390b370023e2d59083e5ca079864" +checksum = "14ae0ffacd30f073f96cd518b2c9cd2cb18ac27c3d136a4b23cf1af99f33e541" dependencies = [ "bytes 0.5.4", "fnv", @@ -2917,9 +2921,9 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.19.1" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a03db664653369f46ee03fcec483a378c20195089bb43a26cb9fb0058009ac88" +checksum = "f6ad6b67d802de8b5ddc5e8b0ff55a0d0a6a737c2c5c174601dbb9d24e0ad5cb" dependencies = [ "curve25519-dalek", "futures 0.3.5", @@ -2938,9 +2942,9 @@ dependencies = [ [[package]] name = "libp2p-ping" -version = "0.19.3" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8dedd34e35a9728d52d59ef36a218e411359a353f9011b2574b86ee790978f6" +checksum = "70130cf130e4ba6dc177366e72dd9f86f9e3588fa1a0c4145247e676f16affad" dependencies = [ "futures 0.3.5", "libp2p-core", @@ -2953,9 +2957,9 @@ dependencies = [ [[package]] name = "libp2p-secio" -version = "0.19.2" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c99b3c33e96bb402486d5b4f7cbeab14e66e6a2ed010abbb5bb032a05460bfda" +checksum = "5ff43513c383f7cdab2736eb98465fc4c5dd5d1988df89749dc8a68950349d56" dependencies = [ "aes-ctr", "ctr", @@ -2983,9 +2987,9 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.19.1" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce53ff4d127cf8b39adf84dbd381ca32d49bd85788cee08e6669da2495993930" +checksum = "3829b323fe096a9363362d0dbbfb3d73f12f1760a6a5c193a779994ab8cbc584" dependencies = [ "futures 0.3.5", "libp2p-core", @@ -2998,9 +3002,9 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.19.2" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9481500c5774c62e8c413e9535b3f33a0e3dbacf2da63b8d3056c686a9df4146" +checksum = "9b1fa2bbad054020cb875546a577a66a65a5bf42eff55ed5265f92ffee3cc052" dependencies = [ "async-std", "futures 0.3.5", @@ -3014,9 +3018,9 @@ dependencies = [ [[package]] name = "libp2p-wasm-ext" -version = "0.19.0" 
+version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f59fdbb5706f2723ca108c088b1c7a37f735a8c328021f0508007162627e9885" +checksum = "1e2b3f0281c184af2e3481ad2463682735d491b2ceb8f73fa99dcd5d41e7afbf" dependencies = [ "futures 0.3.5", "js-sys", @@ -3028,9 +3032,9 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e4440551bf6519e0a684cd859ea809aec6d798f686e0d6ed03a28c3e76849b8" +checksum = "bd3a13025c0f621647ed2c5147615468e7b3cd1a5c7f26f2a6f6f8eafc9c1950" dependencies = [ "async-tls", "either", @@ -3048,9 +3052,9 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.19.1" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da33e7b5f49c75c6a8afb0b8d1e229f5fa48be9f39bd14cdbc21459a02ac6fc" +checksum = "46ae9bf2f7d8a4be9c7e9b61df9de9dc1bd66419d669098f22f81f8d9571029a" dependencies = [ "futures 0.3.5", "libp2p-core", @@ -5052,6 +5056,12 @@ version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddfc878dac00da22f8f61e7af3157988424567ab01d9920b962ef7dcbd7cd865" +[[package]] +name = "parking" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4029bc3504a62d92e42f30b9095fdef73b8a0b2a06aa41ce2935143b05a1a06" + [[package]] name = "parking_lot" version = "0.9.0" @@ -7429,6 +7439,27 @@ version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3757cb9d89161a2f24e1cf78efa0c1fcff485d18e3f55e0aa3480824ddaa0f3f" +[[package]] +name = "smol" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "620cbb3c6e34da57d3a248cda0cd01cd5848164dc062e764e65d06fe3ea7aed5" +dependencies = [ + "async-task", + "blocking", + "concurrent-queue", + "fastrand", + "futures-io", + "futures-util", + "libc", + "once_cell", + "scoped-tls", + "slab", + "socket2", + "wepoll-sys-stjepang", + "winapi 0.3.8", +] + [[package]] name = "snow" version = "0.7.0" @@ -9493,6 +9524,12 @@ dependencies = [ "libc", ] +[[package]] +name = "waker-fn" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9571542c2ce85ce642e6b58b3364da2fb53526360dfb7c211add4f5c23105ff7" + [[package]] name = "walkdir" version = "2.3.1" @@ -9787,6 +9824,15 @@ dependencies = [ "webpki", ] +[[package]] +name = "wepoll-sys-stjepang" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fd319e971980166b53e17b1026812ad66c6b54063be879eb182342b55284694" +dependencies = [ + "cc", +] + [[package]] name = "which" version = "3.1.1" diff --git a/bin/node/browser-testing/Cargo.toml b/bin/node/browser-testing/Cargo.toml index 0fa2c4d51a..2e7ebe3be0 100644 --- a/bin/node/browser-testing/Cargo.toml +++ b/bin/node/browser-testing/Cargo.toml @@ -8,7 +8,7 @@ license = "Apache-2.0" [dependencies] futures-timer = "3.0.2" -libp2p = { version = "0.20.1", default-features = false } +libp2p = { version = "0.21.1", default-features = false } jsonrpc-core = "14.2.0" serde = "1.0.106" serde_json = "1.0.48" diff --git a/bin/utils/subkey/Cargo.toml b/bin/utils/subkey/Cargo.toml index 5ade94275e..2713ccdbf1 100644 --- a/bin/utils/subkey/Cargo.toml +++ b/bin/utils/subkey/Cargo.toml @@ -33,7 +33,7 @@ derive_more = { version = "0.99.2" } sc-rpc = { version = "2.0.0-rc4", path = "../../../client/rpc" } jsonrpc-core-client = { 
version = "14.2.0", features = ["http"] } hyper = "0.12.35" -libp2p = { version = "0.20.1", default-features = false } +libp2p = { version = "0.21.1", default-features = false } serde_json = "1.0" [features] diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index a3ff17d9e0..75cb30646b 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -21,7 +21,7 @@ codec = { package = "parity-scale-codec", default-features = false, version = "1 derive_more = "0.99.2" futures = "0.3.4" futures-timer = "3.0.1" -libp2p = { version = "0.20.1", default-features = false, features = ["kad"] } +libp2p = { version = "0.21.1", default-features = false, features = ["kad"] } log = "0.4.8" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc4"} prost = "0.6.1" diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index aba5b49563..99a1e7eb8b 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.4" futures-timer = "3.0.1" -libp2p = { version = "0.20.1", default-features = false } +libp2p = { version = "0.21.1", default-features = false } log = "0.4.8" lru = "0.4.3" sc-network = { version = "0.8.0-rc4", path = "../network" } @@ -24,7 +24,7 @@ sp-runtime = { version = "2.0.0-rc4", path = "../../primitives/runtime" } wasm-timer = "0.2" [dev-dependencies] -async-std = "1.5" +async-std = "1.6.2" quickcheck = "0.9.0" rand = "0.7.2" substrate-test-runtime-client = { version = "2.0.0-rc4", path = "../../test-utils/runtime/client" } diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 495895c740..da7e7a5a2d 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -63,15 +63,15 @@ wasm-timer = "0.2" zeroize = "1.0.0" [dependencies.libp2p] -version = "0.20.1" +version = "0.21.1" default-features = false features = ["identify", "kad", "mdns", "mplex", "noise", "ping", "tcp-async-std", "websocket", "yamux"] [dev-dependencies] -async-std = "1.5" +async-std = "1.6.2" assert_matches = "1.3" env_logger = "0.7.0" -libp2p = { version = "0.20.1", default-features = false, features = ["secio"] } +libp2p = { version = "0.21.1", default-features = false, features = ["secio"] } quickcheck = "0.9.0" rand = "0.7.2" sp-keyring = { version = "2.0.0-rc4", path = "../../primitives/keyring" } diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 596b1d5167..5967613b98 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -29,7 +29,13 @@ use libp2p::swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess, PollPa use log::debug; use sp_consensus::{BlockOrigin, import_queue::{IncomingBlock, Origin}}; use sp_runtime::{traits::{Block as BlockT, NumberFor}, ConsensusEngineId, Justification}; -use std::{borrow::Cow, collections::VecDeque, iter, task::{Context, Poll}, time::Duration}; +use std::{ + borrow::Cow, + collections::{HashSet, VecDeque}, + iter, + task::{Context, Poll}, + time::Duration, +}; /// General behaviour of the network. Combines all protocols together. #[derive(NetworkBehaviour)] @@ -124,7 +130,7 @@ impl Behaviour { } /// Returns the list of nodes that we know exist in the network. 
- pub fn known_peers(&mut self) -> impl Iterator { + pub fn known_peers(&mut self) -> HashSet { self.discovery.known_peers() } diff --git a/client/network/src/block_requests.rs b/client/network/src/block_requests.rs index 6d698a7300..8f5116657a 100644 --- a/client/network/src/block_requests.rs +++ b/client/network/src/block_requests.rs @@ -455,8 +455,8 @@ where marker: PhantomData, }; let mut cfg = OneShotHandlerConfig::default(); - cfg.inactive_timeout = self.config.inactivity_timeout; - cfg.substream_timeout = self.config.request_timeout; + cfg.keep_alive_timeout = self.config.inactivity_timeout; + cfg.outbound_substream_timeout = self.config.request_timeout; OneShotHandler::new(SubstreamProtocol::new(p), cfg) } diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index c48722c0f7..d08f9d44a1 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -108,7 +108,7 @@ impl DiscoveryConfig { { for (peer_id, addr) in user_defined { for kad in self.kademlias.values_mut() { - kad.add_address(&peer_id, addr.clone()) + kad.add_address(&peer_id, addr.clone()); } self.user_defined.push((peer_id, addr)) } @@ -230,12 +230,18 @@ pub struct DiscoveryBehaviour { impl DiscoveryBehaviour { /// Returns the list of nodes that we know exist in the network. - pub fn known_peers(&mut self) -> impl Iterator { - let mut set = HashSet::new(); - for p in self.kademlias.values_mut().map(|k| k.kbuckets_entries()).flatten() { - set.insert(p); + pub fn known_peers(&mut self) -> HashSet { + let mut peers = HashSet::new(); + for k in self.kademlias.values_mut() { + for b in k.kbuckets() { + for e in b.iter() { + if !peers.contains(e.node.key.preimage()) { + peers.insert(e.node.key.preimage().clone()); + } + } + } } - set.into_iter() + peers } /// Adds a hard-coded address for the given peer, that never expires. @@ -246,7 +252,7 @@ impl DiscoveryBehaviour { pub fn add_known_address(&mut self, peer_id: PeerId, addr: Multiaddr) { if self.user_defined.iter().all(|(p, a)| *p != peer_id && *a != addr) { for k in self.kademlias.values_mut() { - k.add_address(&peer_id, addr.clone()) + k.add_address(&peer_id, addr.clone()); } self.pending_events.push_back(DiscoveryOut::Discovered(peer_id.clone())); self.user_defined.push((peer_id, addr)); @@ -260,7 +266,7 @@ impl DiscoveryBehaviour { pub fn add_self_reported_address(&mut self, peer_id: &PeerId, addr: Multiaddr) { if self.allow_non_globals_in_dht || self.can_add_to_dht(&addr) { for k in self.kademlias.values_mut() { - k.add_address(peer_id, addr.clone()) + k.add_address(peer_id, addr.clone()); } } else { log::trace!(target: "sub-libp2p", "Ignoring self-reported address {} from {}", addr, peer_id); @@ -291,7 +297,8 @@ impl DiscoveryBehaviour { /// Returns the number of nodes that are in the Kademlia k-buckets. pub fn num_kbuckets_entries(&mut self) -> impl ExactSizeIterator { - self.kademlias.iter_mut().map(|(id, kad)| (id, kad.kbuckets_entries().count())) + self.kademlias.iter_mut() + .map(|(id, kad)| (id, kad.kbuckets().map(|bucket| bucket.iter().count()).sum())) } /// Returns the number of records in the Kademlia record stores. 
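For reference, libp2p v0.21 removes `Kademlia::kbuckets_entries()` in favour of `kbuckets()`, which hands out bucket references to walk explicitly; the hunks above are mechanical translations to that API. A minimal sketch of the new iteration pattern (the bucket and entry accessors are exactly those used in the diff; the free-standing function and generic map key are illustrative only):

use std::collections::{HashMap, HashSet};
use libp2p::kad::{Kademlia, record::store::MemoryStore};
use libp2p::PeerId;

// Collect the distinct peers currently held in the k-buckets of every
// Kademlia instance, using the libp2p v0.21 `kbuckets()` iterator.
fn collect_known_peers<K>(
    kademlias: &mut HashMap<K, Kademlia<MemoryStore>>,
) -> HashSet<PeerId> {
    let mut peers = HashSet::new();
    for kad in kademlias.values_mut() {
        // `kbuckets()` yields one bucket reference per k-bucket; each
        // entry's key preimage is the `PeerId` of the stored node.
        for bucket in kad.kbuckets() {
            for entry in bucket.iter() {
                peers.insert(entry.node.key.preimage().clone());
            }
        }
    }
    peers
}
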
@@ -407,23 +414,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { list.extend(list_to_filter); } - if !list.is_empty() { - trace!(target: "sub-libp2p", "Addresses of {:?}: {:?}", peer_id, list); - - } else { - let mut has_entry = false; - for k in self.kademlias.values_mut() { - if k.kbuckets_entries().any(|p| p == peer_id) { - has_entry = true; - break - } - } - if has_entry { - trace!(target: "sub-libp2p", "Addresses of {:?}: none (peer in k-buckets)", peer_id); - } else { - trace!(target: "sub-libp2p", "Addresses of {:?}: none (peer not in k-buckets)", peer_id); - } - } + trace!(target: "sub-libp2p", "Addresses of {:?}: {:?}", peer_id, list); list } @@ -570,13 +561,16 @@ impl NetworkBehaviour for DiscoveryBehaviour { while let Poll::Ready(ev) = kademlia.poll(cx, params) { match ev { NetworkBehaviourAction::GenerateEvent(ev) => match ev { + KademliaEvent::RoutingUpdated { peer, .. } => { + let ev = DiscoveryOut::Discovered(peer); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + } KademliaEvent::UnroutablePeer { peer, .. } => { let ev = DiscoveryOut::UnroutablePeer(peer); return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); } - KademliaEvent::RoutingUpdated { peer, .. } => { - let ev = DiscoveryOut::Discovered(peer); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + KademliaEvent::RoutablePeer { .. } | KademliaEvent::PendingRoutablePeer { .. } => { + // We are not interested in these events at the moment. } KademliaEvent::QueryResult { result: QueryResult::GetClosestPeers(res), .. } => { match res { @@ -640,9 +634,6 @@ impl NetworkBehaviour for DiscoveryBehaviour { e.key(), e) } } - KademliaEvent::Discovered { .. } => { - // We are not interested in these events at the moment. - } // We never start any other type of query. 
e => { warn!(target: "sub-libp2p", "Libp2p => Unhandled Kademlia event: {:?}", e) diff --git a/client/network/src/finality_requests.rs b/client/network/src/finality_requests.rs index 457f934350..9bb3cfec74 100644 --- a/client/network/src/finality_requests.rs +++ b/client/network/src/finality_requests.rs @@ -234,7 +234,7 @@ where marker: PhantomData, }; let mut cfg = OneShotHandlerConfig::default(); - cfg.inactive_timeout = self.config.inactivity_timeout; + cfg.keep_alive_timeout = self.config.inactivity_timeout; OneShotHandler::new(SubstreamProtocol::new(p), cfg) } diff --git a/client/network/src/light_client_handler.rs b/client/network/src/light_client_handler.rs index ab6bea8761..678a717a89 100644 --- a/client/network/src/light_client_handler.rs +++ b/client/network/src/light_client_handler.rs @@ -757,7 +757,7 @@ where protocol: self.config.light_protocol.clone(), }; let mut cfg = OneShotHandlerConfig::default(); - cfg.inactive_timeout = self.config.inactivity_timeout; + cfg.keep_alive_timeout = self.config.inactivity_timeout; OneShotHandler::new(SubstreamProtocol::new(p), cfg) } diff --git a/client/network/src/service.rs b/client/network/src/service.rs index c669c809a1..7d4135de6b 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -490,17 +490,18 @@ impl NetworkWorker { let not_connected_peers = { let swarm = &mut *swarm; - let list = swarm.known_peers().filter(|p| open.iter().all(|n| n != *p)) - .cloned().collect::>(); - list.into_iter().map(move |peer_id| { - (peer_id.to_base58(), NetworkStateNotConnectedPeer { - version_string: swarm.node(&peer_id) - .and_then(|i| i.client_version().map(|s| s.to_owned())), - latest_ping_time: swarm.node(&peer_id).and_then(|i| i.latest_ping()), - known_addresses: NetworkBehaviour::addresses_of_peer(&mut **swarm, &peer_id) - .into_iter().collect(), + swarm.known_peers().into_iter() + .filter(|p| open.iter().all(|n| n != p)) + .map(move |peer_id| { + (peer_id.to_base58(), NetworkStateNotConnectedPeer { + version_string: swarm.node(&peer_id) + .and_then(|i| i.client_version().map(|s| s.to_owned())), + latest_ping_time: swarm.node(&peer_id).and_then(|i| i.latest_ping()), + known_addresses: NetworkBehaviour::addresses_of_peer(&mut **swarm, &peer_id) + .into_iter().collect(), + }) }) - }).collect() + .collect() }; NetworkState { diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index 6527d093bd..9ca5f024d2 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -19,7 +19,7 @@ parking_lot = "0.10.0" futures = "0.3.4" futures-timer = "3.0.1" rand = "0.7.2" -libp2p = { version = "0.20.1", default-features = false } +libp2p = { version = "0.21.1", default-features = false } sp-consensus = { version = "0.8.0-rc4", path = "../../../primitives/consensus/common" } sc-consensus = { version = "0.8.0-rc4", path = "../../../client/consensus/common" } sc-client-api = { version = "2.0.0-rc4", path = "../../api" } diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index bdec765eda..06c936b255 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.4" -libp2p = { version = "0.20.1", default-features = false } +libp2p = { version = "0.21.1", default-features = false } sp-utils = { version = "2.0.0-rc4", path = "../../primitives/utils"} log = "0.4.8" serde_json = "1.0.41" diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index 8d4aecc468..aaa2d11aec 
100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -18,7 +18,7 @@ parking_lot = "0.10.0" futures = "0.3.4" futures-timer = "3.0.1" wasm-timer = "0.2.0" -libp2p = { version = "0.20.1", default-features = false, features = ["dns", "tcp-async-std", "wasm-ext", "websocket"] } +libp2p = { version = "0.21.1", default-features = false, features = ["dns", "tcp-async-std", "wasm-ext", "websocket"] } log = "0.4.8" pin-project = "0.4.6" rand = "0.7.2" diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index 39c47545c2..016d42504e 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" -libp2p = { version = "0.20.1", default-features = false } +libp2p = { version = "0.21.1", default-features = false } log = "0.4.8" sp-core = { path= "../../core", version = "2.0.0-rc4"} sp-inherents = { version = "2.0.0-rc4", path = "../../inherents" } diff --git a/utils/browser/Cargo.toml b/utils/browser/Cargo.toml index fc57c82ef0..faa1bcfcef 100644 --- a/utils/browser/Cargo.toml +++ b/utils/browser/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] futures = { version = "0.3", features = ["compat"] } futures01 = { package = "futures", version = "0.1.29" } log = "0.4.8" -libp2p-wasm-ext = { version = "0.19.0", features = ["websocket"] } +libp2p-wasm-ext = { version = "0.20", features = ["websocket"] } console_error_panic_hook = "0.1.6" console_log = "0.1.2" js-sys = "0.3.34" diff --git a/utils/prometheus/Cargo.toml b/utils/prometheus/Cargo.toml index 322935a884..6a76f6109c 100644 --- a/utils/prometheus/Cargo.toml +++ b/utils/prometheus/Cargo.toml @@ -18,7 +18,6 @@ futures-util = { version = "0.3.1", default-features = false, features = ["io"] derive_more = "0.99" [target.'cfg(not(target_os = "unknown"))'.dependencies] -# async-std is temporarily pinned to <1.6 because version 1.6.0 is buggy -async-std = { version = "1.0.1, <1.6", features = ["unstable"] } +async-std = { version = "1.6.2", features = ["unstable"] } hyper = { version = "0.13.1", default-features = false, features = ["stream"] } tokio = "0.2" -- GitLab From d076f4705ee257b61c02c918755e63e3b34272ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 8 Jul 2020 17:42:42 +0200 Subject: [PATCH 136/144] Improve transaction submission (#6599) * Improve transaction submission Before this pr the transaction pool validated each transaction, even if the transaction was already known to the pool. This pr changes the behavior to first check if we are already aware of a transaction and thus, to only validate them if we don't know them yet. However, there is still the possibility that a given transaction is validated multiple times. This can happen if the transaction is added the first time, but is not yet validated and added to the validated pool. Besides that, this pr fixes the wrong metrics of gossiped transactions in the network. It also moves some metrics to the transaction pool api, to better track when a transaction actually is scheduled for validation. 
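For orientation, a minimal self-contained sketch of the fast pre-check this commit introduces, using toy types (the real logic is `ValidatedPool::check_is_known` and `BasePool::is_imported` in the diffs below; everything else here is illustrative):

use std::collections::HashSet;

#[derive(Debug)]
enum SubmitError { TemporarilyBanned, AlreadyImported }

// Toy stand-in for the pool: just the two sets the pre-check consults.
struct ToyPool {
    imported: HashSet<u64>, // hashes already in the ready/future queues
    banned: HashSet<u64>,   // temporarily banned hashes
}

impl ToyPool {
    // Mirrors `check_is_known`: cheap rejection before any validation work.
    fn check_is_known(&self, hash: &u64, ignore_banned: bool) -> Result<(), SubmitError> {
        if !ignore_banned && self.banned.contains(hash) {
            Err(SubmitError::TemporarilyBanned)
        } else if self.imported.contains(hash) {
            Err(SubmitError::AlreadyImported)
        } else {
            Ok(())
        }
    }

    fn submit(&mut self, hash: u64) -> Result<(), SubmitError> {
        // Fail fast; only unknown, unbanned transactions reach (expensive) validation.
        self.check_is_known(&hash, false)?;
        // ... runtime validation would be scheduled here ...
        self.imported.insert(hash);
        Ok(())
    }
}

Resubmitting transactions from retracted blocks later calls this check with `ignore_banned = true`, which is what the new `resubmit_at` entry point below expresses.
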
* Make sure we don't submit the same transaction twice from the network concurrently * Remove added listener call * Feedback * Ignore banned on resubmit --- bin/node-template/node/src/service.rs | 1 + bin/node/cli/src/service.rs | 1 + .../basic-authorship/src/basic_authorship.rs | 8 +- client/basic-authorship/src/lib.rs | 6 +- client/network/src/protocol.rs | 69 +++++++++----- client/offchain/src/lib.rs | 2 +- client/rpc/src/author/tests.rs | 2 +- client/service/src/lib.rs | 2 +- .../transaction-pool/graph/src/base_pool.rs | 7 +- client/transaction-pool/graph/src/pool.rs | 93 +++++++++++-------- .../graph/src/validated_pool.rs | 26 +++++- client/transaction-pool/src/api.rs | 28 +++++- client/transaction-pool/src/lib.rs | 35 ++----- client/transaction-pool/src/metrics.rs | 58 +++++++++--- client/transaction-pool/src/testing/pool.rs | 4 +- utils/frame/rpc/system/src/lib.rs | 8 +- 16 files changed, 227 insertions(+), 123 deletions(-) diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 89bf159927..3961971fbe 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -47,6 +47,7 @@ macro_rules! new_full_start { .with_transaction_pool(|builder| { let pool_api = sc_transaction_pool::FullChainApi::new( builder.client().clone(), + None, ); Ok(sc_transaction_pool::BasicPool::new( builder.config().transaction_pool.clone(), diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 632092cdaa..70c2d10964 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -61,6 +61,7 @@ macro_rules! new_full_start { .with_transaction_pool(|builder| { let pool_api = sc_transaction_pool::FullChainApi::new( builder.client().clone(), + builder.prometheus_registry(), ); let config = builder.config(); diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 7343b13c04..581da62737 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -361,7 +361,7 @@ mod tests { let txpool = Arc::new( BasicPool::new( Default::default(), - Arc::new(FullChainApi::new(client.clone())), + Arc::new(FullChainApi::new(client.clone(), None)), None, ).0 ); @@ -414,7 +414,7 @@ mod tests { let txpool = Arc::new( BasicPool::new( Default::default(), - Arc::new(FullChainApi::new(client.clone())), + Arc::new(FullChainApi::new(client.clone(), None)), None, ).0 ); @@ -449,7 +449,7 @@ mod tests { let txpool = Arc::new( BasicPool::new( Default::default(), - Arc::new(FullChainApi::new(client.clone())), + Arc::new(FullChainApi::new(client.clone(), None)), None, ).0 ); @@ -511,7 +511,7 @@ mod tests { let txpool = Arc::new( BasicPool::new( Default::default(), - Arc::new(FullChainApi::new(client.clone())), + Arc::new(FullChainApi::new(client.clone(), None)), None, ).0 ); diff --git a/client/basic-authorship/src/lib.rs b/client/basic-authorship/src/lib.rs index 4f53c87de3..bc51037277 100644 --- a/client/basic-authorship/src/lib.rs +++ b/client/basic-authorship/src/lib.rs @@ -31,7 +31,11 @@ //! # }; //! # use sc_transaction_pool::{BasicPool, FullChainApi}; //! # let client = Arc::new(substrate_test_runtime_client::new()); -//! # let txpool = Arc::new(BasicPool::new(Default::default(), Arc::new(FullChainApi::new(client.clone())), None).0); +//! # let txpool = Arc::new(BasicPool::new( +//! # Default::default(), +//! # Arc::new(FullChainApi::new(client.clone(), None)), +//! # None).0, +//! # ); //! 
// The first step is to create a `ProposerFactory`. //! let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone(), None); //! diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index d037057e50..d98ba8d323 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -51,7 +51,7 @@ use message::generic::{Message as GenericMessage, ConsensusMessage, Roles}; use prometheus_endpoint::{Registry, Gauge, Counter, GaugeVec, HistogramVec, PrometheusError, Opts, register, U64}; use sync::{ChainSync, SyncState}; use std::borrow::Cow; -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque, hash_map::Entry}; use std::sync::Arc; use std::fmt::Write; use std::{cmp, io, num::NonZeroUsize, pin::Pin, task::Poll, time}; @@ -199,18 +199,21 @@ impl Metrics { } } -struct PendingTransaction { +#[pin_project::pin_project] +struct PendingTransaction { + #[pin] validation: TransactionImportFuture, - peer_id: PeerId, + tx_hash: H, } -impl Future for PendingTransaction { - type Output = (PeerId, TransactionImport); +impl Future for PendingTransaction { + type Output = (H, TransactionImport); fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { - let this = Pin::into_inner(self); - if let Poll::Ready(import_result) = this.validation.poll_unpin(cx) { - return Poll::Ready((this.peer_id.clone(), import_result)); + let mut this = self.project(); + + if let Poll::Ready(import_result) = Pin::new(&mut this.validation).poll_unpin(cx) { + return Poll::Ready((this.tx_hash.clone(), import_result)); } Poll::Pending @@ -226,7 +229,12 @@ pub struct Protocol { /// Pending list of messages to return from `poll` as a priority. pending_messages: VecDeque>, /// Pending transactions verification tasks. - pending_transactions: FuturesUnordered, + pending_transactions: FuturesUnordered>, + /// As multiple peers can send us the same transaction, we group + /// these peers using the transaction hash while the transaction is + /// imported. This prevents that we import the same transaction + /// multiple times concurrently. 
+ pending_transactions_peers: HashMap>, config: ProtocolConfig, genesis_hash: B::Hash, sync: ChainSync, @@ -452,6 +460,7 @@ impl Protocol { propagate_timeout: Box::pin(interval(PROPAGATE_TIMEOUT)), pending_messages: VecDeque::new(), pending_transactions: FuturesUnordered::new(), + pending_transactions_peers: HashMap::new(), config, context_data: ContextData { peers: HashMap::new(), @@ -1162,7 +1171,7 @@ impl Protocol { fn on_transactions( &mut self, who: PeerId, - transactions: message::Transactions + transactions: message::Transactions, ) { // sending transaction to light node is considered a bad behavior if !self.config.roles.is_full() { @@ -1191,14 +1200,22 @@ impl Protocol { } let hash = self.transaction_pool.hash_of(&t); - peer.known_transactions.insert(hash); + peer.known_transactions.insert(hash.clone()); self.peerset_handle.report_peer(who.clone(), rep::ANY_TRANSACTION); - self.pending_transactions.push(PendingTransaction { - peer_id: who.clone(), - validation: self.transaction_pool.import(t), - }); + match self.pending_transactions_peers.entry(hash.clone()) { + Entry::Vacant(entry) => { + self.pending_transactions.push(PendingTransaction { + validation: self.transaction_pool.import(t), + tx_hash: hash, + }); + entry.insert(vec![who.clone()]); + }, + Entry::Occupied(mut entry) => { + entry.get_mut().push(who.clone()); + } + } } } } @@ -1232,7 +1249,9 @@ impl Protocol { &mut self, transactions: &[(H, B::Extrinsic)], ) -> HashMap> { - let mut propagated_to = HashMap::new(); + let mut propagated_to = HashMap::<_, Vec<_>>::new(); + let mut propagated_transactions = 0; + for (who, peer) in self.context_data.peers.iter_mut() { // never send transactions to the light node if !peer.info.roles.is_full() { @@ -1245,11 +1264,13 @@ impl Protocol { .cloned() .unzip(); + propagated_transactions += hashes.len(); + if !to_send.is_empty() { for hash in hashes { propagated_to .entry(hash) - .or_insert_with(Vec::new) + .or_default() .push(who.to_base58()); } trace!(target: "sync", "Sending {} transactions to {}", to_send.len(), who); @@ -1264,10 +1285,8 @@ impl Protocol { } } - if propagated_to.len() > 0 { - if let Some(ref metrics) = self.metrics { - metrics.propagated_transactions.inc(); - } + if let Some(ref metrics) = self.metrics { + metrics.propagated_transactions.inc_by(propagated_transactions as _) } propagated_to @@ -2017,8 +2036,12 @@ impl NetworkBehaviour for Protocol { }; self.pending_messages.push_back(event); } - if let Poll::Ready(Some((peer_id, result))) = self.pending_transactions.poll_next_unpin(cx) { - self.on_handle_transaction_import(peer_id, result); + if let Poll::Ready(Some((tx_hash, result))) = self.pending_transactions.poll_next_unpin(cx) { + if let Some(peers) = self.pending_transactions_peers.remove(&tx_hash) { + peers.into_iter().for_each(|p| self.on_handle_transaction_import(p, result)); + } else { + warn!(target: "sub-libp2p", "Inconsistent state, no peers for pending transaction!"); + } } if let Some(message) = self.pending_messages.pop_front() { return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)); diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index 7c90065746..2f50ede7ad 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -250,7 +250,7 @@ mod tests { let client = Arc::new(substrate_test_runtime_client::new()); let pool = Arc::new(TestPool(BasicPool::new( Default::default(), - Arc::new(FullChainApi::new(client.clone())), + Arc::new(FullChainApi::new(client.clone(), None)), None, ).0)); 
client.execution_extensions() diff --git a/client/rpc/src/author/tests.rs b/client/rpc/src/author/tests.rs index f2f4ddebb2..870390969c 100644 --- a/client/rpc/src/author/tests.rs +++ b/client/rpc/src/author/tests.rs @@ -63,7 +63,7 @@ impl Default for TestSetup { let pool = Arc::new(BasicPool::new( Default::default(), - Arc::new(FullChainApi::new(client.clone())), + Arc::new(FullChainApi::new(client.clone(), None)), None, ).0); TestSetup { diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 2c09591fc7..f701d82065 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -579,7 +579,7 @@ mod tests { let client = Arc::new(client); let pool = Arc::new(BasicPool::new( Default::default(), - Arc::new(FullChainApi::new(client.clone())), + Arc::new(FullChainApi::new(client.clone(), None)), None, ).0); let source = sp_runtime::transaction_validity::TransactionSource::External; diff --git a/client/transaction-pool/graph/src/base_pool.rs b/client/transaction-pool/graph/src/base_pool.rs index 0128e94675..25da341e67 100644 --- a/client/transaction-pool/graph/src/base_pool.rs +++ b/client/transaction-pool/graph/src/base_pool.rs @@ -261,6 +261,11 @@ impl BasePool bool { + self.future.contains(tx_hash) || self.ready.contains(tx_hash) + } + /// Imports transaction to the pool. /// /// The pool consists of two parts: Future and Ready. @@ -272,7 +277,7 @@ impl BasePool, ) -> error::Result> { - if self.future.contains(&tx.hash) || self.ready.contains(&tx.hash) { + if self.is_imported(&tx.hash) { return Err(error::Error::AlreadyImported(Box::new(tx.hash.clone()))) } diff --git a/client/transaction-pool/graph/src/pool.rs b/client/transaction-pool/graph/src/pool.rs index e4d81c38ae..750d5f5d10 100644 --- a/client/transaction-pool/graph/src/pool.rs +++ b/client/transaction-pool/graph/src/pool.rs @@ -23,7 +23,7 @@ use std::{ use crate::{base_pool as base, watcher::Watcher}; -use futures::{Future, FutureExt}; +use futures::Future; use sp_runtime::{ generic::BlockId, traits::{self, SaturatedConversion, Block as BlockT}, @@ -125,6 +125,14 @@ impl Default for Options { } } +/// Should we check that the transaction is banned +/// in the pool, before we verify it? +#[derive(Copy, Clone)] +enum CheckBannedBeforeVerify { + Yes, + No, +} + /// Extrinsics pool that performs validation. pub struct Pool { validated_pool: Arc>, @@ -149,23 +157,29 @@ impl Pool { } /// Imports a bunch of unverified extrinsics to the pool - pub async fn submit_at( + pub async fn submit_at( &self, at: &BlockId, source: TransactionSource, - xts: T, - force: bool, - ) -> Result, B::Error>>, B::Error> where - T: IntoIterator>, - { - let validated_pool = self.validated_pool.clone(); + xts: impl IntoIterator>, + ) -> Result, B::Error>>, B::Error> { let xts = xts.into_iter().map(|xt| (source, xt)); - self.verify(at, xts, force) - .map(move |validated_transactions| validated_transactions - .map(|validated_transactions| validated_pool.submit(validated_transactions - .into_iter() - .map(|(_, tx)| tx)))) - .await + let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::Yes).await?; + Ok(self.validated_pool.submit(validated_transactions.into_iter().map(|(_, tx)| tx))) + } + + /// Resubmit the given extrinsics to the pool. + /// + /// This does not check if a transaction is banned, before we verify it again. 
+ pub async fn resubmit_at( + &self, + at: &BlockId, + source: TransactionSource, + xts: impl IntoIterator>, + ) -> Result, B::Error>>, B::Error> { + let xts = xts.into_iter().map(|xt| (source, xt)); + let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::No).await?; + Ok(self.validated_pool.submit(validated_transactions.into_iter().map(|(_, tx)| tx))) } /// Imports one unverified extrinsic to the pool @@ -175,12 +189,8 @@ impl Pool { source: TransactionSource, xt: ExtrinsicFor, ) -> Result, B::Error> { - self.submit_at(at, source, std::iter::once(xt), false) - .map(|import_result| import_result.and_then(|mut import_result| import_result - .pop() - .expect("One extrinsic passed; one result returned; qed") - )) - .await + let res = self.submit_at(at, source, std::iter::once(xt)).await?.pop(); + res.expect("One extrinsic passed; one result returned; qed") } /// Import a single extrinsic and starts to watch their progress in the pool. @@ -192,7 +202,11 @@ impl Pool { ) -> Result, ExtrinsicHash>, B::Error> { let block_number = self.resolve_block_number(at)?; let (_, tx) = self.verify_one( - at, block_number, source, xt, false + at, + block_number, + source, + xt, + CheckBannedBeforeVerify::Yes, ).await; self.validated_pool.submit_and_watch(tx) } @@ -328,7 +342,11 @@ impl Pool { .into_iter() .map(|tx| (tx.source, tx.data.clone())); - let reverified_transactions = self.verify(at, pruned_transactions, false).await?; + let reverified_transactions = self.verify( + at, + pruned_transactions, + CheckBannedBeforeVerify::Yes, + ).await?; log::trace!(target: "txpool", "Pruning at {:?}. Resubmitting transactions.", at); // And finally - submit reverified transactions back to the pool @@ -358,23 +376,17 @@ impl Pool { &self, at: &BlockId, xts: impl IntoIterator)>, - force: bool, + check: CheckBannedBeforeVerify, ) -> Result, ValidatedTransactionFor>, B::Error> { // we need a block number to compute tx validity let block_number = self.resolve_block_number(at)?; - let mut result = HashMap::new(); - - for (hash, validated_tx) in - futures::future::join_all( - xts.into_iter() - .map(|(source, xt)| self.verify_one(at, block_number, source, xt, force)) - ) - .await - { - result.insert(hash, validated_tx); - } - Ok(result) + let res = futures::future::join_all( + xts.into_iter() + .map(|(source, xt)| self.verify_one(at, block_number, source, xt, check)) + ).await.into_iter().collect::>(); + + Ok(res) } /// Returns future that validates single transaction at given block. 
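Design note on the hunks above: the old `force: bool` argument is replaced by the two-variant `CheckBannedBeforeVerify` enum, so call sites state intent instead of passing a bare boolean. A compilable sketch of how the flag is consumed (mirroring `verify_one`; the `main` is illustrative):

#[derive(Copy, Clone)]
enum CheckBannedBeforeVerify { Yes, No }

// Mirrors `let ignore_banned = matches!(check, CheckBannedBeforeVerify::No);`
fn ignore_banned(check: CheckBannedBeforeVerify) -> bool {
    matches!(check, CheckBannedBeforeVerify::No)
}

fn main() {
    // `submit_at` keeps the ban check; `resubmit_at` (retracted-block
    // transactions) skips it, as the old `force = true` used to.
    assert!(!ignore_banned(CheckBannedBeforeVerify::Yes));
    assert!(ignore_banned(CheckBannedBeforeVerify::No));
}
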
@@ -384,14 +396,13 @@ impl Pool { block_number: NumberFor, source: TransactionSource, xt: ExtrinsicFor, - force: bool, + check: CheckBannedBeforeVerify, ) -> (ExtrinsicHash, ValidatedTransactionFor) { let (hash, bytes) = self.validated_pool.api().hash_and_length(&xt); - if !force && self.validated_pool.is_banned(&hash) { - return ( - hash.clone(), - ValidatedTransaction::Invalid(hash, error::Error::TemporarilyBanned.into()), - ) + + let ignore_banned = matches!(check, CheckBannedBeforeVerify::No); + if let Err(err) = self.validated_pool.check_is_known(&hash, ignore_banned) { + return (hash.clone(), ValidatedTransaction::Invalid(hash, err.into())) } let validation_result = self.validated_pool.api().validate_transaction( diff --git a/client/transaction-pool/graph/src/validated_pool.rs b/client/transaction-pool/graph/src/validated_pool.rs index d730b892e3..bde76196ec 100644 --- a/client/transaction-pool/graph/src/validated_pool.rs +++ b/client/transaction-pool/graph/src/validated_pool.rs @@ -137,10 +137,30 @@ impl ValidatedPool { self.rotator.is_banned(hash) } + /// A fast check before doing any further processing of a transaction, like validation. + /// + /// If `ingore_banned` is `true`, it will not check if the transaction is banned. + /// + /// It checks if the transaction is already imported or banned. If so, it returns an error. + pub fn check_is_known( + &self, + tx_hash: &ExtrinsicHash, + ignore_banned: bool, + ) -> Result<(), B::Error> { + if !ignore_banned && self.is_banned(tx_hash) { + Err(error::Error::TemporarilyBanned.into()) + } else if self.pool.read().is_imported(tx_hash) { + Err(error::Error::AlreadyImported(Box::new(tx_hash.clone())).into()) + } else { + Ok(()) + } + } + /// Imports a bunch of pre-validated transactions to the pool. - pub fn submit(&self, txs: T) -> Vec, B::Error>> where - T: IntoIterator> - { + pub fn submit( + &self, + txs: impl IntoIterator>, + ) -> Vec, B::Error>> { let results = txs.into_iter() .map(|validated_tx| self.submit_one(validated_tx)) .collect::>(); diff --git a/client/transaction-pool/src/api.rs b/client/transaction-pool/src/api.rs index 10ac4aa469..a14d5b0db1 100644 --- a/client/transaction-pool/src/api.rs +++ b/client/transaction-pool/src/api.rs @@ -33,19 +33,38 @@ use sp_runtime::{ }; use sp_transaction_pool::runtime_api::TaggedTransactionQueue; use sp_api::{ProvideRuntimeApi, ApiExt}; +use prometheus_endpoint::Registry as PrometheusRegistry; -use crate::error::{self, Error}; +use crate::{metrics::{ApiMetrics, ApiMetricsExt}, error::{self, Error}}; /// The transaction pool logic for full client. pub struct FullChainApi { client: Arc, pool: ThreadPool, _marker: PhantomData, + metrics: Option>, } impl FullChainApi { /// Create new transaction pool logic. 
- pub fn new(client: Arc) -> Self { + pub fn new( + client: Arc, + prometheus: Option<&PrometheusRegistry>, + ) -> Self { + let metrics = prometheus.map(ApiMetrics::register).and_then(|r| { + match r { + Err(err) => { + log::warn!( + target: "txpool", + "Failed to register transaction pool api prometheus metrics: {:?}", + err, + ); + None + }, + Ok(api) => Some(Arc::new(api)) + } + }); + FullChainApi { client, pool: ThreadPoolBuilder::new() @@ -54,6 +73,7 @@ impl FullChainApi { .create() .expect("Failed to spawn verifier threads, that are critical for node operation."), _marker: Default::default(), + metrics, } } } @@ -87,6 +107,9 @@ where let client = self.client.clone(); let at = at.clone(); + let metrics = self.metrics.clone(); + metrics.report(|m| m.validations_scheduled.inc()); + self.pool.spawn_ok(futures_diagnose::diagnose( "validate-transaction", async move { @@ -94,6 +117,7 @@ where if let Err(e) = tx.send(res) { log::warn!("Unable to send a validate transaction result: {:?}", e); } + metrics.report(|m| m.validations_finished.inc()); }, )); diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index ea8b4bf9de..a7504eb694 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -248,15 +248,9 @@ impl TransactionPool for BasicPool let pool = self.pool.clone(); let at = *at; - self.metrics.report(|metrics| metrics.validations_scheduled.inc_by(xts.len() as u64)); + self.metrics.report(|metrics| metrics.submitted_transactions.inc_by(xts.len() as u64)); - let metrics = self.metrics.clone(); - async move { - let tx_count = xts.len(); - let res = pool.submit_at(&at, source, xts, false).await; - metrics.report(|metrics| metrics.validations_finished.inc_by(tx_count as u64)); - res - }.boxed() + async move { pool.submit_at(&at, source, xts).await }.boxed() } fn submit_one( @@ -268,16 +262,9 @@ impl TransactionPool for BasicPool let pool = self.pool.clone(); let at = *at; - self.metrics.report(|metrics| metrics.validations_scheduled.inc()); - - let metrics = self.metrics.clone(); - async move { - let res = pool.submit_one(&at, source, xt).await; - - metrics.report(|metrics| metrics.validations_finished.inc()); - res + self.metrics.report(|metrics| metrics.submitted_transactions.inc()); - }.boxed() + async move { pool.submit_one(&at, source, xt).await }.boxed() } fn submit_and_watch( @@ -289,17 +276,12 @@ impl TransactionPool for BasicPool let at = *at; let pool = self.pool.clone(); - self.metrics.report(|metrics| metrics.validations_scheduled.inc()); + self.metrics.report(|metrics| metrics.submitted_transactions.inc()); - let metrics = self.metrics.clone(); async move { - let result = pool.submit_and_watch(&at, source, xt) + pool.submit_and_watch(&at, source, xt) .map(|result| result.map(|watcher| Box::new(watcher.into_stream()) as _)) - .await; - - metrics.report(|metrics| metrics.validations_finished.inc()); - - result + .await }.boxed() } @@ -632,13 +614,12 @@ impl MaintainedTransactionPool for BasicPool ); } - if let Err(e) = pool.submit_at( + if let Err(e) = pool.resubmit_at( &id, // These transactions are coming from retracted blocks, we should // simply consider them external. 
TransactionSource::External, resubmit_transactions, - true, ).await { log::debug!( target: "txpool", diff --git a/client/transaction-pool/src/metrics.rs b/client/transaction-pool/src/metrics.rs index d5a10dfd6f..376e6dfe94 100644 --- a/client/transaction-pool/src/metrics.rs +++ b/client/transaction-pool/src/metrics.rs @@ -45,8 +45,7 @@ impl MetricsLink { /// Transaction pool Prometheus metrics. pub struct Metrics { - pub validations_scheduled: Counter, - pub validations_finished: Counter, + pub submitted_transactions: Counter, pub validations_invalid: Counter, pub block_transactions_pruned: Counter, pub block_transactions_resubmitted: Counter, @@ -55,17 +54,10 @@ pub struct Metrics { impl Metrics { pub fn register(registry: &Registry) -> Result { Ok(Self { - validations_scheduled: register( + submitted_transactions: register( Counter::new( - "sub_txpool_validations_scheduled", - "Total number of transactions scheduled for validation", - )?, - registry, - )?, - validations_finished: register( - Counter::new( - "sub_txpool_validations_finished", - "Total number of transactions that finished validation", + "sub_txpool_submitted_transactions", + "Total number of transactions submitted", )?, registry, )?, @@ -93,3 +85,45 @@ impl Metrics { }) } } + +/// Transaction pool api Prometheus metrics. +pub struct ApiMetrics { + pub validations_scheduled: Counter, + pub validations_finished: Counter, +} + +impl ApiMetrics { + /// Register the metrics at the given Prometheus registry. + pub fn register(registry: &Registry) -> Result { + Ok(Self { + validations_scheduled: register( + Counter::new( + "sub_txpool_validations_scheduled", + "Total number of transactions scheduled for validation", + )?, + registry, + )?, + validations_finished: register( + Counter::new( + "sub_txpool_validations_finished", + "Total number of transactions that finished validation", + )?, + registry, + )?, + }) + } +} + +/// An extension trait for [`ApiMetrics`]. +pub trait ApiMetricsExt { + /// Report an event to the metrics. + fn report(&self, report: impl FnOnce(&ApiMetrics)); +} + +impl ApiMetricsExt for Option> { + fn report(&self, report: impl FnOnce(&ApiMetrics)) { + if let Some(metrics) = self.as_ref() { + report(metrics) + } + } +} diff --git a/client/transaction-pool/src/testing/pool.rs b/client/transaction-pool/src/testing/pool.rs index 5ad79a6f75..a938313733 100644 --- a/client/transaction-pool/src/testing/pool.rs +++ b/client/transaction-pool/src/testing/pool.rs @@ -1008,7 +1008,7 @@ fn should_not_accept_old_signatures() { let client = Arc::new(substrate_test_runtime_client::new()); let pool = Arc::new( - BasicPool::new_test(Arc::new(FullChainApi::new(client))).0 + BasicPool::new_test(Arc::new(FullChainApi::new(client, None))).0 ); let transfer = Transfer { @@ -1044,7 +1044,7 @@ fn import_notification_to_pool_maintain_works() { let mut client = Arc::new(substrate_test_runtime_client::new()); let pool = Arc::new( - BasicPool::new_test(Arc::new(FullChainApi::new(client.clone()))).0 + BasicPool::new_test(Arc::new(FullChainApi::new(client.clone(), None))).0 ); // Prepare the extrisic, push it to the pool and check that it was added. 
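Usage-wise, the constructor change above means the caller decides whether the pool API metrics get registered. A sketch (assuming `client` and `registry` exist in the surrounding service builder, as in the node service changes earlier in this patch):

// With a Prometheus registry: registers `ApiMetrics`
// (sub_txpool_validations_scheduled / sub_txpool_validations_finished).
let pool_api = sc_transaction_pool::FullChainApi::new(client.clone(), Some(&registry));

// Without metrics, e.g. in tests, exactly as the updated fixtures do.
let test_api = sc_transaction_pool::FullChainApi::new(client.clone(), None);
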
diff --git a/utils/frame/rpc/system/src/lib.rs b/utils/frame/rpc/system/src/lib.rs index 6927f05b4f..3382453b1d 100644 --- a/utils/frame/rpc/system/src/lib.rs +++ b/utils/frame/rpc/system/src/lib.rs @@ -301,7 +301,7 @@ mod tests { let pool = Arc::new( BasicPool::new( Default::default(), - Arc::new(FullChainApi::new(client.clone())), + Arc::new(FullChainApi::new(client.clone(), None)), None, ).0 ); @@ -340,7 +340,7 @@ mod tests { let pool = Arc::new( BasicPool::new( Default::default(), - Arc::new(FullChainApi::new(client.clone())), + Arc::new(FullChainApi::new(client.clone(), None)), None, ).0 ); @@ -363,7 +363,7 @@ mod tests { let pool = Arc::new( BasicPool::new( Default::default(), - Arc::new(FullChainApi::new(client.clone())), + Arc::new(FullChainApi::new(client.clone(), None)), None, ).0 ); @@ -395,7 +395,7 @@ mod tests { let pool = Arc::new( BasicPool::new( Default::default(), - Arc::new(FullChainApi::new(client.clone())), + Arc::new(FullChainApi::new(client.clone(), None)), None, ).0 ); -- GitLab From e824e8ab0fadec9949ebb8b9e14d98703d6b8d44 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Wed, 8 Jul 2020 18:22:01 +0200 Subject: [PATCH 137/144] Add `WeightInfo` to all pallets with benchmarks. (#6575) * Start adding weight info * More weightinfo * finish weight info * more fixes * inital update of node runtime * fix the rest of the compilation * update balances * add docs * fix balances tests * Fix more tests * Fix compile * Fix pallet-evm tests --- .../pallets/template/src/mock.rs | 1 + bin/node-template/runtime/src/lib.rs | 4 ++ bin/node/runtime/src/lib.rs | 19 ++++++ frame/assets/src/lib.rs | 1 + frame/atomic-swap/src/tests.rs | 2 + frame/aura/src/mock.rs | 2 + frame/authority-discovery/src/lib.rs | 2 + frame/authorship/src/lib.rs | 1 + frame/babe/src/mock.rs | 6 ++ frame/balances/src/lib.rs | 26 ++++++++ frame/balances/src/tests_composite.rs | 2 + frame/balances/src/tests_local.rs | 2 + frame/benchmarking/src/tests.rs | 1 + frame/collective/src/lib.rs | 30 +++++++++ frame/contracts/src/tests.rs | 3 + frame/democracy/src/lib.rs | 63 ++++++++++++++++++ frame/democracy/src/tests.rs | 4 ++ frame/elections-phragmen/src/lib.rs | 40 +++++++++++- frame/elections/src/mock.rs | 4 +- frame/evm/src/tests.rs | 3 + frame/example-offchain-worker/src/tests.rs | 1 + frame/example/src/lib.rs | 2 + frame/executive/src/lib.rs | 2 + frame/finality-tracker/src/lib.rs | 1 + frame/generic-asset/src/lib.rs | 1 + frame/generic-asset/src/mock.rs | 1 + frame/grandpa/src/mock.rs | 6 ++ frame/identity/src/lib.rs | 34 ++++++++++ frame/im-online/src/lib.rs | 15 +++++ frame/im-online/src/mock.rs | 3 + frame/indices/src/lib.rs | 21 +++++- frame/indices/src/mock.rs | 3 + frame/membership/src/lib.rs | 1 + frame/multisig/src/lib.rs | 29 +++++++++ frame/multisig/src/tests.rs | 3 + frame/nicks/src/lib.rs | 2 + frame/offences/benchmarking/src/mock.rs | 7 ++ frame/offences/src/lib.rs | 16 +++++ frame/offences/src/mock.rs | 2 + frame/proxy/src/lib.rs | 23 ++++++- frame/proxy/src/tests.rs | 4 ++ frame/randomness-collective-flip/src/lib.rs | 1 + frame/recovery/src/mock.rs | 2 + frame/scheduler/src/lib.rs | 21 ++++++ frame/scored-pool/src/mock.rs | 2 + frame/session/benchmarking/src/mock.rs | 7 +- frame/session/src/lib.rs | 13 ++++ frame/session/src/mock.rs | 3 + frame/society/src/mock.rs | 2 + frame/staking/fuzzer/src/mock.rs | 6 ++ frame/staking/src/lib.rs | 65 +++++++++++++++++++ frame/staking/src/mock.rs | 5 ++ frame/sudo/src/mock.rs | 1 + frame/system/benches/bench.rs | 1 + frame/system/benchmarking/src/mock.rs | 1 + 
frame/system/src/lib.rs | 24 +++++++ frame/system/src/mock.rs | 1 + frame/timestamp/src/lib.rs | 15 +++++ frame/transaction-payment/src/lib.rs | 2 + frame/treasury/src/lib.rs | 27 ++++++++ frame/treasury/src/tests.rs | 3 + frame/utility/src/lib.rs | 13 ++++ frame/utility/src/tests.rs | 3 + frame/vesting/src/lib.rs | 26 +++++++- test-utils/runtime/src/lib.rs | 2 + 65 files changed, 632 insertions(+), 7 deletions(-) diff --git a/bin/node-template/pallets/template/src/mock.rs b/bin/node-template/pallets/template/src/mock.rs index 0d9ae7cff7..130a782bb7 100644 --- a/bin/node-template/pallets/template/src/mock.rs +++ b/bin/node-template/pallets/template/src/mock.rs @@ -48,6 +48,7 @@ impl system::Trait for Test { type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } impl Trait for Test { type Event = (); diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index 30571b7e0b..85010ba394 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -189,6 +189,8 @@ impl system::Trait for Runtime { type OnKilledAccount = (); /// The data to be stored in an account. type AccountData = balances::AccountData; + /// Weight information for the extrinsics of this pallet. + type SystemWeightInfo = (); } impl aura::Trait for Runtime { @@ -221,6 +223,7 @@ impl timestamp::Trait for Runtime { type Moment = u64; type OnTimestampSet = Aura; type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); } parameter_types! { @@ -235,6 +238,7 @@ impl balances::Trait for Runtime { type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } parameter_types! { diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 85c3aef41c..f6e85cb34f 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -171,11 +171,13 @@ impl frame_system::Trait for Runtime { type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } impl pallet_utility::Trait for Runtime { type Event = Event; type Call = Call; + type WeightInfo = (); } parameter_types! { @@ -193,6 +195,7 @@ impl pallet_multisig::Trait for Runtime { type DepositBase = DepositBase; type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; + type WeightInfo = (); } parameter_types! { @@ -246,6 +249,7 @@ impl pallet_proxy::Trait for Runtime { type ProxyDepositBase = ProxyDepositBase; type ProxyDepositFactor = ProxyDepositFactor; type MaxProxies = MaxProxies; + type WeightInfo = (); } parameter_types! { @@ -259,6 +263,7 @@ impl pallet_scheduler::Trait for Runtime { type Call = Call; type MaximumWeight = MaximumSchedulerWeight; type ScheduleOrigin = EnsureRoot; + type WeightInfo = (); } parameter_types! { @@ -296,6 +301,7 @@ impl pallet_indices::Trait for Runtime { type Currency = Balances; type Deposit = IndexDeposit; type Event = Event; + type WeightInfo = (); } parameter_types! { @@ -308,6 +314,7 @@ impl pallet_balances::Trait for Runtime { type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = frame_system::Module; + type WeightInfo = (); } parameter_types! { @@ -334,6 +341,7 @@ impl pallet_timestamp::Trait for Runtime { type Moment = Moment; type OnTimestampSet = Babe; type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); } parameter_types! 
{ @@ -370,6 +378,7 @@ impl pallet_session::Trait for Runtime { type SessionHandler = ::KeyTypeIdProviders; type Keys = SessionKeys; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; + type WeightInfo = (); } impl pallet_session::historical::Trait for Runtime { @@ -426,6 +435,7 @@ impl pallet_staking::Trait for Runtime { type MinSolutionScoreBump = MinSolutionScoreBump; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type UnsignedPriority = StakingUnsignedPriority; + type WeightInfo = (); } parameter_types! { @@ -474,6 +484,7 @@ impl pallet_democracy::Trait for Runtime { type Scheduler = Scheduler; type PalletsOrigin = OriginCaller; type MaxVotes = MaxVotes; + type WeightInfo = (); } parameter_types! { @@ -488,6 +499,7 @@ impl pallet_collective::Trait for Runtime { type Event = Event; type MotionDuration = CouncilMotionDuration; type MaxProposals = CouncilMaxProposals; + type WeightInfo = (); } parameter_types! { @@ -519,6 +531,7 @@ impl pallet_elections_phragmen::Trait for Runtime { type DesiredMembers = DesiredMembers; type DesiredRunnersUp = DesiredRunnersUp; type TermDuration = TermDuration; + type WeightInfo = (); } parameter_types! { @@ -533,6 +546,7 @@ impl pallet_collective::Trait for Runtime { type Event = Event; type MotionDuration = TechnicalMotionDuration; type MaxProposals = TechnicalMaxProposals; + type WeightInfo = (); } type EnsureRootOrHalfCouncil = EnsureOneOf< @@ -587,6 +601,7 @@ impl pallet_treasury::Trait for Runtime { type ProposalBondMinimum = ProposalBondMinimum; type SpendPeriod = SpendPeriod; type Burn = Burn; + type WeightInfo = (); } parameter_types! { @@ -688,6 +703,7 @@ impl pallet_im_online::Trait for Runtime { type SessionDuration = SessionDuration; type ReportUnresponsiveness = Offences; type UnsignedPriority = ImOnlineUnsignedPriority; + type WeightInfo = (); } parameter_types! { @@ -699,6 +715,7 @@ impl pallet_offences::Trait for Runtime { type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; type WeightSoftLimit = OffencesWeightSoftLimit; + type WeightInfo = (); } impl pallet_authority_discovery::Trait for Runtime {} @@ -757,6 +774,7 @@ impl pallet_identity::Trait for Runtime { type Slashed = Treasury; type ForceOrigin = EnsureRootOrHalfCouncil; type RegistrarOrigin = EnsureRootOrHalfCouncil; + type WeightInfo = (); } parameter_types! { @@ -813,6 +831,7 @@ impl pallet_vesting::Trait for Runtime { type Currency = Balances; type BlockNumberToBalance = ConvertInto; type MinVestedTransfer = MinVestedTransfer; + type WeightInfo = (); } construct_runtime!( diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index 1445c53082..3dfee1ffa6 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -328,6 +328,7 @@ mod tests { type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } impl Trait for Test { type Event = (); diff --git a/frame/atomic-swap/src/tests.rs b/frame/atomic-swap/src/tests.rs index d04ffab205..587b5ccbc1 100644 --- a/frame/atomic-swap/src/tests.rs +++ b/frame/atomic-swap/src/tests.rs @@ -54,6 +54,7 @@ impl frame_system::Trait for Test { type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } parameter_types! 
{ pub const ExistentialDeposit: u64 = 1; @@ -64,6 +65,7 @@ impl pallet_balances::Trait for Test { type Event = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } parameter_types! { pub const ProofLimit: u32 = 1024; diff --git a/frame/aura/src/mock.rs b/frame/aura/src/mock.rs index db2c86492f..5695c50ac5 100644 --- a/frame/aura/src/mock.rs +++ b/frame/aura/src/mock.rs @@ -70,12 +70,14 @@ impl frame_system::Trait for Test { type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } impl pallet_timestamp::Trait for Test { type Moment = u64; type OnTimestampSet = Aura; type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); } impl Trait for Test { diff --git a/frame/authority-discovery/src/lib.rs b/frame/authority-discovery/src/lib.rs index f6008c9719..f048fe011b 100644 --- a/frame/authority-discovery/src/lib.rs +++ b/frame/authority-discovery/src/lib.rs @@ -123,6 +123,7 @@ mod tests { type ValidatorIdOf = ConvertInto; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; type NextSessionRotation = pallet_session::PeriodicSessions; + type WeightInfo = (); } impl pallet_session::historical::Trait for Test { @@ -167,6 +168,7 @@ mod tests { type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } impl_outer_origin! { diff --git a/frame/authorship/src/lib.rs b/frame/authorship/src/lib.rs index 3023f8a2d3..b62e924d00 100644 --- a/frame/authorship/src/lib.rs +++ b/frame/authorship/src/lib.rs @@ -442,6 +442,7 @@ mod tests { type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } parameter_types! { diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index c398aaeb85..8a0356d8da 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -90,6 +90,7 @@ impl frame_system::Trait for Test { type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } impl frame_system::offchain::SendTransactionTypes for Test @@ -116,6 +117,7 @@ impl pallet_session::Trait for Test { type SessionHandler = ::KeyTypeIdProviders; type Keys = MockSessionKeys; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; + type WeightInfo = (); } impl pallet_session::historical::Trait for Test { @@ -142,6 +144,7 @@ impl pallet_timestamp::Trait for Test { type Moment = u64; type OnTimestampSet = Babe; type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); } parameter_types! { @@ -154,6 +157,7 @@ impl pallet_balances::Trait for Test { type Event = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } pallet_staking_reward_curve::build! { @@ -213,6 +217,7 @@ impl pallet_staking::Trait for Test { type UnsignedPriority = StakingUnsignedPriority; type MaxIterations = (); type MinSolutionScoreBump = (); + type WeightInfo = (); } parameter_types! 
{ @@ -224,6 +229,7 @@ impl pallet_offences::Trait for Test { type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; type WeightSoftLimit = OffencesWeightSoftLimit; + type WeightInfo = (); } impl Trait for Test { diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index 62402c7863..fefe054b6d 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -160,6 +160,7 @@ use sp_std::{cmp, result, mem, fmt::Debug, ops::BitOr, convert::Infallible}; use codec::{Codec, Encode, Decode}; use frame_support::{ StorageValue, Parameter, decl_event, decl_storage, decl_module, decl_error, ensure, + weights::Weight, traits::{ Currency, OnKilledAccount, OnUnbalanced, TryDrop, StoredMap, WithdrawReason, WithdrawReasons, LockIdentifier, LockableCurrency, ExistenceRequirement, @@ -178,6 +179,22 @@ use frame_system::{self as system, ensure_signed, ensure_root}; pub use self::imbalances::{PositiveImbalance, NegativeImbalance}; +pub trait WeightInfo { + fn transfer(u: u32, e: u32, ) -> Weight; + fn transfer_best_case(u: u32, e: u32, ) -> Weight; + fn transfer_keep_alive(u: u32, e: u32, ) -> Weight; + fn set_balance(u: u32, e: u32, ) -> Weight; + fn set_balance_killing(u: u32, e: u32, ) -> Weight; +} + +impl WeightInfo for () { + fn transfer(_u: u32, _e: u32, ) -> Weight { 1_000_000_000 } + fn transfer_best_case(_u: u32, _e: u32, ) -> Weight { 1_000_000_000 } + fn transfer_keep_alive(_u: u32, _e: u32, ) -> Weight { 1_000_000_000 } + fn set_balance(_u: u32, _e: u32, ) -> Weight { 1_000_000_000 } + fn set_balance_killing(_u: u32, _e: u32, ) -> Weight { 1_000_000_000 } +} + pub trait Subtrait: frame_system::Trait { /// The balance of an account. type Balance: Parameter + Member + AtLeast32BitUnsigned + Codec + Default + Copy + @@ -188,6 +205,9 @@ pub trait Subtrait: frame_system::Trait { /// The means of storing the balances of an account. type AccountStore: StoredMap>; + + /// Weight information for the extrinsics in this pallet. + type WeightInfo: WeightInfo; } pub trait Trait: frame_system::Trait { @@ -206,12 +226,16 @@ pub trait Trait: frame_system::Trait { /// The means of storing the balances of an account. type AccountStore: StoredMap>; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } impl, I: Instance> Subtrait for T { type Balance = T::Balance; type ExistentialDeposit = T::ExistentialDeposit; type AccountStore = T::AccountStore; + type WeightInfo = >::WeightInfo; } decl_event!( @@ -872,6 +896,7 @@ impl, I: Instance> frame_system::Trait for ElevatedTrait { type OnNewAccount = T::OnNewAccount; type OnKilledAccount = T::OnKilledAccount; type AccountData = T::AccountData; + type SystemWeightInfo = T::SystemWeightInfo; } impl, I: Instance> Trait for ElevatedTrait { type Balance = T::Balance; @@ -879,6 +904,7 @@ impl, I: Instance> Trait for ElevatedTrait { type DustRemoval = (); type ExistentialDeposit = T::ExistentialDeposit; type AccountStore = T::AccountStore; + type WeightInfo = >::WeightInfo; } impl, I: Instance> Currency for Module where diff --git a/frame/balances/src/tests_composite.rs b/frame/balances/src/tests_composite.rs index 81cb3449a8..8e764112ba 100644 --- a/frame/balances/src/tests_composite.rs +++ b/frame/balances/src/tests_composite.rs @@ -91,6 +91,7 @@ impl frame_system::Trait for Test { type AccountData = super::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } parameter_types! 
{ pub const TransactionByteFee: u64 = 1; @@ -108,6 +109,7 @@ impl Trait for Test { type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = system::Module; + type WeightInfo = (); } pub struct ExtBuilder { diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index 54ab22af33..86abc2b604 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -91,6 +91,7 @@ impl frame_system::Trait for Test { type AccountData = super::AccountData; type OnNewAccount = (); type OnKilledAccount = Module; + type SystemWeightInfo = (); } parameter_types! { pub const TransactionByteFee: u64 = 1; @@ -113,6 +114,7 @@ impl Trait for Test { system::CallKillAccount, u64, super::AccountData >; + type WeightInfo = (); } pub struct ExtBuilder { diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index 674d92eb85..4ff645562e 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ -100,6 +100,7 @@ impl frame_system::Trait for Test { type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } impl Trait for Test { diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index 83116080d0..e2567d0477 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -76,6 +76,30 @@ pub type MemberCount = u32; /// + This pallet assumes that dependents keep to the limit without enforcing it. pub const MAX_MEMBERS: MemberCount = 100; +pub trait WeightInfo { + fn set_members(m: u32, n: u32, p: u32, ) -> Weight; + fn execute(m: u32, b: u32, ) -> Weight; + fn propose_execute(m: u32, b: u32, ) -> Weight; + fn propose_proposed(m: u32, p: u32, b: u32, ) -> Weight; + fn vote(m: u32, ) -> Weight; + fn close_early_disapproved(m: u32, p: u32, b: u32, ) -> Weight; + fn close_early_approved(m: u32, p: u32, b: u32, ) -> Weight; + fn close_disapproved(m: u32, p: u32, b: u32, ) -> Weight; + fn close_approved(m: u32, p: u32, b: u32, ) -> Weight; +} + +impl WeightInfo for () { + fn set_members(_m: u32, _n: u32, _p: u32, ) -> Weight { 1_000_000_000 } + fn execute(_m: u32, _b: u32, ) -> Weight { 1_000_000_000 } + fn propose_execute(_m: u32, _b: u32, ) -> Weight { 1_000_000_000 } + fn propose_proposed(_m: u32, _p: u32, _b: u32, ) -> Weight { 1_000_000_000 } + fn vote(_m: u32, ) -> Weight { 1_000_000_000 } + fn close_early_disapproved(_m: u32, _p: u32, _b: u32, ) -> Weight { 1_000_000_000 } + fn close_early_approved(_m: u32, _p: u32, _b: u32, ) -> Weight { 1_000_000_000 } + fn close_disapproved(_m: u32, _p: u32, _b: u32, ) -> Weight { 1_000_000_000 } + fn close_approved(_m: u32, _p: u32, _b: u32, ) -> Weight { 1_000_000_000 } +} + pub trait Trait: frame_system::Trait { /// The outer origin type. type Origin: From>; @@ -94,6 +118,9 @@ pub trait Trait: frame_system::Trait { /// Maximum number of proposals allowed to be active in parallel. type MaxProposals: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } /// Origin for the collective module. 
@@ -1039,6 +1066,7 @@ mod tests { type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } impl Trait for Test { type Origin = Origin; @@ -1046,6 +1074,7 @@ mod tests { type Event = Event; type MotionDuration = MotionDuration; type MaxProposals = MaxProposals; + type WeightInfo = (); } impl Trait for Test { type Origin = Origin; @@ -1053,6 +1082,7 @@ mod tests { type Event = Event; type MotionDuration = MotionDuration; type MaxProposals = MaxProposals; + type WeightInfo = (); } pub type Block = sp_runtime::generic::Block; diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 5303375e01..a54bfad654 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -132,6 +132,7 @@ impl frame_system::Trait for Test { type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } impl pallet_balances::Trait for Test { type Balance = u64; @@ -139,6 +140,7 @@ impl pallet_balances::Trait for Test { type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } parameter_types! { pub const MinimumPeriod: u64 = 1; @@ -147,6 +149,7 @@ impl pallet_timestamp::Trait for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); } parameter_types! { pub const SignedClaimHandicap: u64 = 2; diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index b005ad3641..ae256f9d73 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -201,6 +201,66 @@ type BalanceOf = <::Currency as Currency< = <::Currency as Currency<::AccountId>>::NegativeImbalance; +pub trait WeightInfo { + fn propose(p: u32, ) -> Weight; + fn second(s: u32, ) -> Weight; + fn vote_new(r: u32, ) -> Weight; + fn vote_existing(r: u32, ) -> Weight; + fn emergency_cancel(r: u32, ) -> Weight; + fn external_propose(p: u32, v: u32, ) -> Weight; + fn external_propose_majority(p: u32, ) -> Weight; + fn external_propose_default(p: u32, ) -> Weight; + fn fast_track(p: u32, ) -> Weight; + fn veto_external(v: u32, ) -> Weight; + fn cancel_referendum(r: u32, ) -> Weight; + fn cancel_queued(r: u32, ) -> Weight; + fn on_initialize_external(r: u32, ) -> Weight; + fn on_initialize_public(r: u32, ) -> Weight; + fn on_initialize_no_launch_no_maturing(r: u32, ) -> Weight; + fn delegate(r: u32, ) -> Weight; + fn undelegate(r: u32, ) -> Weight; + fn clear_public_proposals(p: u32, ) -> Weight; + fn note_preimage(b: u32, ) -> Weight; + fn note_imminent_preimage(b: u32, ) -> Weight; + fn reap_preimage(b: u32, ) -> Weight; + fn unlock_remove(r: u32, ) -> Weight; + fn unlock_set(r: u32, ) -> Weight; + fn remove_vote(r: u32, ) -> Weight; + fn remove_other_vote(r: u32, ) -> Weight; + fn enact_proposal_execute(b: u32, ) -> Weight; + fn enact_proposal_slash(b: u32, ) -> Weight; +} + +impl WeightInfo for () { + fn propose(_p: u32, ) -> Weight { 1_000_000_000 } + fn second(_s: u32, ) -> Weight { 1_000_000_000 } + fn vote_new(_r: u32, ) -> Weight { 1_000_000_000 } + fn vote_existing(_r: u32, ) -> Weight { 1_000_000_000 } + fn emergency_cancel(_r: u32, ) -> Weight { 1_000_000_000 } + fn external_propose(_p: u32, _v: u32, ) -> Weight { 1_000_000_000 } + fn external_propose_majority(_p: u32, ) -> Weight { 1_000_000_000 } + fn external_propose_default(_p: u32, ) -> Weight { 1_000_000_000 } + fn fast_track(_p: u32, ) -> Weight { 1_000_000_000 } + fn veto_external(_v: u32, ) -> Weight 
{ 1_000_000_000 } + fn cancel_referendum(_r: u32, ) -> Weight { 1_000_000_000 } + fn cancel_queued(_r: u32, ) -> Weight { 1_000_000_000 } + fn on_initialize_external(_r: u32, ) -> Weight { 1_000_000_000 } + fn on_initialize_public(_r: u32, ) -> Weight { 1_000_000_000 } + fn on_initialize_no_launch_no_maturing(_r: u32, ) -> Weight { 1_000_000_000 } + fn delegate(_r: u32, ) -> Weight { 1_000_000_000 } + fn undelegate(_r: u32, ) -> Weight { 1_000_000_000 } + fn clear_public_proposals(_p: u32, ) -> Weight { 1_000_000_000 } + fn note_preimage(_b: u32, ) -> Weight { 1_000_000_000 } + fn note_imminent_preimage(_b: u32, ) -> Weight { 1_000_000_000 } + fn reap_preimage(_b: u32, ) -> Weight { 1_000_000_000 } + fn unlock_remove(_r: u32, ) -> Weight { 1_000_000_000 } + fn unlock_set(_r: u32, ) -> Weight { 1_000_000_000 } + fn remove_vote(_r: u32, ) -> Weight { 1_000_000_000 } + fn remove_other_vote(_r: u32, ) -> Weight { 1_000_000_000 } + fn enact_proposal_execute(_b: u32, ) -> Weight { 1_000_000_000 } + fn enact_proposal_slash(_b: u32, ) -> Weight { 1_000_000_000 } +} + pub trait Trait: frame_system::Trait + Sized { type Proposal: Parameter + Dispatchable + From>; type Event: From> + Into<::Event>; @@ -289,6 +349,9 @@ pub trait Trait: frame_system::Trait + Sized { /// Also used to compute weight, an overly big value can /// lead to extrinsic with very big weight: see `delegate` for instance. type MaxVotes: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } #[derive(Clone, Encode, Decode, RuntimeDebug)] diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index 2f300ec8bc..78eb143407 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -116,6 +116,7 @@ impl frame_system::Trait for Test { type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } parameter_types! { pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * MaximumBlockWeight::get(); @@ -127,6 +128,7 @@ impl pallet_scheduler::Trait for Test { type Call = Call; type MaximumWeight = MaximumSchedulerWeight; type ScheduleOrigin = EnsureRoot; + type WeightInfo = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; @@ -137,6 +139,7 @@ impl pallet_balances::Trait for Test { type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } parameter_types! 
{ pub const LaunchPeriod: u64 = 2; @@ -199,6 +202,7 @@ impl super::Trait for Test { type MaxVotes = MaxVotes; type OperationalPreimageOrigin = EnsureSignedBy; type PalletsOrigin = OriginCaller; + type WeightInfo = (); } pub fn new_test_ext() -> sp_io::TestExternalities { diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 63824dbf9c..c59ac59031 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -137,6 +137,38 @@ pub struct DefunctVoter { pub candidate_count: u32 } +pub trait WeightInfo { + fn vote(u: u32, ) -> Weight; + fn vote_update(u: u32, ) -> Weight; + fn remove_voter(u: u32, ) -> Weight; + fn report_defunct_voter_correct(c: u32, v: u32, ) -> Weight; + fn report_defunct_voter_incorrect(c: u32, v: u32, ) -> Weight; + fn submit_candidacy(c: u32, ) -> Weight; + fn renounce_candidacy_candidate(c: u32, ) -> Weight; + fn renounce_candidacy_member_runner_up(u: u32, ) -> Weight; + fn remove_member_without_replacement(c: u32, ) -> Weight; + fn remove_member_with_replacement(u: u32, ) -> Weight; + fn remove_member_wrong_refund(u: u32, ) -> Weight; + fn on_initialize(c: u32, ) -> Weight; + fn phragmen(c: u32, v: u32, e: u32, ) -> Weight; +} + +impl WeightInfo for () { + fn vote(_u: u32, ) -> Weight { 1_000_000_000 } + fn vote_update(_u: u32, ) -> Weight { 1_000_000_000 } + fn remove_voter(_u: u32, ) -> Weight { 1_000_000_000 } + fn report_defunct_voter_correct(_c: u32, _v: u32, ) -> Weight { 1_000_000_000 } + fn report_defunct_voter_incorrect(_c: u32, _v: u32, ) -> Weight { 1_000_000_000 } + fn submit_candidacy(_c: u32, ) -> Weight { 1_000_000_000 } + fn renounce_candidacy_candidate(_c: u32, ) -> Weight { 1_000_000_000 } + fn renounce_candidacy_member_runner_up(_u: u32, ) -> Weight { 1_000_000_000 } + fn remove_member_without_replacement(_c: u32, ) -> Weight { 1_000_000_000 } + fn remove_member_with_replacement(_u: u32, ) -> Weight { 1_000_000_000 } + fn remove_member_wrong_refund(_u: u32, ) -> Weight { 1_000_000_000 } + fn on_initialize(_c: u32, ) -> Weight { 1_000_000_000 } + fn phragmen(_c: u32, _v: u32, _e: u32, ) -> Weight { 1_000_000_000 } +} + pub trait Trait: frame_system::Trait { /// The overarching event type.c type Event: From> + Into<::Event>; @@ -184,6 +216,9 @@ pub trait Trait: frame_system::Trait { /// round will happen. If set to zero, no elections are ever triggered and the module will /// be in passive mode. type TermDuration: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } decl_storage! { @@ -1093,6 +1128,7 @@ mod tests { type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } parameter_types! { @@ -1105,7 +1141,8 @@ mod tests { type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = frame_system::Module; -} + type WeightInfo = (); + } parameter_types! { pub const CandidacyBond: u64 = 3; @@ -1213,6 +1250,7 @@ mod tests { type LoserCandidate = (); type KickedMember = (); type BadReport = (); + type WeightInfo = (); } pub type Block = sp_runtime::generic::Block; diff --git a/frame/elections/src/mock.rs b/frame/elections/src/mock.rs index b0be542ab7..c9b2523c4b 100644 --- a/frame/elections/src/mock.rs +++ b/frame/elections/src/mock.rs @@ -63,6 +63,7 @@ impl frame_system::Trait for Test { type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } parameter_types! 
{ @@ -74,6 +75,7 @@ impl pallet_balances::Trait for Test { type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } parameter_types! { @@ -128,7 +130,7 @@ impl ChangeMembers for TestChangeMembers { } parameter_types!{ - pub const ElectionModuleId: LockIdentifier = *b"py/elect"; + pub const ElectionModuleId: LockIdentifier = *b"py/elect"; } impl elections::Trait for Test { diff --git a/frame/evm/src/tests.rs b/frame/evm/src/tests.rs index b1f65e10e1..438ddbba87 100644 --- a/frame/evm/src/tests.rs +++ b/frame/evm/src/tests.rs @@ -61,6 +61,7 @@ impl frame_system::Trait for Test { type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } parameter_types! { @@ -72,6 +73,7 @@ impl pallet_balances::Trait for Test { type Event = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } parameter_types! { @@ -81,6 +83,7 @@ impl pallet_timestamp::Trait for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); } /// Fixed gas price of `0`. diff --git a/frame/example-offchain-worker/src/tests.rs b/frame/example-offchain-worker/src/tests.rs index b300809f41..e008fe4053 100644 --- a/frame/example-offchain-worker/src/tests.rs +++ b/frame/example-offchain-worker/src/tests.rs @@ -78,6 +78,7 @@ impl frame_system::Trait for Test { type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } type Extrinsic = TestXt, ()>; diff --git a/frame/example/src/lib.rs b/frame/example/src/lib.rs index 65e2e494d1..8a6374f227 100644 --- a/frame/example/src/lib.rs +++ b/frame/example/src/lib.rs @@ -768,6 +768,7 @@ mod tests { type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; @@ -778,6 +779,7 @@ mod tests { type Event = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } impl Trait for Test { type Event = (); diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 9b0e4eab02..ce765cc8ca 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -569,6 +569,7 @@ mod tests { type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } type Balance = u64; @@ -581,6 +582,7 @@ mod tests { type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } parameter_types! { diff --git a/frame/finality-tracker/src/lib.rs b/frame/finality-tracker/src/lib.rs index aa692e65a8..0cbf383c1a 100644 --- a/frame/finality-tracker/src/lib.rs +++ b/frame/finality-tracker/src/lib.rs @@ -277,6 +277,7 @@ mod tests { type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } parameter_types! 
{ pub const WindowSize: u64 = 11; diff --git a/frame/generic-asset/src/lib.rs b/frame/generic-asset/src/lib.rs index 0f3d9fec74..c85920edf3 100644 --- a/frame/generic-asset/src/lib.rs +++ b/frame/generic-asset/src/lib.rs @@ -1135,6 +1135,7 @@ impl frame_system::Trait for ElevatedTrait { type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } impl Trait for ElevatedTrait { type Balance = T::Balance; diff --git a/frame/generic-asset/src/mock.rs b/frame/generic-asset/src/mock.rs index a928c9d67b..f04957b950 100644 --- a/frame/generic-asset/src/mock.rs +++ b/frame/generic-asset/src/mock.rs @@ -70,6 +70,7 @@ impl frame_system::Trait for Test { type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } impl Trait for Test { diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index 7da32c5958..991ada4fbf 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -118,6 +118,7 @@ impl frame_system::Trait for Test { type AccountData = balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } impl system::offchain::SendTransactionTypes for Test @@ -145,6 +146,7 @@ impl session::Trait for Test { type SessionHandler = ::KeyTypeIdProviders; type Keys = TestSessionKeys; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; + type WeightInfo = (); } impl session::historical::Trait for Test { @@ -162,6 +164,7 @@ impl balances::Trait for Test { type Event = TestEvent; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } parameter_types! { @@ -172,6 +175,7 @@ impl timestamp::Trait for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); } pallet_staking_reward_curve::build! { @@ -231,6 +235,7 @@ impl staking::Trait for Test { type UnsignedPriority = StakingUnsignedPriority; type MaxIterations = (); type MinSolutionScoreBump = (); + type WeightInfo = (); } parameter_types! 
{ @@ -242,6 +247,7 @@ impl offences::Trait for Test { type IdentificationTuple = session::historical::IdentificationTuple; type OnOffenceHandler = Staking; type WeightSoftLimit = OffencesWeightSoftLimit; + type WeightInfo = (); } impl Trait for Test { diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index b4c161aabb..f303a37198 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -85,6 +85,34 @@ mod benchmarking; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +pub trait WeightInfo { + fn add_registrar(r: u32, ) -> Weight; + fn set_identity(r: u32, x: u32, ) -> Weight; + fn set_subs(p: u32, s: u32, ) -> Weight; + fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight; + fn request_judgement(r: u32, x: u32, ) -> Weight; + fn cancel_request(r: u32, x: u32, ) -> Weight; + fn set_fee(r: u32, ) -> Weight; + fn set_account_id(r: u32, ) -> Weight; + fn set_fields(r: u32, ) -> Weight; + fn provide_judgement(r: u32, x: u32, ) -> Weight; + fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight; +} + +impl WeightInfo for () { + fn add_registrar(_r: u32, ) -> Weight { 1_000_000_000 } + fn set_identity(_r: u32, _x: u32, ) -> Weight { 1_000_000_000 } + fn set_subs(_p: u32, _s: u32, ) -> Weight { 1_000_000_000 } + fn clear_identity(_r: u32, _s: u32, _x: u32, ) -> Weight { 1_000_000_000 } + fn request_judgement(_r: u32, _x: u32, ) -> Weight { 1_000_000_000 } + fn cancel_request(_r: u32, _x: u32, ) -> Weight { 1_000_000_000 } + fn set_fee(_r: u32, ) -> Weight { 1_000_000_000 } + fn set_account_id(_r: u32, ) -> Weight { 1_000_000_000 } + fn set_fields(_r: u32, ) -> Weight { 1_000_000_000 } + fn provide_judgement(_r: u32, _x: u32, ) -> Weight { 1_000_000_000 } + fn kill_identity(_r: u32, _s: u32, _x: u32, ) -> Weight { 1_000_000_000 } +} + pub trait Trait: frame_system::Trait { /// The overarching event type. type Event: From> + Into<::Event>; @@ -122,6 +150,9 @@ pub trait Trait: frame_system::Trait { /// The origin which may add or remove registrars. Root can always do this. type RegistrarOrigin: EnsureOrigin; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } /// Either underlying data blob if it is at most 32 bytes, or a hash of it. If the data is greater @@ -1198,6 +1229,7 @@ mod tests { type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; @@ -1208,6 +1240,7 @@ mod tests { type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } parameter_types! 
{ pub const BasicDeposit: u64 = 10; @@ -1243,6 +1276,7 @@ mod tests { type MaxRegistrars = MaxRegistrars; type RegistrarOrigin = EnsureOneOrRoot; type ForceOrigin = EnsureTwoOrRoot; + type WeightInfo = (); } type System = frame_system::Module; type Balances = pallet_balances::Module; diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index a755b5d2d1..cdc7fea00d 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -226,6 +226,18 @@ pub struct Heartbeat pub validators_len: u32, } +pub trait WeightInfo { + fn heartbeat(k: u32, e: u32, ) -> Weight; + fn validate_unsigned(k: u32, e: u32, ) -> Weight; + fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight; +} + +impl WeightInfo for () { + fn heartbeat(_k: u32, _e: u32, ) -> Weight { 1_000_000_000 } + fn validate_unsigned(_k: u32, _e: u32, ) -> Weight { 1_000_000_000 } + fn validate_unsigned_and_then_heartbeat(_k: u32, _e: u32, ) -> Weight { 1_000_000_000 } +} + pub trait Trait: SendTransactionTypes> + pallet_session::historical::Trait { /// The identifier type for an authority. type AuthorityId: Member + Parameter + RuntimeAppPublic + Default + Ord; @@ -254,6 +266,9 @@ pub trait Trait: SendTransactionTypes> + pallet_session::historical:: /// This is exposed so that it can be tuned for particular runtime, when /// multiple pallets send unsigned transactions. type UnsignedPriority: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } decl_event!( diff --git a/frame/im-online/src/mock.rs b/frame/im-online/src/mock.rs index 3bc1f4d3f3..968aad1f95 100644 --- a/frame/im-online/src/mock.rs +++ b/frame/im-online/src/mock.rs @@ -134,6 +134,7 @@ impl frame_system::Trait for Runtime { type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } parameter_types! 
{ @@ -155,6 +156,7 @@ impl pallet_session::Trait for Runtime { type Event = (); type DisabledValidatorsThreshold = DisabledValidatorsThreshold; type NextSessionRotation = pallet_session::PeriodicSessions; + type WeightInfo = (); } impl pallet_session::historical::Trait for Runtime { @@ -183,6 +185,7 @@ impl Trait for Runtime { type ReportUnresponsiveness = OffenceHandler; type SessionDuration = Period; type UnsignedPriority = UnsignedPriority; + type WeightInfo = (); } impl frame_system::offchain::SendTransactionTypes for Runtime where diff --git a/frame/indices/src/lib.rs b/frame/indices/src/lib.rs index e58112403f..5224e03347 100644 --- a/frame/indices/src/lib.rs +++ b/frame/indices/src/lib.rs @@ -28,7 +28,7 @@ use sp_runtime::traits::{ use frame_support::{Parameter, decl_module, decl_error, decl_event, decl_storage, ensure}; use frame_support::dispatch::DispatchResult; use frame_support::traits::{Currency, ReservableCurrency, Get, BalanceStatus::Reserved}; -use frame_support::weights::constants::WEIGHT_PER_MICROS; +use frame_support::weights::{Weight, constants::WEIGHT_PER_MICROS}; use frame_system::{ensure_signed, ensure_root}; use self::address::Address as RawAddress; @@ -40,6 +40,22 @@ mod benchmarking; pub type Address = RawAddress<::AccountId, ::AccountIndex>; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +pub trait WeightInfo { + fn claim(i: u32, ) -> Weight; + fn transfer(i: u32, ) -> Weight; + fn free(i: u32, ) -> Weight; + fn force_transfer(i: u32, ) -> Weight; + fn freeze(i: u32, ) -> Weight; +} + +impl WeightInfo for () { + fn claim(_i: u32, ) -> Weight { 1_000_000_000 } + fn transfer(_i: u32, ) -> Weight { 1_000_000_000 } + fn free(_i: u32, ) -> Weight { 1_000_000_000 } + fn force_transfer(_i: u32, ) -> Weight { 1_000_000_000 } + fn freeze(_i: u32, ) -> Weight { 1_000_000_000 } +} + /// The module's config trait. pub trait Trait: frame_system::Trait { /// Type used for storing an account's index; implies the maximum number of accounts the system @@ -54,6 +70,9 @@ pub trait Trait: frame_system::Trait { /// The overarching event type. type Event: From> + Into<::Event>; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } decl_storage! { diff --git a/frame/indices/src/mock.rs b/frame/indices/src/mock.rs index da30c129c3..97e7a954f8 100644 --- a/frame/indices/src/mock.rs +++ b/frame/indices/src/mock.rs @@ -74,6 +74,7 @@ impl frame_system::Trait for Test { type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } parameter_types! { @@ -86,6 +87,7 @@ impl pallet_balances::Trait for Test { type Event = MetaEvent; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } parameter_types! { @@ -97,6 +99,7 @@ impl Trait for Test { type Currency = Balances; type Deposit = Deposit; type Event = MetaEvent; + type WeightInfo = (); } pub fn new_test_ext() -> sp_io::TestExternalities { diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index bf6c7ec486..3a99f8346d 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -329,6 +329,7 @@ mod tests { type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } ord_parameter_types! 
{ pub const One: u64 = 1; diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs index 388981cb8f..35727a1cac 100644 --- a/frame/multisig/src/lib.rs +++ b/frame/multisig/src/lib.rs @@ -64,6 +64,32 @@ type BalanceOf = <::Currency as Currency<; +pub trait WeightInfo { + fn as_multi_threshold_1(z: u32, ) -> Weight; + fn as_multi_create(s: u32, z: u32, ) -> Weight; + fn as_multi_create_store(s: u32, z: u32, ) -> Weight; + fn as_multi_approve(s: u32, z: u32, ) -> Weight; + fn as_multi_complete(s: u32, z: u32, ) -> Weight; + fn approve_as_multi_create(s: u32, z: u32, ) -> Weight; + fn approve_as_multi_approve(s: u32, z: u32, ) -> Weight; + fn approve_as_multi_complete(s: u32, z: u32, ) -> Weight; + fn cancel_as_multi(s: u32, z: u32, ) -> Weight; + fn cancel_as_multi_store(s: u32, z: u32, ) -> Weight; +} + +impl WeightInfo for () { + fn as_multi_threshold_1(_z: u32, ) -> Weight { 1_000_000_000 } + fn as_multi_create(_s: u32, _z: u32, ) -> Weight { 1_000_000_000 } + fn as_multi_create_store(_s: u32, _z: u32, ) -> Weight { 1_000_000_000 } + fn as_multi_approve(_s: u32, _z: u32, ) -> Weight { 1_000_000_000 } + fn as_multi_complete(_s: u32, _z: u32, ) -> Weight { 1_000_000_000 } + fn approve_as_multi_create(_s: u32, _z: u32, ) -> Weight { 1_000_000_000 } + fn approve_as_multi_approve(_s: u32, _z: u32, ) -> Weight { 1_000_000_000 } + fn approve_as_multi_complete(_s: u32, _z: u32, ) -> Weight { 1_000_000_000 } + fn cancel_as_multi(_s: u32, _z: u32, ) -> Weight { 1_000_000_000 } + fn cancel_as_multi_store(_s: u32, _z: u32, ) -> Weight { 1_000_000_000 } +} + /// Configuration trait. pub trait Trait: frame_system::Trait { /// The overarching event type. @@ -91,6 +117,9 @@ pub trait Trait: frame_system::Trait { /// The maximum amount of signatories allowed in the multisig. type MaxSignatories: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } /// A global extrinsic index, formed as the extrinsic index within a block, together with that diff --git a/frame/multisig/src/tests.rs b/frame/multisig/src/tests.rs index 4911ca90cf..888dcecb3a 100644 --- a/frame/multisig/src/tests.rs +++ b/frame/multisig/src/tests.rs @@ -84,6 +84,7 @@ impl frame_system::Trait for Test { type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; @@ -94,6 +95,7 @@ impl pallet_balances::Trait for Test { type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } parameter_types! { pub const DepositBase: u64 = 1; @@ -118,6 +120,7 @@ impl Trait for Test { type DepositBase = DepositBase; type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; + type WeightInfo = (); } type System = frame_system::Module; type Balances = pallet_balances::Module; diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index 27a0dedd7e..4602146c9c 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -290,6 +290,7 @@ mod tests { type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; @@ -300,6 +301,7 @@ mod tests { type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } parameter_types! 
{ pub const ReservationFee: u64 = 2; diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index 90ad7eeb3c..ad6e8a14d5 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -66,6 +66,7 @@ impl frame_system::Trait for Test { type BlockExecutionWeight = (); type ExtrinsicBaseWeight = (); type MaximumExtrinsicWeight = (); + type SystemWeightInfo = (); } parameter_types! { pub const ExistentialDeposit: Balance = 10; @@ -76,6 +77,7 @@ impl pallet_balances::Trait for Test { type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } parameter_types! { @@ -85,6 +87,7 @@ impl pallet_timestamp::Trait for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); } impl pallet_session::historical::Trait for Test { type FullIdentification = pallet_staking::Exposure; @@ -127,6 +130,7 @@ impl pallet_session::Trait for Test { type ValidatorId = AccountId; type ValidatorIdOf = pallet_staking::StashOf; type DisabledValidatorsThreshold = (); + type WeightInfo = (); } pallet_staking_reward_curve::build! { const I_NPOS: sp_runtime::curve::PiecewiseLinear<'static> = curve!( @@ -178,6 +182,7 @@ impl pallet_staking::Trait for Test { type UnsignedPriority = (); type MaxIterations = (); type MinSolutionScoreBump = (); + type WeightInfo = (); } impl pallet_im_online::Trait for Test { @@ -186,6 +191,7 @@ impl pallet_im_online::Trait for Test { type SessionDuration = Period; type ReportUnresponsiveness = Offences; type UnsignedPriority = (); + type WeightInfo = (); } parameter_types! { @@ -197,6 +203,7 @@ impl pallet_offences::Trait for Test { type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; type WeightSoftLimit = OffencesWeightSoftLimit; + type WeightInfo = (); } impl frame_system::offchain::SendTransactionTypes for Test where Call: From { diff --git a/frame/offences/src/lib.rs b/frame/offences/src/lib.rs index 5899c22fb0..fe4662efa8 100644 --- a/frame/offences/src/lib.rs +++ b/frame/offences/src/lib.rs @@ -51,6 +51,20 @@ pub type DeferredOffenceOf = ( SessionIndex, ); +pub trait WeightInfo { + fn report_offence_im_online(r: u32, o: u32, n: u32, ) -> Weight; + fn report_offence_grandpa(r: u32, n: u32, ) -> Weight; + fn report_offence_babe(r: u32, n: u32, ) -> Weight; + fn on_initialize(d: u32, ) -> Weight; +} + +impl WeightInfo for () { + fn report_offence_im_online(_r: u32, _o: u32, _n: u32, ) -> Weight { 1_000_000_000 } + fn report_offence_grandpa(_r: u32, _n: u32, ) -> Weight { 1_000_000_000 } + fn report_offence_babe(_r: u32, _n: u32, ) -> Weight { 1_000_000_000 } + fn on_initialize(_d: u32, ) -> Weight { 1_000_000_000 } +} + /// Offences trait pub trait Trait: frame_system::Trait { /// The overarching event type. @@ -63,6 +77,8 @@ pub trait Trait: frame_system::Trait { /// `on_initialize`. /// Note it's going to be exceeded before we stop adding to it, so it has to be set conservatively. type WeightSoftLimit: Get; + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } decl_storage! 
{ diff --git a/frame/offences/src/mock.rs b/frame/offences/src/mock.rs index 6c89072a0f..f981e70835 100644 --- a/frame/offences/src/mock.rs +++ b/frame/offences/src/mock.rs @@ -120,6 +120,7 @@ impl frame_system::Trait for Runtime { type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } parameter_types! { @@ -131,6 +132,7 @@ impl Trait for Runtime { type IdentificationTuple = u64; type OnOffenceHandler = OnOffenceHandler; type WeightSoftLimit = OffencesWeightSoftLimit; + type WeightInfo = (); } mod offences { diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index fb72fa8953..79879a51fa 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -43,7 +43,7 @@ use frame_support::{ decl_module, decl_event, decl_error, decl_storage, Parameter, ensure, traits::{ Get, ReservableCurrency, Currency, InstanceFilter, OriginTrait, IsType, - }, weights::{GetDispatchInfo, constants::{WEIGHT_PER_MICROS, WEIGHT_PER_NANOS}}, + }, weights::{Weight, GetDispatchInfo, constants::{WEIGHT_PER_MICROS, WEIGHT_PER_NANOS}}, dispatch::{PostDispatchInfo, IsSubType}, }; use frame_system::{self as system, ensure_signed}; @@ -53,6 +53,24 @@ mod benchmarking; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +pub trait WeightInfo { + fn proxy(p: u32, ) -> Weight; + fn add_proxy(p: u32, ) -> Weight; + fn remove_proxy(p: u32, ) -> Weight; + fn remove_proxies(p: u32, ) -> Weight; + fn anonymous(p: u32, ) -> Weight; + fn kill_anonymous(p: u32, ) -> Weight; +} + +impl WeightInfo for () { + fn proxy(_p: u32, ) -> Weight { 1_000_000_000 } + fn add_proxy(_p: u32, ) -> Weight { 1_000_000_000 } + fn remove_proxy(_p: u32, ) -> Weight { 1_000_000_000 } + fn remove_proxies(_p: u32, ) -> Weight { 1_000_000_000 } + fn anonymous(_p: u32, ) -> Weight { 1_000_000_000 } + fn kill_anonymous(_p: u32, ) -> Weight { 1_000_000_000 } +} + /// Configuration trait. pub trait Trait: frame_system::Trait { /// The overarching event type. @@ -87,6 +105,9 @@ pub trait Trait: frame_system::Trait { /// The maximum amount of proxies allowed for a single account. type MaxProxies: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } decl_storage! { diff --git a/frame/proxy/src/tests.rs b/frame/proxy/src/tests.rs index 63d5c9e575..11f11e24d4 100644 --- a/frame/proxy/src/tests.rs +++ b/frame/proxy/src/tests.rs @@ -86,6 +86,7 @@ impl frame_system::Trait for Test { type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; @@ -96,10 +97,12 @@ impl pallet_balances::Trait for Test { type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } impl pallet_utility::Trait for Test { type Event = TestEvent; type Call = Call; + type WeightInfo = (); } parameter_types! 
{ pub const ProxyDepositBase: u64 = 1; @@ -144,6 +147,7 @@ impl Trait for Test { type ProxyDepositBase = ProxyDepositBase; type ProxyDepositFactor = ProxyDepositFactor; type MaxProxies = MaxProxies; + type WeightInfo = (); } type System = frame_system::Module; diff --git a/frame/randomness-collective-flip/src/lib.rs b/frame/randomness-collective-flip/src/lib.rs index 0cf44de679..4f7c4ef5e8 100644 --- a/frame/randomness-collective-flip/src/lib.rs +++ b/frame/randomness-collective-flip/src/lib.rs @@ -182,6 +182,7 @@ mod tests { type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } type System = frame_system::Module; diff --git a/frame/recovery/src/mock.rs b/frame/recovery/src/mock.rs index 101778f3ea..b0030176bb 100644 --- a/frame/recovery/src/mock.rs +++ b/frame/recovery/src/mock.rs @@ -88,6 +88,7 @@ impl frame_system::Trait for Test { type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } parameter_types! { @@ -100,6 +101,7 @@ impl pallet_balances::Trait for Test { type Event = TestEvent; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } parameter_types! { diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index 1b3517382f..6940c1ca45 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -64,6 +64,22 @@ use frame_support::{ }; use frame_system::{self as system}; +pub trait WeightInfo { + fn schedule(s: u32, ) -> Weight; + fn cancel(s: u32, ) -> Weight; + fn schedule_named(s: u32, ) -> Weight; + fn cancel_named(s: u32, ) -> Weight; + fn on_initialize(s: u32, ) -> Weight; +} + +impl WeightInfo for () { + fn schedule(_s: u32, ) -> Weight { 1_000_000_000 } + fn cancel(_s: u32, ) -> Weight { 1_000_000_000 } + fn schedule_named(_s: u32, ) -> Weight { 1_000_000_000 } + fn cancel_named(_s: u32, ) -> Weight { 1_000_000_000 } + fn on_initialize(_s: u32, ) -> Weight { 1_000_000_000 } +} + /// Our pallet's configuration trait. All our types and constants go in here. If the /// pallet is dependent on specific other pallets, then their configuration traits /// should be added to our implied traits list. @@ -89,6 +105,9 @@ pub trait Trait: system::Trait { /// Required origin to schedule or cancel calls. type ScheduleOrigin: EnsureOrigin<::Origin>; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } /// Just a simple index for naming period tasks. @@ -655,6 +674,7 @@ mod tests { type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } impl logger::Trait for Test { type Event = (); @@ -673,6 +693,7 @@ mod tests { type Call = Call; type MaximumWeight = MaximumSchedulerWeight; type ScheduleOrigin = EnsureOneOf, EnsureSignedBy>; + type WeightInfo = (); } type System = system::Module; type Logger = logger::Module; diff --git a/frame/scored-pool/src/mock.rs b/frame/scored-pool/src/mock.rs index 87a56ca27d..9804f087f8 100644 --- a/frame/scored-pool/src/mock.rs +++ b/frame/scored-pool/src/mock.rs @@ -79,6 +79,7 @@ impl frame_system::Trait for Test { type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } impl pallet_balances::Trait for Test { @@ -87,6 +88,7 @@ impl pallet_balances::Trait for Test { type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } thread_local! 
{ diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs index ee04f1a046..ee99d72187 100644 --- a/frame/session/benchmarking/src/mock.rs +++ b/frame/session/benchmarking/src/mock.rs @@ -81,7 +81,8 @@ impl frame_system::Trait for Test { type ModuleToIndex = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); - type OnKilledAccount = (Balances,); + type OnKilledAccount = Balances; + type SystemWeightInfo = (); } parameter_types! { pub const ExistentialDeposit: Balance = 10; @@ -92,6 +93,7 @@ impl pallet_balances::Trait for Test { type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } parameter_types! { @@ -101,6 +103,7 @@ impl pallet_timestamp::Trait for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); } impl pallet_session::historical::Trait for Test { type FullIdentification = pallet_staking::Exposure; @@ -138,6 +141,7 @@ impl pallet_session::Trait for Test { type ValidatorId = AccountId; type ValidatorIdOf = pallet_staking::StashOf; type DisabledValidatorsThreshold = (); + type WeightInfo = (); } pallet_staking_reward_curve::build! { const I_NPOS: sp_runtime::curve::PiecewiseLinear<'static> = curve!( @@ -185,6 +189,7 @@ impl pallet_staking::Trait for Test { type UnsignedPriority = UnsignedPriority; type MaxIterations = (); type MinSolutionScoreBump = (); + type WeightInfo = (); } impl crate::Trait for Test {} diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index 0cd77af7c8..668d9b8328 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -351,6 +351,16 @@ impl ValidatorRegistration for Module { } } +pub trait WeightInfo { + fn set_keys(n: u32, ) -> Weight; + fn purge_keys(n: u32, ) -> Weight; +} + +impl WeightInfo for () { + fn set_keys(_n: u32, ) -> Weight { 1_000_000_000 } + fn purge_keys(_n: u32, ) -> Weight { 1_000_000_000 } +} + pub trait Trait: frame_system::Trait { /// The overarching event type. type Event: From + Into<::Event>; @@ -385,6 +395,9 @@ pub trait Trait: frame_system::Trait { /// After the threshold is reached `disabled` method starts to return true, /// which in combination with `pallet_staking` forces a new era. type DisabledValidatorsThreshold: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } decl_storage! { diff --git a/frame/session/src/mock.rs b/frame/session/src/mock.rs index 51ca3bc790..57991ad7c4 100644 --- a/frame/session/src/mock.rs +++ b/frame/session/src/mock.rs @@ -197,12 +197,14 @@ impl frame_system::Trait for Test { type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } impl pallet_timestamp::Trait for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); } parameter_types! 
{ @@ -222,6 +224,7 @@ impl Trait for Test { type Event = (); type DisabledValidatorsThreshold = DisabledValidatorsThreshold; type NextSessionRotation = (); + type WeightInfo = (); } #[cfg(feature = "historical")] diff --git a/frame/society/src/mock.rs b/frame/society/src/mock.rs index 89a0691b93..7273b02acd 100644 --- a/frame/society/src/mock.rs +++ b/frame/society/src/mock.rs @@ -89,6 +89,7 @@ impl frame_system::Trait for Test { type OnNewAccount = (); type OnKilledAccount = (); type AccountData = pallet_balances::AccountData; + type SystemWeightInfo = (); } impl pallet_balances::Trait for Test { @@ -97,6 +98,7 @@ impl pallet_balances::Trait for Test { type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } impl Trait for Test { diff --git a/frame/staking/fuzzer/src/mock.rs b/frame/staking/fuzzer/src/mock.rs index d1e471fadb..89213ea978 100644 --- a/frame/staking/fuzzer/src/mock.rs +++ b/frame/staking/fuzzer/src/mock.rs @@ -81,6 +81,7 @@ impl frame_system::Trait for Test { type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (Balances,); + type SystemWeightInfo = (); } parameter_types! { pub const ExistentialDeposit: Balance = 10; @@ -91,12 +92,14 @@ impl pallet_balances::Trait for Test { type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } impl pallet_indices::Trait for Test { type AccountIndex = AccountIndex; type Event = (); type Currency = Balances; type Deposit = (); + type WeightInfo = (); } parameter_types! { pub const MinimumPeriod: u64 = 5; @@ -105,6 +108,7 @@ impl pallet_timestamp::Trait for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); } impl pallet_session::historical::Trait for Test { type FullIdentification = pallet_staking::Exposure; @@ -142,6 +146,7 @@ impl pallet_session::Trait for Test { type ValidatorId = AccountId; type ValidatorIdOf = pallet_staking::StashOf; type DisabledValidatorsThreshold = (); + type WeightInfo = (); } pallet_staking_reward_curve::build! 
{ const I_NPOS: sp_runtime::curve::PiecewiseLinear<'static> = curve!( @@ -189,4 +194,5 @@ impl pallet_staking::Trait for Test { type MinSolutionScoreBump = (); type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type UnsignedPriority = (); + type WeightInfo = (); } diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 1049096887..924937bf36 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -829,6 +829,68 @@ pub mod weight { } } +pub trait WeightInfo { + fn bond(u: u32, ) -> Weight; + fn bond_extra(u: u32, ) -> Weight; + fn unbond(u: u32, ) -> Weight; + fn withdraw_unbonded_update(s: u32, ) -> Weight; + fn withdraw_unbonded_kill(s: u32, ) -> Weight; + fn validate(u: u32, ) -> Weight; + fn nominate(n: u32, ) -> Weight; + fn chill(u: u32, ) -> Weight; + fn set_payee(u: u32, ) -> Weight; + fn set_controller(u: u32, ) -> Weight; + fn set_validator_count(c: u32, ) -> Weight; + fn force_no_eras(i: u32, ) -> Weight; + fn force_new_era(i: u32, ) -> Weight; + fn force_new_era_always(i: u32, ) -> Weight; + fn set_invulnerables(v: u32, ) -> Weight; + fn force_unstake(s: u32, ) -> Weight; + fn cancel_deferred_slash(s: u32, ) -> Weight; + fn payout_stakers(n: u32, ) -> Weight; + fn payout_stakers_alive_controller(n: u32, ) -> Weight; + fn rebond(l: u32, ) -> Weight; + fn set_history_depth(e: u32, ) -> Weight; + fn reap_stash(s: u32, ) -> Weight; + fn new_era(v: u32, n: u32, ) -> Weight; + fn do_slash(l: u32, ) -> Weight; + fn payout_all(v: u32, n: u32, ) -> Weight; + fn submit_solution_initial(v: u32, n: u32, a: u32, w: u32, ) -> Weight; + fn submit_solution_better(v: u32, n: u32, a: u32, w: u32, ) -> Weight; + fn submit_solution_weaker(v: u32, n: u32, ) -> Weight; +} + +impl WeightInfo for () { + fn bond(_u: u32, ) -> Weight { 1_000_000_000 } + fn bond_extra(_u: u32, ) -> Weight { 1_000_000_000 } + fn unbond(_u: u32, ) -> Weight { 1_000_000_000 } + fn withdraw_unbonded_update(_s: u32, ) -> Weight { 1_000_000_000 } + fn withdraw_unbonded_kill(_s: u32, ) -> Weight { 1_000_000_000 } + fn validate(_u: u32, ) -> Weight { 1_000_000_000 } + fn nominate(_n: u32, ) -> Weight { 1_000_000_000 } + fn chill(_u: u32, ) -> Weight { 1_000_000_000 } + fn set_payee(_u: u32, ) -> Weight { 1_000_000_000 } + fn set_controller(_u: u32, ) -> Weight { 1_000_000_000 } + fn set_validator_count(_c: u32, ) -> Weight { 1_000_000_000 } + fn force_no_eras(_i: u32, ) -> Weight { 1_000_000_000 } + fn force_new_era(_i: u32, ) -> Weight { 1_000_000_000 } + fn force_new_era_always(_i: u32, ) -> Weight { 1_000_000_000 } + fn set_invulnerables(_v: u32, ) -> Weight { 1_000_000_000 } + fn force_unstake(_s: u32, ) -> Weight { 1_000_000_000 } + fn cancel_deferred_slash(_s: u32, ) -> Weight { 1_000_000_000 } + fn payout_stakers(_n: u32, ) -> Weight { 1_000_000_000 } + fn payout_stakers_alive_controller(_n: u32, ) -> Weight { 1_000_000_000 } + fn rebond(_l: u32, ) -> Weight { 1_000_000_000 } + fn set_history_depth(_e: u32, ) -> Weight { 1_000_000_000 } + fn reap_stash(_s: u32, ) -> Weight { 1_000_000_000 } + fn new_era(_v: u32, _n: u32, ) -> Weight { 1_000_000_000 } + fn do_slash(_l: u32, ) -> Weight { 1_000_000_000 } + fn payout_all(_v: u32, _n: u32, ) -> Weight { 1_000_000_000 } + fn submit_solution_initial(_v: u32, _n: u32, _a: u32, _w: u32, ) -> Weight { 1_000_000_000 } + fn submit_solution_better(_v: u32, _n: u32, _a: u32, _w: u32, ) -> Weight { 1_000_000_000 } + fn submit_solution_weaker(_v: u32, _n: u32, ) -> Weight { 1_000_000_000 } +} + pub trait Trait: frame_system::Trait + 
SendTransactionTypes> { /// The staking balance. type Currency: LockableCurrency; @@ -915,6 +977,9 @@ pub trait Trait: frame_system::Trait + SendTransactionTypes> { /// This is exposed so that it can be tuned for particular runtime, when /// multiple pallets send unsigned transactions. type UnsignedPriority: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } /// Mode of era-forcing. diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 3860dba90f..34f2d001a0 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -224,6 +224,7 @@ impl frame_system::Trait for Test { type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } impl pallet_balances::Trait for Test { type Balance = Balance; @@ -231,6 +232,7 @@ impl pallet_balances::Trait for Test { type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } parameter_types! { pub const Offset: BlockNumber = 0; @@ -252,6 +254,7 @@ impl pallet_session::Trait for Test { type ValidatorIdOf = crate::StashOf; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; type NextSessionRotation = pallet_session::PeriodicSessions; + type WeightInfo = (); } impl pallet_session::historical::Trait for Test { @@ -271,6 +274,7 @@ impl pallet_timestamp::Trait for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); } pallet_staking_reward_curve::build! { const I_NPOS: PiecewiseLinear<'static> = curve!( @@ -326,6 +330,7 @@ impl Trait for Test { type MinSolutionScoreBump = MinSolutionScoreBump; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type UnsignedPriority = UnsignedPriority; + type WeightInfo = (); } impl frame_system::offchain::SendTransactionTypes for Test where diff --git a/frame/sudo/src/mock.rs b/frame/sudo/src/mock.rs index 74612fa879..cc9c91f3a4 100644 --- a/frame/sudo/src/mock.rs +++ b/frame/sudo/src/mock.rs @@ -145,6 +145,7 @@ impl frame_system::Trait for Test { type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } // Implement the logger module's `Trait` on the Test runtime. 
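The `()` implementations introduced throughout this patch are placeholders that price every dispatchable at a flat 1_000_000_000 weight. Once benchmark results are available, a runtime can substitute its own type for `()`. The sketch below is purely illustrative: `SessionWeights` and its constants are invented here, only the `pallet_session::WeightInfo` trait itself comes from this patch.

```rust
use frame_support::weights::Weight;

/// Hypothetical benchmarked weights for pallet_session; the numbers are made up.
pub struct SessionWeights;

impl pallet_session::WeightInfo for SessionWeights {
    fn set_keys(n: u32) -> Weight {
        // Base cost plus a per-key component, as a benchmark run might produce.
        50_000_000 + 10_000_000 * Weight::from(n)
    }
    fn purge_keys(n: u32) -> Weight {
        40_000_000 + 8_000_000 * Weight::from(n)
    }
}

// Wired into a runtime or test configuration in place of the placeholder:
// impl pallet_session::Trait for Runtime {
//     /* ... */
//     type WeightInfo = SessionWeights;
// }
```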
diff --git a/frame/system/benches/bench.rs b/frame/system/benches/bench.rs index 56fd4b8c35..1b64b813e5 100644 --- a/frame/system/benches/bench.rs +++ b/frame/system/benches/bench.rs @@ -85,6 +85,7 @@ impl system::Trait for Runtime { type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } impl module::Trait for Runtime { diff --git a/frame/system/benchmarking/src/mock.rs b/frame/system/benchmarking/src/mock.rs index 9e41ff2016..c2c953fb97 100644 --- a/frame/system/benchmarking/src/mock.rs +++ b/frame/system/benchmarking/src/mock.rs @@ -75,6 +75,7 @@ impl frame_system::Trait for Test { type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } impl crate::Trait for Test {} diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 3536d6fc71..ad68e97d46 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -158,6 +158,28 @@ pub fn extrinsics_data_root(xts: Vec>) -> H::Output { H::ordered_trie_root(xts) } +pub trait WeightInfo { + fn remark(b: u32, ) -> Weight; + fn set_heap_pages(i: u32, ) -> Weight; + fn set_code_without_checks(b: u32, ) -> Weight; + fn set_changes_trie_config(d: u32, ) -> Weight; + fn set_storage(i: u32, ) -> Weight; + fn kill_storage(i: u32, ) -> Weight; + fn kill_prefix(p: u32, ) -> Weight; + fn suicide(n: u32, ) -> Weight; +} + +impl WeightInfo for () { + fn remark(_b: u32, ) -> Weight { 1_000_000_000 } + fn set_heap_pages(_i: u32, ) -> Weight { 1_000_000_000 } + fn set_code_without_checks(_b: u32, ) -> Weight { 1_000_000_000 } + fn set_changes_trie_config(_d: u32, ) -> Weight { 1_000_000_000 } + fn set_storage(_i: u32, ) -> Weight { 1_000_000_000 } + fn kill_storage(_i: u32, ) -> Weight { 1_000_000_000 } + fn kill_prefix(_p: u32, ) -> Weight { 1_000_000_000 } + fn suicide(_n: u32, ) -> Weight { 1_000_000_000 } +} + pub trait Trait: 'static + Eq + Clone { /// The basic call filter to use in Origin. All origins are built with this filter as base, /// except Root. @@ -262,6 +284,8 @@ pub trait Trait: 'static + Eq + Clone { /// /// All resources should be cleaned up associated with the given account. type OnKilledAccount: OnKilledAccount; + + type SystemWeightInfo: WeightInfo; } pub type DigestOf = generic::Digest<::Hash>; diff --git a/frame/system/src/mock.rs b/frame/system/src/mock.rs index 0484b34ba3..d7c4d1c9e7 100644 --- a/frame/system/src/mock.rs +++ b/frame/system/src/mock.rs @@ -105,6 +105,7 @@ impl Trait for Test { type AccountData = u32; type OnNewAccount = (); type OnKilledAccount = RecordKilled; + type SystemWeightInfo = (); } pub type System = Module; diff --git a/frame/timestamp/src/lib.rs b/frame/timestamp/src/lib.rs index db15166e17..efcd440f90 100644 --- a/frame/timestamp/src/lib.rs +++ b/frame/timestamp/src/lib.rs @@ -115,6 +115,16 @@ use sp_timestamp::{ OnTimestampSet, }; +pub trait WeightInfo { + fn set(t: u32, ) -> Weight; + fn on_finalize(t: u32, ) -> Weight; +} + +impl WeightInfo for () { + fn set(_t: u32, ) -> Weight { 1_000_000_000 } + fn on_finalize(_t: u32, ) -> Weight { 1_000_000_000 } +} + /// The module configuration trait pub trait Trait: frame_system::Trait { /// Type used for expressing timestamp. @@ -129,6 +139,9 @@ pub trait Trait: frame_system::Trait { /// work with this to determine a sensible block time. e.g. For Aura, it will be double this /// period on default settings. type MinimumPeriod: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } decl_module! 
{ @@ -338,6 +351,7 @@ mod tests { type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } parameter_types! { pub const MinimumPeriod: u64 = 5; @@ -346,6 +360,7 @@ mod tests { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); } type Timestamp = Module; diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index b993a85da3..96fbd1068d 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -581,6 +581,7 @@ mod tests { type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } parameter_types! { @@ -593,6 +594,7 @@ mod tests { type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } thread_local! { static TRANSACTION_BYTE_FEE: RefCell = RefCell::new(1); diff --git a/frame/treasury/src/lib.rs b/frame/treasury/src/lib.rs index bb139c4cc6..0b6f9cb7fc 100644 --- a/frame/treasury/src/lib.rs +++ b/frame/treasury/src/lib.rs @@ -111,6 +111,30 @@ type BalanceOf = <::Currency as Currency< = <::Currency as Currency<::AccountId>>::PositiveImbalance; type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +pub trait WeightInfo { + fn propose_spend(u: u32, ) -> Weight; + fn reject_proposal(u: u32, ) -> Weight; + fn approve_proposal(u: u32, ) -> Weight; + fn report_awesome(r: u32, ) -> Weight; + fn retract_tip(r: u32, ) -> Weight; + fn tip_new(r: u32, t: u32, ) -> Weight; + fn tip(t: u32, ) -> Weight; + fn close_tip(t: u32, ) -> Weight; + fn on_initialize(p: u32, ) -> Weight; +} + +impl WeightInfo for () { + fn propose_spend(_u: u32, ) -> Weight { 1_000_000_000 } + fn reject_proposal(_u: u32, ) -> Weight { 1_000_000_000 } + fn approve_proposal(_u: u32, ) -> Weight { 1_000_000_000 } + fn report_awesome(_r: u32, ) -> Weight { 1_000_000_000 } + fn retract_tip(_r: u32, ) -> Weight { 1_000_000_000 } + fn tip_new(_r: u32, _t: u32, ) -> Weight { 1_000_000_000 } + fn tip(_t: u32, ) -> Weight { 1_000_000_000 } + fn close_tip(_t: u32, ) -> Weight { 1_000_000_000 } + fn on_initialize(_p: u32, ) -> Weight { 1_000_000_000 } +} + pub trait Trait: frame_system::Trait { /// The treasury's module id, used for deriving its sovereign account ID. type ModuleId: Get; @@ -159,6 +183,9 @@ pub trait Trait: frame_system::Trait { /// Percentage of spare funds (if any) that are burnt per spend period. type Burn: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } /// An index of a proposal. Just a `u32`. diff --git a/frame/treasury/src/tests.rs b/frame/treasury/src/tests.rs index 68820ffd5d..2fa960f1c7 100644 --- a/frame/treasury/src/tests.rs +++ b/frame/treasury/src/tests.rs @@ -84,6 +84,7 @@ impl frame_system::Trait for Test { type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; @@ -94,6 +95,7 @@ impl pallet_balances::Trait for Test { type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } thread_local! 
{ static TEN_TO_FOURTEEN: RefCell> = RefCell::new(vec![10,11,12,13,14]); @@ -147,6 +149,7 @@ impl Trait for Test { type ProposalBondMinimum = ProposalBondMinimum; type SpendPeriod = SpendPeriod; type Burn = Burn; + type WeightInfo = (); } type System = frame_system::Module; type Balances = pallet_balances::Module; diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index ab50cf213b..0b2697f4e4 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -70,6 +70,16 @@ use sp_runtime::{DispatchError, DispatchResult, traits::Dispatchable}; mod tests; mod benchmarking; +pub trait WeightInfo { + fn batch(c: u32, ) -> Weight; + fn as_derivative(u: u32, ) -> Weight; +} + +impl WeightInfo for () { + fn batch(_c: u32, ) -> Weight { 1_000_000_000 } + fn as_derivative(_u: u32, ) -> Weight { 1_000_000_000 } +} + /// Configuration trait. pub trait Trait: frame_system::Trait { /// The overarching event type. @@ -79,6 +89,9 @@ pub trait Trait: frame_system::Trait { type Call: Parameter + Dispatchable + GetDispatchInfo + From> + UnfilteredDispatchable; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } decl_storage! { diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index bf04378e54..6de70506e4 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -83,6 +83,7 @@ impl frame_system::Trait for Test { type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; @@ -93,6 +94,7 @@ impl pallet_balances::Trait for Test { type Event = TestEvent; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } parameter_types! { pub const MultisigDepositBase: u64 = 1; @@ -113,6 +115,7 @@ impl Filter for TestBaseCallFilter { impl Trait for Test { type Event = TestEvent; type Call = Call; + type WeightInfo = (); } type System = frame_system::Module; type Balances = pallet_balances::Module; diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index 32fa8ce441..cc9cbfac93 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -53,10 +53,10 @@ use codec::{Encode, Decode}; use sp_runtime::{DispatchResult, RuntimeDebug, traits::{ StaticLookup, Zero, AtLeast32BitUnsigned, MaybeSerializeDeserialize, Convert }}; -use frame_support::{decl_module, decl_event, decl_storage, decl_error, ensure}; +use frame_support::{decl_module, decl_event, decl_storage, decl_error, ensure, weights::Weight}; use frame_support::traits::{ Currency, LockableCurrency, VestingSchedule, WithdrawReason, LockIdentifier, - ExistenceRequirement, Get + ExistenceRequirement, Get, }; use frame_system::{ensure_signed, ensure_root}; @@ -64,6 +64,22 @@ mod benchmarking; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +pub trait WeightInfo { + fn vest_locked(l: u32, ) -> Weight; + fn vest_unlocked(l: u32, ) -> Weight; + fn vest_other_locked(l: u32, ) -> Weight; + fn vest_other_unlocked(l: u32, ) -> Weight; + fn vested_transfer(l: u32, ) -> Weight; +} + +impl WeightInfo for () { + fn vest_locked(_l: u32, ) -> Weight { 1_000_000_000 } + fn vest_unlocked(_l: u32, ) -> Weight { 1_000_000_000 } + fn vest_other_locked(_l: u32, ) -> Weight { 1_000_000_000 } + fn vest_other_unlocked(_l: u32, ) -> Weight { 1_000_000_000 } + fn vested_transfer(_l: u32, ) -> Weight { 1_000_000_000 } +} + pub trait Trait: frame_system::Trait { /// The overarching event type. 
type Event: From> + Into<::Event>; @@ -76,6 +92,9 @@ pub trait Trait: frame_system::Trait { /// The minimum amount transferred to call `vested_transfer`. type MinVestedTransfer: Get>; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } const VESTING_ID: LockIdentifier = *b"vesting "; @@ -446,6 +465,7 @@ mod tests { type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } impl pallet_balances::Trait for Test { type Balance = u64; @@ -453,6 +473,7 @@ mod tests { type Event = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } parameter_types! { pub const MinVestedTransfer: u64 = 256 * 2; @@ -462,6 +483,7 @@ mod tests { type Currency = Balances; type BlockNumberToBalance = Identity; type MinVestedTransfer = MinVestedTransfer; + type WeightInfo = (); } type System = frame_system::Module; type Balances = pallet_balances::Module; diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 2b94828e25..002658fe97 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -445,6 +445,7 @@ impl frame_system::Trait for Runtime { type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } impl pallet_timestamp::Trait for Runtime { @@ -452,6 +453,7 @@ impl pallet_timestamp::Trait for Runtime { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); } parameter_types! { -- GitLab From c9b08fac3add46d9c2e99866b67933bb452576a2 Mon Sep 17 00:00:00 2001 From: David Craven Date: Thu, 9 Jul 2020 04:46:53 +0200 Subject: [PATCH 138/144] Fix build when with-kvdb-rocksdb is disabled. (#6546) --- client/db/src/utils.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index d66d5abfea..b531001cf9 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -36,7 +36,7 @@ use crate::{DatabaseSettings, DatabaseSettingsSrc, Database, DbHash}; /// Number of columns in the db. Must be the same for both full && light dbs. /// Otherwise RocksDb will fail to open database && check its type. -#[cfg(any(feature = "with-kvdb-rocksdb", feature = "test-helpers", test))] +#[cfg(any(feature = "with-kvdb-rocksdb", feature = "with-parity-db", feature = "test-helpers", test))] pub const NUM_COLUMNS: u32 = 11; /// Meta column. The set of keys in the column is shared by full && light storages. 
pub const COLUMN_META: u32 = 0; -- GitLab From 467dd0594c609f7cbe462b49a72d9c7d749c446e Mon Sep 17 00:00:00 2001 From: Alan Sapede Date: Thu, 9 Jul 2020 05:51:21 -0400 Subject: [PATCH 139/144] Make evm errors public (#6598) --- frame/evm/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/frame/evm/src/lib.rs b/frame/evm/src/lib.rs index bb08592ecd..e2d99827dc 100644 --- a/frame/evm/src/lib.rs +++ b/frame/evm/src/lib.rs @@ -40,7 +40,8 @@ use sp_runtime::{ DispatchResult, traits::{UniqueSaturatedInto, AccountIdConversion, SaturatedConversion}, }; use sha3::{Digest, Keccak256}; -use evm::{ExitReason, ExitSucceed, ExitError, Config}; +pub use evm::{ExitReason, ExitSucceed, ExitError, ExitRevert, ExitFatal}; +use evm::Config; use evm::executor::StackExecutor; use evm::backend::ApplyBackend; -- GitLab From b9d96fabd2462cb42862fd0a117ed42bb98401aa Mon Sep 17 00:00:00 2001 From: Cecile Tonglet Date: Thu, 9 Jul 2020 12:10:34 +0200 Subject: [PATCH 140/144] Improved send_transaction helper to return an error in case of error (#6592) --- Cargo.lock | 6 +- test-utils/client/Cargo.toml | 22 +++--- test-utils/client/src/lib.rs | 138 +++++++++++++++++++++++++++++++---- 3 files changed, 139 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 383feaa0ad..72850236af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7312,9 +7312,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.51" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da07b57ee2623368351e9a0488bb0b261322a15a6e0ae53e243cbdc0f4208da9" +checksum = "3433e879a558dde8b5e8feb2a04899cf34fdde1fafb894687e52105fc1162ac3" dependencies = [ "itoa", "ryu", @@ -8473,6 +8473,8 @@ dependencies = [ "sc-executor", "sc-light", "sc-service", + "serde", + "serde_json", "sp-blockchain", "sp-consensus", "sp-core", diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index e9036bc77a..04fd898a70 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -12,20 +12,22 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-client-api = { version = "2.0.0-rc4", path = "../../client/api" } -sc-light = { version = "2.0.0-rc4", path = "../../client/light" } -sc-client-db = { version = "0.8.0-rc4", features = ["test-helpers"], path = "../../client/db" } -sp-consensus = { version = "0.8.0-rc4", path = "../../primitives/consensus/common" } -sc-executor = { version = "0.8.0-rc4", path = "../../client/executor" } -sc-consensus = { version = "0.8.0-rc4", path = "../../client/consensus/common" } -sc-service = { version = "0.8.0-rc4", default-features = false, features = ["test-helpers"], path = "../../client/service" } +codec = { package = "parity-scale-codec", version = "1.3.1" } futures = "0.3.4" futures01 = { package = "futures", version = "0.1.29" } hash-db = "0.15.2" hex = "0.4" -sp-keyring = { version = "2.0.0-rc4", path = "../../primitives/keyring" } -codec = { package = "parity-scale-codec", version = "1.3.1" } +serde = "1.0.55" +serde_json = "1.0.55" +sc-client-api = { version = "2.0.0-rc4", path = "../../client/api" } +sc-client-db = { version = "0.8.0-rc4", features = ["test-helpers"], path = "../../client/db" } +sc-consensus = { version = "0.8.0-rc4", path = "../../client/consensus/common" } +sc-executor = { version = "0.8.0-rc4", path = "../../client/executor" } +sc-light = { version = "2.0.0-rc4", path = "../../client/light" } +sc-service = { version = "0.8.0-rc4", default-features = false, 
features = ["test-helpers"], path = "../../client/service" } +sp-blockchain = { version = "2.0.0-rc4", path = "../../primitives/blockchain" } +sp-consensus = { version = "0.8.0-rc4", path = "../../primitives/consensus/common" } sp-core = { version = "2.0.0-rc4", path = "../../primitives/core" } +sp-keyring = { version = "2.0.0-rc4", path = "../../primitives/keyring" } sp-runtime = { version = "2.0.0-rc4", path = "../../primitives/runtime" } -sp-blockchain = { version = "2.0.0-rc4", path = "../../primitives/blockchain" } sp-state-machine = { version = "0.8.0-rc4", path = "../../primitives/state-machine" } diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index fef9acd9d2..fd5b0e2919 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -43,6 +43,7 @@ use std::pin::Pin; use std::sync::Arc; use std::collections::{HashSet, HashMap}; use futures::{future::{Future, FutureExt}, stream::StreamExt}; +use serde::Deserialize; use sp_core::storage::ChildInfo; use sp_runtime::{OpaqueExtrinsic, codec::Encode, traits::{Block as BlockT, BlakeTwo256}}; use sc_service::client::{LocalCallExecutor, ClientConfig}; @@ -259,32 +260,53 @@ impl TestClientBuilder< } } +/// The output of an RPC transaction. +pub struct RpcTransactionOutput { + /// The output string of the transaction if any. + pub result: Option, + /// The session object. + pub session: RpcSession, + /// An async receiver if data will be returned via a callback. + pub receiver: futures01::sync::mpsc::Receiver, +} + +impl std::fmt::Debug for RpcTransactionOutput { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "RpcTransactionOutput {{ result: {:?}, session, receiver }}", self.result) + } +} + +/// An error for when the RPC call fails. +#[derive(Deserialize, Debug)] +pub struct RpcTransactionError { + /// A Number that indicates the error type that occurred. + pub code: i64, + /// A String providing a short description of the error. + pub message: String, + /// A Primitive or Structured value that contains additional information about the error. + pub data: Option, +} + +impl std::fmt::Display for RpcTransactionError { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + std::fmt::Debug::fmt(self, f) + } +} + /// An extension trait for `RpcHandlers`. pub trait RpcHandlersExt { /// Send a transaction through the RpcHandlers. 
fn send_transaction( &self, extrinsic: OpaqueExtrinsic, - ) -> Pin, - RpcSession, - futures01::sync::mpsc::Receiver, - ), - > + Send>>; + ) -> Pin> + Send>>; } impl RpcHandlersExt for RpcHandlers { fn send_transaction( &self, extrinsic: OpaqueExtrinsic, - ) -> Pin, - RpcSession, - futures01::sync::mpsc::Receiver, - ), - > + Send>> { + ) -> Pin> + Send>> { let (tx, rx) = futures01::sync::mpsc::channel(0); let mem = RpcSession::new(tx.into()); Box::pin(self @@ -300,10 +322,39 @@ impl RpcHandlersExt for RpcHandlers { hex::encode(extrinsic.encode()) ), ) - .map(move |res| (res, mem, rx))) + .map(move |result| parse_rpc_result(result, mem, rx)) + ) } } +pub(crate) fn parse_rpc_result( + result: Option, + session: RpcSession, + receiver: futures01::sync::mpsc::Receiver, +) -> Result { + if let Some(ref result) = result { + let json: serde_json::Value = serde_json::from_str(result) + .expect("the result can only be a JSONRPC string; qed"); + let error = json + .as_object() + .expect("JSON result is always an object; qed") + .get("error"); + + if let Some(error) = error { + return Err( + serde_json::from_value(error.clone()) + .expect("the JSONRPC result's error is always valid; qed") + ) + } + } + + Ok(RpcTransactionOutput { + result, + session, + receiver, + }) +} + /// An extension trait for `BlockchainEvents`. pub trait BlockchainEventsExt where @@ -336,3 +387,60 @@ where }) } } + +#[cfg(test)] +mod tests { + use sc_service::RpcSession; + + fn create_session_and_receiver() -> (RpcSession, futures01::sync::mpsc::Receiver) { + let (tx, rx) = futures01::sync::mpsc::channel(0); + let mem = RpcSession::new(tx.into()); + + (mem, rx) + } + + #[test] + fn parses_error_properly() { + let (mem, rx) = create_session_and_receiver(); + assert!(super::parse_rpc_result(None, mem, rx).is_ok()); + + let (mem, rx) = create_session_and_receiver(); + assert!( + super::parse_rpc_result(Some(r#"{ + "jsonrpc": "2.0", + "result": 19, + "id": 1 + }"#.to_string()), mem, rx) + .is_ok(), + ); + + let (mem, rx) = create_session_and_receiver(); + let error = super::parse_rpc_result(Some(r#"{ + "jsonrpc": "2.0", + "error": { + "code": -32601, + "message": "Method not found" + }, + "id": 1 + }"#.to_string()), mem, rx) + .unwrap_err(); + assert_eq!(error.code, -32601); + assert_eq!(error.message, "Method not found"); + assert!(error.data.is_none()); + + let (mem, rx) = create_session_and_receiver(); + let error = super::parse_rpc_result(Some(r#"{ + "jsonrpc": "2.0", + "error": { + "code": -32601, + "message": "Method not found", + "data": 42 + }, + "id": 1 + }"#.to_string()), mem, rx) + .unwrap_err(); + assert_eq!(error.code, -32601); + assert_eq!(error.message, "Method not found"); + assert!(error.data.is_some()); + } +} -- GitLab From d0e36db99a134f5fc92b9998669acfd23c8d03a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 9 Jul 2020 14:28:05 +0200 Subject: [PATCH 141/144] Rename `CheckEra` to `CheckMortality` (#6619) --- frame/system/src/extensions/check_mortality.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/frame/system/src/extensions/check_mortality.rs b/frame/system/src/extensions/check_mortality.rs index cc7496df9a..7e3f65d032 100644 --- a/frame/system/src/extensions/check_mortality.rs +++ b/frame/system/src/extensions/check_mortality.rs @@ -54,8 +54,7 @@ impl SignedExtension for CheckMortality { type Call = T::Call; type AdditionalSigned = T::Hash; type Pre = (); - // TODO [#6483] rename to CheckMortality - const IDENTIFIER: &'static str = "CheckEra"; + const 
IDENTIFIER: &'static str = "CheckMortality"; fn validate( &self, -- GitLab From 3a3f550d09ab179102a7fc4754c0c12256b1a6ef Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 9 Jul 2020 14:32:55 +0200 Subject: [PATCH 142/144] Add an authority_discovery_known_authorities_count metric (#6614) --- client/authority-discovery/src/addr_cache.rs | 5 +++++ client/authority-discovery/src/lib.rs | 13 +++++++++++++ 2 files changed, 18 insertions(+) diff --git a/client/authority-discovery/src/addr_cache.rs b/client/authority-discovery/src/addr_cache.rs index 96f589c5d3..0a27c1c443 100644 --- a/client/authority-discovery/src/addr_cache.rs +++ b/client/authority-discovery/src/addr_cache.rs @@ -68,6 +68,11 @@ where self.cache.insert(id, addresses); } + /// Returns the number of authority IDs in the cache. + pub fn num_ids(&self) -> usize { + self.cache.len() + } + // Each node should connect to a subset of all authorities. In order to prevent hot spots, this // selection is based on randomness. Selecting randomly each time we alter the address cache // would result in connection churn. To reduce this churn a node generates a seed on startup and diff --git a/client/authority-discovery/src/lib.rs b/client/authority-discovery/src/lib.rs index ba1c9f0fa8..1a4473d665 100644 --- a/client/authority-discovery/src/lib.rs +++ b/client/authority-discovery/src/lib.rs @@ -481,6 +481,11 @@ where if !remote_addresses.is_empty() { self.addr_cache.insert(authority_id.clone(), remote_addresses); + if let Some(metrics) = &self.metrics { + metrics.known_authorities_count.set( + self.addr_cache.num_ids().try_into().unwrap_or(std::u64::MAX) + ); + } self.update_peer_set_priority_group()?; } @@ -651,6 +656,7 @@ pub(crate) struct Metrics { request: Counter, dht_event_received: CounterVec, handle_value_found_event_failure: Counter, + known_authorities_count: Gauge, priority_group_size: Gauge, } @@ -697,6 +703,13 @@ impl Metrics { )?, registry, )?, + known_authorities_count: register( + Gauge::new( + "authority_discovery_known_authorities_count", + "Number of authorities known by authority discovery." 
+ )?, + registry, + )?, priority_group_size: register( Gauge::new( "authority_discovery_priority_group_size", -- GitLab From 7f12fd923bcad41b621c61f32bee38a122dfcedf Mon Sep 17 00:00:00 2001 From: Cecile Tonglet Date: Thu, 9 Jul 2020 14:58:29 +0200 Subject: [PATCH 143/144] Clean exit when no space left on device (#6339) Fixes #6305 --- Cargo.lock | 1 + client/api/src/leaves.rs | 6 ++--- client/db/src/cache/mod.rs | 3 ++- client/db/src/changes_tries_storage.rs | 2 +- client/db/src/children.rs | 4 +-- client/db/src/lib.rs | 10 +++---- client/db/src/light.rs | 10 ++++--- client/db/src/offchain.rs | 9 +++++-- client/db/src/parity_db.rs | 6 +++-- client/db/src/utils.rs | 2 +- primitives/blockchain/Cargo.toml | 1 + primitives/blockchain/src/error.rs | 2 ++ primitives/database/src/error.rs | 35 ++++++++++++++++++++++++ primitives/database/src/kvdb.rs | 6 ++--- primitives/database/src/lib.rs | 37 ++++++++++++++------------ primitives/database/src/mem.rs | 6 +++-- 16 files changed, 97 insertions(+), 43 deletions(-) create mode 100644 primitives/database/src/error.rs diff --git a/Cargo.lock b/Cargo.lock index 72850236af..0499f75553 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7654,6 +7654,7 @@ dependencies = [ "parking_lot 0.10.2", "sp-block-builder", "sp-consensus", + "sp-database", "sp-runtime", "sp-state-machine", ] diff --git a/client/api/src/leaves.rs b/client/api/src/leaves.rs index 25f9f3d29b..d10fa7ac0e 100644 --- a/client/api/src/leaves.rs +++ b/client/api/src/leaves.rs @@ -314,7 +314,7 @@ mod tests { let mut tx = Transaction::new(); set.prepare_transaction(&mut tx, 0, PREFIX); - db.commit(tx); + db.commit(tx).unwrap(); let set2 = LeafSet::read_from_db(&*db, 0, PREFIX).unwrap(); assert_eq!(set, set2); @@ -348,12 +348,12 @@ mod tests { let mut tx = Transaction::new(); set.prepare_transaction(&mut tx, 0, PREFIX); - db.commit(tx); + db.commit(tx).unwrap(); let _ = set.finalize_height(11); let mut tx = Transaction::new(); set.prepare_transaction(&mut tx, 0, PREFIX); - db.commit(tx); + db.commit(tx).unwrap(); assert!(set.contains(11, 11_1)); assert!(set.contains(11, 11_2)); diff --git a/client/db/src/cache/mod.rs b/client/db/src/cache/mod.rs index 2b7cd2e620..5501f0f186 100644 --- a/client/db/src/cache/mod.rs +++ b/client/db/src/cache/mod.rs @@ -342,8 +342,9 @@ impl BlockchainCache for DbCacheSync { EntryType::Genesis, )?; let tx_ops = tx.into_ops(); - db.commit(dbtx); + db.commit(dbtx)?; cache.commit(tx_ops)?; + Ok(()) } diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index 958e6e39f4..a2299a8233 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -719,7 +719,7 @@ mod tests { None, None, ).unwrap(); - backend.storage.db.commit(tx); + backend.storage.db.commit(tx).unwrap(); backend.changes_tries_storage.post_commit(Some(cache_ops)); }; diff --git a/client/db/src/children.rs b/client/db/src/children.rs index 3916321f17..bfba797cd4 100644 --- a/client/db/src/children.rs +++ b/client/db/src/children.rs @@ -99,7 +99,7 @@ mod tests { children2.push(1_6); write_children(&mut tx, 0, PREFIX, 1_2, children2); - db.commit(tx.clone()); + db.commit(tx.clone()).unwrap(); let r1: Vec = read_children(&*db, 0, PREFIX, 1_1).expect("(1) Getting r1 failed"); let r2: Vec = read_children(&*db, 0, PREFIX, 1_2).expect("(1) Getting r2 failed"); @@ -108,7 +108,7 @@ mod tests { assert_eq!(r2, vec![1_4, 1_6]); remove_children(&mut tx, 0, PREFIX, 1_2); - db.commit(tx); + db.commit(tx).unwrap(); let r1: Vec = read_children(&*db, 0, 
PREFIX, 1_1).expect("(2) Getting r1 failed"); let r2: Vec = read_children(&*db, 0, PREFIX, 1_2).expect("(2) Getting r2 failed"); diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index b4f4892a04..7cfde1e1d9 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1243,7 +1243,7 @@ impl Backend { None }; - self.storage.db.commit(transaction); + self.storage.db.commit(transaction)?; if let Some(( number, @@ -1356,7 +1356,7 @@ impl sc_client_api::backend::AuxStore for Backend where Block: Blo for k in delete { transaction.remove(columns::AUX, k); } - self.storage.db.commit(transaction); + self.storage.db.commit(transaction)?; Ok(()) } @@ -1438,7 +1438,7 @@ impl sc_client_api::backend::Backend for Backend { &mut changes_trie_cache_ops, &mut displaced, )?; - self.storage.db.commit(transaction); + self.storage.db.commit(transaction)?; self.blockchain.update_meta(hash, number, is_best, is_finalized); self.changes_tries_storage.post_commit(changes_trie_cache_ops); Ok(()) @@ -1536,7 +1536,7 @@ impl sc_client_api::backend::Backend for Backend { transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, key); transaction.remove(columns::KEY_LOOKUP, removed.hash().as_ref()); children::remove_children(&mut transaction, columns::META, meta_keys::CHILDREN_PREFIX, best_hash); - self.storage.db.commit(transaction); + self.storage.db.commit(transaction)?; self.changes_tries_storage.post_commit(Some(changes_trie_cache_ops)); self.blockchain.update_meta(best_hash, best_number, true, update_finalized); } @@ -1555,7 +1555,7 @@ impl sc_client_api::backend::Backend for Backend { leaves.revert(best_hash, best_number); leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); - self.storage.db.commit(transaction); + self.storage.db.commit(transaction)?; Ok(()) }; diff --git a/client/db/src/light.rs b/client/db/src/light.rs index f115ac9599..3dc6453cd9 100644 --- a/client/db/src/light.rs +++ b/client/db/src/light.rs @@ -402,7 +402,8 @@ impl AuxStore for LightStorage for k in delete { transaction.remove(columns::AUX, k); } - self.db.commit(transaction); + self.db.commit(transaction)?; + Ok(()) } @@ -495,7 +496,7 @@ impl Storage for LightStorage debug!("Light DB Commit {:?} ({})", hash, number); - self.db.commit(transaction); + self.db.commit(transaction)?; cache.commit(cache_ops) .expect("only fails if cache with given name isn't loaded yet;\ cache is already loaded because there are cache_ops; qed"); @@ -513,8 +514,9 @@ impl Storage for LightStorage let mut transaction = Transaction::new(); self.set_head_with_transaction(&mut transaction, hash.clone(), (number.clone(), hash.clone()))?; - self.db.commit(transaction); + self.db.commit(transaction)?; self.update_meta(hash, header.number().clone(), true, false); + Ok(()) } else { Err(ClientError::UnknownBlock(format!("Cannot set head {:?}", id))) @@ -552,7 +554,7 @@ impl Storage for LightStorage )? 
.into_ops(); - self.db.commit(transaction); + self.db.commit(transaction)?; cache.commit(cache_ops) .expect("only fails if cache with given name isn't loaded yet;\ cache is already loaded because there are cache_ops; qed"); diff --git a/client/db/src/offchain.rs b/client/db/src/offchain.rs index f6a0925a08..c4f0ce115c 100644 --- a/client/db/src/offchain.rs +++ b/client/db/src/offchain.rs @@ -25,6 +25,7 @@ use std::{ use crate::{columns, Database, DbHash, Transaction}; use parking_lot::Mutex; +use log::error; /// Offchain local storage #[derive(Clone)] @@ -64,7 +65,9 @@ impl sp_core::offchain::OffchainStorage for LocalStorage { let mut tx = Transaction::new(); tx.set(columns::OFFCHAIN, &key, value); - self.db.commit(tx); + if let Err(err) = self.db.commit(tx) { + error!("Error setting on local storage: {}", err) + } } fn remove(&mut self, prefix: &[u8], key: &[u8]) { @@ -72,7 +75,9 @@ impl sp_core::offchain::OffchainStorage for LocalStorage { let mut tx = Transaction::new(); tx.remove(columns::OFFCHAIN, &key); - self.db.commit(tx); + if let Err(err) = self.db.commit(tx) { + error!("Error removing on local storage: {}", err) + } } fn get(&self, prefix: &[u8], key: &[u8]) -> Option> { diff --git a/client/db/src/parity_db.rs b/client/db/src/parity_db.rs index ad1c6c7656..7085aa3bf8 100644 --- a/client/db/src/parity_db.rs +++ b/client/db/src/parity_db.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . /// A `Database` adapter for parity-db. -use sp_database::{Database, Change, Transaction, ColumnId}; +use sp_database::{Database, Change, ColumnId, Transaction, error::DatabaseError}; use crate::utils::NUM_COLUMNS; use crate::columns; @@ -44,7 +44,7 @@ pub fn open(path: &std::path::Path) -> parity_db::Result Database for DbAdapter { - fn commit(&self, transaction: Transaction) { + fn commit(&self, transaction: Transaction) -> Result<(), DatabaseError> { handle_err(self.0.commit(transaction.0.into_iter().map(|change| match change { Change::Set(col, key, value) => (col as u8, key, Some(value)), @@ -52,6 +52,8 @@ impl Database for DbAdapter { _ => unimplemented!(), })) ); + + Ok(()) } fn get(&self, col: ColumnId, key: &[u8]) -> Option> { diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index b531001cf9..c25b978be0 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -297,7 +297,7 @@ pub fn check_database_type(db: &dyn Database, db_type: DatabaseType) -> None => { let mut transaction = Transaction::new(); transaction.set(COLUMN_META, meta_keys::TYPE, db_type.as_str().as_bytes()); - db.commit(transaction) + db.commit(transaction)?; }, } diff --git a/primitives/blockchain/Cargo.toml b/primitives/blockchain/Cargo.toml index 956ae1a8fc..0ce19cba33 100644 --- a/primitives/blockchain/Cargo.toml +++ b/primitives/blockchain/Cargo.toml @@ -22,3 +22,4 @@ sp-consensus = { version = "0.8.0-rc4", path = "../consensus/common" } sp-runtime = { version = "2.0.0-rc4", path = "../runtime" } sp-block-builder = { version = "2.0.0-rc4", path = "../block-builder" } sp-state-machine = { version = "0.8.0-rc4", path = "../state-machine" } +sp-database = { version = "2.0.0-rc4", path = "../database" } diff --git a/primitives/blockchain/src/error.rs b/primitives/blockchain/src/error.rs index 17c276d870..bc412e8358 100644 --- a/primitives/blockchain/src/error.rs +++ b/primitives/blockchain/src/error.rs @@ -130,6 +130,8 @@ pub enum Error { IncompletePipeline, #[display(fmt = "Transaction pool not ready for block production.")] TransactionPoolNotReady, + #[display(fmt = "Database: {}", _0)] + 
DatabaseError(sp_database::error::DatabaseError), /// A convenience variant for String #[display(fmt = "{}", _0)] Msg(String), diff --git a/primitives/database/src/error.rs b/primitives/database/src/error.rs new file mode 100644 index 0000000000..2e5d4557a9 --- /dev/null +++ b/primitives/database/src/error.rs @@ -0,0 +1,35 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/// The error type for database operations. +#[derive(Debug)] +pub struct DatabaseError(pub Box); + +impl std::fmt::Display for DatabaseError { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl std::error::Error for DatabaseError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + None + } +} + +/// A specialized `Result` type for database operations. +pub type Result = std::result::Result; diff --git a/primitives/database/src/kvdb.rs b/primitives/database/src/kvdb.rs index e05320deed..f436979aaf 100644 --- a/primitives/database/src/kvdb.rs +++ b/primitives/database/src/kvdb.rs @@ -19,7 +19,7 @@ use ::kvdb::{DBTransaction, KeyValueDB}; -use crate::{Database, Change, Transaction, ColumnId}; +use crate::{Database, Change, ColumnId, Transaction, error}; struct DbAdapter(D); @@ -38,7 +38,7 @@ pub fn as_database(db: D) -> std::sync::Arc Database for DbAdapter { - fn commit(&self, transaction: Transaction) { + fn commit(&self, transaction: Transaction) -> error::Result<()> { let mut tx = DBTransaction::new(); for change in transaction.0.into_iter() { match change { @@ -47,7 +47,7 @@ impl Database for DbAdapter { _ => unimplemented!(), } } - handle_err(self.0.write(tx)); + self.0.write(tx).map_err(|e| error::DatabaseError(Box::new(e))) } fn get(&self, col: ColumnId, key: &[u8]) -> Option> { diff --git a/primitives/database/src/lib.rs b/primitives/database/src/lib.rs index 1fb7b15666..1908eb49bb 100644 --- a/primitives/database/src/lib.rs +++ b/primitives/database/src/lib.rs @@ -17,6 +17,7 @@ //! The main database trait, allowing Substrate to store data persistently. +pub mod error; mod mem; mod kvdb; @@ -82,20 +83,22 @@ impl Transaction { pub trait Database: Send + Sync { /// Commit the `transaction` to the database atomically. Any further calls to `get` or `lookup` /// will reflect the new state. - fn commit(&self, transaction: Transaction) { + fn commit(&self, transaction: Transaction) -> error::Result<()> { for change in transaction.0.into_iter() { match change { Change::Set(col, key, value) => self.set(col, &key, &value), Change::Remove(col, key) => self.remove(col, &key), Change::Store(hash, preimage) => self.store(&hash, &preimage), Change::Release(hash) => self.release(&hash), - } + }?; } + + Ok(()) } /// Commit the `transaction` to the database atomically. Any further calls to `get` or `lookup` /// will reflect the new state. 
- fn commit_ref<'a>(&self, transaction: &mut dyn Iterator>) { + fn commit_ref<'a>(&self, transaction: &mut dyn Iterator>) -> error::Result<()> { let mut tx = Transaction::new(); for change in transaction { match change { @@ -105,13 +108,13 @@ pub trait Database: Send + Sync { ChangeRef::Release(hash) => tx.release(hash), } } - self.commit(tx); + self.commit(tx) } /// Retrieve the value previously stored against `key` or `None` if /// `key` is not currently in the database. fn get(&self, col: ColumnId, key: &[u8]) -> Option>; - + /// Call `f` with the value previously stored against `key`. /// /// This may be faster than `get` since it doesn't allocate. @@ -119,24 +122,24 @@ pub trait Database: Send + Sync { fn with_get(&self, col: ColumnId, key: &[u8], f: &mut dyn FnMut(&[u8])) { self.get(col, key).map(|v| f(&v)); } - + /// Set the value of `key` in `col` to `value`, replacing anything that is there currently. - fn set(&self, col: ColumnId, key: &[u8], value: &[u8]) { + fn set(&self, col: ColumnId, key: &[u8], value: &[u8]) -> error::Result<()> { let mut t = Transaction::new(); t.set(col, key, value); - self.commit(t); + self.commit(t) } /// Remove the value of `key` in `col`. - fn remove(&self, col: ColumnId, key: &[u8]) { + fn remove(&self, col: ColumnId, key: &[u8]) -> error::Result<()> { let mut t = Transaction::new(); t.remove(col, key); - self.commit(t); + self.commit(t) } /// Retrieve the first preimage previously `store`d for `hash` or `None` if no preimage is /// currently stored. fn lookup(&self, hash: &H) -> Option>; - + /// Call `f` with the preimage stored for `hash` and return the result, or `None` if no preimage /// is currently stored. /// @@ -145,23 +148,23 @@ pub trait Database: Send + Sync { fn with_lookup(&self, hash: &H, f: &mut dyn FnMut(&[u8])) { self.lookup(hash).map(|v| f(&v)); } - + /// Store the `preimage` of `hash` into the database, so that it may be looked up later with /// `Database::lookup`. This may be called multiple times, but `Database::lookup` but subsequent /// calls will ignore `preimage` and simply increase the number of references on `hash`. - fn store(&self, hash: &H, preimage: &[u8]) { + fn store(&self, hash: &H, preimage: &[u8]) -> error::Result<()> { let mut t = Transaction::new(); t.store(hash.clone(), preimage); - self.commit(t); + self.commit(t) } - + /// Release the preimage of `hash` from the database. An equal number of these to the number of /// corresponding `store`s must have been given before it is legal for `Database::lookup` to /// be unable to provide the preimage. - fn release(&self, hash: &H) { + fn release(&self, hash: &H) -> error::Result<()> { let mut t = Transaction::new(); t.release(hash.clone()); - self.commit(t); + self.commit(t) } } diff --git a/primitives/database/src/mem.rs b/primitives/database/src/mem.rs index cbfc4f31d9..51cb854334 100644 --- a/primitives/database/src/mem.rs +++ b/primitives/database/src/mem.rs @@ -18,7 +18,7 @@ //! 
In-memory implementation of `Database` use std::collections::HashMap; -use crate::{Database, Transaction, ColumnId, Change}; +use crate::{Database, Change, ColumnId, Transaction, error}; use parking_lot::RwLock; #[derive(Default)] @@ -29,7 +29,7 @@ pub struct MemDb Database for MemDb where H: Clone + Send + Sync + Eq + PartialEq + Default + std::hash::Hash { - fn commit(&self, transaction: Transaction) { + fn commit(&self, transaction: Transaction) -> error::Result<()> { let mut s = self.0.write(); for change in transaction.0.into_iter() { match change { @@ -39,6 +39,8 @@ impl Database for MemDb Change::Release(hash) => { s.1.remove(&hash); }, } } + + Ok(()) } fn get(&self, col: ColumnId, key: &[u8]) -> Option> { -- GitLab From 37500cecbfd71ea03fd109f2ca8daeab5689b3e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Thu, 9 Jul 2020 15:07:02 +0200 Subject: [PATCH 144/144] seal: Rework contracts API (#6573) * Transition getter functions to not use scratch buffer * Remove scratch buffer from ext_get_storage * Remove scratch buffer from ext_call * Remove scratch buffer from ext_instantiate * Add ext_input and remove scratch buffer * Rework error handling (changes RPC exposed data) * ext_return passes a flags field instead of a return code * Flags is only for seal and not for the caller * flags: u32 replaced status_code: u8 in RPC exposed type * API functions use a unified error type (ReturnCode) * ext_transfer now traps on error to be consistent with call and instantiate * Remove the no longer used `Dispatched` event * Updated inline documentation * Prevent skipping of copying the output for getter API * Return gas_consumed from the RPC contracts call interface * Updated COMPLEXTITY.md * Rename ext_gas_price to ext_weight_to_fee * Align comments with spaces * Removed no longer used `ExecError` * Remove possible panic in `from_typed_value` * Use a struct as associated data for SpecialTrap::Return * Fix nits in COMPLEXITY.md * Renamed SpecialTrap to TrapReason * Fix test * Finish renaming special_trap -> trap_reason * Remove no longer used get_runtime_storage * fixup! 
Remove no longer used get_runtime_storage * Removed tabs for comment aligment --- Cargo.lock | 1 + bin/node/executor/tests/basic.rs | 29 +- bin/node/runtime/src/lib.rs | 5 +- frame/contracts/COMPLEXITY.md | 228 +++--- frame/contracts/Cargo.toml | 1 + frame/contracts/fixtures/caller_contract.wat | 153 ++-- .../fixtures/check_default_rent_allowance.wat | 30 +- frame/contracts/fixtures/crypto_hashes.wat | 29 +- .../fixtures/destroy_and_transfer.wat | 73 +- frame/contracts/fixtures/drain.wat | 30 +- frame/contracts/fixtures/restoration.wat | 30 +- .../fixtures/return_from_start_fn.wat | 3 +- frame/contracts/fixtures/return_with_data.wat | 38 +- frame/contracts/fixtures/self_destruct.wat | 46 +- .../fixtures/self_destructing_constructor.wat | 30 +- frame/contracts/fixtures/set_rent.wat | 30 +- frame/contracts/fixtures/storage_size.wat | 36 +- frame/contracts/rpc/runtime-api/src/lib.rs | 7 +- frame/contracts/rpc/src/lib.rs | 17 +- frame/contracts/src/exec.rs | 209 ++--- frame/contracts/src/gas.rs | 4 +- frame/contracts/src/lib.rs | 29 +- frame/contracts/src/tests.rs | 11 +- frame/contracts/src/wasm/mod.rs | 575 +++++--------- frame/contracts/src/wasm/prepare.rs | 6 +- frame/contracts/src/wasm/runtime.rs | 714 ++++++++++-------- 26 files changed, 1112 insertions(+), 1252 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0499f75553..617d67e714 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4178,6 +4178,7 @@ name = "pallet-contracts" version = "2.0.0-rc4" dependencies = [ "assert_matches", + "bitflags", "frame-support", "frame-system", "hex-literal", diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index e4de98d90e..9ee7824e51 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -491,32 +491,31 @@ const CODE_TRANSFER: &str = r#" ;; value_ptr: u32, ;; value_len: u32, ;; input_data_ptr: u32, -;; input_data_len: u32 +;; input_data_len: u32, +;; output_ptr: u32, +;; output_len_ptr: u32 ;; ) -> u32 -(import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32) (result i32))) -(import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) -(import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) +(import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) +(import "env" "ext_input" (func $ext_input (param i32 i32))) (import "env" "memory" (memory 1 1)) (func (export "deploy") ) (func (export "call") (block $fail - ;; load and check the input data (which is stored in the scratch buffer). + ;; Load input data to contract memory + (call $ext_input + (i32.const 0) + (i32.const 52) + ) + ;; fail if the input size is not != 4 (br_if $fail (i32.ne (i32.const 4) - (call $ext_scratch_size) + (i32.load (i32.const 52)) ) ) - (call $ext_scratch_read - (i32.const 0) - (i32.const 0) - (i32.const 4) - ) - - (br_if $fail (i32.ne (i32.load8_u (i32.const 0)) @@ -551,6 +550,8 @@ const CODE_TRANSFER: &str = r#" (i32.const 16) ;; Length of the buffer with value to transfer. 
(i32.const 0) ;; Pointer to input data buffer address (i32.const 0) ;; Length of input data buffer + (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case ) ) @@ -571,6 +572,8 @@ const CODE_TRANSFER: &str = r#" "\06\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00" "\00\00" ) +;; Length of the input buffer +(data (i32.const 52) "\04") ) "#; diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index f6e85cb34f..6e5a67387c 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1066,12 +1066,13 @@ impl_runtime_apis! { gas_limit: u64, input_data: Vec, ) -> ContractExecResult { - let exec_result = + let (exec_result, gas_consumed) = Contracts::bare_call(origin, dest.into(), value, gas_limit, input_data); match exec_result { Ok(v) => ContractExecResult::Success { - status: v.status, + flags: v.flags.bits(), data: v.data, + gas_consumed: gas_consumed, }, Err(_) => ContractExecResult::Error, } diff --git a/frame/contracts/COMPLEXITY.md b/frame/contracts/COMPLEXITY.md index 7e8c2903c7..dbb1a5c5cd 100644 --- a/frame/contracts/COMPLEXITY.md +++ b/frame/contracts/COMPLEXITY.md @@ -4,19 +4,19 @@ This analysis is on the computing and memory complexity of specific procedures. The primary goal is to come up with decent pricing for functions that can be invoked by a user (via extrinsics) or by untrusted code that prevents DoS attacks. -# Sandboxing +## Sandboxing It makes sense to describe the sandboxing module first because the smart-contract module is built upon it. -## Memory +### Memory -### set +#### set Copies data from the supervisor's memory to the guest's memory. **complexity**: It doesn't allocate, and the computational complexity is proportional to the number of bytes to copy. -### get +#### get Copies data from the guest's memory to the supervisor's memory. @@ -78,17 +78,10 @@ The size of the arguments and the return value depends on the exact function in **complexity**: Memory and computational complexity can be considered as a constant. -# `AccountDb` +## Transactional Storage -`AccountDb` is an abstraction that supports collecting changes to accounts with the ability to efficiently reverting them. Contract -execution contexts operate on the AccountDb. All changes are flushed into underlying storage only after origin transaction succeeds. - -## Relation to the underlying storage - -At present, `AccountDb` is implemented as a cascade of overlays with the direct storage at the bottom. The direct -storage `AccountDb` leverages child tries. Each overlay is represented by a `Map`. On a commit from an overlay to an -overlay, maps are merged. On commit from an overlay to the bottommost `AccountDb` all changes are flushed to the storage -and on revert, the overlay is just discarded. +The contracts module makes use of the nested storage transactions feature offered by +the underlying storage which allows efficient roll back of changes made by contracts. > ℹ️ The underlying storage has a overlay layer implemented as a `Map`. If the runtime reads a storage location and the > respective key doesn't exist in the overlay, then the underlying storage performs a DB access, but the value won't be @@ -105,23 +98,24 @@ storage access. ## get_storage, get_code_hash, get_rent_allowance, get_balance, contract_exists -These functions check the local cache for a requested value and, if it is there, the value is returned. 
Otherwise, these functions will ask an underlying `AccountDb` for the value. This means that the number of lookups is proportional to the depth of the overlay cascade. If the value can't be found before reaching the bottommost `AccountDb`, then a DB read will be performed (in case `get_balance` the function `free_balance` will be invoked). - -A lookup in the local cache consists of at least one `Map` lookup, for locating the specific account. For `get_storage` there is a second lookup: because account's storage is implemented as a nested map, another lookup is required for fetching a storage value by a key. +Those query the underlying storage for the requested value. If the value was modified in the +current block they are served from the cache. Otherwise a database read is performed. -These functions return an owned value as its result, so memory usage depends on the value being returned. - -**complexity**: The memory complexity is proportional to the size of the value. The computational complexity is proportional to the depth of the overlay cascade and the size of the value; the cost is dominated by the DB read though. +**complexity**: The memory complexity is proportional to the size of the value. The computational complexity is proportional the size of the value; the cost is dominated by the DB read. ## set_storage, set_balance, set_rent_allowance -These functions only modify the local `Map`. - -A lookup in the local cache consists of at least one `Map` lookup, for locating the specific account. For `get_storage` there is a second lookup: because account's storage is implemented as a nested map, another lookup is required for fetching a storage value by a key. +These function write to the underlying storage which caches those values and does not write +them to the database immediately. -While these functions only modify the local `Map`, if changes made by them are committed to the bottommost `AccountDb`, each changed entry in the `Map` will require a DB write. Moreover, if the balance of the account is changed to be below `existential_deposit` then that account along with all its storage will be removed, which requires time proportional to the number of storage entries that account has. It should be ensured that pricing accounts for these facts. +While these functions only modify the local cache, they trigger a database write later when +all changes that were not rolled back are written to storage. Moreover, if the balance of the +account is changed to be below `existential_deposit` then that account along with all its storage +will be removed, which requires time proportional to the number of storage entries that account has. +It should be ensured that pricing accounts for these facts. -**complexity**: Each lookup has a logarithmical computing time to the number of already inserted entries. No additional memory is required. +**complexity**: Each lookup has a logarithmical computing time to the number of already inserted entries. +No additional memory is required. ## instantiate_contract @@ -131,9 +125,11 @@ Calls `contract_exists` and if it doesn't exist, do not modify the local `Map` s ## commit -In this function, all cached values will be inserted into the underlying `AccountDb` or into the storage. +In this function, all values modified in the current transactions are committed to the parent +transaction. 
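As a rough, self-contained model of the behaviour described in this section (reads served from the current block's cache, writes deferred, child transactions merged into their parent on commit), consider the sketch below. The `LayeredStorage` type and its methods are invented for illustration and are not the actual storage implementation.

```rust
use std::collections::BTreeMap;

type Key = Vec<u8>;
type Value = Vec<u8>;

/// Invented stand-in for the layered storage: the bottom layer plays the role
/// of the database, every further layer is an open (nested) transaction.
struct LayeredStorage {
    layers: Vec<BTreeMap<Key, Option<Value>>>, // `None` marks a deletion
}

impl LayeredStorage {
    /// Reads check the open transactions first (values modified in the current
    /// block) and only fall back to the bottom "database" layer.
    fn get(&self, key: &[u8]) -> Option<Value> {
        for layer in self.layers.iter().rev() {
            if let Some(entry) = layer.get(key) {
                return entry.clone();
            }
        }
        None
    }

    /// Writes only touch the topmost (current) transaction.
    fn set(&mut self, key: Key, value: Option<Value>) {
        self.layers
            .last_mut()
            .expect("there is always at least the database layer")
            .insert(key, value);
    }

    /// Committing merges the `N` changes of the current transaction into its
    /// parent, one `O(log M)` insert each; rolling back would simply drop them.
    fn commit(&mut self) {
        if self.layers.len() > 1 {
            let child = self.layers.pop().expect("length checked above");
            self.layers
                .last_mut()
                .expect("a parent layer exists")
                .extend(child);
        }
    }
}
```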
-We are doing `N` inserts into `Map` (`O(log M)` complexity) or into the storage, where `N` is the size of the committed `Map` and `M` is the size of the map of the underlying overlay. Consider adjusting the price of modifying the `AccountDb` to account for this (since pricing for the count of entries in `commit` will make the price of commit way less predictable). No additional memory is required. +This will trigger `N` inserts into parent transaction (`O(log M)` complexity) or into the storage, where `N` is the size of the current transaction and `M` is the size of the parent transaction. Consider adjusting the price of modifying the +current transaction to account for this (since pricing for the count of entries in `commit` will make the price of commit way less predictable). No additional memory is required. Note that in case of storage modification we need to construct a key in the underlying storage. In order to do that we need: @@ -143,21 +139,21 @@ Note that in case of storage modification we need to construct a key in the unde There is also a special case to think of: if the balance of some account goes below `existential_deposit`, then all storage entries of that account will be erased, which requires time proportional to the number of storage entries that account has. -**complexity**: `N` inserts into a `Map` or eventually into the storage (if committed). Every deleted account will induce removal of all its storage which is proportional to the number of storage entries that account has. +**complexity**: `N` inserts into a transaction or eventually into the storage (if committed). Every deleted account will induce removal of all its storage which is proportional to the number of storage entries that account has. ## revert -Consists of dropping (in the Rust sense) of the `AccountDb`. +Consists of dropping (in the Rust sense) of the current transaction. **complexity**: Computing complexity is proportional to a number of changed entries in a overlay. No additional memory is required. -# Executive +## Executive -## Transfer +### Transfer This function performs the following steps: -1. Querying source and destination balances from an overlay (see `get_balance`), +1. Querying source and destination balances from the current transaction (see `get_balance`), 2. Querying `existential_deposit`. 3. Executing `ensure_account_liquid` hook. 4. Updating source and destination balance in the overlay (see `set_balance`). @@ -171,9 +167,9 @@ returns with an error. Assuming marshaled size of a balance value is of the constant size we can neglect its effect on the performance. -**complexity**: up to 2 DB reads and up to 2 DB writes (if flushed to the storage) in the standard case. If removal of the source account takes place then it will additionally perform a DB write per one storage entry that the account has. For the current `AccountDb` implementation computing complexity also depends on the depth of the `AccountDb` cascade. Memorywise it can be assumed to be constant. +**complexity**: up to 2 DB reads and up to 2 DB writes (if flushed to the storage) in the standard case. If removal of the source account takes place then it will additionally perform a DB write per one storage entry that the account has. Memorywise it can be assumed to be constant. -## Initialization +### Initialization Before a call or instantiate can be performed the execution context must be initialized. @@ -188,7 +184,7 @@ implementation they just involve a DB read. 
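As a rough illustration (all names here are invented, not the pallet's types), the two values can be read once when the outermost frame is set up and then shared by every nested frame, which is what makes subsequent, nested initializations cheap:

```rust
/// Illustrative only; names are invented. The execution context reads the
/// timestamp and the block number once and hands them down to nested frames.
#[derive(Clone, Copy)]
struct ExecConfig {
    timestamp: u64,
    block_number: u32,
}

struct Frame {
    config: ExecConfig,
    depth: usize,
}

impl Frame {
    /// The first frame of an extrinsic: this is where the two DB reads happen.
    fn top(read_timestamp: impl FnOnce() -> u64, read_block_number: impl FnOnce() -> u32) -> Self {
        Frame {
            config: ExecConfig {
                timestamp: read_timestamp(),
                block_number: read_block_number(),
            },
            depth: 0,
        }
    }

    /// Frames for nested calls or instantiations reuse the cached values.
    fn nested(&self) -> Frame {
        Frame { config: self.config, depth: self.depth + 1 }
    }
}

fn main() {
    // Closures stand in for the storage reads performed by the runtime.
    let top = Frame::top(|| 1_592_000_000, || 42);
    let inner = top.nested(); // no further DB reads for nested frames
    assert_eq!(inner.config.block_number, 42);
    assert_eq!((inner.depth, inner.config.timestamp), (1, 1_592_000_000));
}
```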
For subsequent calls and instantiations during contract execution, the initialization requires no expensive operations. -## Terminate +### Terminate This function performs the following steps: @@ -204,17 +200,17 @@ the call stack is of a fixed maximum size we consider this operation as constant we are using child trie removal which is linear in the amount of stored keys. Upcoming changes will make the account removal constant time. - -## Call +### Call This function receives input data for the contract execution. The execution consists of the following steps: 1. Initialization of the execution context. 2. Checking rent payment. 3. Loading code from the DB. -4. `transfer`-ing funds between the caller and the destination account. -5. Executing the code of the destination account. -6. Committing overlayed changed to the underlying `AccountDb`. +4. Starting a new storage transaction. +5. `transfer`-ing funds between the caller and the destination account. +6. Executing the code of the destination account. +7. Committing or rolling back the storage transaction. **Note** that the complexity of executing the contract code should be considered separately. @@ -235,22 +231,24 @@ Loading code most likely will trigger a DB read, since the code is immutable and Also, `transfer` can make up to 2 DB reads and up to 2 DB writes (if flushed to the storage) in the standard case. If removal of the source account takes place then it will additionally perform a DB write per one storage entry that the account has. -Finally, all changes are `commit`-ted into the underlying overlay. The complexity of this depends on the number of changes performed by the code. Thus, the pricing of storage modification should account for that. +Finally, the current storage transaction is closed. The complexity of this depends on the number of changes performed by the code. Thus, the pricing of storage modification should account for that. **complexity**: + - Only for the first invocation of the contract: up to 5 DB reads and one DB write as well as logic executed by `ensure_can_withdraw`, `withdraw`, `make_free_balance_be`. - On top of that for every invocation: Up to 5 DB reads. DB read of the code is of dynamic size. There can also be up to 2 DB writes (if flushed to the storage). Additionally, if the source account removal takes place a DB write will be performed per one storage entry that the account has. -## Instantiate +### Instantiate This function takes the code of the constructor and input data. Instantiation of a contract consists of the following steps: 1. Initialization of the execution context. 2. Calling `DetermineContractAddress` hook to determine an address for the contract, -3. `transfer`-ing funds between self and the newly instantiated contract. -4. Executing the constructor code. This will yield the final code of the code. -5. Storing the code for the newly instantiated contract in the overlay. -6. Committing overlayed changed to the underlying `AccountDb`. +3. Starting a new storage transaction. +4. `transfer`-ing funds between self and the newly instantiated contract. +5. Executing the constructor code. This will yield the final code of the code. +6. Storing the code for the newly instantiated contract in the overlay. +7. Committing or rolling back the storage transaction. **Note** that the complexity of executing the constructor code should be considered separately. 
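Both sequences above end with the storage transaction being either committed or rolled back. A minimal sketch of that pattern, with invented stand-in types rather than the pallet's real ones, looks as follows; the decision hinges on whether execution failed or the contract set the revert flag on return:

```rust
/// Output of a completed contract execution (stand-in for the real return type).
#[derive(Debug)]
struct Output {
    /// Mirrors the REVERT bit of the return flags introduced by this change.
    revert: bool,
    data: Vec<u8>,
}

/// Execution that did not run to completion (trap, out of gas, ...).
#[derive(Debug)]
struct Trapped;

/// Stand-in for the transactional storage layer.
struct Storage {
    open_transactions: u32,
}

impl Storage {
    fn start_transaction(&mut self) { self.open_transactions += 1; }
    fn commit_transaction(&mut self) { self.open_transactions -= 1; }
    fn rollback_transaction(&mut self) { self.open_transactions -= 1; }
}

/// Wrap one call or instantiation in its own storage transaction.
fn execute_in_transaction(
    storage: &mut Storage,
    run: impl FnOnce(&mut Storage) -> Result<Output, Trapped>,
) -> Result<Output, Trapped> {
    storage.start_transaction();
    let outcome = run(storage);
    match &outcome {
        // Changes survive only if execution completed and did not ask to revert.
        Ok(output) if !output.revert => storage.commit_transaction(),
        _ => storage.rollback_transaction(),
    }
    outcome
}

fn main() {
    let mut storage = Storage { open_transactions: 0 };
    let result = execute_in_transaction(&mut storage, |_storage| {
        // The contract ran to completion but asked for its changes to be reverted.
        Ok(Output { revert: true, data: b"reverted".to_vec() })
    });
    assert_eq!(storage.open_transactions, 0);
    assert_eq!(result.unwrap().data, b"reverted".to_vec());
}
```

In the pallet this decision is what lets a revert (see the `ReturnFlags::REVERT` bit in the `exec.rs` changes below) undo state changes while still handing the output data back to the caller.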
@@ -262,19 +260,43 @@ Also, `transfer` can make up to 2 DB reads and up to 2 DB writes (if flushed to Storing the code in the overlay may induce another DB write (if flushed to the storage) with the size proportional to the size of the constructor code. -Finally, all changes are `commit`-ted into the underlying overlay. The complexity of this depends on the number of changes performed by the constructor code. Thus, the pricing of storage modification should account for that. +Finally, the current storage transaction is closed.. The complexity of this depends on the number of changes performed by the constructor code. Thus, the pricing of storage modification should account for that. **complexity**: Up to 2 DB reads and induces up to 3 DB writes (if flushed to the storage), one of which is dependent on the size of the code. Additionally, if the source account removal takes place a DB write will be performed per one storage entry that the account has. -# Externalities +## Contracts API + +Each API function invoked from a contract can involve some overhead. + +## Getter functions + +Those are simple getter functions which copy a requested value to contract memory. They +all have the following two arguments: + +- `output_ptr`: Pointer into contract memory where to copy the value. +- `output_len_ptr`: Pointer into contract memory where the size of the buffer is stored. The size of the copied value is also stored there. -Each external function invoked from a contract can involve some overhead. +**complexity**: The size of the returned value is constant for a given runtime. Therefore we +consider its complexity constant even though some of them might involve at most one DB read. Some of those +functions call into other pallets of the runtime. The assumption here is that those functions are also +linear in regard to the size of the data that is returned and therefore considered constant for a +given runtime. -## ext_gas +This is the list of getters: -**complexity**: This is of constant complexity. +- ext_caller +- ext_address +- ext_weight_to_fee +- ext_gas_left +- ext_balance +- ext_value_transferred +- ext_now +- ext_minimum_balance +- ext_tombstone_deposit +- ext_rent_allowance +- ext_block_number -## ext_set_storage +### ext_set_storage This function receives a `key` and `value` as arguments. It consists of the following steps: @@ -283,7 +305,7 @@ This function receives a `key` and `value` as arguments. It consists of the foll **complexity**: Complexity is proportional to the size of the `value`. This function induces a DB write of size proportional to the `value` size (if flushed to the storage), so should be priced accordingly. -## ext_clear_storage +### ext_clear_storage This function receives a `key` as argument. It consists of the following steps: @@ -293,23 +315,22 @@ This function receives a `key` as argument. It consists of the following steps: **complexity**: Complexity is constant. This function induces a DB write to clear the storage entry (upon being flushed to the storage) and should be priced accordingly. -## ext_get_storage +### ext_get_storage This function receives a `key` as an argument. It consists of the following steps: 1. Reading the sandbox memory for `key` (see sandboxing memory get). 2. Reading the storage with the given key (see `get_storage`). It receives back the owned result buffer. -3. Replacing the scratch buffer. +3. Writing the storage value to contract memory. Key is of a constant size. 
Therefore, the sandbox memory load can be considered to be of constant complexity. Unless the value is cached, a DB read will be performed. The size of the value is not known until the read is performed. Moreover, the DB read has to be synchronous and no progress can be made until the value is fetched. -**complexity**: The memory and computing complexity is proportional to the size of the fetched value. This function performs a -DB read. +**complexity**: The memory and computing complexity is proportional to the size of the fetched value. This function performs a DB read. -## ext_transfer +### ext_transfer This function receives the following arguments: @@ -320,18 +341,19 @@ It consists of the following steps: 1. Loading `account` buffer from the sandbox memory (see sandboxing memory get) and then decoding it. 2. Loading `value` buffer from the sandbox memory and then decoding it. -4. Invoking the executive function `transfer`. +3. Invoking the executive function `transfer`. Loading of `account` and `value` buffers should be charged. This is because the sizes of buffers are specified by the calling code, even though marshaled representations are, essentially, of constant size. This can be fixed by assigning an upper bound for sizes of `AccountId` and `Balance`. -## ext_call +### ext_call This function receives the following arguments: - `callee` buffer of a marshaled `AccountId`, - `gas` limit which is plain u64, - `value` buffer of a marshaled `Balance`, -- `input_data`. An arbitrarily sized byte vector. +- `input_data` an arbitrarily sized byte vector. +- `output_ptr` pointer to contract memory. It consists of the following steps: @@ -339,14 +361,15 @@ It consists of the following steps: 2. Loading `value` buffer from the sandbox memory and then decoding it. 3. Loading `input_data` buffer from the sandbox memory. 4. Invoking the executive function `call`. +5. Writing output buffer to contract memory. Loading of `callee` and `value` buffers should be charged. This is because the sizes of buffers are specified by the calling code, even though marshaled representations are, essentially, of constant size. This can be fixed by assigning an upper bound for sizes of `AccountId` and `Balance`. Loading `input_data` should be charged in any case. -**complexity**: All complexity comes from loading buffers and executing `call` executive function. The former component is proportional to the sizes of `callee`, `value` and `input_data` buffers. The latter component completely depends on the complexity of `call` executive function, and also dominated by it. +**complexity**: All complexity comes from loading and writing buffers and executing `call` executive function. The former component is proportional to the sizes of `callee`, `value`, `input_data` and `output_ptr` buffers. The latter component completely depends on the complexity of `call` executive function, and also dominated by it. -## ext_instantiate +### ext_instantiate This function receives the following arguments: @@ -368,7 +391,7 @@ Loading `init_code` and `input_data` should be charged in any case. **complexity**: All complexity comes from loading buffers and executing `instantiate` executive function. The former component is proportional to the sizes of `init_code`, `value` and `input_data` buffers. The latter component completely depends on the complexity of `instantiate` executive function and also dominated by it. 
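The functions above share one calling convention for results: instead of the removed scratch buffer, the caller passes an output pointer together with a pointer to a length field, and the sentinel value `u32::MAX` for the output pointer means "do not copy the output at all" (this is the `4294967295` used throughout the updated fixtures). A sketch of a supervisor-side helper enforcing that convention could look like this; `Memory` and all names are stand-ins, not the pallet's actual API:

```rust
use std::convert::TryInto;

/// Sentinel for "the caller does not want the output copied".
const SENTINEL: u32 = u32::MAX;

#[derive(Debug)]
enum WriteError {
    OutputBufferTooSmall,
    OutOfBounds,
}

/// Stand-in for the sandbox (linear) memory of the executing contract.
struct Memory {
    data: Vec<u8>,
}

impl Memory {
    fn write(&mut self, ptr: u32, buf: &[u8]) -> Result<(), WriteError> {
        let start = ptr as usize;
        let end = start.checked_add(buf.len()).ok_or(WriteError::OutOfBounds)?;
        self.data
            .get_mut(start..end)
            .ok_or(WriteError::OutOfBounds)?
            .copy_from_slice(buf);
        Ok(())
    }

    fn read_u32(&self, ptr: u32) -> Result<u32, WriteError> {
        let start = ptr as usize;
        let bytes = self.data.get(start..start + 4).ok_or(WriteError::OutOfBounds)?;
        Ok(u32::from_le_bytes(bytes.try_into().expect("slice is 4 bytes")))
    }
}

/// Copy `value` into the contract-supplied output buffer, honouring the sentinel.
fn write_sandbox_output(
    memory: &mut Memory,
    out_ptr: u32,
    out_len_ptr: u32,
    value: &[u8],
) -> Result<(), WriteError> {
    // Sentinel: the caller is not interested in the output.
    if out_ptr == SENTINEL {
        return Ok(());
    }
    // The length field holds the buffer size on input ...
    let buf_len = memory.read_u32(out_len_ptr)?;
    if value.len() as u32 > buf_len {
        return Err(WriteError::OutputBufferTooSmall);
    }
    memory.write(out_ptr, value)?;
    // ... and the size of the copied value on output.
    memory.write(out_len_ptr, &(value.len() as u32).to_le_bytes())
}

fn main() {
    let mut memory = Memory { data: vec![0u8; 64] };
    // The contract reserved 8 bytes at offset 0 and stored the buffer length at offset 8.
    memory.write(8, &8u32.to_le_bytes()).unwrap();
    write_sandbox_output(&mut memory, 0, 8, &1234u64.to_le_bytes()).unwrap();
    assert_eq!(memory.read_u32(8).unwrap(), 8);
    // With the sentinel nothing is copied and a small buffer cannot cause an error.
    write_sandbox_output(&mut memory, SENTINEL, 0, &[0u8; 32]).unwrap();
}
```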
-## ext_terminate +### ext_terminate This function receives the following arguments: @@ -382,16 +405,23 @@ Loading of the `beneficiary` buffer should be charged. This is because the sizes **complexity**: All complexity comes from loading buffers and executing `terminate` executive function. The former component is proportional to the size of the `beneficiary` buffer. The latter component completely depends on the complexity of `terminate` executive function and also dominated by it. -## ext_return +### ext_input -This function receives a `data` buffer as an argument. Execution of the function consists of the following steps: +This function receives a pointer to contract memory. It copies the input to the contract call to this location. -1. Loading `data` buffer from the sandbox memory (see sandboxing memory get), -2. Trapping +**complexity**: The complextity is proportional to the size of the input buffer. + +### ext_return + +This function receives a `data` buffer and `flags` arguments. Execution of the function consists of the following steps: + +1. Loading `data` buffer from the sandbox memory (see sandboxing memory get). +2. Storing the `u32` flags value. +3. Trapping **complexity**: The complexity of this function is proportional to the size of the `data` buffer. -## ext_deposit_event +### ext_deposit_event This function receives a `data` buffer as an argument. Execution of the function consists of the following steps: @@ -402,49 +432,7 @@ This function receives a `data` buffer as an argument. Execution of the function **complexity**: The complexity of this function is proportional to the size of the `data` buffer. -## ext_caller - -This function serializes the address of the caller into the scratch buffer. - -**complexity**: Assuming that the address is of constant size, this function has constant complexity. - -## ext_random - -This function serializes a random number generated by the given subject into the scratch buffer. -The complexity of this function highly depends on the complexity of `System::random`. `max_subject_len` -limits the size of the subject buffer. - -**complexity**: The complexity of this function depends on the implementation of `System::random`. - -## ext_now - -This function serializes the current block's timestamp into the scratch buffer. - -**complexity**: Assuming that the timestamp is of constant size, this function has constant complexity. - -## ext_scratch_size - -This function returns the size of the scratch buffer. - -**complexity**: This function is of constant complexity. - -## ext_scratch_read - -This function copies slice of data from the scratch buffer to the sandbox memory. The calling code specifies the slice length. Execution of the function consists of the following steps: - -1. Storing a specified slice of the scratch buffer into the sandbox memory (see sandboxing memory set) - -**complexity**: The computing complexity of this function is proportional to the length of the slice. No additional memory is required. - -## ext_scratch_write - -This function copies slice of data from the sandbox memory to the scratch buffer. The calling code specifies the slice length. Execution of the function consists of the following steps: - -1. Loading a slice from the sandbox memory into the (see sandboxing memory get) - -**complexity**: Complexity is proportional to the length of the slice. 
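To make the `ext_return` behaviour described above concrete: conceptually the host stops execution with a special trap that carries the flags and the data, which the executor then converts into the regular output of the call. A rough, self-contained model of that flow (all names invented) is:

```rust
/// Traps that the host raises deliberately to stop contract execution.
#[derive(Debug)]
enum SpecialTrap {
    /// The contract called `ext_return(flags, data_ptr, data_len)`.
    Return { flags: u32, data: Vec<u8> },
}

/// What the executor hands back to the caller of the contract.
#[derive(Debug)]
struct Output {
    flags: u32,
    data: Vec<u8>,
}

/// Stand-in for running the contract's exported `call` function to completion.
fn run_contract(contract: impl FnOnce() -> Result<(), SpecialTrap>) -> Output {
    match contract() {
        // Falling off the end without calling `ext_return` means "no data, no flags".
        Ok(()) => Output { flags: 0, data: Vec::new() },
        Err(SpecialTrap::Return { flags, data }) => Output { flags, data },
    }
}

fn main() {
    let output = run_contract(|| {
        // The contract returns some data and sets bit 0 (the revert flag).
        Err(SpecialTrap::Return { flags: 1, data: b"reverted".to_vec() })
    });
    assert_eq!(output.flags & 1, 1);
    assert_eq!(output.data, b"reverted".to_vec());
}
```

In the real pallet, ordinary traps are a separate case and surface to a calling contract as `ReturnCode::CalleeTrapped`, as exercised by the updated `caller_contract.wat` fixture below.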
- -## ext_set_rent_allowance +### ext_set_rent_allowance This function receives the following argument: @@ -457,22 +445,6 @@ It consists of the following steps: **complexity**: Complexity is proportional to the size of the `value`. This function induces a DB write of size proportional to the `value` size (if flushed to the storage), so should be priced accordingly. -## ext_rent_allowance - -It consists of the following steps: - -1. Invoking `get_rent_allowance` AccountDB function. -2. Serializing the rent allowance of the current contract into the scratch buffer. - -**complexity**: Assuming that the rent allowance is of constant size, this function has constant complexity. This -function performs a DB read. - -## ext_block_number - -This function serializes the current block's number into the scratch buffer. - -**complexity**: Assuming that the block number is of constant size, this function has constant complexity. - ## Built-in hashing functions This paragraph concerns the following supported built-in hash functions: diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index 348b8ff0e0..df5a47bb0e 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -25,6 +25,7 @@ sp-sandbox = { version = "0.8.0-rc4", default-features = false, path = "../../pr frame-support = { version = "2.0.0-rc4", default-features = false, path = "../support" } frame-system = { version = "2.0.0-rc4", default-features = false, path = "../system" } pallet-contracts-primitives = { version = "2.0.0-rc4", default-features = false, path = "common" } +bitflags = "1.0" [dev-dependencies] wabt = "0.9.2" diff --git a/frame/contracts/fixtures/caller_contract.wat b/frame/contracts/fixtures/caller_contract.wat index 4bc122c0b1..369007834d 100644 --- a/frame/contracts/fixtures/caller_contract.wat +++ b/frame/contracts/fixtures/caller_contract.wat @@ -1,9 +1,8 @@ (module - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) - (import "env" "ext_balance" (func $ext_balance)) - (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32) (result i32))) - (import "env" "ext_instantiate" (func $ext_instantiate (param i32 i32 i64 i32 i32 i32 i32) (result i32))) + (import "env" "ext_input" (func $ext_input (param i32 i32))) + (import "env" "ext_balance" (func $ext_balance (param i32 i32))) + (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) + (import "env" "ext_instantiate" (func $ext_instantiate (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32) (result i32))) (import "env" "ext_println" (func $ext_println (param i32 i32))) (import "env" "memory" (memory 1 1)) @@ -17,14 +16,16 @@ ) (func $current_balance (param $sp i32) (result i64) - (call $ext_balance) - (call $assert - (i32.eq (call $ext_scratch_size) (i32.const 8)) + (i32.store + (i32.sub (get_local $sp) (i32.const 16)) + (i32.const 8) ) - (call $ext_scratch_read + (call $ext_balance (i32.sub (get_local $sp) (i32.const 8)) - (i32.const 0) - (i32.const 8) + (i32.sub (get_local $sp) (i32.const 16)) + ) + (call $assert + (i32.eq (i32.load (i32.sub (get_local $sp) (i32.const 16))) (i32.const 8)) ) (i64.load (i32.sub (get_local $sp) (i32.const 8))) ) @@ -36,21 +37,20 @@ (local $exit_code i32) (local $balance i64) + ;; Length of the buffer + (i32.store (i32.const 20) (i32.const 32)) + + ;; Copy input to this contracts memory + (call $ext_input (i32.const 24) (i32.const 20)) + ;; Input 
data is the code hash of the contract to be deployed. (call $assert (i32.eq - (call $ext_scratch_size) + (i32.load (i32.const 20)) (i32.const 32) ) ) - ;; Copy code hash from scratch buffer into this contract's memory. - (call $ext_scratch_read - (i32.const 24) ;; The pointer where to store the scratch buffer contents, - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 32) ;; Count of bytes to copy. - ) - ;; Read current balance into local variable. (set_local $sp (i32.const 1024)) (set_local $balance @@ -67,17 +67,16 @@ (i32.const 8) ;; Length of the buffer with value to transfer. (i32.const 9) ;; Pointer to input data buffer address (i32.const 7) ;; Length of input data buffer + (i32.const 4294967295) ;; u32 max sentinel value: do not copy address + (i32.const 0) ;; Length is ignored in this case + (i32.const 4294967295) ;; u32 max sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case ) ) ;; Check non-zero exit status. (call $assert - (i32.eq (get_local $exit_code) (i32.const 0x11)) - ) - - ;; Check that scratch buffer is empty since contract instantiation failed. - (call $assert - (i32.eq (call $ext_scratch_size) (i32.const 0)) + (i32.eq (get_local $exit_code) (i32.const 2)) ;; ReturnCode::CalleeReverted ) ;; Check that balance has not changed. @@ -95,17 +94,16 @@ (i32.const 8) ;; Length of the buffer with value to transfer. (i32.const 8) ;; Pointer to input data buffer address (i32.const 8) ;; Length of input data buffer + (i32.const 4294967295) ;; u32 max sentinel value: do not copy address + (i32.const 0) ;; Length is ignored in this case + (i32.const 4294967295) ;; u32 max sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case ) ) ;; Check for special trap exit status. (call $assert - (i32.eq (get_local $exit_code) (i32.const 0x0100)) - ) - - ;; Check that scratch buffer is empty since contract instantiation failed. - (call $assert - (i32.eq (call $ext_scratch_size) (i32.const 0)) + (i32.eq (get_local $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped ) ;; Check that balance has not changed. @@ -113,6 +111,12 @@ (i64.eq (get_local $balance) (call $current_balance (get_local $sp))) ) + ;; Length of the output buffer + (i32.store + (i32.sub (get_local $sp) (i32.const 4)) + (i32.const 8) + ) + ;; Deploy the contract successfully. (set_local $exit_code (call $ext_instantiate @@ -123,24 +127,22 @@ (i32.const 8) ;; Length of the buffer with value to transfer. (i32.const 8) ;; Pointer to input data buffer address (i32.const 8) ;; Length of input data buffer + (i32.const 16) ;; Pointer to the address output buffer + (i32.sub (get_local $sp) (i32.const 4)) ;; Pointer to the address buffer length + (i32.const 4294967295) ;; u32 max sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case + ) ) ;; Check for success exit status. (call $assert - (i32.eq (get_local $exit_code) (i32.const 0x00)) + (i32.eq (get_local $exit_code) (i32.const 0)) ;; ReturnCode::Success ) - ;; Check that scratch buffer contains the address of the new contract. + ;; Check that address has the expected length (call $assert - (i32.eq (call $ext_scratch_size) (i32.const 8)) - ) - - ;; Copy contract address from scratch buffer into this contract's memory. - (call $ext_scratch_read - (i32.const 16) ;; The pointer where to store the scratch buffer contents, - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 8) ;; Count of bytes to copy. 
+ (i32.eq (i32.load (i32.sub (get_local $sp) (i32.const 4))) (i32.const 8)) ) ;; Check that balance has been deducted. @@ -151,6 +153,18 @@ (i64.eq (get_local $balance) (call $current_balance (get_local $sp))) ) + ;; Zero out destination buffer of output + (i32.store + (i32.sub (get_local $sp) (i32.const 4)) + (i32.const 0) + ) + + ;; Length of the output buffer + (i32.store + (i32.sub (get_local $sp) (i32.const 8)) + (i32.const 4) + ) + ;; Call the new contract and expect it to return failing exit code. (set_local $exit_code (call $ext_call @@ -161,26 +175,19 @@ (i32.const 8) ;; Length of the buffer with value to transfer. (i32.const 9) ;; Pointer to input data buffer address (i32.const 7) ;; Length of input data buffer + (i32.sub (get_local $sp) (i32.const 4)) ;; Ptr to output buffer + (i32.sub (get_local $sp) (i32.const 8)) ;; Ptr to output buffer len ) ) ;; Check non-zero exit status. (call $assert - (i32.eq (get_local $exit_code) (i32.const 0x11)) + (i32.eq (get_local $exit_code) (i32.const 2)) ;; ReturnCode::CalleeReverted ) - ;; Check that scratch buffer contains the expected return data. + ;; Check that output buffer contains the expected return data. (call $assert - (i32.eq (call $ext_scratch_size) (i32.const 3)) - ) - (i32.store - (i32.sub (get_local $sp) (i32.const 4)) - (i32.const 0) - ) - (call $ext_scratch_read - (i32.sub (get_local $sp) (i32.const 4)) - (i32.const 0) - (i32.const 3) + (i32.eq (i32.load (i32.sub (get_local $sp) (i32.const 8))) (i32.const 3)) ) (call $assert (i32.eq @@ -204,17 +211,14 @@ (i32.const 8) ;; Length of the buffer with value to transfer. (i32.const 8) ;; Pointer to input data buffer address (i32.const 8) ;; Length of input data buffer + (i32.const 4294967295) ;; u32 max sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this cas ) ) ;; Check for special trap exit status. (call $assert - (i32.eq (get_local $exit_code) (i32.const 0x0100)) - ) - - ;; Check that scratch buffer is empty since call trapped. - (call $assert - (i32.eq (call $ext_scratch_size) (i32.const 0)) + (i32.eq (get_local $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped ) ;; Check that balance has not changed. @@ -222,6 +226,18 @@ (i64.eq (get_local $balance) (call $current_balance (get_local $sp))) ) + ;; Zero out destination buffer of output + (i32.store + (i32.sub (get_local $sp) (i32.const 4)) + (i32.const 0) + ) + + ;; Length of the output buffer + (i32.store + (i32.sub (get_local $sp) (i32.const 8)) + (i32.const 4) + ) + ;; Call the contract successfully. (set_local $exit_code (call $ext_call @@ -232,26 +248,19 @@ (i32.const 8) ;; Length of the buffer with value to transfer. (i32.const 8) ;; Pointer to input data buffer address (i32.const 8) ;; Length of input data buffer + (i32.sub (get_local $sp) (i32.const 4)) ;; Ptr to output buffer + (i32.sub (get_local $sp) (i32.const 8)) ;; Ptr to output buffer len ) ) ;; Check for success exit status. (call $assert - (i32.eq (get_local $exit_code) (i32.const 0x00)) + (i32.eq (get_local $exit_code) (i32.const 0)) ;; ReturnCode::Success ) - ;; Check that scratch buffer contains the expected return data. + ;; Check that the output buffer contains the expected return data. 
(call $assert - (i32.eq (call $ext_scratch_size) (i32.const 4)) - ) - (i32.store - (i32.sub (get_local $sp) (i32.const 4)) - (i32.const 0) - ) - (call $ext_scratch_read - (i32.sub (get_local $sp) (i32.const 4)) - (i32.const 0) - (i32.const 4) + (i32.eq (i32.load (i32.sub (get_local $sp) (i32.const 8))) (i32.const 4)) ) (call $assert (i32.eq @@ -271,5 +280,5 @@ (data (i32.const 0) "\00\80") ;; The value to transfer on instantiation and calls. ;; Chosen to be greater than existential deposit. - (data (i32.const 8) "\00\11\22\33\44\55\66\77") ;; The input data to instantiations and calls. + (data (i32.const 8) "\00\01\22\33\44\55\66\77") ;; The input data to instantiations and calls. ) diff --git a/frame/contracts/fixtures/check_default_rent_allowance.wat b/frame/contracts/fixtures/check_default_rent_allowance.wat index 12b3004adf..b3076a0432 100644 --- a/frame/contracts/fixtures/check_default_rent_allowance.wat +++ b/frame/contracts/fixtures/check_default_rent_allowance.wat @@ -1,9 +1,14 @@ (module - (import "env" "ext_rent_allowance" (func $ext_rent_allowance)) - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) + (import "env" "ext_rent_allowance" (func $ext_rent_allowance (param i32 i32))) (import "env" "memory" (memory 1 1)) + ;; [0, 8) reserved for $ext_rent_allowance output + + ;; [8, 16) length of the buffer + (data (i32.const 8) "\08") + + ;; [16, inf) zero initialized + (func $assert (param i32) (block $ok (br_if $ok @@ -16,30 +21,21 @@ (func (export "call")) (func (export "deploy") - ;; fill the scratch buffer with the rent allowance. - (call $ext_rent_allowance) + ;; fill the buffer with the rent allowance. + (call $ext_rent_allowance (i32.const 0) (i32.const 8)) - ;; assert $ext_scratch_size == 8 + ;; assert len == 8 (call $assert (i32.eq - (call $ext_scratch_size) + (i32.load (i32.const 8)) (i32.const 8) ) ) - ;; copy contents of the scratch buffer into the contract's memory. - (call $ext_scratch_read - (i32.const 8) ;; Pointer in memory to the place where to copy. - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 8) ;; Count of bytes to copy. - ) - ;; assert that contents of the buffer is equal to >::max_value(). (call $assert (i64.eq - (i64.load - (i32.const 8) - ) + (i64.load (i32.const 0)) (i64.const 0xFFFFFFFFFFFFFFFF) ) ) diff --git a/frame/contracts/fixtures/crypto_hashes.wat b/frame/contracts/fixtures/crypto_hashes.wat index 6dbca33928..f7b244b8c1 100644 --- a/frame/contracts/fixtures/crypto_hashes.wat +++ b/frame/contracts/fixtures/crypto_hashes.wat @@ -1,7 +1,6 @@ (module - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) - (import "env" "ext_scratch_write" (func $ext_scratch_write (param i32 i32))) + (import "env" "ext_input" (func $ext_input (param i32 i32))) + (import "env" "ext_return" (func $ext_return (param i32 i32 i32))) (import "env" "ext_hash_sha2_256" (func $ext_hash_sha2_256 (param i32 i32 i32))) (import "env" "ext_hash_keccak_256" (func $ext_hash_keccak_256 (param i32 i32 i32))) @@ -25,8 +24,7 @@ ;; Called by the tests. ;; - ;; The `call` function expects data in a certain format in the scratch - ;; buffer. + ;; The `call` function expects data in a certain format in the input buffer. ;; ;; 1. The first byte encodes an identifier for the crypto hash function ;; under test. 
(*) @@ -34,7 +32,7 @@ ;; crypto hash function chosen in 1. ;; ;; The `deploy` function then computes the chosen crypto hash function - ;; given the input and puts the result back into the scratch buffer. + ;; given the input and puts the result into the output buffer. ;; After contract execution the test driver then asserts that the returned ;; values are equal to the expected bytes for the input and chosen hash ;; function. @@ -48,33 +46,36 @@ ;; | 2 | BLAKE2 | 256 | ;; | 3 | BLAKE2 | 128 | ;; --------------------------------- - (func (export "call") (result i32) + (func (export "call") (local $chosen_hash_fn i32) + (local $input_len_ptr i32) (local $input_ptr i32) (local $input_len i32) (local $output_ptr i32) (local $output_len i32) + (local.set $input_len_ptr (i32.const 256)) (local.set $input_ptr (i32.const 10)) - (call $ext_scratch_read (local.get $input_ptr) (i32.const 0) (call $ext_scratch_size)) + (i32.store (local.get $input_len_ptr) (i32.const 246)) + (call $ext_input (local.get $input_ptr) (local.get $input_len_ptr)) (local.set $chosen_hash_fn (i32.load8_u (local.get $input_ptr))) (if (i32.gt_u (local.get $chosen_hash_fn) (i32.const 7)) ;; We check that the chosen hash fn identifier is within bounds: [0,7] (unreachable) ) (local.set $input_ptr (i32.add (local.get $input_ptr) (i32.const 1))) - (local.set $input_len (i32.sub (call $ext_scratch_size) (i32.const 1))) - (local.set $output_ptr (i32.const 100)) + (local.set $input_len (i32.sub (i32.load (local.get $input_len_ptr)) (i32.const 1))) (local.set $output_len (i32.load8_u (local.get $chosen_hash_fn))) (call_indirect (type $hash_fn_sig) (local.get $input_ptr) (local.get $input_len) - (local.get $output_ptr) + (local.get $input_ptr) (local.get $chosen_hash_fn) ;; Which crypto hash function to execute. ) - (call $ext_scratch_write - (local.get $output_ptr) ;; Linear memory location of the output buffer. + (call $ext_return + (i32.const 0) + (local.get $input_ptr) ;; Linear memory location of the output buffer. (local.get $output_len) ;; Number of output buffer bytes. ) - (i32.const 0) + (unreachable) ) ) diff --git a/frame/contracts/fixtures/destroy_and_transfer.wat b/frame/contracts/fixtures/destroy_and_transfer.wat index c8cf7271d7..ee191aa019 100644 --- a/frame/contracts/fixtures/destroy_and_transfer.wat +++ b/frame/contracts/fixtures/destroy_and_transfer.wat @@ -1,12 +1,28 @@ (module - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) - (import "env" "ext_get_storage" (func $ext_get_storage (param i32) (result i32))) + (import "env" "ext_input" (func $ext_input (param i32 i32))) + (import "env" "ext_get_storage" (func $ext_get_storage (param i32 i32 i32) (result i32))) (import "env" "ext_set_storage" (func $ext_set_storage (param i32 i32 i32))) - (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32) (result i32))) - (import "env" "ext_instantiate" (func $ext_instantiate (param i32 i32 i64 i32 i32 i32 i32) (result i32))) + (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) + (import "env" "ext_instantiate" (func $ext_instantiate (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) + ;; [0, 8) Endowment to send when creating contract. + (data (i32.const 0) "\00\00\01") + + ;; [8, 16) Value to send when calling contract. + + ;; [16, 48) The key to store the contract address under. 
+ + ;; [48, 80) Buffer where to store the input to the contract + + ;; [80, 88) Buffer where to store the address of the instantiated contract + + ;; [88, 96) Size of the buffer + (data (i32.const 88) "\08") + + ;; [96, 100) Size of the input buffer + (data (i32.const 96) "\20") + (func $assert (param i32) (block $ok (br_if $ok @@ -17,21 +33,15 @@ ) (func (export "deploy") - ;; Input data is the code hash of the contract to be deployed. + ;; Input data is the code hash of the contract to be deployed. + (call $ext_input (i32.const 48) (i32.const 96)) (call $assert (i32.eq - (call $ext_scratch_size) + (i32.load (i32.const 96)) (i32.const 32) ) ) - ;; Copy code hash from scratch buffer into this contract's memory. - (call $ext_scratch_read - (i32.const 48) ;; The pointer where to store the scratch buffer contents, - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 32) ;; Count of bytes to copy. - ) - ;; Deploy the contract with the provided code hash. (call $assert (i32.eq @@ -43,23 +53,22 @@ (i32.const 8) ;; Length of the buffer with value to transfer. (i32.const 0) ;; Pointer to input data buffer address (i32.const 0) ;; Length of input data buffer + (i32.const 80) ;; Buffer where to store address of new contract + (i32.const 88) ;; Pointer to the length of the buffer + (i32.const 4294967295) ;; u32 max sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this cas ) (i32.const 0) ) ) - ;; Read the address of the instantiated contract into memory. + ;; Check that address has expected length (call $assert (i32.eq - (call $ext_scratch_size) + (i32.load (i32.const 88)) (i32.const 8) ) ) - (call $ext_scratch_read - (i32.const 80) ;; The pointer where to store the scratch buffer contents, - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 8) ;; Count of bytes to copy. - ) ;; Store the return address. (call $ext_set_storage @@ -75,21 +84,18 @@ (i32.eq (call $ext_get_storage (i32.const 16) ;; Pointer to the key + (i32.const 80) ;; Pointer to the value + (i32.const 88) ;; Pointer to the len of the value ) (i32.const 0) ) ) (call $assert (i32.eq - (call $ext_scratch_size) + (i32.load (i32.const 88)) (i32.const 8) ) ) - (call $ext_scratch_read - (i32.const 80) ;; The pointer where to store the contract address. - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 8) ;; Count of bytes to copy. - ) ;; Calling the destination contract with non-empty input data should fail. 
(call $assert @@ -102,8 +108,11 @@ (i32.const 8) ;; Length of the buffer with value to transfer (i32.const 0) ;; Pointer to input data buffer address (i32.const 1) ;; Length of input data buffer + (i32.const 4294967295) ;; u32 max sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case + ) - (i32.const 0x0100) + (i32.const 0x1) ) ) @@ -118,6 +127,8 @@ (i32.const 8) ;; Length of the buffer with value to transfer (i32.const 0) ;; Pointer to input data buffer address (i32.const 0) ;; Length of input data buffer + (i32.const 4294967295) ;; u32 max sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case ) (i32.const 0) ) @@ -136,13 +147,11 @@ (i32.const 8) ;; Length of the buffer with value to transfer (i32.const 0) ;; Pointer to input data buffer address (i32.const 1) ;; Length of input data buffer + (i32.const 4294967295) ;; u32 max sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case ) (i32.const 0) ) ) ) - - (data (i32.const 0) "\00\00\01") ;; Endowment to send when creating contract. - (data (i32.const 8) "") ;; Value to send when calling contract. - (data (i32.const 16) "") ;; The key to store the contract address under. ) diff --git a/frame/contracts/fixtures/drain.wat b/frame/contracts/fixtures/drain.wat index d08e1dd0d2..1b3172b2a0 100644 --- a/frame/contracts/fixtures/drain.wat +++ b/frame/contracts/fixtures/drain.wat @@ -1,10 +1,15 @@ (module - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) - (import "env" "ext_balance" (func $ext_balance)) - (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32) (result i32))) + (import "env" "ext_balance" (func $ext_balance (param i32 i32))) + (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) + ;; [0, 8) reserved for $ext_balance output + + ;; [8, 16) length of the buffer + (data (i32.const 8) "\08") + + ;; [16, inf) zero initialized + (func $assert (param i32) (block $ok (br_if $ok @@ -18,34 +23,29 @@ (func (export "call") ;; Send entire remaining balance to the 0 address. - (call $ext_balance) + (call $ext_balance (i32.const 0) (i32.const 8)) ;; Balance should be encoded as a u64. (call $assert (i32.eq - (call $ext_scratch_size) + (i32.load (i32.const 8)) (i32.const 8) ) ) - ;; Read balance into memory. - (call $ext_scratch_read - (i32.const 8) ;; Pointer to write balance to - (i32.const 0) ;; Offset into scratch buffer - (i32.const 8) ;; Length of encoded balance - ) - ;; Self-destruct by sending full balance to the 0 address. (call $assert (i32.eq (call $ext_call - (i32.const 0) ;; Pointer to destination address + (i32.const 16) ;; Pointer to destination address (i32.const 8) ;; Length of destination address (i64.const 0) ;; How much gas to devote for the execution. 0 = all. 
- (i32.const 8) ;; Pointer to the buffer with value to transfer + (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer (i32.const 0) ;; Pointer to input data buffer address (i32.const 0) ;; Length of input data buffer + (i32.const 4294967295) ;; u32 max sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case ) (i32.const 0) ) diff --git a/frame/contracts/fixtures/restoration.wat b/frame/contracts/fixtures/restoration.wat index 07e11e9d38..4107587ada 100644 --- a/frame/contracts/fixtures/restoration.wat +++ b/frame/contracts/fixtures/restoration.wat @@ -1,5 +1,6 @@ (module (import "env" "ext_set_storage" (func $ext_set_storage (param i32 i32 i32))) + (import "env" "ext_input" (func $ext_input (param i32 i32))) (import "env" "ext_restore_to" (func $ext_restore_to (param i32 i32 i32 i32 i32 i32 i32 i32) @@ -7,7 +8,25 @@ ) (import "env" "memory" (memory 1 1)) + (func $assert (param i32) + (block $ok + (br_if $ok + (get_local 0) + ) + (unreachable) + ) + ) + (func (export "call") + ;; copy code hash to contract memory + (call $ext_input (i32.const 264) (i32.const 304)) + (call $assert + (i32.eq + (i32.load (i32.const 304)) + (i32.const 32) + ) + ) + (call $ext_restore_to ;; Pointer and length of the encoded dest buffer. (i32.const 256) @@ -49,12 +68,11 @@ ;; Address of bob (data (i32.const 256) "\02\00\00\00\00\00\00\00") - ;; Code hash of SET_RENT - (data (i32.const 264) - "\ab\d6\58\65\1e\83\6e\4a\18\0d\f2\6d\bc\42\ba\e9" - "\3d\64\76\e5\30\5b\33\46\bb\4d\43\99\38\21\ee\32" - ) + ;; [264, 296) Code hash of SET_RENT (copied here by ext_input) - ;; Rent allowance + ;; [296, 304) Rent allowance (data (i32.const 296) "\32\00\00\00\00\00\00\00") + + ;; [304, 308) Size of SET_RENT buffer + (data (i32.const 304) "\20") ) diff --git a/frame/contracts/fixtures/return_from_start_fn.wat b/frame/contracts/fixtures/return_from_start_fn.wat index ac898d4d94..ba73ef25ed 100644 --- a/frame/contracts/fixtures/return_from_start_fn.wat +++ b/frame/contracts/fixtures/return_from_start_fn.wat @@ -1,5 +1,5 @@ (module - (import "env" "ext_return" (func $ext_return (param i32 i32))) + (import "env" "ext_return" (func $ext_return (param i32 i32 i32))) (import "env" "ext_deposit_event" (func $ext_deposit_event (param i32 i32 i32 i32))) (import "env" "memory" (memory 1 1)) @@ -12,6 +12,7 @@ (i32.const 4) ;; The data buffer's length ) (call $ext_return + (i32.const 0) (i32.const 8) (i32.const 4) ) diff --git a/frame/contracts/fixtures/return_with_data.wat b/frame/contracts/fixtures/return_with_data.wat index 8cc84006a0..ad42845ae0 100644 --- a/frame/contracts/fixtures/return_with_data.wat +++ b/frame/contracts/fixtures/return_with_data.wat @@ -1,39 +1,33 @@ (module - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) - (import "env" "ext_scratch_write" (func $ext_scratch_write (param i32 i32))) + (import "env" "ext_input" (func $ext_input (param i32 i32))) + (import "env" "ext_return" (func $ext_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) + ;; [0, 128) buffer where input is copied + + ;; [128, 132) length of the input buffer + (data (i32.const 128) "\80") + ;; Deploy routine is the same as call. - (func (export "deploy") (result i32) + (func (export "deploy") (call $call) ) ;; Call reads the first 4 bytes (LE) as the exit status and returns the rest as output data. 
- (func $call (export "call") (result i32) - (local $buf_size i32) - (local $exit_status i32) - - ;; Find out the size of the scratch buffer - (set_local $buf_size (call $ext_scratch_size)) - - ;; Copy scratch buffer into this contract memory. - (call $ext_scratch_read - (i32.const 0) ;; The pointer where to store the scratch buffer contents, - (i32.const 0) ;; Offset from the start of the scratch buffer. - (get_local $buf_size) ;; Count of bytes to copy. - ) + (func $call (export "call") + ;; Copy input into this contracts memory. + (call $ext_input (i32.const 0) (i32.const 128)) ;; Copy all but the first 4 bytes of the input data as the output data. - (call $ext_scratch_write + ;; Use the first byte as exit status + (call $ext_return + (i32.load8_u (i32.const 0)) ;; Exit status (i32.const 4) ;; Pointer to the data to return. (i32.sub ;; Count of bytes to copy. - (get_local $buf_size) + (i32.load (i32.const 128)) (i32.const 4) ) ) - - ;; Return the first 4 bytes of the input data as the exit status. - (i32.load (i32.const 0)) + (unreachable) ) ) diff --git a/frame/contracts/fixtures/self_destruct.wat b/frame/contracts/fixtures/self_destruct.wat index 464b5c663e..baa38e4d47 100644 --- a/frame/contracts/fixtures/self_destruct.wat +++ b/frame/contracts/fixtures/self_destruct.wat @@ -1,11 +1,25 @@ (module - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) - (import "env" "ext_address" (func $ext_address)) - (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32) (result i32))) + (import "env" "ext_input" (func $ext_input (param i32 i32))) + (import "env" "ext_address" (func $ext_address (param i32 i32))) + (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) (import "env" "ext_terminate" (func $ext_terminate (param i32 i32))) (import "env" "memory" (memory 1 1)) + ;; [0, 8) reserved for $ext_address output + + ;; [8, 16) length of the buffer + (data (i32.const 8) "\08") + + ;; [16, 24) Address of django + (data (i32.const 16) "\04\00\00\00\00\00\00\00") + + ;; [24, 32) reserved for output of $ext_input + + ;; [32, 36) length of the buffer + (data (i32.const 32) "\04") + + ;; [36, inf) zero initialized + (func $assert (param i32) (block $ok (br_if $ok @@ -22,36 +36,32 @@ ;; This should trap instead of self-destructing since a contract cannot be removed live in ;; the execution stack cannot be removed. If the recursive call traps, then trap here as ;; well. - (if (call $ext_scratch_size) + (call $ext_input (i32.const 24) (i32.const 32)) + (if (i32.load (i32.const 32)) (then - (call $ext_address) + (call $ext_address (i32.const 0) (i32.const 8)) ;; Expect address to be 8 bytes. (call $assert (i32.eq - (call $ext_scratch_size) + (i32.load (i32.const 8)) (i32.const 8) ) ) - ;; Read own address into memory. - (call $ext_scratch_read - (i32.const 16) ;; Pointer to write address to - (i32.const 0) ;; Offset into scratch buffer - (i32.const 8) ;; Length of encoded address - ) - ;; Recursively call self with empty input data. (call $assert (i32.eq (call $ext_call - (i32.const 16) ;; Pointer to own address + (i32.const 0) ;; Pointer to own address (i32.const 8) ;; Length of own address (i64.const 0) ;; How much gas to devote for the execution. 0 = all. 
- (i32.const 8) ;; Pointer to the buffer with value to transfer + (i32.const 36) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer (i32.const 0) ;; Pointer to input data buffer address (i32.const 0) ;; Length of input data buffer + (i32.const 4294967295) ;; u32 max sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case ) (i32.const 0) ) @@ -60,13 +70,11 @@ (else ;; Try to terminate and give balance to django. (call $ext_terminate - (i32.const 32) ;; Pointer to beneficiary address + (i32.const 16) ;; Pointer to beneficiary address (i32.const 8) ;; Length of beneficiary address ) (unreachable) ;; ext_terminate never returns ) ) ) - ;; Address of django - (data (i32.const 32) "\04\00\00\00\00\00\00\00") ) diff --git a/frame/contracts/fixtures/self_destructing_constructor.wat b/frame/contracts/fixtures/self_destructing_constructor.wat index b19d6e5b50..3b99db001c 100644 --- a/frame/contracts/fixtures/self_destructing_constructor.wat +++ b/frame/contracts/fixtures/self_destructing_constructor.wat @@ -1,10 +1,15 @@ (module - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) - (import "env" "ext_balance" (func $ext_balance)) - (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32) (result i32))) + (import "env" "ext_balance" (func $ext_balance (param i32 i32))) + (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) + ;; [0, 8) reserved for $ext_balance output + + ;; [8, 16) length of the buffer + (data (i32.const 8) "\08") + + ;; [16, inf) zero initialized + (func $assert (param i32) (block $ok (br_if $ok @@ -16,34 +21,29 @@ (func (export "deploy") ;; Send entire remaining balance to the 0 address. - (call $ext_balance) + (call $ext_balance (i32.const 0) (i32.const 8)) ;; Balance should be encoded as a u64. (call $assert (i32.eq - (call $ext_scratch_size) + (i32.load (i32.const 8)) (i32.const 8) ) ) - ;; Read balance into memory. - (call $ext_scratch_read - (i32.const 8) ;; Pointer to write balance to - (i32.const 0) ;; Offset into scratch buffer - (i32.const 8) ;; Length of encoded balance - ) - ;; Self-destruct by sending full balance to the 0 address. (call $assert (i32.eq (call $ext_call - (i32.const 0) ;; Pointer to destination address + (i32.const 16) ;; Pointer to destination address (i32.const 8) ;; Length of destination address (i64.const 0) ;; How much gas to devote for the execution. 0 = all. 
- (i32.const 8) ;; Pointer to the buffer with value to transfer + (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer (i32.const 0) ;; Pointer to input data buffer address (i32.const 0) ;; Length of input data buffer + (i32.const 4294967295) ;; u32 max sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case ) (i32.const 0) ) diff --git a/frame/contracts/fixtures/set_rent.wat b/frame/contracts/fixtures/set_rent.wat index 3e6bd491bc..4e6424e720 100644 --- a/frame/contracts/fixtures/set_rent.wat +++ b/frame/contracts/fixtures/set_rent.wat @@ -1,10 +1,9 @@ (module - (import "env" "ext_transfer" (func $ext_transfer (param i32 i32 i32 i32) (result i32))) + (import "env" "ext_transfer" (func $ext_transfer (param i32 i32 i32 i32))) (import "env" "ext_set_storage" (func $ext_set_storage (param i32 i32 i32))) (import "env" "ext_clear_storage" (func $ext_clear_storage (param i32))) (import "env" "ext_set_rent_allowance" (func $ext_set_rent_allowance (param i32 i32))) - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) + (import "env" "ext_input" (func $ext_input (param i32 i32))) (import "env" "memory" (memory 1 1)) ;; insert a value of 4 bytes into storage @@ -25,12 +24,7 @@ ;; transfer 50 to CHARLIE (func $call_2 - (call $assert - (i32.eq - (call $ext_transfer (i32.const 68) (i32.const 8) (i32.const 76) (i32.const 8)) - (i32.const 0) - ) - ) + (call $ext_transfer (i32.const 68) (i32.const 8) (i32.const 76) (i32.const 8)) ) ;; do nothing @@ -48,8 +42,10 @@ ;; Dispatch the call according to input size (func (export "call") (local $input_size i32) + (i32.store (i32.const 64) (i32.const 64)) + (call $ext_input (i32.const 1024) (i32.const 64)) (set_local $input_size - (call $ext_scratch_size) + (i32.load (i32.const 64)) ) (block $IF_ELSE (block $IF_2 @@ -75,29 +71,27 @@ ;; Set into storage a 4 bytes value ;; Set call set_rent_allowance with input (func (export "deploy") - (local $input_size i32) - (set_local $input_size - (call $ext_scratch_size) - ) (call $ext_set_storage (i32.const 0) (i32.const 0) (i32.const 4) ) - (call $ext_scratch_read - (i32.const 0) + (call $ext_input (i32.const 0) - (get_local $input_size) + (i32.const 64) ) (call $ext_set_rent_allowance (i32.const 0) - (get_local $input_size) + (i32.load (i32.const 64)) ) ) ;; Encoding of 10 in balance (data (i32.const 0) "\28") + ;; Size of the buffer at address 0 + (data (i32.const 64) "\40") + ;; encoding of Charlies's account id (data (i32.const 68) "\03") diff --git a/frame/contracts/fixtures/storage_size.wat b/frame/contracts/fixtures/storage_size.wat index 8de9f42ee9..579aeda3a0 100644 --- a/frame/contracts/fixtures/storage_size.wat +++ b/frame/contracts/fixtures/storage_size.wat @@ -1,10 +1,22 @@ (module - (import "env" "ext_get_storage" (func $ext_get_storage (param i32) (result i32))) + (import "env" "ext_get_storage" (func $ext_get_storage (param i32 i32 i32) (result i32))) (import "env" "ext_set_storage" (func $ext_set_storage (param i32 i32 i32))) - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) + (import "env" "ext_input" (func $ext_input (param i32 i32))) (import "env" "memory" (memory 16 16)) + ;; [0, 32) storage key + (data (i32.const 0) "\01") + + ;; [32, 36) buffer where input is copied (expected size of storage item) + + 
;; [36, 40) size of the input buffer + (data (i32.const 36) "\04") + + ;; [40, 44) size of buffer for ext_get_storage set to max + (data (i32.const 40) "\FF\FF\FF\FF") + + ;; [44, inf) ext_get_storage buffer + (func $assert (param i32) (block $ok (br_if $ok @@ -15,21 +27,16 @@ ) (func (export "call") - ;; assert $ext_scratch_size == 8 + (call $ext_input (i32.const 32) (i32.const 36)) + + ;; assert input size == 4 (call $assert (i32.eq - (call $ext_scratch_size) + (i32.load (i32.const 36)) (i32.const 4) ) ) - ;; copy contents of the scratch buffer into the contract's memory. - (call $ext_scratch_read - (i32.const 32) ;; Pointer in memory to the place where to copy. - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 4) ;; Count of bytes to copy. - ) - ;; place a garbage value in storage, the size of which is specified by the call input. (call $ext_set_storage (i32.const 0) ;; Pointer to storage key @@ -41,6 +48,8 @@ (i32.eq (call $ext_get_storage (i32.const 0) ;; Pointer to storage key + (i32.const 44) ;; buffer where to copy result + (i32.const 40) ;; pointer to size of buffer ) (i32.const 0) ) @@ -48,7 +57,7 @@ (call $assert (i32.eq - (call $ext_scratch_size) + (i32.load (i32.const 40)) (i32.load (i32.const 32)) ) ) @@ -56,5 +65,4 @@ (func (export "deploy")) - (data (i32.const 0) "\01") ;; Storage key (32 B) ) diff --git a/frame/contracts/rpc/runtime-api/src/lib.rs b/frame/contracts/rpc/runtime-api/src/lib.rs index 84fd66826d..7d208cf776 100644 --- a/frame/contracts/rpc/runtime-api/src/lib.rs +++ b/frame/contracts/rpc/runtime-api/src/lib.rs @@ -35,12 +35,15 @@ pub enum ContractExecResult { /// /// There is a status code and, optionally, some data returned by the contract. Success { - /// Status code returned by the contract. - status: u8, + /// Flags that the contract passed along on returning to alter its exit behaviour. + /// Described in `pallet_contracts::exec::ReturnFlags`. + flags: u32, /// Output data returned by the contract. /// /// Can be empty. data: Vec, + /// How much gas was consumed by the call. + gas_consumed: u64, }, /// The contract execution either trapped or returned an error. Error, diff --git a/frame/contracts/rpc/src/lib.rs b/frame/contracts/rpc/src/lib.rs index 18496c13af..d99ed1e78a 100644 --- a/frame/contracts/rpc/src/lib.rs +++ b/frame/contracts/rpc/src/lib.rs @@ -92,10 +92,12 @@ pub struct CallRequest { pub enum RpcContractExecResult { /// Successful execution Success { - /// Status code - status: u8, + /// The return flags + flags: u32, /// Output data data: Bytes, + /// How much gas was consumed by the call. 
+ gas_consumed: u64, }, /// Error execution Error(()), @@ -104,9 +106,14 @@ pub enum RpcContractExecResult { impl From for RpcContractExecResult { fn from(r: ContractExecResult) -> Self { match r { - ContractExecResult::Success { status, data } => RpcContractExecResult::Success { - status, + ContractExecResult::Success { + flags, + data, + gas_consumed + } => RpcContractExecResult::Success { + flags, data: data.into(), + gas_consumed, }, ContractExecResult::Error => RpcContractExecResult::Error(()), } @@ -309,7 +316,7 @@ mod tests { let actual = serde_json::to_string(&res).unwrap(); assert_eq!(actual, expected); } - test(r#"{"success":{"status":5,"data":"0x1234"}}"#); + test(r#"{"success":{"flags":5,"data":"0x1234","gas_consumed":5000}}"#); test(r#"{"error":null}"#); } } diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 4e68aac615..27b843c5e1 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -19,11 +19,12 @@ use super::{CodeHash, Config, ContractAddressFor, Event, RawEvent, Trait, use crate::gas::{Gas, GasMeter, Token}; use crate::rent; use crate::storage; +use bitflags::bitflags; use sp_std::prelude::*; use sp_runtime::traits::{Bounded, Zero, Convert}; use frame_support::{ - storage::unhashed, dispatch::DispatchError, + dispatch::DispatchError, traits::{ExistenceRequirement, Currency, Time, Randomness}, weights::Weight, }; @@ -37,58 +38,31 @@ pub type StorageKey = [u8; 32]; /// A type that represents a topic of an event. At the moment a hash is used. pub type TopicOf = ::Hash; -/// A status code return to the source of a contract call or instantiation indicating success or -/// failure. A code of 0 indicates success and that changes are applied. All other codes indicate -/// failure and that changes are reverted. The particular code in the case of failure is opaque and -/// may be interpreted by the calling contract. -pub type StatusCode = u8; - -/// The status code indicating success. -pub const STATUS_SUCCESS: StatusCode = 0; +bitflags! { + /// Flags used by a contract to customize exit behaviour. + pub struct ReturnFlags: u32 { + /// If this bit is set all changes made by the contract exection are rolled back. + const REVERT = 0x0000_0001; + } +} /// Output of a contract call or instantiation which ran to completion. #[cfg_attr(test, derive(PartialEq, Eq, Debug))] pub struct ExecReturnValue { - pub status: StatusCode, + /// Flags passed along by `ext_return`. Empty when `ext_return` was never called. + pub flags: ReturnFlags, + /// Buffer passed along by `ext_return`. Empty when `ext_return` was never called. pub data: Vec, } impl ExecReturnValue { - /// Returns whether the call or instantiation exited with a successful status code. + /// We understand the absense of a revert flag as success. pub fn is_success(&self) -> bool { - self.status == STATUS_SUCCESS + !self.flags.contains(ReturnFlags::REVERT) } } -/// An error indicating some failure to execute a contract call or instantiation. This can include -/// VM-specific errors during execution (eg. division by 0, OOB access, failure to satisfy some -/// precondition of a system call, etc.) or errors with the orchestration (eg. out-of-gas errors, a -/// non-existent destination contract, etc.). -#[cfg_attr(test, derive(sp_runtime::RuntimeDebug))] -pub struct ExecError { - pub reason: DispatchError, - /// This is an allocated buffer that may be reused. The buffer must be cleared explicitly - /// before reuse. 
- pub buffer: Vec, -} - -pub type ExecResult = Result; - -/// Evaluate an expression of type Result<_, &'static str> and either resolve to the value if Ok or -/// wrap the error string into an ExecutionError with the provided buffer and return from the -/// enclosing function. This macro is used instead of .map_err(..)? in order to avoid taking -/// ownership of buffer unless there is an error. -#[macro_export] -macro_rules! try_or_exec_error { - ($e:expr, $buffer:expr) => { - match $e { - Ok(val) => val, - Err(reason) => return Err( - $crate::exec::ExecError { reason: reason.into(), buffer: $buffer } - ), - } - } -} +pub type ExecResult = Result; /// An interface that provides access to the external environment in which the /// smart-contract is executed. @@ -118,7 +92,7 @@ pub trait Ext { value: BalanceOf, gas_meter: &mut GasMeter, input_data: Vec, - ) -> Result<(AccountIdOf, ExecReturnValue), ExecError>; + ) -> Result<(AccountIdOf, ExecReturnValue), DispatchError>; /// Transfer some amount of funds into the specified account. fn transfer( @@ -208,11 +182,6 @@ pub trait Ext { /// Returns the maximum allowed size of a storage item. fn max_value_size(&self) -> u32; - /// Returns the value of runtime under the given key. - /// - /// Returns `None` if the value doesn't exist. - fn get_runtime_storage(&self, key: &[u8]) -> Option>; - /// Returns the price for the specified amount of weight. fn get_weight_price(&self, weight: Weight) -> BalanceOf; } @@ -331,20 +300,14 @@ where input_data: Vec, ) -> ExecResult { if self.depth == self.config.max_depth as usize { - return Err(ExecError { - reason: "reached maximum depth, cannot make a call".into(), - buffer: input_data, - }); + Err("reached maximum depth, cannot make a call")? } if gas_meter .charge(self.config, ExecFeeToken::Call) .is_out_of_gas() { - return Err(ExecError { - reason: "not enough gas to pay base call fee".into(), - buffer: input_data, - }); + Err("not enough gas to pay base call fee")? } // Assumption: `collect_rent` doesn't collide with overlay because @@ -354,10 +317,7 @@ where // Calls to dead contracts always fail. if let Some(ContractInfo::Tombstone(_)) = contract_info { - return Err(ExecError { - reason: "contract has been evicted".into(), - buffer: input_data, - }); + Err("contract has been evicted")? }; let caller = self.self_account.clone(); @@ -365,27 +325,21 @@ where self.with_nested_context(dest.clone(), dest_trie_id, |nested| { if value > BalanceOf::::zero() { - try_or_exec_error!( - transfer( - gas_meter, - TransferCause::Call, - &caller, - &dest, - value, - nested, - ), - input_data - ); + transfer( + gas_meter, + TransferCause::Call, + &caller, + &dest, + value, + nested, + )? } // If code_hash is not none, then the destination account is a live contract, otherwise // it is a regular account since tombstone accounts have already been rejected. 
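
The rewrite above drops the `ExecError { reason, buffer }` wrapper and the `try_or_exec_error!` macro in favour of plain `DispatchError` plus `?`. A minimal sketch of that pattern, assuming only `sp_runtime` (which provides `From<&'static str> for DispatchError`, turning a string literal into `DispatchError::Other`):

use sp_runtime::DispatchError;

/// Stand-in for the depth check in `call`: `Err(..)?` both builds the
/// `DispatchError` from the literal and returns early, no buffer juggling.
fn check_depth(depth: usize, max_depth: usize) -> Result<(), DispatchError> {
    if depth == max_depth {
        Err("reached maximum depth, cannot make a call")?
    }
    Ok(())
}

fn main() {
    assert!(check_depth(1, 32).is_ok());
    assert_eq!(
        check_depth(32, 32),
        Err(DispatchError::Other("reached maximum depth, cannot make a call")),
    );
}
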
match storage::code_hash::(&dest) { Ok(dest_code_hash) => { - let executable = try_or_exec_error!( - nested.loader.load_main(&dest_code_hash), - input_data - ); + let executable = nested.loader.load_main(&dest_code_hash)?; let output = nested.vm .execute( &executable, @@ -395,7 +349,7 @@ where )?; Ok(output) } - Err(storage::ContractAbsentError) => Ok(ExecReturnValue { status: STATUS_SUCCESS, data: Vec::new() }), + Err(storage::ContractAbsentError) => Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }), } }) } @@ -406,22 +360,16 @@ where gas_meter: &mut GasMeter, code_hash: &CodeHash, input_data: Vec, - ) -> Result<(T::AccountId, ExecReturnValue), ExecError> { + ) -> Result<(T::AccountId, ExecReturnValue), DispatchError> { if self.depth == self.config.max_depth as usize { - return Err(ExecError { - reason: "reached maximum depth, cannot instantiate".into(), - buffer: input_data, - }); + Err("reached maximum depth, cannot instantiate")? } if gas_meter .charge(self.config, ExecFeeToken::Instantiate) .is_out_of_gas() { - return Err(ExecError { - reason: "not enough gas to pay base instantiate fee".into(), - buffer: input_data, - }); + Err("not enough gas to pay base instantiate fee")? } let caller = self.self_account.clone(); @@ -437,36 +385,27 @@ where let dest_trie_id = ::TrieIdGenerator::trie_id(&dest); let output = self.with_nested_context(dest.clone(), Some(dest_trie_id), |nested| { - try_or_exec_error!( - storage::place_contract::( - &dest, - nested - .self_trie_id - .clone() - .expect("the nested context always has to have self_trie_id"), - code_hash.clone() - ), - input_data - ); + storage::place_contract::( + &dest, + nested + .self_trie_id + .clone() + .expect("the nested context always has to have self_trie_id"), + code_hash.clone() + )?; // Send funds unconditionally here. If the `endowment` is below existential_deposit // then error will be returned here. - try_or_exec_error!( - transfer( - gas_meter, - TransferCause::Instantiate, - &caller, - &dest, - endowment, - nested, - ), - input_data - ); - - let executable = try_or_exec_error!( - nested.loader.load_init(&code_hash), - input_data - ); + transfer( + gas_meter, + TransferCause::Instantiate, + &caller, + &dest, + endowment, + nested, + )?; + + let executable = nested.loader.load_init(&code_hash)?; let output = nested.vm .execute( &executable, @@ -477,10 +416,7 @@ where // Error out if insufficient remaining balance. if T::Currency::free_balance(&dest) < nested.config.existential_deposit { - return Err(ExecError { - reason: "insufficient remaining balance".into(), - buffer: output.data, - }); + Err("insufficient remaining balance")? } // Deposit an instantiation event. 
@@ -518,7 +454,7 @@ where frame_support::storage::with_transaction(|| { let output = func(&mut nested); match output { - Ok(ref rv) if rv.is_success() => Commit(output), + Ok(ref rv) if !rv.flags.contains(ReturnFlags::REVERT) => Commit(output), _ => Rollback(output), } }) @@ -681,7 +617,7 @@ where endowment: BalanceOf, gas_meter: &mut GasMeter, input_data: Vec, - ) -> Result<(AccountIdOf, ExecReturnValue), ExecError> { + ) -> Result<(AccountIdOf, ExecReturnValue), DispatchError> { self.ctx.instantiate(endowment, gas_meter, code_hash, input_data) } @@ -839,10 +775,6 @@ where self.ctx.config.max_value_size } - fn get_runtime_storage(&self, key: &[u8]) -> Option> { - unhashed::get_raw(&key) - } - fn get_weight_price(&self, weight: Weight) -> BalanceOf { T::WeightPrice::convert(weight) } @@ -867,11 +799,11 @@ fn deposit_event( mod tests { use super::{ BalanceOf, Event, ExecFeeToken, ExecResult, ExecutionContext, Ext, Loader, - RawEvent, TransferFeeKind, TransferFeeToken, Vm, + RawEvent, TransferFeeKind, TransferFeeToken, Vm, ReturnFlags, }; use crate::{ gas::GasMeter, tests::{ExtBuilder, Test, MetaEvent}, - exec::{ExecReturnValue, ExecError, STATUS_SUCCESS}, CodeHash, Config, + exec::ExecReturnValue, CodeHash, Config, gas::Gas, storage, }; @@ -980,7 +912,7 @@ mod tests { } fn exec_success() -> ExecResult { - Ok(ExecReturnValue { status: STATUS_SUCCESS, data: Vec::new() }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) } #[test] @@ -1096,7 +1028,7 @@ mod tests { let vm = MockVm::new(); let mut loader = MockLoader::empty(); let return_ch = loader.insert( - |_| Ok(ExecReturnValue { status: 1, data: Vec::new() }) + |_| Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Vec::new() }) ); ExtBuilder::default().build().execute_with(|| { @@ -1228,10 +1160,7 @@ mod tests { assert_matches!( result, - Err(ExecError { - reason: DispatchError::Module { message: Some("InsufficientBalance"), .. }, - buffer: _, - }) + Err(DispatchError::Module { message: Some("InsufficientBalance"), .. }) ); assert_eq!(get_balance(&origin), 0); assert_eq!(get_balance(&dest), 0); @@ -1248,7 +1177,7 @@ mod tests { let vm = MockVm::new(); let mut loader = MockLoader::empty(); let return_ch = loader.insert( - |_| Ok(ExecReturnValue { status: STATUS_SUCCESS, data: vec![1, 2, 3, 4] }) + |_| Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![1, 2, 3, 4] }) ); ExtBuilder::default().build().execute_with(|| { @@ -1279,7 +1208,7 @@ mod tests { let vm = MockVm::new(); let mut loader = MockLoader::empty(); let return_ch = loader.insert( - |_| Ok(ExecReturnValue { status: 1, data: vec![1, 2, 3, 4] }) + |_| Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![1, 2, 3, 4] }) ); ExtBuilder::default().build().execute_with(|| { @@ -1370,10 +1299,7 @@ mod tests { // Verify that we've got proper error and set `reached_bottom`. 
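
The `with_transaction` hunk above commits storage changes only when the callee did not set `REVERT`. Below is a simplified, self-contained analogue of that commit-or-rollback decision: a plain `HashMap` snapshot instead of `frame_support::storage::with_transaction`, and the flags reduced to a bool.

use std::collections::HashMap;

type Storage = HashMap<Vec<u8>, Vec<u8>>;

/// Run `f` against a copy of `storage` and keep the copy only if the callee
/// neither errored nor asked for a revert, the same decision the
/// `Commit`/`Rollback` arms make in `with_nested_context`.
fn execute_transactional(
    storage: &mut Storage,
    f: impl FnOnce(&mut Storage) -> Result<bool, &'static str>,
) -> Result<bool, &'static str> {
    let mut working_copy = storage.clone();
    let outcome = f(&mut working_copy);
    if let Ok(true) = outcome {
        *storage = working_copy; // success: commit
    } // otherwise: revert flag or error, keep the old state
    outcome
}

fn main() {
    let mut storage = Storage::new();

    // Successful call: the write survives.
    let _ = execute_transactional(&mut storage, |s| {
        let _ = s.insert(b"key".to_vec(), vec![1u8]);
        Ok(true)
    });

    // "Reverting" call: the write is rolled back.
    let _ = execute_transactional(&mut storage, |s| {
        let _ = s.insert(b"key".to_vec(), vec![2u8]);
        Ok(false) // e.g. ReturnFlags::REVERT was set
    });

    assert_eq!(storage.get(b"key".as_slice()), Some(&vec![1u8]));
}
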
assert_matches!( r, - Err(ExecError { - reason: DispatchError::Other("reached maximum depth, cannot make a call"), - buffer: _, - }) + Err(DispatchError::Other("reached maximum depth, cannot make a call")) ); *reached_bottom = true; } else { @@ -1517,7 +1443,7 @@ mod tests { let mut loader = MockLoader::empty(); let dummy_ch = loader.insert( - |_| Ok(ExecReturnValue { status: STATUS_SUCCESS, data: vec![80, 65, 83, 83] }) + |_| Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![80, 65, 83, 83] }) ); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { @@ -1550,7 +1476,7 @@ mod tests { let mut loader = MockLoader::empty(); let dummy_ch = loader.insert( - |_| Ok(ExecReturnValue { status: 1, data: vec![70, 65, 73, 76] }) + |_| Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![70, 65, 73, 76] }) ); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { @@ -1627,7 +1553,7 @@ mod tests { let mut loader = MockLoader::empty(); let dummy_ch = loader.insert( - |_| Err(ExecError { reason: "It's a trap!".into(), buffer: Vec::new() }) + |_| Err("It's a trap!".into()) ); let instantiator_ch = loader.insert({ let dummy_ch = dummy_ch.clone(); @@ -1640,7 +1566,7 @@ mod tests { ctx.gas_meter, vec![] ), - Err(ExecError { reason: DispatchError::Other("It's a trap!"), buffer: _ }) + Err(DispatchError::Other("It's a trap!")) ); exec_success() @@ -1691,10 +1617,7 @@ mod tests { &terminate_ch, vec![], ), - Err(ExecError { - reason: DispatchError::Other("insufficient remaining balance"), - buffer - }) if buffer == Vec::::new() + Err(DispatchError::Other("insufficient remaining balance")) ); assert_eq!( diff --git a/frame/contracts/src/gas.rs b/frame/contracts/src/gas.rs index 38f231c008..0ae1952de0 100644 --- a/frame/contracts/src/gas.rs +++ b/frame/contracts/src/gas.rs @@ -178,8 +178,8 @@ impl GasMeter { } } - /// Returns how much gas left from the initial budget. - fn gas_spent(&self) -> Gas { + /// Returns how much gas was used. + pub fn gas_spent(&self) -> Gas { self.gas_limit - self.gas_left } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 182c6cd330..c00e07c062 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -93,7 +93,7 @@ use crate::exec::ExecutionContext; use crate::wasm::{WasmLoader, WasmVm}; pub use crate::gas::{Gas, GasMeter}; -pub use crate::exec::{ExecResult, ExecReturnValue, ExecError, StatusCode}; +pub use crate::exec::{ExecResult, ExecReturnValue}; #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; @@ -410,7 +410,11 @@ decl_error! { /// Tombstones don't match. InvalidTombstone, /// An origin TrieId written in the current block. - InvalidContractOrigin + InvalidContractOrigin, + /// The executed contract exhausted its gas limit. + OutOfGas, + /// The output buffer supplied to a contract API call was too small. + OutputBufferTooSmall, } } @@ -515,7 +519,7 @@ decl_module! { let result = Self::execute_wasm(origin, &mut gas_meter, |ctx, gas_meter| { ctx.call(dest, value, gas_meter, data) }); - gas_meter.into_dispatch_result(result.map_err(|e| e.reason)) + gas_meter.into_dispatch_result(result) } /// Instantiates a new contract from the `codehash` generated by `put_code`, optionally transferring some balance. @@ -543,7 +547,7 @@ decl_module! 
{ ctx.instantiate(endowment, gas_meter, &code_hash, data) .map(|(_address, output)| output) }); - gas_meter.into_dispatch_result(result.map_err(|e| e.reason)) + gas_meter.into_dispatch_result(result) } /// Allows block producers to claim a small reward for evicting a contract. If a block producer @@ -587,17 +591,22 @@ impl Module { /// /// This function is similar to `Self::call`, but doesn't perform any address lookups and better /// suitable for calling directly from Rust. + /// + /// It returns the exection result and the amount of used weight. pub fn bare_call( origin: T::AccountId, dest: T::AccountId, value: BalanceOf, gas_limit: Gas, input_data: Vec, - ) -> ExecResult { + ) -> (ExecResult, Gas) { let mut gas_meter = GasMeter::new(gas_limit); - Self::execute_wasm(origin, &mut gas_meter, |ctx, gas_meter| { - ctx.call(dest, value, gas_meter, input_data) - }) + ( + Self::execute_wasm(origin, &mut gas_meter, |ctx, gas_meter| { + ctx.call(dest, value, gas_meter, input_data) + }), + gas_meter.gas_spent(), + ) } /// Query storage of a specified contract under a specified key. @@ -673,10 +682,6 @@ decl_event! { /// Triggered when the current schedule is updated. ScheduleUpdated(u32), - /// A call was dispatched from the given account. The bool signals whether it was - /// successful execution or not. - Dispatched(AccountId, bool), - /// An event deposited upon execution of a contract from the account. ContractExecution(AccountId, Vec), } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index a54bfad654..7af514f5dc 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -17,6 +17,7 @@ use crate::{ BalanceOf, ContractAddressFor, ContractInfo, ContractInfoOf, GenesisConfig, Module, RawAliveContractInfo, RawEvent, Trait, TrieId, Schedule, TrieIdGenerator, gas::Gas, + Error, }; use assert_matches::assert_matches; use hex_literal::*; @@ -478,7 +479,7 @@ fn run_out_of_gas() { 67_500_000, vec![], ), - "ran out of gas during contract execution" + Error::::OutOfGas, ); }); } @@ -1169,7 +1170,7 @@ fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: DJANGO, 0, GAS_LIMIT, - vec![], + set_rent_code_hash.as_ref().to_vec(), ) }; @@ -1294,7 +1295,7 @@ fn storage_max_value_limit() { Origin::signed(ALICE), BOB, 0, - GAS_LIMIT, + GAS_LIMIT * 2, // we are copying a huge buffer Encode::encode(&self::MaxValueSize::get()), )); @@ -1594,8 +1595,8 @@ fn crypto_hashes() { 0, GAS_LIMIT, params, - ).unwrap(); - assert_eq!(result.status, 0); + ).0.unwrap(); + assert!(result.is_success()); let expected = hash_fn(input.as_ref()); assert_eq!(&result.data[..*expected_size], &*expected); } diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 3d2f5b154f..500c0f4dcc 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -151,9 +151,8 @@ impl<'a, T: Trait> crate::exec::Vm for WasmVm<'a> { mod tests { use super::*; use std::collections::HashMap; - use std::cell::RefCell; use sp_core::H256; - use crate::exec::{Ext, StorageKey, ExecError, ExecReturnValue, STATUS_SUCCESS}; + use crate::exec::{Ext, StorageKey, ExecReturnValue, ReturnFlags}; use crate::gas::{Gas, GasMeter}; use crate::tests::{Test, Call}; use crate::wasm::prepare::prepare_contract; @@ -210,17 +209,6 @@ mod tests { // (topics, data) events: Vec<(Vec, Vec)>, next_account_id: u64, - - /// Runtime storage keys works the following way. 
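
With `gas_spent` now public, `bare_call` reports the consumed gas alongside the execution result, which is what lets the RPC layer fill `gas_consumed`. A rough model of that shape (standalone toy types, `Gas = u64`, not the pallet's `GasMeter`):

type Gas = u64;

/// Toy stand-in for `GasMeter`: only tracks the limit and what is left.
struct GasMeter {
    gas_limit: Gas,
    gas_left: Gas,
}

impl GasMeter {
    fn new(gas_limit: Gas) -> Self {
        Self { gas_limit, gas_left: gas_limit }
    }

    fn charge(&mut self, amount: Gas) {
        self.gas_left = self.gas_left.saturating_sub(amount);
    }

    /// Same formula as the now-public `GasMeter::gas_spent`.
    fn gas_spent(&self) -> Gas {
        self.gas_limit - self.gas_left
    }
}

/// Shape of the new `bare_call`: the result is returned together with the
/// gas that was consumed while producing it.
fn bare_call(gas_limit: Gas) -> (Result<Vec<u8>, &'static str>, Gas) {
    let mut gas_meter = GasMeter::new(gas_limit);
    gas_meter.charge(1_000); // pretend the contract used some gas
    (Ok(vec![0x12, 0x34]), gas_meter.gas_spent())
}

fn main() {
    let (result, gas_consumed) = bare_call(10_000);
    assert!(result.is_ok());
    assert_eq!(gas_consumed, 1_000);
}
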
- /// - /// - If the test code requests a value and it doesn't exist in this storage map then a - /// panic happens. - /// - If the value does exist it is returned and then removed from the map. So a panic - /// happens if the same value is requested for the second time. - /// - /// This behavior is used to prevent mixing up an access to unexpected location and empty - /// cell. - runtime_storage_keys: RefCell, Option>>>, } impl Ext for MockExt { @@ -238,7 +226,7 @@ mod tests { endowment: u64, gas_meter: &mut GasMeter, data: Vec, - ) -> Result<(u64, ExecReturnValue), ExecError> { + ) -> Result<(u64, ExecReturnValue), DispatchError> { self.instantiates.push(InstantiateEntry { code_hash: code_hash.clone(), endowment, @@ -251,7 +239,7 @@ mod tests { Ok(( address, ExecReturnValue { - status: STATUS_SUCCESS, + flags: ReturnFlags::empty(), data: Vec::new(), }, )) @@ -285,7 +273,7 @@ mod tests { }); // Assume for now that it was just a plain transfer. // TODO: Add tests for different call outcomes. - Ok(ExecReturnValue { status: STATUS_SUCCESS, data: Vec::new() }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) } fn terminate( &mut self, @@ -358,18 +346,6 @@ mod tests { fn max_value_size(&self) -> u32 { 16_384 } - fn get_runtime_storage(&self, key: &[u8]) -> Option> { - let opt_value = self.runtime_storage_keys - .borrow_mut() - .remove(key); - opt_value.unwrap_or_else(|| - panic!( - "{:?} doesn't exist. values that do exist {:?}", - key, - self.runtime_storage_keys - ) - ) - } fn get_weight_price(&self, weight: Weight) -> BalanceOf { BalanceOf::::from(1312_u32).saturating_mul(weight.into()) } @@ -390,7 +366,7 @@ mod tests { value: u64, gas_meter: &mut GasMeter, input_data: Vec, - ) -> Result<(u64, ExecReturnValue), ExecError> { + ) -> Result<(u64, ExecReturnValue), DispatchError> { (**self).instantiate(code, value, gas_meter, input_data) } fn transfer( @@ -470,9 +446,6 @@ mod tests { fn max_value_size(&self) -> u32 { (**self).max_value_size() } - fn get_runtime_storage(&self, key: &[u8]) -> Option> { - (**self).get_runtime_storage(key) - } fn get_weight_price(&self, weight: Weight) -> BalanceOf { (**self).get_weight_price(weight) } @@ -511,16 +484,14 @@ mod tests { ;; value_ptr: u32, ;; value_len: u32, ;;) -> u32 - (import "env" "ext_transfer" (func $ext_transfer (param i32 i32 i32 i32) (result i32))) + (import "env" "ext_transfer" (func $ext_transfer (param i32 i32 i32 i32))) (import "env" "memory" (memory 1 1)) (func (export "call") - (drop - (call $ext_transfer - (i32.const 4) ;; Pointer to "account" address. - (i32.const 8) ;; Length of "account" address. - (i32.const 12) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer. - ) + (call $ext_transfer + (i32.const 4) ;; Pointer to "account" address. + (i32.const 8) ;; Length of "account" address. + (i32.const 12) ;; Pointer to the buffer with value to transfer + (i32.const 8) ;; Length of the buffer with value to transfer. 
) ) (func (export "deploy")) @@ -551,7 +522,7 @@ mod tests { to: 7, value: 153, data: Vec::new(), - gas_left: 9989000000, + gas_left: 9989500000, }] ); } @@ -565,9 +536,11 @@ mod tests { ;; value_ptr: u32, ;; value_len: u32, ;; input_data_ptr: u32, - ;; input_data_len: u32 + ;; input_data_len: u32, + ;; output_ptr: u32, + ;; output_len_ptr: u32 ;;) -> u32 - (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32) (result i32))) + (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) (func (export "call") (drop @@ -579,6 +552,8 @@ mod tests { (i32.const 8) ;; Length of the buffer with value to transfer. (i32.const 20) ;; Pointer to input data buffer address (i32.const 4) ;; Length of input data buffer + (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case ) ) ) @@ -611,7 +586,7 @@ mod tests { to: 9, value: 6, data: vec![1, 2, 3, 4], - gas_left: 9985500000, + gas_left: 9984500000, }] ); } @@ -626,8 +601,13 @@ mod tests { ;; value_len: u32, ;; input_data_ptr: u32, ;; input_data_len: u32, + ;; input_data_len: u32, + ;; address_ptr: u32, + ;; address_len_ptr: u32, + ;; output_ptr: u32, + ;; output_len_ptr: u32 ;; ) -> u32 - (import "env" "ext_instantiate" (func $ext_instantiate (param i32 i32 i64 i32 i32 i32 i32) (result i32))) + (import "env" "ext_instantiate" (func $ext_instantiate (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) (func (export "call") (drop @@ -639,6 +619,10 @@ mod tests { (i32.const 8) ;; Length of the buffer with value to transfer (i32.const 12) ;; Pointer to input data buffer address (i32.const 4) ;; Length of input data buffer + (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy address + (i32.const 0) ;; Length is ignored in this case + (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case ) ) ) @@ -673,7 +657,7 @@ mod tests { code_hash: [0x11; 32].into(), endowment: 3, data: vec![1, 2, 3, 4], - gas_left: 9973500000, + gas_left: 9971500000, }] ); } @@ -728,9 +712,11 @@ mod tests { ;; value_ptr: u32, ;; value_len: u32, ;; input_data_ptr: u32, - ;; input_data_len: u32 + ;; input_data_len: u32, + ;; output_ptr: u32, + ;; output_len_ptr: u32 ;;) -> u32 - (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32) (result i32))) + (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) (func (export "call") (drop @@ -742,6 +728,8 @@ mod tests { (i32.const 8) ;; Length of the buffer with value to transfer. 
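
The fixtures above pass 4294967295 (`u32::max_value()`) as the output pointer to skip copying the result. A hedged host-side sketch of that sentinel convention, independent of the sandbox types, which also shows why the accompanying length argument can be ignored in that case:

/// Sentinel output pointer: the caller is not interested in the output.
const SKIP_OUTPUT: u32 = u32::MAX; // 4294967295, as spelled out in the wat fixtures

/// Copy `buf` into the guest buffer unless the sentinel was supplied.
/// `memory` stands in for sandbox memory; error strings stand in for traps.
fn copy_output(memory: &mut [u8], out_ptr: u32, buf: &[u8]) -> Result<(), &'static str> {
    if out_ptr == SKIP_OUTPUT {
        // Nothing to do; this is why the length "is ignored in this case".
        return Ok(());
    }
    let start = out_ptr as usize;
    let end = start.checked_add(buf.len()).ok_or("out of bounds")?;
    memory
        .get_mut(start..end)
        .ok_or("out of bounds")?
        .copy_from_slice(buf);
    Ok(())
}

fn main() {
    let mut memory = [0u8; 8];
    // Caller supplied a real buffer: the output is copied.
    copy_output(&mut memory, 4, &[0xAA, 0xBB]).unwrap();
    assert_eq!(&memory[4..6], &[0xAA, 0xBB]);
    // Caller supplied the sentinel: memory is left untouched.
    copy_output(&mut memory, SKIP_OUTPUT, &[0xCC]).unwrap();
    assert_eq!(&memory[..4], &[0, 0, 0, 0]);
}
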
(i32.const 20) ;; Pointer to input data buffer address (i32.const 4) ;; Length of input data buffer + (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this cas ) ) ) @@ -781,12 +769,21 @@ mod tests { const CODE_GET_STORAGE: &str = r#" (module - (import "env" "ext_get_storage" (func $ext_get_storage (param i32) (result i32))) - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) - (import "env" "ext_return" (func $ext_return (param i32 i32))) + (import "env" "ext_get_storage" (func $ext_get_storage (param i32 i32 i32) (result i32))) + (import "env" "ext_return" (func $ext_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) + ;; [0, 32) key for get storage + (data (i32.const 0) + "\11\11\11\11\11\11\11\11\11\11\11\11\11\11\11\11" + "\11\11\11\11\11\11\11\11\11\11\11\11\11\11\11\11" + ) + + ;; [32, 36) buffer size = 128 bytes + (data (i32.const 32) "\80") + + ;; [36; inf) buffer where the result is copied + (func $assert (param i32) (block $ok (br_if $ok @@ -799,12 +796,13 @@ mod tests { (func (export "call") (local $buf_size i32) - - ;; Load a storage value into the scratch buf. + ;; Load a storage value into contract memory. (call $assert (i32.eq (call $ext_get_storage - (i32.const 4) ;; The pointer to the storage key to fetch + (i32.const 0) ;; The pointer to the storage key to fetch + (i32.const 36) ;; Pointer to the output buffer + (i32.const 32) ;; Pointer to the size of the buffer ) ;; Return value 0 means that the value is found and there were @@ -813,23 +811,14 @@ mod tests { ) ) - ;; Find out the size of the scratch buffer + ;; Find out the size of the buffer (set_local $buf_size - (call $ext_scratch_size) - ) - - ;; Copy scratch buffer into this contract memory. - (call $ext_scratch_read - (i32.const 36) ;; The pointer where to store the scratch buffer contents, - ;; 36 = 4 + 32 - (i32.const 0) ;; Offset from the start of the scratch buffer. - (get_local ;; Count of bytes to copy. - $buf_size - ) + (i32.load (i32.const 32)) ) ;; Return the contents of the buffer (call $ext_return + (i32.const 0) (i32.const 36) (get_local $buf_size) ) @@ -839,16 +828,11 @@ mod tests { ) (func (export "deploy")) - - (data (i32.const 4) - "\11\11\11\11\11\11\11\11\11\11\11\11\11\11\11\11" - "\11\11\11\11\11\11\11\11\11\11\11\11\11\11\11\11" - ) ) "#; #[test] - fn get_storage_puts_data_into_scratch_buf() { + fn get_storage_puts_data_into_buf() { let mut mock_ext = MockExt::default(); mock_ext .storage @@ -861,18 +845,18 @@ mod tests { &mut GasMeter::new(GAS_LIMIT), ).unwrap(); - assert_eq!(output, ExecReturnValue { status: STATUS_SUCCESS, data: [0x22; 32].to_vec() }); + assert_eq!(output, ExecReturnValue { flags: ReturnFlags::empty(), data: [0x22; 32].to_vec() }); } - /// calls `ext_caller`, loads the address from the scratch buffer and - /// compares it with the constant 42. + /// calls `ext_caller` and compares the result with the constant 42. 
const CODE_CALLER: &str = r#" (module - (import "env" "ext_caller" (func $ext_caller)) - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) + (import "env" "ext_caller" (func $ext_caller (param i32 i32))) (import "env" "memory" (memory 1 1)) + ;; size of our buffer is 32 bytes + (data (i32.const 32) "\20") + (func $assert (param i32) (block $ok (br_if $ok @@ -883,30 +867,21 @@ mod tests { ) (func (export "call") - ;; fill the scratch buffer with the caller. - (call $ext_caller) + ;; fill the buffer with the caller. + (call $ext_caller (i32.const 0) (i32.const 32)) - ;; assert $ext_scratch_size == 8 + ;; assert len == 8 (call $assert (i32.eq - (call $ext_scratch_size) + (i32.load (i32.const 32)) (i32.const 8) ) ) - ;; copy contents of the scratch buffer into the contract's memory. - (call $ext_scratch_read - (i32.const 8) ;; Pointer in memory to the place where to copy. - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 8) ;; Count of bytes to copy. - ) - ;; assert that contents of the buffer is equal to the i64 value of 42. (call $assert (i64.eq - (i64.load - (i32.const 8) - ) + (i64.load (i32.const 0)) (i64.const 42) ) ) @@ -926,15 +901,15 @@ mod tests { ).unwrap(); } - /// calls `ext_address`, loads the address from the scratch buffer and - /// compares it with the constant 69. + /// calls `ext_address` and compares the result with the constant 69. const CODE_ADDRESS: &str = r#" (module - (import "env" "ext_address" (func $ext_address)) - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) + (import "env" "ext_address" (func $ext_address (param i32 i32))) (import "env" "memory" (memory 1 1)) + ;; size of our buffer is 32 bytes + (data (i32.const 32) "\20") + (func $assert (param i32) (block $ok (br_if $ok @@ -945,30 +920,21 @@ mod tests { ) (func (export "call") - ;; fill the scratch buffer with the self address. - (call $ext_address) + ;; fill the buffer with the self address. + (call $ext_address (i32.const 0) (i32.const 32)) - ;; assert $ext_scratch_size == 8 + ;; assert size == 8 (call $assert (i32.eq - (call $ext_scratch_size) + (i32.load (i32.const 32)) (i32.const 8) ) ) - ;; copy contents of the scratch buffer into the contract's memory. - (call $ext_scratch_read - (i32.const 8) ;; Pointer in memory to the place where to copy. - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 8) ;; Count of bytes to copy. - ) - ;; assert that contents of the buffer is equal to the i64 value of 69. 
(call $assert (i64.eq - (i64.load - (i32.const 8) - ) + (i64.load (i32.const 0)) (i64.const 69) ) ) @@ -990,11 +956,12 @@ mod tests { const CODE_BALANCE: &str = r#" (module - (import "env" "ext_balance" (func $ext_balance)) - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) + (import "env" "ext_balance" (func $ext_balance (param i32 i32))) (import "env" "memory" (memory 1 1)) + ;; size of our buffer is 32 bytes + (data (i32.const 32) "\20") + (func $assert (param i32) (block $ok (br_if $ok @@ -1005,30 +972,21 @@ mod tests { ) (func (export "call") - ;; This stores the balance in the scratch buffer - (call $ext_balance) + ;; This stores the balance in the buffer + (call $ext_balance (i32.const 0) (i32.const 32)) - ;; assert $ext_scratch_size == 8 + ;; assert len == 8 (call $assert (i32.eq - (call $ext_scratch_size) + (i32.load (i32.const 32)) (i32.const 8) ) ) - ;; copy contents of the scratch buffer into the contract's memory. - (call $ext_scratch_read - (i32.const 8) ;; Pointer in memory to the place where to copy. - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 8) ;; Count of bytes to copy. - ) - ;; assert that contents of the buffer is equal to the i64 value of 228. (call $assert (i64.eq - (i64.load - (i32.const 8) - ) + (i64.load (i32.const 0)) (i64.const 228) ) ) @@ -1050,11 +1008,12 @@ mod tests { const CODE_GAS_PRICE: &str = r#" (module - (import "env" "ext_gas_price" (func $ext_gas_price (param i64))) - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) + (import "env" "ext_weight_to_fee" (func $ext_weight_to_fee (param i64 i32 i32))) (import "env" "memory" (memory 1 1)) + ;; size of our buffer is 32 bytes + (data (i32.const 32) "\20") + (func $assert (param i32) (block $ok (br_if $ok @@ -1065,31 +1024,22 @@ mod tests { ) (func (export "call") - ;; This stores the gas price in the scratch buffer - (call $ext_gas_price (i64.const 1)) + ;; This stores the gas price in the buffer + (call $ext_weight_to_fee (i64.const 2) (i32.const 0) (i32.const 32)) - ;; assert $ext_scratch_size == 8 + ;; assert len == 8 (call $assert (i32.eq - (call $ext_scratch_size) + (i32.load (i32.const 32)) (i32.const 8) ) ) - ;; copy contents of the scratch buffer into the contract's memory. - (call $ext_scratch_read - (i32.const 8) ;; Pointer in memory to the place where to copy. - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 8) ;; Count of bytes to copy. - ) - - ;; assert that contents of the buffer is equal to the i64 value of 1312. + ;; assert that contents of the buffer is equal to the i64 value of 2 * 1312. 
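
The updated fixture calls `ext_weight_to_fee` with a weight of 2 and asserts on 2624 because the mock's `get_weight_price` multiplies the weight by 1312. The arithmetic that assert relies on, as a stand-alone check (the 1312 constant is taken from the test code, not from any runtime configuration):

/// Price per weight unit used by the mock `get_weight_price` in these tests.
const PRICE_PER_WEIGHT: u64 = 1312;

/// Fee charged for `weight` units: a plain saturating multiplication.
fn weight_to_fee(weight: u64) -> u64 {
    PRICE_PER_WEIGHT.saturating_mul(weight)
}

fn main() {
    // The wat fixture asks for the fee of weight 2 and asserts on 2624.
    assert_eq!(weight_to_fee(2), 2 * 1312);
    assert_eq!(weight_to_fee(2), 2624);
}
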
(call $assert (i64.eq - (i64.load - (i32.const 8) - ) - (i64.const 1312) + (i64.load (i32.const 0)) + (i64.const 2624) ) ) ) @@ -1110,12 +1060,13 @@ mod tests { const CODE_GAS_LEFT: &str = r#" (module - (import "env" "ext_gas_left" (func $ext_gas_left)) - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) - (import "env" "ext_return" (func $ext_return (param i32 i32))) + (import "env" "ext_gas_left" (func $ext_gas_left (param i32 i32))) + (import "env" "ext_return" (func $ext_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) + ;; size of our buffer is 32 bytes + (data (i32.const 32) "\20") + (func $assert (param i32) (block $ok (br_if $ok @@ -1126,28 +1077,19 @@ mod tests { ) (func (export "call") - ;; This stores the gas left in the scratch buffer - (call $ext_gas_left) + ;; This stores the gas left in the buffer + (call $ext_gas_left (i32.const 0) (i32.const 32)) - ;; assert $ext_scratch_size == 8 + ;; assert len == 8 (call $assert (i32.eq - (call $ext_scratch_size) + (i32.load (i32.const 32)) (i32.const 8) ) ) - ;; copy contents of the scratch buffer into the contract's memory. - (call $ext_scratch_read - (i32.const 8) ;; Pointer in memory to the place where to copy. - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 8) ;; Count of bytes to copy. - ) - - (call $ext_return - (i32.const 8) - (i32.const 8) - ) + ;; return gas left + (call $ext_return (i32.const 0) (i32.const 0) (i32.const 8)) (unreachable) ) @@ -1173,11 +1115,12 @@ mod tests { const CODE_VALUE_TRANSFERRED: &str = r#" (module - (import "env" "ext_value_transferred" (func $ext_value_transferred)) - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) + (import "env" "ext_value_transferred" (func $ext_value_transferred (param i32 i32))) (import "env" "memory" (memory 1 1)) + ;; size of our buffer is 32 bytes + (data (i32.const 32) "\20") + (func $assert (param i32) (block $ok (br_if $ok @@ -1188,30 +1131,21 @@ mod tests { ) (func (export "call") - ;; This stores the value transferred in the scratch buffer - (call $ext_value_transferred) + ;; This stores the value transferred in the buffer + (call $ext_value_transferred (i32.const 0) (i32.const 32)) - ;; assert $ext_scratch_size == 8 + ;; assert len == 8 (call $assert (i32.eq - (call $ext_scratch_size) + (i32.load (i32.const 32)) (i32.const 8) ) ) - ;; copy contents of the scratch buffer into the contract's memory. - (call $ext_scratch_read - (i32.const 8) ;; Pointer in memory to the place where to copy. - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 8) ;; Count of bytes to copy. - ) - ;; assert that contents of the buffer is equal to the i64 value of 1337. 
(call $assert (i64.eq - (i64.load - (i32.const 8) - ) + (i64.load (i32.const 0)) (i64.const 1337) ) ) @@ -1233,12 +1167,13 @@ mod tests { const CODE_RETURN_FROM_START_FN: &str = r#" (module - (import "env" "ext_return" (func $ext_return (param i32 i32))) + (import "env" "ext_return" (func $ext_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) (start $start) (func $start (call $ext_return + (i32.const 0) (i32.const 8) (i32.const 4) ) @@ -1263,16 +1198,17 @@ mod tests { &mut GasMeter::new(GAS_LIMIT), ).unwrap(); - assert_eq!(output, ExecReturnValue { status: STATUS_SUCCESS, data: vec![1, 2, 3, 4] }); + assert_eq!(output, ExecReturnValue { flags: ReturnFlags::empty(), data: vec![1, 2, 3, 4] }); } const CODE_TIMESTAMP_NOW: &str = r#" (module - (import "env" "ext_now" (func $ext_now)) - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) + (import "env" "ext_now" (func $ext_now (param i32 i32))) (import "env" "memory" (memory 1 1)) + ;; size of our buffer is 32 bytes + (data (i32.const 32) "\20") + (func $assert (param i32) (block $ok (br_if $ok @@ -1283,30 +1219,21 @@ mod tests { ) (func (export "call") - ;; This stores the block timestamp in the scratch buffer - (call $ext_now) + ;; This stores the block timestamp in the buffer + (call $ext_now (i32.const 0) (i32.const 32)) - ;; assert $ext_scratch_size == 8 + ;; assert len == 8 (call $assert (i32.eq - (call $ext_scratch_size) + (i32.load (i32.const 32)) (i32.const 8) ) ) - ;; copy contents of the scratch buffer into the contract's memory. - (call $ext_scratch_read - (i32.const 8) ;; Pointer in memory to the place where to copy. - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 8) ;; Count of bytes to copy. - ) - ;; assert that contents of the buffer is equal to the i64 value of 1111. (call $assert (i64.eq - (i64.load - (i32.const 8) - ) + (i64.load (i32.const 0)) (i64.const 1111) ) ) @@ -1328,11 +1255,12 @@ mod tests { const CODE_MINIMUM_BALANCE: &str = r#" (module - (import "env" "ext_minimum_balance" (func $ext_minimum_balance)) - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) + (import "env" "ext_minimum_balance" (func $ext_minimum_balance (param i32 i32))) (import "env" "memory" (memory 1 1)) + ;; size of our buffer is 32 bytes + (data (i32.const 32) "\20") + (func $assert (param i32) (block $ok (br_if $ok @@ -1343,29 +1271,20 @@ mod tests { ) (func (export "call") - (call $ext_minimum_balance) + (call $ext_minimum_balance (i32.const 0) (i32.const 32)) - ;; assert $ext_scratch_size == 8 + ;; assert len == 8 (call $assert (i32.eq - (call $ext_scratch_size) + (i32.load (i32.const 32)) (i32.const 8) ) ) - ;; copy contents of the scratch buffer into the contract's memory. - (call $ext_scratch_read - (i32.const 8) ;; Pointer in memory to the place where to copy. - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 8) ;; Count of bytes to copy. - ) - ;; assert that contents of the buffer is equal to the i64 value of 666. 
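
Each of these rewritten fixtures reserves an output area, pre-stores its size as the little-endian length word `"\20"` (0x20 = 32), and expects the host to shrink that word to 8, since the values under test are written as little-endian u64. The read that `(i64.load (i32.const 0))` performs, expressed in Rust for reference (the 1337 constant is the value CODE_VALUE_TRANSFERRED asserts on):

/// Decode the 8-byte little-endian value an ext_* getter wrote into the
/// output buffer, mirroring the `i64.load` at offset 0 in the wat fixtures.
fn decode_u64(buf: &[u8]) -> u64 {
    let mut bytes = [0u8; 8];
    bytes.copy_from_slice(&buf[..8]);
    u64::from_le_bytes(bytes)
}

fn main() {
    let mut out = [0u8; 32];
    out[..8].copy_from_slice(&1337u64.to_le_bytes());
    assert_eq!(decode_u64(&out), 1337);

    // The length word shrinks from the advertised 32 (0x20) to the 8 bytes written.
    let advertised: u32 = 0x20;
    let written: u32 = 8;
    assert!(written <= advertised);
}
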
(call $assert (i64.eq - (i64.load - (i32.const 8) - ) + (i64.load (i32.const 0)) (i64.const 666) ) ) @@ -1387,11 +1306,12 @@ mod tests { const CODE_TOMBSTONE_DEPOSIT: &str = r#" (module - (import "env" "ext_tombstone_deposit" (func $ext_tombstone_deposit)) - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) + (import "env" "ext_tombstone_deposit" (func $ext_tombstone_deposit (param i32 i32))) (import "env" "memory" (memory 1 1)) + ;; size of our buffer is 32 bytes + (data (i32.const 32) "\20") + (func $assert (param i32) (block $ok (br_if $ok @@ -1402,29 +1322,20 @@ mod tests { ) (func (export "call") - (call $ext_tombstone_deposit) + (call $ext_tombstone_deposit (i32.const 0) (i32.const 32)) - ;; assert $ext_scratch_size == 8 + ;; assert len == 8 (call $assert (i32.eq - (call $ext_scratch_size) + (i32.load (i32.const 32)) (i32.const 8) ) ) - ;; copy contents of the scratch buffer into the contract's memory. - (call $ext_scratch_read - (i32.const 8) ;; Pointer in memory to the place where to copy. - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 8) ;; Count of bytes to copy. - ) - ;; assert that contents of the buffer is equal to the i64 value of 16. (call $assert (i64.eq - (i64.load - (i32.const 8) - ) + (i64.load (i32.const 0)) (i64.const 16) ) ) @@ -1446,12 +1357,21 @@ mod tests { const CODE_RANDOM: &str = r#" (module - (import "env" "ext_random" (func $ext_random (param i32 i32))) - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) - (import "env" "ext_return" (func $ext_return (param i32 i32))) + (import "env" "ext_random" (func $ext_random (param i32 i32 i32 i32))) + (import "env" "ext_return" (func $ext_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) + ;; [0,128) is reserved for the result of PRNG. + + ;; the subject used for the PRNG. [128,160) + (data (i32.const 128) + "\00\01\02\03\04\05\06\07\08\09\0A\0B\0C\0D\0E\0F" + "\00\01\02\03\04\05\06\07\08\09\0A\0B\0C\0D\0E\0F" + ) + + ;; size of our buffer is 128 bytes + (data (i32.const 160) "\80") + (func $assert (param i32) (block $ok (br_if $ok @@ -1462,42 +1382,30 @@ mod tests { ) (func (export "call") - ;; This stores the block random seed in the scratch buffer + ;; This stores the block random seed in the buffer (call $ext_random - (i32.const 40) ;; Pointer in memory to the start of the subject buffer + (i32.const 128) ;; Pointer in memory to the start of the subject buffer (i32.const 32) ;; The subject buffer's length + (i32.const 0) ;; Pointer to the output buffer + (i32.const 160) ;; Pointer to the output buffer length ) - ;; assert $ext_scratch_size == 32 + ;; assert len == 32 (call $assert (i32.eq - (call $ext_scratch_size) + (i32.load (i32.const 160)) (i32.const 32) ) ) - ;; copy contents of the scratch buffer into the contract's memory. - (call $ext_scratch_read - (i32.const 8) ;; Pointer in memory to the place where to copy. - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 32) ;; Count of bytes to copy. - ) - - ;; return the data from the contract + ;; return the random data (call $ext_return - (i32.const 8) + (i32.const 0) + (i32.const 0) (i32.const 32) ) ) (func (export "deploy")) - - ;; [8,40) is reserved for the result of PRNG. - - ;; the subject used for the PRNG. 
[40,72) - (data (i32.const 40) - "\00\01\02\03\04\05\06\07\08\09\0A\0B\0C\0D\0E\0F" - "\00\01\02\03\04\05\06\07\08\09\0A\0B\0C\0D\0E\0F" - ) ) "#; @@ -1516,7 +1424,7 @@ mod tests { assert_eq!( output, ExecReturnValue { - status: STATUS_SUCCESS, + flags: ReturnFlags::empty(), data: hex!("000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F").to_vec(), }, ); @@ -1603,9 +1511,7 @@ mod tests { MockExt::default(), &mut gas_meter ), - Err(ExecError { - reason: DispatchError::Other("contract trapped during execution"), buffer: _ - }) + Err(DispatchError::Other("contract trapped during execution")) ); } @@ -1647,19 +1553,19 @@ mod tests { MockExt::default(), &mut gas_meter ), - Err(ExecError { reason: DispatchError::Other("contract trapped during execution"), buffer: _ }) + Err(DispatchError::Other("contract trapped during execution")) ); } - /// calls `ext_block_number`, loads the current block number from the scratch buffer and - /// compares it with the constant 121. + /// calls `ext_block_number` compares the result with the constant 121. const CODE_BLOCK_NUMBER: &str = r#" (module - (import "env" "ext_block_number" (func $ext_block_number)) - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) + (import "env" "ext_block_number" (func $ext_block_number (param i32 i32))) (import "env" "memory" (memory 1 1)) + ;; size of our buffer is 32 bytes + (data (i32.const 32) "\20") + (func $assert (param i32) (block $ok (br_if $ok @@ -1670,30 +1576,21 @@ mod tests { ) (func (export "call") - ;; This stores the block height in the scratch buffer - (call $ext_block_number) + ;; This stores the block height in the buffer + (call $ext_block_number (i32.const 0) (i32.const 32)) - ;; assert $ext_scratch_size == 8 + ;; assert len == 8 (call $assert (i32.eq - (call $ext_scratch_size) + (i32.load (i32.const 32)) (i32.const 8) ) ) - ;; copy contents of the scratch buffer into the contract's memory. - (call $ext_scratch_read - (i32.const 8) ;; Pointer in memory to the place where to copy. - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 8) ;; Count of bytes to copy. - ) - ;; assert that contents of the buffer is equal to the i64 value of 121. (call $assert (i64.eq - (i64.load - (i32.const 8) - ) + (i64.load (i32.const 0)) (i64.const 121) ) ) @@ -1713,129 +1610,61 @@ mod tests { ).unwrap(); } - // asserts that the size of the input data is 4. 
- const CODE_SIMPLE_ASSERT: &str = r#" -(module - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - - (func $assert (param i32) - (block $ok - (br_if $ok - (get_local 0) - ) - (unreachable) - ) - ) - - (func (export "deploy")) - - (func (export "call") - (call $assert - (i32.eq - (call $ext_scratch_size) - (i32.const 4) - ) - ) - ) -) -"#; - - #[test] - fn output_buffer_capacity_preserved_on_success() { - let mut input_data = Vec::with_capacity(1_234); - input_data.extend_from_slice(&[1, 2, 3, 4][..]); - - let output = execute( - CODE_SIMPLE_ASSERT, - input_data, - MockExt::default(), - &mut GasMeter::new(GAS_LIMIT), - ).unwrap(); - - assert_eq!(output.data.len(), 0); - assert_eq!(output.data.capacity(), 1_234); - } - - #[test] - fn output_buffer_capacity_preserved_on_failure() { - let mut input_data = Vec::with_capacity(1_234); - input_data.extend_from_slice(&[1, 2, 3, 4, 5][..]); - - let error = execute( - CODE_SIMPLE_ASSERT, - input_data, - MockExt::default(), - &mut GasMeter::new(GAS_LIMIT), - ).err().unwrap(); - - assert_eq!(error.buffer.capacity(), 1_234); - } - const CODE_RETURN_WITH_DATA: &str = r#" (module - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) - (import "env" "ext_scratch_write" (func $ext_scratch_write (param i32 i32))) + (import "env" "ext_input" (func $ext_input (param i32 i32))) + (import "env" "ext_return" (func $ext_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) + (data (i32.const 32) "\20") + ;; Deploy routine is the same as call. - (func (export "deploy") (result i32) + (func (export "deploy") (call $call) ) ;; Call reads the first 4 bytes (LE) as the exit status and returns the rest as output data. - (func $call (export "call") (result i32) - (local $buf_size i32) - (local $exit_status i32) - - ;; Find out the size of the scratch buffer - (set_local $buf_size (call $ext_scratch_size)) - - ;; Copy scratch buffer into this contract memory. - (call $ext_scratch_read - (i32.const 0) ;; The pointer where to store the scratch buffer contents, - (i32.const 0) ;; Offset from the start of the scratch buffer. - (get_local $buf_size) ;; Count of bytes to copy. + (func $call (export "call") + ;; Copy input data this contract memory. + (call $ext_input + (i32.const 0) ;; Pointer where to store input + (i32.const 32) ;; Pointer to the length of the buffer ) ;; Copy all but the first 4 bytes of the input data as the output data. - (call $ext_scratch_write - (i32.const 4) ;; Offset from the start of the scratch buffer. - (i32.sub ;; Count of bytes to copy. - (get_local $buf_size) - (i32.const 4) - ) + (call $ext_return + (i32.load (i32.const 0)) + (i32.const 4) + (i32.sub (i32.load (i32.const 32)) (i32.const 4)) ) - - ;; Return the first 4 bytes of the input data as the exit status. 
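
In the reworked CODE_RETURN_WITH_DATA fixture the first four little-endian bytes of the input become the `flags` argument of `ext_return` and the remainder becomes the output data, which is exactly what the two tests that follow drive. A quick stand-alone check of that split, in plain Rust and mirroring the test vectors:

/// Split an input payload the way CODE_RETURN_WITH_DATA does: 4 LE bytes of
/// return flags, followed by the data that is passed back verbatim.
fn split_return_input(input: &[u8]) -> (u32, &[u8]) {
    let mut flag_bytes = [0u8; 4];
    flag_bytes.copy_from_slice(&input[..4]);
    (u32::from_le_bytes(flag_bytes), &input[4..])
}

fn main() {
    // `00000000` prefix: empty flags, i.e. a plain success.
    let (flags, data) = split_return_input(&[0x00, 0x00, 0x00, 0x00, 0x44, 0x55]);
    assert_eq!(flags, 0);
    assert_eq!(data, &[0x44, 0x55]);

    // `01000000` prefix: bit 0 set, i.e. ReturnFlags::REVERT.
    let (flags, data) = split_return_input(&[0x01, 0x00, 0x00, 0x00, 0x55, 0x66]);
    assert_eq!(flags, 1);
    assert_eq!(data, &[0x55, 0x66]);
}
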
- (i32.load (i32.const 0)) + (unreachable) ) ) "#; #[test] - fn return_with_success_status() { + fn ext_return_with_success_status() { let output = execute( CODE_RETURN_WITH_DATA, - hex!("00112233445566778899").to_vec(), + hex!("00000000445566778899").to_vec(), MockExt::default(), &mut GasMeter::new(GAS_LIMIT), ).unwrap(); - assert_eq!(output, ExecReturnValue { status: 0, data: hex!("445566778899").to_vec() }); + assert_eq!(output, ExecReturnValue { flags: ReturnFlags::empty(), data: hex!("445566778899").to_vec() }); assert!(output.is_success()); } #[test] - fn return_with_failure_status() { + fn return_with_revert_status() { let output = execute( CODE_RETURN_WITH_DATA, - hex!("112233445566778899").to_vec(), + hex!("010000005566778899").to_vec(), MockExt::default(), &mut GasMeter::new(GAS_LIMIT), ).unwrap(); - assert_eq!(output, ExecReturnValue { status: 17, data: hex!("5566778899").to_vec() }); + assert_eq!(output, ExecReturnValue { flags: ReturnFlags::REVERT, data: hex!("5566778899").to_vec() }); assert!(!output.is_success()); } } diff --git a/frame/contracts/src/wasm/prepare.rs b/frame/contracts/src/wasm/prepare.rs index ba934f353e..03f33f2dc6 100644 --- a/frame/contracts/src/wasm/prepare.rs +++ b/frame/contracts/src/wasm/prepare.rs @@ -227,11 +227,7 @@ impl<'a> ContractModule<'a> { }; // Then check the signature. - // Both "call" and "deploy" has a [] -> [] or [] -> [i32] function type. - // - // The [] -> [] signature predates the [] -> [i32] signature and is supported for - // backwards compatibility. This will likely be removed once ink! is updated to - // generate modules with the new function signatures. + // Both "call" and "deploy" has a () -> () function type. let func_ty_idx = func_entries.get(fn_idx as usize) .ok_or_else(|| "export refers to non-existent function")? .type_ref(); diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 7b64117cd2..6d272ce929 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -16,14 +16,17 @@ //! Environment definition of the wasm smart-contract runtime. -use crate::{Schedule, Trait, CodeHash, BalanceOf}; +use crate::{Schedule, Trait, CodeHash, BalanceOf, Error}; use crate::exec::{ - Ext, ExecResult, ExecError, ExecReturnValue, StorageKey, TopicOf, STATUS_SUCCESS, + Ext, ExecResult, ExecReturnValue, StorageKey, TopicOf, ReturnFlags, }; use crate::gas::{Gas, GasMeter, Token, GasMeterResult}; +use crate::wasm::env_def::ConvertibleToWasm; use sp_sandbox; +use parity_wasm::elements::ValueType; use frame_system; -use sp_std::{prelude::*, mem, convert::TryInto}; +use frame_support::dispatch::DispatchError; +use sp_std::prelude::*; use codec::{Decode, Encode}; use sp_runtime::traits::{Bounded, SaturatedConversion}; use sp_io::hashing::{ @@ -33,20 +36,66 @@ use sp_io::hashing::{ sha2_256, }; -/// The value returned from ext_call and ext_instantiate contract external functions if the call or -/// instantiation traps. This value is chosen as if the execution does not trap, the return value -/// will always be an 8-bit integer, so 0x0100 is the smallest value that could not be returned. -const TRAP_RETURN_CODE: u32 = 0x0100; +/// Every error that can be returned from a runtime API call. +#[repr(u32)] +pub enum ReturnCode { + /// API call successful. + Success = 0, + /// The called function trapped and has its state changes reverted. + /// In this case no output buffer is returned. + /// Can only be returned from `ext_call` and `ext_instantiate`. 
+ CalleeTrapped = 1, + /// The called function ran to completion but decided to revert its state. + /// An output buffer is returned when one was supplied. + /// Can only be returned from `ext_call` and `ext_instantiate`. + CalleeReverted = 2, + /// The passed key does not exist in storage. + KeyNotFound = 3, +} + +impl ConvertibleToWasm for ReturnCode { + type NativeType = Self; + const VALUE_TYPE: ValueType = ValueType::I32; + fn to_typed_value(self) -> sp_sandbox::Value { + sp_sandbox::Value::I32(self as i32) + } + fn from_typed_value(_: sp_sandbox::Value) -> Option { + debug_assert!(false, "We will never receive a ReturnCode but only send it to wasm."); + None + } +} + +impl From for ReturnCode { + fn from(from: ExecReturnValue) -> ReturnCode { + if from.flags.contains(ReturnFlags::REVERT) { + Self::CalleeReverted + } else { + Self::Success + } + } +} -/// Enumerates all possible *special* trap conditions. +/// The data passed through when a contract uses `ext_return`. +struct ReturnData { + /// The flags as passed through by the contract. They are still unchecked and + /// will later be parsed into a `ReturnFlags` bitflags struct. + flags: u32, + /// The output buffer passed by the contract as return data. + data: Vec, +} + +/// Enumerates all possible reasons why a trap was generated. /// -/// In this runtime traps used not only for signaling about errors but also -/// to just terminate quickly in some cases. -enum SpecialTrap { +/// This is either used to supply the caller with more information about why an error +/// occurred (the SupervisorError variant). +/// The other case is where the trap does not constitute an error but rather was invoked +/// as a quick way to terminate the application (all other variants). +enum TrapReason { + /// The supervisor trapped the contract because of an error condition occurred during + /// execution in privileged code. + SupervisorError(DispatchError), /// Signals that trap was generated in response to call `ext_return` host function. - Return(Vec), - /// Signals that trap was generated because the contract exhausted its gas limit. - OutOfGas, + Return(ReturnData), /// Signals that a trap was generated in response to a succesful call to the /// `ext_terminate` host function. Termination, @@ -57,11 +106,11 @@ enum SpecialTrap { /// Can only be used for one call. pub(crate) struct Runtime<'a, E: Ext + 'a> { ext: &'a mut E, - scratch_buf: Vec, + input_data: Option>, schedule: &'a Schedule, memory: sp_sandbox::Memory, gas_meter: &'a mut GasMeter, - special_trap: Option, + trap_reason: Option, } impl<'a, E: Ext + 'a> Runtime<'a, E> { pub(crate) fn new( @@ -73,12 +122,11 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { ) -> Self { Runtime { ext, - // Put the input data into the scratch buffer immediately. - scratch_buf: input_data, + input_data: Some(input_data), schedule, memory, gas_meter, - special_trap: None, + trap_reason: None, } } } @@ -87,53 +135,39 @@ pub(crate) fn to_execution_result( runtime: Runtime, sandbox_result: Result, ) -> ExecResult { - match runtime.special_trap { + match runtime.trap_reason { // The trap was the result of the execution `return` host function. 
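
A compact sketch of how the new `ReturnCode` collapses a callee's outcome for the calling contract. This is self-contained and reuses the REVERT bit value from the patch, with a plain enum and a `Result` standing in for the sandbox glue:

const REVERT: u32 = 0x0000_0001;

#[derive(Debug, PartialEq)]
#[repr(u32)]
enum ReturnCode {
    Success = 0,
    CalleeTrapped = 1,
    CalleeReverted = 2,
    KeyNotFound = 3,
}

/// What the caller of `ext_call` sees once the callee finished: a revert
/// still delivers the output buffer, a trap does not.
fn code_for_callee(outcome: Result<u32 /* flags */, ()>) -> ReturnCode {
    match outcome {
        Ok(flags) if flags & REVERT != 0 => ReturnCode::CalleeReverted,
        Ok(_) => ReturnCode::Success,
        Err(()) => ReturnCode::CalleeTrapped,
    }
}

fn main() {
    assert_eq!(code_for_callee(Ok(0)), ReturnCode::Success);
    assert_eq!(code_for_callee(Ok(REVERT)), ReturnCode::CalleeReverted);
    assert_eq!(code_for_callee(Err(())), ReturnCode::CalleeTrapped);
    // KeyNotFound is only produced by ext_get_storage, not by calls.
    let _ = ReturnCode::KeyNotFound;
}
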
- Some(SpecialTrap::Return(data)) => { + Some(TrapReason::Return(ReturnData{ flags, data })) => { + let flags = ReturnFlags::from_bits(flags).ok_or_else(|| + "used reserved bit in return flags" + )?; return Ok(ExecReturnValue { - status: STATUS_SUCCESS, + flags, data, }) }, - Some(SpecialTrap::Termination) => { + Some(TrapReason::Termination) => { return Ok(ExecReturnValue { - status: STATUS_SUCCESS, + flags: ReturnFlags::empty(), data: Vec::new(), }) }, - Some(SpecialTrap::Restoration) => { + Some(TrapReason::Restoration) => { return Ok(ExecReturnValue { - status: STATUS_SUCCESS, + flags: ReturnFlags::empty(), data: Vec::new(), }) } - Some(SpecialTrap::OutOfGas) => { - return Err(ExecError { - reason: "ran out of gas during contract execution".into(), - buffer: runtime.scratch_buf, - }) - }, + Some(TrapReason::SupervisorError(error)) => Err(error)?, None => (), } // Check the exact type of the error. match sandbox_result { // No traps were generated. Proceed normally. - Ok(sp_sandbox::ReturnValue::Unit) => { - let mut buffer = runtime.scratch_buf; - buffer.clear(); - Ok(ExecReturnValue { status: STATUS_SUCCESS, data: buffer }) - } - Ok(sp_sandbox::ReturnValue::Value(sp_sandbox::Value::I32(exit_code))) => { - let status = (exit_code & 0xFF).try_into() - .expect("exit_code is masked into the range of a u8; qed"); - Ok(ExecReturnValue { status, data: runtime.scratch_buf }) + Ok(_) => { + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) } - // This should never happen as the return type of exported functions should have been - // validated by the code preparation process. However, because panics are really - // undesirable in the runtime code, we treat this as a trap for now. Eventually, we might - // want to revisit this. - Ok(_) => Err(ExecError { reason: "return type error".into(), buffer: runtime.scratch_buf }), // `Error::Module` is returned only if instantiation or linking failed (i.e. // wasm binary tried to import a function that is not provided by the host). // This shouldn't happen because validation process ought to reject such binaries. @@ -141,10 +175,10 @@ pub(crate) fn to_execution_result( // Because panics are really undesirable in the runtime code, we treat this as // a trap for now. Eventually, we might want to revisit this. Err(sp_sandbox::Error::Module) => - Err(ExecError { reason: "validation error".into(), buffer: runtime.scratch_buf }), + Err("validation error")?, // Any other kind of a trap should result in a failure. 
Err(sp_sandbox::Error::Execution) | Err(sp_sandbox::Error::OutOfBounds) => - Err(ExecError { reason: "contract trapped during execution".into(), buffer: runtime.scratch_buf }), + Err("contract trapped during execution")?, } } @@ -213,13 +247,13 @@ impl Token for RuntimeToken { fn charge_gas>( gas_meter: &mut GasMeter, metadata: &Tok::Metadata, - special_trap: &mut Option, + trap_reason: &mut Option, token: Tok, ) -> Result<(), sp_sandbox::HostError> { match gas_meter.charge(metadata, token) { GasMeterResult::Proceed => Ok(()), GasMeterResult::OutOfGas => { - *special_trap = Some(SpecialTrap::OutOfGas); + *trap_reason = Some(TrapReason::SupervisorError(Error::::OutOfGas.into())); Err(sp_sandbox::HostError) }, } @@ -241,7 +275,7 @@ fn read_sandbox_memory( charge_gas( ctx.gas_meter, ctx.schedule, - &mut ctx.special_trap, + &mut ctx.trap_reason, RuntimeToken::ReadMemory(len), )?; @@ -250,31 +284,6 @@ fn read_sandbox_memory( Ok(buf) } -/// Read designated chunk from the sandbox memory into the scratch buffer, consuming an -/// appropriate amount of gas. Resizes the scratch buffer to the specified length on success. -/// -/// Returns `Err` if one of the following conditions occurs: -/// -/// - calculating the gas cost resulted in overflow. -/// - out of gas -/// - requested buffer is not within the bounds of the sandbox memory. -fn read_sandbox_memory_into_scratch( - ctx: &mut Runtime, - ptr: u32, - len: u32, -) -> Result<(), sp_sandbox::HostError> { - charge_gas( - ctx.gas_meter, - ctx.schedule, - &mut ctx.special_trap, - RuntimeToken::ReadMemory(len), - )?; - - ctx.scratch_buf.resize(len as usize, 0); - ctx.memory.get(ptr, ctx.scratch_buf.as_mut_slice()).map_err(|_| sp_sandbox::HostError)?; - Ok(()) -} - /// Read designated chunk from the sandbox memory into the supplied buffer, consuming /// an appropriate amount of gas. /// @@ -291,7 +300,7 @@ fn read_sandbox_memory_into_buf( charge_gas( ctx.gas_meter, ctx.schedule, - &mut ctx.special_trap, + &mut ctx.trap_reason, RuntimeToken::ReadMemory(buf.len() as u32), )?; @@ -324,22 +333,67 @@ fn read_sandbox_memory_as( /// - calculating the gas cost resulted in overflow. /// - out of gas /// - designated area is not within the bounds of the sandbox memory. -fn write_sandbox_memory( - schedule: &Schedule, - special_trap: &mut Option, - gas_meter: &mut GasMeter, - memory: &sp_sandbox::Memory, +fn write_sandbox_memory( + ctx: &mut Runtime, ptr: u32, buf: &[u8], ) -> Result<(), sp_sandbox::HostError> { charge_gas( - gas_meter, - schedule, - special_trap, + ctx.gas_meter, + ctx.schedule, + &mut ctx.trap_reason, RuntimeToken::WriteMemory(buf.len() as u32), )?; - memory.set(ptr, buf)?; + ctx.memory.set(ptr, buf)?; + + Ok(()) +} + +/// Write the given buffer and its length to the designated locations in sandbox memory. +// +/// `out_ptr` is the location in sandbox memory where `buf` should be written to. +/// `out_len_ptr` is an in-out location in sandbox memory. It is read to determine the +/// lenght of the buffer located at `out_ptr`. If that buffer is large enough the actual +/// `buf.len()` is written to this location. +/// +/// If `out_ptr` is set to the sentinel value of `u32::max_value()` and `allow_skip` is true the +/// operation is skipped and `Ok` is returned. This is supposed to help callers to make copying +/// output optional. For example to skip copying back the output buffer of an `ext_call` +/// when the caller is not interested in the result. 
+///
+/// In addition to the error conditions of `write_sandbox_memory` this function returns
+/// `Err` if the size of the buffer located at `out_ptr` is too small to fit `buf`.
+fn write_sandbox_output(
+	ctx: &mut Runtime,
+	out_ptr: u32,
+	out_len_ptr: u32,
+	buf: &[u8],
+	allow_skip: bool,
+) -> Result<(), sp_sandbox::HostError> {
+	if allow_skip && out_ptr == u32::max_value() {
+		return Ok(());
+	}
+
+	let buf_len = buf.len() as u32;
+	let len: u32 = read_sandbox_memory_as(ctx, out_len_ptr, 4)?;
+
+	if len < buf_len {
+		ctx.trap_reason = Some(TrapReason::SupervisorError(
+			Error::::OutputBufferTooSmall.into()
+		));
+		return Err(sp_sandbox::HostError);
+	}
+
+	charge_gas(
+		ctx.gas_meter,
+		ctx.schedule,
+		&mut ctx.trap_reason,
+		RuntimeToken::WriteMemory(buf_len.saturating_add(4)),
+	)?;
+
+	ctx.memory.set(out_ptr, buf)?;
+	ctx.memory.set(out_len_ptr, &buf_len.encode())?;

 	Ok(())
 }
@@ -362,7 +416,7 @@ define_env!(Env, ,
 		charge_gas(
 			&mut ctx.gas_meter,
 			ctx.schedule,
-			&mut ctx.special_trap,
+			&mut ctx.trap_reason,
 			RuntimeToken::Explicit(amount)
 		)?;
 		Ok(())
@@ -407,29 +461,37 @@ define_env!(Env, ,
 		Ok(())
 	},

-	// Retrieve the value under the given key from the storage and return 0.
-	// If there is no entry under the given key then this function will return 1 and
-	// clear the scratch buffer.
+	// Retrieve the value under the given key from storage.
 	//
-	// - key_ptr: pointer into the linear memory where the key
-	//   of the requested value is placed.
-	ext_get_storage(ctx, key_ptr: u32) -> u32 => {
+	// # Parameters
+	//
+	// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed.
+	// - `out_ptr`: pointer to the linear memory where the value is written to.
+	// - `out_len_ptr`: in-out pointer into linear memory where the buffer length
+	//   is read from and the value length is written to.
+	//
+	// # Errors
+	//
+	// If there is no entry under the given key then this function will return
+	// `ReturnCode::KeyNotFound`.
+	//
+	// # Traps
+	//
+	// Traps if the supplied buffer length is smaller than the size of the stored value.
+	ext_get_storage(ctx, key_ptr: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => {
 		let mut key: StorageKey = [0; 32];
 		read_sandbox_memory_into_buf(ctx, key_ptr, &mut key)?;
 		if let Some(value) = ctx.ext.get_storage(&key) {
-			ctx.scratch_buf = value;
-			Ok(0)
+			write_sandbox_output(ctx, out_ptr, out_len_ptr, &value, false)?;
+			Ok(ReturnCode::Success)
 		} else {
-			ctx.scratch_buf.clear();
-			Ok(1)
+			Ok(ReturnCode::KeyNotFound)
 		}
 	},

 	// Transfer some value to another account.
 	//
-	// If the value transfer was succesful zero is returned. Otherwise one is returned.
-	// The scratch buffer is not touched. The receiver can be a plain account or
-	// a contract.
+	// # Parameters
 	//
 	// - account_ptr: a pointer to the address of the beneficiary account
 	//   Should be decodable as an `T::AccountId`. Traps otherwise.
@@ -437,38 +499,34 @@ define_env!(Env, ,
 	// - value_ptr: a pointer to the buffer with value, how much value to send.
 	//   Should be decodable as a `T::Balance`. Traps otherwise.
 	// - value_len: length of the value buffer.
+	//
+	// # Traps
+	//
+	// Traps if the transfer wasn't successful. This can happen when the value transferred
+	// brings the sender below the existential deposit. Use `ext_terminate` to remove
+	// the caller contract.
 	ext_transfer(
 		ctx,
 		account_ptr: u32,
 		account_len: u32,
 		value_ptr: u32,
 		value_len: u32
-	) -> u32 => {
+	) => {
 		let callee: <::T as frame_system::Trait>::AccountId =
 			read_sandbox_memory_as(ctx, account_ptr, account_len)?;
 		let value: BalanceOf<::T> =
 			read_sandbox_memory_as(ctx, value_ptr, value_len)?;
-		match ctx.ext.transfer(&callee, value, ctx.gas_meter) {
-			Ok(_) => Ok(0),
-			Err(_) => Ok(1),
-		}
+		ctx.ext.transfer(&callee, value, ctx.gas_meter).map_err(|_| sp_sandbox::HostError)
 	},

 	// Make a call to another contract.
 	//
-	// If the called contract runs to completion, then this returns the status code the callee
-	// returns on exit in the bottom 8 bits of the return value. The top 24 bits are 0s. A status
-	// code of 0 indicates success, and any other code indicates a failure. On failure, any state
-	// changes made by the called contract are reverted. The scratch buffer is filled with the
-	// output data returned by the called contract, even in the case of a failure status.
+	// The callee's output buffer is copied to `output_ptr` and its length to `output_len_ptr`.
+	// The copy of the output buffer can be skipped by supplying the sentinel value
+	// of `u32::max_value()` to `output_ptr`.
 	//
-	// This call fails if it would bring the calling contract below the existential deposit.
-	// In order to destroy a contract `ext_terminate` must be used.
-	//
-	// If the contract traps during execution or otherwise fails to complete successfully, then
-	// this function clears the scratch buffer and returns 0x0100. As with a failure status, any
-	// state changes made by the called contract are reverted.
+	// # Parameters
 	//
 	// - callee_ptr: a pointer to the address of the callee contract.
 	//   Should be decodable as an `T::AccountId`. Traps otherwise.
@@ -479,6 +537,23 @@ define_env!(Env, ,
 	// - value_len: length of the value buffer.
 	// - input_data_ptr: a pointer to a buffer to be used as input data to the callee.
 	// - input_data_len: length of the input data buffer.
+	// - output_ptr: a pointer where the output buffer is copied to.
+	// - output_len_ptr: in-out pointer to where the length of the buffer is read from
+	//   and the actual length is written to.
+	//
+	// # Errors
+	//
+	// `ReturnCode::CalleeReverted`: The callee ran to completion but decided to have its
+	//   changes reverted. The delivery of the output buffer is still possible.
+	// `ReturnCode::CalleeTrapped`: The callee trapped during execution. All changes are reverted
+	//   and no output buffer is delivered.
+	//
+	// # Traps
+	//
+	// - Transfer of balance failed. This call can not bring the sender below the existential
+	//   deposit. Use `ext_terminate` to remove the caller.
+	// - Callee does not exist.
+	// - Supplied output buffer is too small.
 	ext_call(
 		ctx,
 		callee_ptr: u32,
@@ -487,16 +562,14 @@ define_env!(Env, ,
 		value_ptr: u32,
 		value_len: u32,
 		input_data_ptr: u32,
-		input_data_len: u32
-	) -> u32 => {
+		input_data_len: u32,
+		output_ptr: u32,
+		output_len_ptr: u32
+	) -> ReturnCode => {
 		let callee: <::T as frame_system::Trait>::AccountId =
 			read_sandbox_memory_as(ctx, callee_ptr, callee_len)?;
-		let value: BalanceOf<::T> =
-			read_sandbox_memory_as(ctx, value_ptr, value_len)?;
-
-		// Read input data into the scratch buffer, then take ownership of it.
-		read_sandbox_memory_into_scratch(ctx, input_data_ptr, input_data_len)?;
-		let input_data = mem::replace(&mut ctx.scratch_buf, Vec::new());
+		let value: BalanceOf<::T> = read_sandbox_memory_as(ctx, value_ptr, value_len)?;
+		let input_data = read_sandbox_memory(ctx, input_data_ptr, input_data_len)?;

 		let nested_gas_limit = if gas == 0 {
 			ctx.gas_meter.gas_left()
@@ -513,22 +586,20 @@ define_env!(Env, ,
 					nested_meter,
 					input_data,
 				)
-				.map_err(|err| err.buffer)
+				.map_err(|_| ())
 				}
 				// there is not enough gas to allocate for the nested call.
-				None => Err(input_data),
+				None => Err(()),
 			}
 		});

 		match call_outcome {
 			Ok(output) => {
-				ctx.scratch_buf = output.data;
-				Ok(output.status.into())
+				write_sandbox_output(ctx, output_ptr, output_len_ptr, &output.data, true)?;
+				Ok(output.into())
 			},
-			Err(buffer) => {
-				ctx.scratch_buf = buffer;
-				ctx.scratch_buf.clear();
-				Ok(TRAP_RETURN_CODE)
+			Err(_) => {
+				Ok(ReturnCode::CalleeTrapped)
 			},
 		}
 	},
@@ -536,29 +607,14 @@ define_env!(Env, ,
 	// Instantiate a contract with the specified code hash.
 	//
 	// This function creates an account and executes the constructor defined in the code specified
-	// by the code hash.
-	//
-	// If the constructor runs to completion, then this returns the status code that the newly
-	// instantiated contract returns on exit in the bottom 8 bits of the return value. The top 24
-	// bits are 0s. A status code of 0 indicates success, and any other code indicates a failure.
-	// On failure, any state changes made by the called contract are reverted and the contract is
-	// not instantiated. On a success status, the scratch buffer is filled with the encoded address
-	// of the newly instantiated contract. In the case of a failure status, the scratch buffer is
-	// cleared.
+	// by the code hash. The address of this new account is copied to `address_ptr` and its length
+	// to `address_len_ptr`. The constructor's output buffer is copied to `output_ptr` and its
+	// length to `output_len_ptr`.
 	//
-	// This call fails if it would bring the calling contract below the existential deposit.
-	// In order to destroy a contract `ext_terminate` must be used.
+	// The copy of the output buffer and address can be skipped by supplying the sentinel value
+	// of `u32::max_value()` to `output_ptr` or `address_ptr`.
 	//
-	// If the contract traps during execution or otherwise fails to complete successfully, then
-	// this function clears the scratch buffer and returns 0x0100. As with a failure status, any
-	// state changes made by the called contract are reverted.
-
-	// This function creates an account and executes initializer code. After the execution,
-	// the returned buffer is saved as the code of the created account.
-	//
-	// Returns 0 on the successful contract instantiation and puts the address of the instantiated
-	// contract into the scratch buffer. Otherwise, returns non-zero value and clears the scratch
-	// buffer.
+	// # Parameters
 	//
 	// - code_hash_ptr: a pointer to the buffer that contains the initializer code.
 	// - code_hash_len: length of the initializer code buffer.
@@ -568,6 +624,28 @@ define_env!(Env, ,
 	// - value_len: length of the value buffer.
 	// - input_data_ptr: a pointer to a buffer to be used as input data to the initializer code.
 	// - input_data_len: length of the input data buffer.
+	// - address_ptr: a pointer where the new account's address is copied to.
+	// - address_len_ptr: in-out pointer to where the length of the buffer is read from
+	//   and the actual length is written to.
+	// - output_ptr: a pointer where the output buffer is copied to.
+	// - output_len_ptr: in-out pointer to where the length of the buffer is read from
+	//   and the actual length is written to.
+	//
+	// # Errors
+	//
+	// `ReturnCode::CalleeReverted`: The callee's constructor ran to completion but decided to have
+	//   its changes reverted. The delivery of the output buffer is still possible but the
+	//   account was not created and no address is returned.
+	// `ReturnCode::CalleeTrapped`: The callee trapped during execution. All changes are reverted
+	//   and no output buffer is delivered. The account was not created and no address is
+	//   returned.
+	//
+	// # Traps
+	//
+	// - Transfer of balance failed. This call can not bring the sender below the existential
+	//   deposit. Use `ext_terminate` to remove the caller.
+	// - Code hash does not exist.
+	// - Supplied output buffers are too small.
 	ext_instantiate(
 		ctx,
 		code_hash_ptr: u32,
@@ -576,16 +654,16 @@ define_env!(Env, ,
 		value_ptr: u32,
 		value_len: u32,
 		input_data_ptr: u32,
-		input_data_len: u32
-	) -> u32 => {
+		input_data_len: u32,
+		address_ptr: u32,
+		address_len_ptr: u32,
+		output_ptr: u32,
+		output_len_ptr: u32
+	) -> ReturnCode => {
 		let code_hash: CodeHash<::T> =
 			read_sandbox_memory_as(ctx, code_hash_ptr, code_hash_len)?;
-		let value: BalanceOf<::T> =
-			read_sandbox_memory_as(ctx, value_ptr, value_len)?;
-
-		// Read input data into the scratch buffer, then take ownership of it.
-		read_sandbox_memory_into_scratch(ctx, input_data_ptr, input_data_len)?;
-		let input_data = mem::replace(&mut ctx.scratch_buf, Vec::new());
+		let value: BalanceOf<::T> = read_sandbox_memory_as(ctx, value_ptr, value_len)?;
+		let input_data = read_sandbox_memory(ctx, input_data_ptr, input_data_len)?;

 		let nested_gas_limit = if gas == 0 {
 			ctx.gas_meter.gas_left()
@@ -602,27 +680,24 @@ define_env!(Env, ,
 					nested_meter,
 					input_data
 				)
-				.map_err(|err| err.buffer)
+				.map_err(|_| ())
 				}
 				// there is not enough gas to allocate for the nested call.
-				None => Err(input_data),
+				None => Err(()),
 			}
 		});

 		match instantiate_outcome {
 			Ok((address, output)) => {
-				let is_success = output.is_success();
-				ctx.scratch_buf = output.data;
-				ctx.scratch_buf.clear();
-				if is_success {
-					// Write the address to the scratch buffer.
-					address.encode_to(&mut ctx.scratch_buf);
+				if !output.flags.contains(ReturnFlags::REVERT) {
+					write_sandbox_output(
+						ctx, address_ptr, address_len_ptr, &address.encode(), true
+					)?;
 				}
-				Ok(output.status.into())
+				write_sandbox_output(ctx, output_ptr, output_len_ptr, &output.data, true)?;
+				Ok(output.into())
 			},
-			Err(buffer) => {
-				ctx.scratch_buf = buffer;
-				ctx.scratch_buf.clear();
-				Ok(TRAP_RETURN_CODE)
+			Err(_) => {
+				Ok(ReturnCode::CalleeTrapped)
 			},
 		}
 	},
@@ -646,27 +721,48 @@ define_env!(Env, ,
 			read_sandbox_memory_as(ctx, beneficiary_ptr, beneficiary_len)?;

 		if let Ok(_) = ctx.ext.terminate(&beneficiary, ctx.gas_meter) {
-			ctx.special_trap = Some(SpecialTrap::Termination);
+			ctx.trap_reason = Some(TrapReason::Termination);
 		}
 		Err(sp_sandbox::HostError)
 	},

-	// Save a data buffer as a result of the execution, terminate the execution and return a
-	// successful result to the caller.
+	ext_input(ctx, buf_ptr: u32, buf_len_ptr: u32) => {
+		if let Some(input) = ctx.input_data.take() {
+			write_sandbox_output(ctx, buf_ptr, buf_len_ptr, &input, false)
+		} else {
+			Err(sp_sandbox::HostError)
+		}
+	},
+
+	// Cease contract execution and save a data buffer as a result of the execution.
+	//
+	// This function never returns as it stops execution of the caller.
+	// This is the only way to return a data buffer to the caller. Returning from
+	// execution without calling this function is equivalent to calling:
+	// ```
+	// ext_return(0, 0, 0);
+	// ```
 	//
-	// This is the only way to return a data buffer to the caller.
-	ext_return(ctx, data_ptr: u32, data_len: u32) => {
+	// The flags argument is a bitfield that can be used to signal special return
+	// conditions to the supervisor:
+	// --- lsb ---
+	// bit 0 : REVERT - Revert all storage changes made by the caller.
+	// bit [1, 31]: Reserved for future use.
+	// --- msb ---
+	//
+	// Using a reserved bit triggers a trap.
+	ext_return(ctx, flags: u32, data_ptr: u32, data_len: u32) => {
 		charge_gas(
 			ctx.gas_meter,
 			ctx.schedule,
-			&mut ctx.special_trap,
+			&mut ctx.trap_reason,
 			RuntimeToken::ReturnData(data_len)
 		)?;

-		read_sandbox_memory_into_scratch(ctx, data_ptr, data_len)?;
-		let output_buf = mem::replace(&mut ctx.scratch_buf, Vec::new());
-
-		ctx.special_trap = Some(SpecialTrap::Return(output_buf));
+		ctx.trap_reason = Some(TrapReason::Return(ReturnData {
+			flags,
+			data: read_sandbox_memory(ctx, data_ptr, data_len)?,
+		}));

 		// The trap mechanism is used to immediately terminate the execution.
 		// This trap should be handled appropriately before returning the result
@@ -674,100 +770,131 @@ define_env!(Env, ,
 		Err(sp_sandbox::HostError)
 	},

-	// Stores the address of the caller into the scratch buffer.
+	// Stores the address of the caller into the supplied buffer.
+	//
+	// The value is stored to linear memory at the address pointed to by `out_ptr`.
+	// `out_len_ptr` must point to a u32 value that describes the available space at
+	// `out_ptr`. This call overwrites it with the size of the value. If the available
+	// space at `out_ptr` is less than the size of the value a trap is triggered.
 	//
 	// If this is a top-level call (i.e. initiated by an extrinsic) the origin address of the
 	// extrinsic will be returned. Otherwise, if this call is initiated by another contract then the
-	// address of the contract will be returned.
-	ext_caller(ctx) => {
-		ctx.scratch_buf.clear();
-		ctx.ext.caller().encode_to(&mut ctx.scratch_buf);
-		Ok(())
+	// address of the contract will be returned. The value is encoded as T::AccountId.
+	ext_caller(ctx, out_ptr: u32, out_len_ptr: u32) => {
+		write_sandbox_output(ctx, out_ptr, out_len_ptr, &ctx.ext.caller().encode(), false)
 	},

-	// Stores the address of the current contract into the scratch buffer.
-	ext_address(ctx) => {
-		ctx.scratch_buf.clear();
-		ctx.ext.address().encode_to(&mut ctx.scratch_buf);
-		Ok(())
+	// Stores the address of the current contract into the supplied buffer.
+	//
+	// The value is stored to linear memory at the address pointed to by `out_ptr`.
+	// `out_len_ptr` must point to a u32 value that describes the available space at
+	// `out_ptr`. This call overwrites it with the size of the value. If the available
+	// space at `out_ptr` is less than the size of the value a trap is triggered.
+	ext_address(ctx, out_ptr: u32, out_len_ptr: u32) => {
+		write_sandbox_output(ctx, out_ptr, out_len_ptr, &ctx.ext.address().encode(), false)
 	},

-	// Stores the price for the specified amount of gas in scratch buffer.
+	// Stores the price for the specified amount of gas into the supplied buffer.
+	//
+	// The value is stored to linear memory at the address pointed to by `out_ptr`.
+	// `out_len_ptr` must point to a u32 value that describes the available space at
+	// `out_ptr`. This call overwrites it with the size of the value. If the available
+	// space at `out_ptr` is less than the size of the value a trap is triggered.
+	//
+	// The data is encoded as T::Balance.
+	//
+	// # Note
 	//
-	// The data is encoded as T::Balance. The current contents of the scratch buffer are overwritten.
 	// It is recommended to avoid specifying very small values for `gas` as the prices for a single
 	// gas can be smaller than one.
-	ext_gas_price(ctx, gas: u64) => {
-		ctx.scratch_buf.clear();
-		ctx.ext.get_weight_price(gas).encode_to(&mut ctx.scratch_buf);
-		Ok(())
+	ext_weight_to_fee(ctx, gas: u64, out_ptr: u32, out_len_ptr: u32) => {
+		write_sandbox_output(
+			ctx, out_ptr, out_len_ptr, &ctx.ext.get_weight_price(gas).encode(), false
+		)
 	},

-	// Stores the amount of gas left into the scratch buffer.
+	// Stores the amount of gas left into the supplied buffer.
 	//
-	// The data is encoded as Gas. The current contents of the scratch buffer are overwritten.
-	ext_gas_left(ctx) => {
-		ctx.scratch_buf.clear();
-		ctx.gas_meter.gas_left().encode_to(&mut ctx.scratch_buf);
-		Ok(())
+	// The value is stored to linear memory at the address pointed to by `out_ptr`.
+	// `out_len_ptr` must point to a u32 value that describes the available space at
+	// `out_ptr`. This call overwrites it with the size of the value. If the available
+	// space at `out_ptr` is less than the size of the value a trap is triggered.
+	//
+	// The data is encoded as Gas.
+	ext_gas_left(ctx, out_ptr: u32, out_len_ptr: u32) => {
+		write_sandbox_output(ctx, out_ptr, out_len_ptr, &ctx.gas_meter.gas_left().encode(), false)
 	},

-	// Stores the balance of the current account into the scratch buffer.
+	// Stores the balance of the current account into the supplied buffer.
 	//
-	// The data is encoded as T::Balance. The current contents of the scratch buffer are overwritten.
-	ext_balance(ctx) => {
-		ctx.scratch_buf.clear();
-		ctx.ext.balance().encode_to(&mut ctx.scratch_buf);
-		Ok(())
+	// The value is stored to linear memory at the address pointed to by `out_ptr`.
+	// `out_len_ptr` must point to a u32 value that describes the available space at
+	// `out_ptr`. This call overwrites it with the size of the value. If the available
+	// space at `out_ptr` is less than the size of the value a trap is triggered.
+	//
+	// The data is encoded as T::Balance.
+	ext_balance(ctx, out_ptr: u32, out_len_ptr: u32) => {
+		write_sandbox_output(ctx, out_ptr, out_len_ptr, &ctx.ext.balance().encode(), false)
 	},

-	// Stores the value transferred along with this call or as endowment into the scratch buffer.
+	// Stores the value transferred along with this call or as endowment into the supplied buffer.
 	//
-	// The data is encoded as T::Balance. The current contents of the scratch buffer are overwritten.
-	ext_value_transferred(ctx) => {
-		ctx.scratch_buf.clear();
-		ctx.ext.value_transferred().encode_to(&mut ctx.scratch_buf);
-		Ok(())
+	// The value is stored to linear memory at the address pointed to by `out_ptr`.
+	// `out_len_ptr` must point to a u32 value that describes the available space at
+	// `out_ptr`. This call overwrites it with the size of the value. If the available
+	// space at `out_ptr` is less than the size of the value a trap is triggered.
+	//
+	// The data is encoded as T::Balance.
+	ext_value_transferred(ctx, out_ptr: u32, out_len_ptr: u32) => {
+		write_sandbox_output(
+			ctx, out_ptr, out_len_ptr, &ctx.ext.value_transferred().encode(), false
+		)
 	},

-	// Stores the random number for the current block for the given subject into the scratch
-	// buffer.
+	// Stores a random number for the current block and the given subject into the supplied buffer.
+	//
+	// The value is stored to linear memory at the address pointed to by `out_ptr`.
+	// `out_len_ptr` must point to a u32 value that describes the available space at
+	// `out_ptr`. This call overwrites it with the size of the value. If the available
+	// space at `out_ptr` is less than the size of the value a trap is triggered.
 	//
-	// The data is encoded as T::Hash. The current contents of the scratch buffer are
-	// overwritten.
-	ext_random(ctx, subject_ptr: u32, subject_len: u32) => {
+	// The data is encoded as T::Hash.
+	ext_random(ctx, subject_ptr: u32, subject_len: u32, out_ptr: u32, out_len_ptr: u32) => {
 		// The length of a subject can't exceed `max_subject_len`.
 		if subject_len > ctx.schedule.max_subject_len {
 			return Err(sp_sandbox::HostError);
 		}
 		let subject_buf = read_sandbox_memory(ctx, subject_ptr, subject_len)?;
-		ctx.scratch_buf.clear();
-		ctx.ext.random(&subject_buf).encode_to(&mut ctx.scratch_buf);
-		Ok(())
+		write_sandbox_output(
+			ctx, out_ptr, out_len_ptr, &ctx.ext.random(&subject_buf).encode(), false
+		)
 	},

-	// Load the latest block timestamp into the scratch buffer
-	ext_now(ctx) => {
-		ctx.scratch_buf.clear();
-		ctx.ext.now().encode_to(&mut ctx.scratch_buf);
-		Ok(())
+	// Load the latest block timestamp into the supplied buffer
+	//
+	// The value is stored to linear memory at the address pointed to by `out_ptr`.
+	// `out_len_ptr` must point to a u32 value that describes the available space at
+	// `out_ptr`. This call overwrites it with the size of the value. If the available
+	// space at `out_ptr` is less than the size of the value a trap is triggered.
+	ext_now(ctx, out_ptr: u32, out_len_ptr: u32) => {
+		write_sandbox_output(ctx, out_ptr, out_len_ptr, &ctx.ext.now().encode(), false)
 	},

-	// Stores the minimum balance (a.k.a. existential deposit) into the scratch buffer.
+	// Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer.
 	//
-	// The data is encoded as T::Balance. The current contents of the scratch buffer are
-	// overwritten.
-	ext_minimum_balance(ctx) => {
-		ctx.scratch_buf.clear();
-		ctx.ext.minimum_balance().encode_to(&mut ctx.scratch_buf);
-		Ok(())
+	// The data is encoded as T::Balance.
+	ext_minimum_balance(ctx, out_ptr: u32, out_len_ptr: u32) => {
+		write_sandbox_output(ctx, out_ptr, out_len_ptr, &ctx.ext.minimum_balance().encode(), false)
 	},

-	// Stores the tombstone deposit into the scratch buffer.
+	// Stores the tombstone deposit into the supplied buffer.
+	//
+	// The value is stored to linear memory at the address pointed to by `out_ptr`.
+	// `out_len_ptr` must point to a u32 value that describes the available space at
+	// `out_ptr`. This call overwrites it with the size of the value. If the available
+	// space at `out_ptr` is less than the size of the value a trap is triggered.
 	//
-	// The data is encoded as T::Balance. The current contents of the scratch
-	// buffer are overwritten.
+	// The data is encoded as T::Balance.
 	//
 	// # Note
 	//
 	// a contract to leave a tombstone the balance of the contract must not go
 	// below the sum of existential deposit and the tombstone deposit. The sum
 	// is commonly referred as subsistence threshold in code.
-	ext_tombstone_deposit(ctx) => {
-		ctx.scratch_buf.clear();
-		ctx.ext.tombstone_deposit().encode_to(&mut ctx.scratch_buf);
-		Ok(())
+	ext_tombstone_deposit(ctx, out_ptr: u32, out_len_ptr: u32) => {
+		write_sandbox_output(
+			ctx, out_ptr, out_len_ptr, &ctx.ext.tombstone_deposit().encode(), false
+		)
 	},

 	// Try to restore the given destination contract sacrificing the caller.
@@ -846,59 +973,11 @@ define_env!(Env, ,
 			rent_allowance,
 			delta,
 		) {
-			ctx.special_trap = Some(SpecialTrap::Restoration);
+			ctx.trap_reason = Some(TrapReason::Restoration);
 		}
 		Err(sp_sandbox::HostError)
 	},

-	// Returns the size of the scratch buffer.
-	//
-	// For more details on the scratch buffer see `ext_scratch_read`.
-	ext_scratch_size(ctx) -> u32 => {
-		Ok(ctx.scratch_buf.len() as u32)
-	},
-
-	// Copy data from the scratch buffer starting from `offset` with length `len` into the contract
-	// memory. The region at which the data should be put is specified by `dest_ptr`.
-	//
-	// In order to get size of the scratch buffer use `ext_scratch_size`. At the start of contract
-	// execution, the scratch buffer is filled with the input data. Whenever a contract calls
-	// function that uses the scratch buffer the contents of the scratch buffer are overwritten.
-	ext_scratch_read(ctx, dest_ptr: u32, offset: u32, len: u32) => {
-		let offset = offset as usize;
-		if offset > ctx.scratch_buf.len() {
-			// Offset can't be larger than scratch buffer length.
-			return Err(sp_sandbox::HostError);
-		}
-
-		// This can't panic since `offset <= ctx.scratch_buf.len()`.
-		let src = &ctx.scratch_buf[offset..];
-		if src.len() != len as usize {
-			return Err(sp_sandbox::HostError);
-		}
-
-		// Finally, perform the write.
-		write_sandbox_memory(
-			ctx.schedule,
-			&mut ctx.special_trap,
-			ctx.gas_meter,
-			&ctx.memory,
-			dest_ptr,
-			src,
-		)?;
-
-		Ok(())
-	},
-
-	// Copy data from contract memory starting from `src_ptr` with length `len` into the scratch
-	// buffer. This overwrites the entire scratch buffer and resizes to `len`. Specifying a `len`
-	// of zero clears the scratch buffer.
-	//
-	// This should be used before exiting a call or instantiation in order to set the return data.
-	ext_scratch_write(ctx, src_ptr: u32, len: u32) => {
-		read_sandbox_memory_into_scratch(ctx, src_ptr, len)
-	},
-
 	// Deposit a contract event with the data buffer and optional list of topics. There is a limit
 	// on the maximum number of topics specified by `max_event_topics`.
 	//
@@ -928,7 +1007,7 @@ define_env!(Env, ,
 		charge_gas(
 			ctx.gas_meter,
 			ctx.schedule,
-			&mut ctx.special_trap,
+			&mut ctx.trap_reason,
 			RuntimeToken::DepositEvent(topics.len() as u32, data_len)
 		)?;
 		ctx.ext.deposit_event(topics, event_data);
@@ -949,14 +1028,16 @@ define_env!(Env, ,
 		Ok(())
 	},

-	// Stores the rent allowance into the scratch buffer.
+	// Stores the rent allowance into the supplied buffer.
 	//
-	// The data is encoded as T::Balance. The current contents of the scratch buffer are overwritten.
-	ext_rent_allowance(ctx) => {
-		ctx.scratch_buf.clear();
-		ctx.ext.rent_allowance().encode_to(&mut ctx.scratch_buf);
-
-		Ok(())
+	// The value is stored to linear memory at the address pointed to by `out_ptr`.
+	// `out_len_ptr` must point to a u32 value that describes the available space at
+	// `out_ptr`. This call overwrites it with the size of the value. If the available
+	// space at `out_ptr` is less than the size of the value a trap is triggered.
+	//
+	// The data is encoded as T::Balance.
+	ext_rent_allowance(ctx, out_ptr: u32, out_len_ptr: u32) => {
+		write_sandbox_output(ctx, out_ptr, out_len_ptr, &ctx.ext.rent_allowance().encode(), false)
 	},

 	// Prints utf8 encoded string from the data buffer.
@@ -970,11 +1051,14 @@ define_env!(Env, ,
 		Ok(())
 	},

-	// Stores the current block number of the current contract into the scratch buffer.
-	ext_block_number(ctx) => {
-		ctx.scratch_buf.clear();
-		ctx.ext.block_number().encode_to(&mut ctx.scratch_buf);
-		Ok(())
+	// Stores the current block number of the current contract into the supplied buffer.
+	//
+	// The value is stored to linear memory at the address pointed to by `out_ptr`.
+	// `out_len_ptr` must point to a u32 value that describes the available space at
+	// `out_ptr`. This call overwrites it with the size of the value. If the available
+	// space at `out_ptr` is less than the size of the value a trap is triggered.
+	ext_block_number(ctx, out_ptr: u32, out_len_ptr: u32) => {
+		write_sandbox_output(ctx, out_ptr, out_len_ptr, &ctx.ext.block_number().encode(), false)
 	},

 	// Computes the SHA2 256-bit hash on the given input buffer.
@@ -1074,7 +1158,7 @@ define_env!(Env, ,
 	},
 );

-/// Computes the given hash function on the scratch buffer.
+/// Computes the given hash function on the supplied input.
 ///
 /// Reads from the sandboxed input buffer into an intermediate buffer.
 /// Returns the result directly to the output buffer of the sandboxed memory.
@@ -1098,17 +1182,13 @@ where
 	F: FnOnce(&[u8]) -> R,
 	R: AsRef<[u8]>,
 {
-	// Copy the input buffer directly into the scratch buffer to avoid
-	// heap allocations.
+	// Copy input into supervisor memory.
 	let input = read_sandbox_memory(ctx, input_ptr, input_len)?;
-	// Compute the hash on the scratch buffer using the given hash function.
+	// Compute the hash on the input buffer using the given hash function.
 	let hash = hash_fn(&input);
 	// Write the resulting hash back into the sandboxed output buffer.
 	write_sandbox_memory(
-		ctx.schedule,
-		&mut ctx.special_trap,
-		ctx.gas_meter,
-		&ctx.memory,
+		ctx,
 		output_ptr,
 		hash.as_ref(),
 	)?;
-- 
GitLab
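The `out_ptr`/`out_len_ptr` convention used by `write_sandbox_output` in the patch above can be modelled outside the pallet. The sketch below is illustrative only: `write_output`, `OutputError` and the offsets used in `main` are hypothetical names invented for the example, not part of the patch. It mirrors the capacity check and the length write-back; the pallet SCALE-encodes the length it writes back, which for a `u32` is the same little-endian 4-byte form used here.

```
use std::convert::TryInto;

/// Illustrative error type; the pallet signals these cases through its own
/// trap/error plumbing instead.
#[derive(Debug, PartialEq)]
enum OutputError {
    BufferTooSmall,
    OutOfBounds,
}

/// Model of the convention: read the capacity stored at `out_len_ptr`, refuse to
/// write a value that does not fit, otherwise copy `buf` to `out_ptr` and write
/// the actual length back into the length cell.
fn write_output(
    mem: &mut [u8],
    out_ptr: usize,
    out_len_ptr: usize,
    buf: &[u8],
) -> Result<(), OutputError> {
    let cap_bytes = mem.get(out_len_ptr..out_len_ptr + 4).ok_or(OutputError::OutOfBounds)?;
    let cap = u32::from_le_bytes(cap_bytes.try_into().unwrap()) as usize;
    if cap < buf.len() {
        return Err(OutputError::BufferTooSmall);
    }
    mem.get_mut(out_ptr..out_ptr + buf.len())
        .ok_or(OutputError::OutOfBounds)?
        .copy_from_slice(buf);
    mem[out_len_ptr..out_len_ptr + 4].copy_from_slice(&(buf.len() as u32).to_le_bytes());
    Ok(())
}

fn main() {
    let mut mem = vec![0u8; 64];
    // The caller announces a 16 byte buffer at offset 0; the length cell lives at offset 32.
    mem[32..36].copy_from_slice(&16u32.to_le_bytes());
    write_output(&mut mem, 0, 32, b"hello").unwrap();
    assert_eq!(&mem[0..5], &b"hello"[..]);
    // The length cell now holds the actual length (5), demonstrating the in-out behaviour.
    assert_eq!(u32::from_le_bytes(mem[32..36].try_into().unwrap()), 5);
    // The cell now announces a capacity of 5, so a 17 byte value no longer fits.
    assert_eq!(write_output(&mut mem, 0, 32, &[0u8; 17]), Err(OutputError::BufferTooSmall));
}
```

The sentinel-skip behaviour (`out_ptr == u32::max_value()` together with `allow_skip`) is omitted here; in the patch it simply short-circuits before the capacity check.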
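The flags bitfield accepted by the new `ext_return` (bit 0 = REVERT, all other bits reserved) and its handling in `to_execution_result` can be sketched as follows. The constants and the `Outcome` enum are made up for the example; the real code uses the `ReturnFlags` bitflags type together with `ReturnData` and `ExecReturnValue`.

```
/// Illustrative stand-ins for the pallet's `ReturnFlags`: only bit 0 (REVERT) is
/// defined, every other bit is reserved.
const REVERT: u32 = 0b0000_0001;
const ALL_KNOWN: u32 = REVERT;

#[derive(Debug, PartialEq)]
enum Outcome {
    /// Execution finished; state changes are kept and the data is returned.
    Success { data: Vec<u8> },
    /// Execution finished but the contract asked for its changes to be rolled back.
    Reverted { data: Vec<u8> },
    /// A reserved flag bit was set; treated as a trap.
    Trap,
}

/// Mirror of the `TrapReason::Return(ReturnData { flags, data })` handling:
/// reject reserved bits first, then map the REVERT bit onto the outcome.
fn interpret_return(flags: u32, data: Vec<u8>) -> Outcome {
    if flags & !ALL_KNOWN != 0 {
        return Outcome::Trap;
    }
    if flags & REVERT != 0 {
        Outcome::Reverted { data }
    } else {
        Outcome::Success { data }
    }
}

fn main() {
    assert_eq!(interpret_return(0, b"ok".to_vec()), Outcome::Success { data: b"ok".to_vec() });
    assert_eq!(interpret_return(1, Vec::new()), Outcome::Reverted { data: Vec::new() });
    assert_eq!(interpret_return(2, Vec::new()), Outcome::Trap);
}
```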
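The reworked gas charging no longer threads an error string and the scratch buffer back through the return path; it records the reason in the runtime's `trap_reason` slot and returns a bare host error. A rough, hypothetical model of that control flow, with a simplified gas meter and a unit error type standing in for `sp_sandbox::HostError`:

```
/// Illustrative subset of the pallet's `TrapReason`; the real enum also carries
/// return data, termination and restoration variants.
#[derive(Debug, PartialEq)]
enum TrapReason {
    OutOfGas,
}

struct GasMeter {
    gas_left: u64,
}

/// On exhaustion the reason is stored in `trap_reason` and a plain error is
/// returned; the supervisor later inspects `trap_reason` to decide the outcome.
fn charge_gas(
    meter: &mut GasMeter,
    amount: u64,
    trap_reason: &mut Option<TrapReason>,
) -> Result<(), ()> {
    if meter.gas_left >= amount {
        meter.gas_left -= amount;
        Ok(())
    } else {
        *trap_reason = Some(TrapReason::OutOfGas);
        Err(())
    }
}

fn main() {
    let mut meter = GasMeter { gas_left: 10 };
    let mut trap_reason = None;
    assert!(charge_gas(&mut meter, 4, &mut trap_reason).is_ok());
    assert!(charge_gas(&mut meter, 100, &mut trap_reason).is_err());
    assert_eq!(trap_reason, Some(TrapReason::OutOfGas));
}
```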