// Copyright 2017-2020 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.

// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.

//! Polkadot service. Specialized wrapper over substrate service.

#![deny(unused_results)]

pub mod chain_spec;
mod grandpa_support;
mod client;

#[cfg(feature = "full-node")]
use {
	std::convert::TryInto,
	std::time::Duration,
	tracing::info,
	polkadot_node_core_av_store::Config as AvailabilityConfig,
	polkadot_node_core_av_store::Error as AvailabilityError,
	polkadot_node_core_approval_voting::Config as ApprovalVotingConfig,
	polkadot_node_core_proposer::ProposerFactory,
	polkadot_overseer::{AllSubsystems, BlockInfo, Overseer, OverseerHandler},
	polkadot_primitives::v1::ParachainHost,
	sc_authority_discovery::Service as AuthorityDiscoveryService,
	sp_blockchain::HeaderBackend,
	sp_trie::PrefixedMemoryDB,
	sc_client_api::{AuxStore, ExecutorProvider},
	sc_keystore::LocalKeystore,
	babe_primitives::BabeApi,
	grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider},
	sp_runtime::traits::Header as HeaderT,
};

#[cfg(feature = "real-overseer")]
use polkadot_network_bridge::RequestMultiplexer;

use sp_core::traits::SpawnNamed;

use polkadot_subsystem::jaeger;

use std::sync::Arc;

use prometheus_endpoint::Registry;
use sc_executor::native_executor_instance;
use service::RpcHandlers;
use telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle};

pub use self::client::{AbstractClient, Client, ClientHandle, ExecuteWithClient, RuntimeApiCollection};
pub use chain_spec::{PolkadotChainSpec, KusamaChainSpec, WestendChainSpec, RococoChainSpec};
pub use consensus_common::{Proposal, SelectChain, BlockImport, block_validation::Chain};
pub use polkadot_parachain::wasm_executor::IsolationStrategy;
pub use polkadot_primitives::v1::{Block, BlockId, CollatorPair, Hash, Id as ParaId};
pub use sc_client_api::{Backend, ExecutionStrategy, CallExecutor};
pub use sc_consensus::LongestChain;
pub use sc_executor::NativeExecutionDispatch;
pub use service::{
	Role, PruningMode, TransactionPoolOptions, Error as SubstrateServiceError, RuntimeGenesis,
	TFullClient, TLightClient, TFullBackend, TLightBackend, TFullCallExecutor, TLightCallExecutor,
	Configuration, ChainSpec, TaskManager,
};
pub use service::config::{DatabaseConfig, PrometheusConfig};
pub use sp_api::{ApiRef, Core as CoreApi, ConstructRuntimeApi, ProvideRuntimeApi, StateBackend};
pub use sp_runtime::traits::{DigestFor, HashFor, NumberFor, Block as BlockT, self as runtime_traits, BlakeTwo256};

pub use kusama_runtime;
pub use polkadot_runtime;
pub use rococo_runtime;
pub use westend_runtime;

/// The maximum number of active leaves we forward to the [`Overseer`] on startup.
const MAX_ACTIVE_LEAVES: usize = 4;

native_executor_instance!(
	pub PolkadotExecutor,
	polkadot_runtime::api::dispatch,
	polkadot_runtime::native_version,
	frame_benchmarking::benchmarking::HostFunctions,
);

native_executor_instance!(
	pub KusamaExecutor,
	kusama_runtime::api::dispatch,
	kusama_runtime::native_version,
	frame_benchmarking::benchmarking::HostFunctions,
);

native_executor_instance!(
	pub WestendExecutor,
	westend_runtime::api::dispatch,
	westend_runtime::native_version,
	frame_benchmarking::benchmarking::HostFunctions,
);

native_executor_instance!(
	pub RococoExecutor,
	rococo_runtime::api::dispatch,
	rococo_runtime::native_version,
	frame_benchmarking::benchmarking::HostFunctions,
);

#[derive(thiserror::Error, Debug)]
pub enum Error {
	#[error(transparent)]
	Io(#[from] std::io::Error),

	#[error(transparent)]
	AddrFormatInvalid(#[from] std::net::AddrParseError),

	#[error(transparent)]
	Sub(#[from] SubstrateServiceError),

	#[error(transparent)]
	Blockchain(#[from] sp_blockchain::Error),

	#[error(transparent)]
	Consensus(#[from] consensus_common::Error),

	#[error("Failed to create an overseer")]
	Overseer(#[from] polkadot_overseer::SubsystemError),

	#[error(transparent)]
	Prometheus(#[from] prometheus_endpoint::PrometheusError),

	#[error(transparent)]
	Telemetry(#[from] telemetry::Error),

	#[error(transparent)]
	Jaeger(#[from] polkadot_subsystem::jaeger::JaegerError),

	#[cfg(feature = "full-node")]
	#[error(transparent)]
	Availability(#[from] AvailabilityError),

	#[error("Authorities require the real overseer implementation")]
	AuthoritiesRequireRealOverseer,

	#[cfg(feature = "full-node")]
	#[error("Creating a custom database is required for validators")]
	DatabasePathRequired,
}

/// Can be called for a `Configuration` to check if it is a configuration for the `Kusama` network.
pub trait IdentifyVariant {
	/// Returns if this is a configuration for the `Kusama` network.
	fn is_kusama(&self) -> bool;

	/// Returns if this is a configuration for the `Westend` network.
	fn is_westend(&self) -> bool;

	/// Returns if this is a configuration for the `Rococo` network.
	fn is_rococo(&self) -> bool;

	/// Returns if this is a configuration for the `Polkadot` network.
	fn is_polkadot(&self) -> bool;

	/// Returns true if this configuration is for a development network.
	fn is_dev(&self) -> bool;
}

impl IdentifyVariant for Box<dyn ChainSpec> {
	fn is_kusama(&self) -> bool {
		self.id().starts_with("kusama") || self.id().starts_with("ksm")
	}
	fn is_westend(&self) -> bool {
		self.id().starts_with("westend") || self.id().starts_with("wnd")
	}
	fn is_rococo(&self) -> bool {
		self.id().starts_with("rococo") || self.id().starts_with("rco")
	}
	fn is_polkadot(&self) -> bool {
		self.id().starts_with("polkadot") || self.id().starts_with("dot")
	}
	fn is_dev(&self) -> bool {
		self.id().ends_with("dev")
	}
}

// If we're using prometheus, use a registry with a prefix of `polkadot`.
fn set_prometheus_registry(config: &mut Configuration) -> Result<(), Error> {
	if let Some(PrometheusConfig { registry, .. }) = config.prometheus_config.as_mut() {
		*registry = Registry::new_custom(Some("polkadot".into()), None)?;
	}

	Ok(())
}

/// Initialize the `Jaeger` collector. The destination must listen
/// on the given address and port for `UDP` packets.
fn jaeger_launch_collector_with_agent(
	spawner: impl SpawnNamed,
	config: &Configuration,
	agent: Option<std::net::SocketAddr>,
) -> Result<(), Error> {
	if let Some(agent) = agent {
		let cfg = jaeger::JaegerConfig::builder()
			.agent(agent)
			.named(&config.network.node_name)
			.build();

		jaeger::Jaeger::new(cfg).launch(spawner)?;
	}
	Ok(())
}

pub type FullBackend = service::TFullBackend<Block>;

#[cfg(feature = "full-node")]
type FullSelectChain = sc_consensus::LongestChain<FullBackend, Block>;

pub type FullClient<RuntimeApi, Executor> = service::TFullClient<Block, RuntimeApi, Executor>;

#[cfg(feature = "full-node")]
type FullGrandpaBlockImport<RuntimeApi, Executor> = grandpa::GrandpaBlockImport<
	FullBackend, Block, FullClient<RuntimeApi, Executor>, FullSelectChain
>;

type LightBackend = service::TLightBackendWithHash<Block, sp_runtime::traits::BlakeTwo256>;

type LightClient<RuntimeApi, Executor> =
	service::TLightClientWithBackend<Block, RuntimeApi, Executor, LightBackend>;

#[cfg(feature = "full-node")]
fn new_partial<RuntimeApi, Executor>(
	config: &mut Configuration,
	jaeger_agent: Option<std::net::SocketAddr>,
	telemetry_worker_handle: Option<TelemetryWorkerHandle>,
) -> Result<
	service::PartialComponents<
		FullClient<RuntimeApi, Executor>, FullBackend, FullSelectChain,
		consensus_common::DefaultImportQueue<Block, FullClient<RuntimeApi, Executor>>,
		sc_transaction_pool::FullPool<Block, FullClient<RuntimeApi, Executor>>,
		(
			impl Fn(
				polkadot_rpc::DenyUnsafe,
				polkadot_rpc::SubscriptionTaskExecutor,
			) -> polkadot_rpc::RpcExtension,
			(
				babe::BabeBlockImport<
					Block, FullClient<RuntimeApi, Executor>, FullGrandpaBlockImport<RuntimeApi, Executor>
				>,
				grandpa::LinkHalf<Block, FullClient<RuntimeApi, Executor>, FullSelectChain>,
				babe::BabeLink<Block>
			),
			grandpa::SharedVoterState,
			std::time::Duration, // slot-duration
			Option<Telemetry>,
		)
	>,
	Error
>
	where
		RuntimeApi: ConstructRuntimeApi<Block, FullClient<RuntimeApi, Executor>> + Send + Sync + 'static,
		RuntimeApi::RuntimeApi:
			RuntimeApiCollection<StateBackend = sc_client_api::StateBackendFor<FullBackend, Block>>,
		Executor: NativeExecutionDispatch + 'static,
{
	set_prometheus_registry(config)?;

	let inherent_data_providers = inherents::InherentDataProviders::new();

	let telemetry = config.telemetry_endpoints.clone()
		.filter(|x| !x.is_empty())
		.map(move |endpoints| -> Result<_, telemetry::Error> {
			let (worker, mut worker_handle) = if let Some(worker_handle) = telemetry_worker_handle {
				(None, worker_handle)
			} else {
				let worker = TelemetryWorker::new(16)?;
				let worker_handle = worker.handle();
				(Some(worker), worker_handle)
			};
			let telemetry = worker_handle.new_telemetry(endpoints);
			Ok((worker, telemetry))
		})
		.transpose()?;

	let (client, backend, keystore_container, task_manager) =
		service::new_full_parts::<Block, RuntimeApi, Executor>(
			&config,
			telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
		)?;
	let client = Arc::new(client);

	let telemetry = telemetry
		.map(|(worker, telemetry)| {
			if let Some(worker) = worker {
				task_manager.spawn_handle().spawn("telemetry", worker.run());
			}
			telemetry
		});

	jaeger_launch_collector_with_agent(task_manager.spawn_handle(), &*config, jaeger_agent)?;

	let select_chain = sc_consensus::LongestChain::new(backend.clone());

	let transaction_pool = sc_transaction_pool::BasicPool::new_full(
		config.transaction_pool.clone(),
		config.role.is_authority().into(),
		config.prometheus_registry(),
		task_manager.spawn_handle(),
		client.clone(),
	);

	let grandpa_hard_forks = if config.chain_spec.is_kusama() {
		grandpa_support::kusama_hard_forks()
	} else {
		Vec::new()
	};

	let (grandpa_block_import, grandpa_link) =
		grandpa::block_import_with_authority_set_hard_forks(
			client.clone(),
			&(client.clone() as Arc<_>),
			select_chain.clone(),
			grandpa_hard_forks,
			telemetry.as_ref().map(|x| x.handle()),
		)?;

	let justification_import = grandpa_block_import.clone();

	let babe_config = babe::Config::get_or_compute(&*client)?;
	let (block_import, babe_link) = babe::block_import(
		babe_config.clone(),
		grandpa_block_import,
		client.clone(),
	)?;

	let import_queue = babe::import_queue(
		babe_link.clone(),
		block_import.clone(),
		Some(Box::new(justification_import)),
		client.clone(),
		select_chain.clone(),
		inherent_data_providers.clone(),
		&task_manager.spawn_essential_handle(),
		config.prometheus_registry(),
		consensus_common::CanAuthorWithNativeVersion::new(client.executor().clone()),
		telemetry.as_ref().map(|x| x.handle()),
	)?;

	let justification_stream = grandpa_link.justification_stream();
	let shared_authority_set = grandpa_link.shared_authority_set().clone();
	let shared_voter_state = grandpa::SharedVoterState::empty();
	let finality_proof_provider = GrandpaFinalityProofProvider::new_for_service(
		backend.clone(),
		Some(shared_authority_set.clone()),
	);

	let import_setup = (block_import.clone(), grandpa_link, babe_link.clone());
	let rpc_setup = shared_voter_state.clone();

	let shared_epoch_changes = babe_link.epoch_changes().clone();
	let slot_duration = babe_config.slot_duration();

	let rpc_extensions_builder = {
		let client = client.clone();
		let keystore = keystore_container.sync_keystore();
		let transaction_pool = transaction_pool.clone();
		let select_chain = select_chain.clone();
		let chain_spec = config.chain_spec.cloned_box();

		move |deny_unsafe, subscription_executor| -> polkadot_rpc::RpcExtension {
			let deps = polkadot_rpc::FullDeps {
				client: client.clone(),
				pool: transaction_pool.clone(),
				select_chain: select_chain.clone(),
				chain_spec: chain_spec.cloned_box(),
				deny_unsafe,
				babe: polkadot_rpc::BabeDeps {
					babe_config: babe_config.clone(),
					shared_epoch_changes: shared_epoch_changes.clone(),
					keystore: keystore.clone(),
				},
				grandpa: polkadot_rpc::GrandpaDeps {
					shared_voter_state: shared_voter_state.clone(),
					shared_authority_set: shared_authority_set.clone(),
					justification_stream: justification_stream.clone(),
					subscription_executor,
					finality_provider: finality_proof_provider.clone(),
				},
			};

			polkadot_rpc::create_full(deps)
		}
	};

	Ok(service::PartialComponents {
		client,
		backend,
		task_manager,
		keystore_container,
		select_chain,
		import_queue,
		transaction_pool,
		inherent_data_providers,
		other: (rpc_extensions_builder, import_setup, rpc_setup, slot_duration, telemetry)
	})
}

#[cfg(all(feature = "full-node", not(feature = "real-overseer")))]
fn real_overseer<Spawner, RuntimeClient>(
	leaves: impl IntoIterator<Item = BlockInfo>,
	_: Arc<LocalKeystore>,
	_: Arc<RuntimeClient>,
	_: AvailabilityConfig,
	_: Arc<sc_network::NetworkService<Block, Hash>>,
	_: AuthorityDiscoveryService,
	_request_multiplexer: (),
	registry: Option<&Registry>,
	spawner: Spawner,
	_: IsCollator,
	_: IsolationStrategy,
	_: ApprovalVotingConfig,
) -> Result<(Overseer<Spawner>, OverseerHandler), Error>
where
	RuntimeClient: 'static + ProvideRuntimeApi<Block> + HeaderBackend<Block> + AuxStore,
	RuntimeClient::Api: ParachainHost<Block> + BabeApi<Block>,
	Spawner: 'static + SpawnNamed + Clone + Unpin,
{
	Overseer::new(
		leaves,
		AllSubsystems::<()>::dummy(),
		registry,
		spawner,
	).map_err(|e| e.into())
}

#[cfg(all(feature = "full-node", feature = "real-overseer"))]
fn real_overseer<Spawner, RuntimeClient>(
	leaves: impl IntoIterator<Item = BlockInfo>,
	keystore: Arc<LocalKeystore>,
	runtime_client: Arc<RuntimeClient>,
	availability_config: AvailabilityConfig,
	network_service: Arc<sc_network::NetworkService<Block, Hash>>,
	authority_discovery: AuthorityDiscoveryService,
	request_multiplexer: RequestMultiplexer,
	registry: Option<&Registry>,
	spawner: Spawner,
	is_collator: IsCollator,
	isolation_strategy: IsolationStrategy,
	approval_voting_config: ApprovalVotingConfig,
) -> Result<(Overseer<Spawner>, OverseerHandler), Error>
where
	RuntimeClient: 'static + ProvideRuntimeApi<Block> + HeaderBackend<Block> + AuxStore,
	RuntimeClient::Api: ParachainHost<Block> + BabeApi<Block>,
	Spawner: 'static + SpawnNamed + Clone + Unpin,
{
	use polkadot_node_subsystem_util::metrics::Metrics;

	use polkadot_availability_distribution::AvailabilityDistributionSubsystem;
	use polkadot_node_core_av_store::AvailabilityStoreSubsystem;
	use polkadot_availability_bitfield_distribution::BitfieldDistribution as BitfieldDistributionSubsystem;
	use polkadot_node_core_bitfield_signing::BitfieldSigningSubsystem;
	use polkadot_node_core_backing::CandidateBackingSubsystem;
	use polkadot_node_core_candidate_selection::CandidateSelectionSubsystem;
	use polkadot_node_core_candidate_validation::CandidateValidationSubsystem;
	use polkadot_node_core_chain_api::ChainApiSubsystem;
	use polkadot_node_collation_generation::CollationGenerationSubsystem;
	use polkadot_collator_protocol::{CollatorProtocolSubsystem, ProtocolSide};
	use polkadot_network_bridge::NetworkBridge as NetworkBridgeSubsystem;
	use polkadot_node_core_provisioner::ProvisioningSubsystem as ProvisionerSubsystem;
	use polkadot_node_core_runtime_api::RuntimeApiSubsystem;
	use polkadot_statement_distribution::StatementDistribution as StatementDistributionSubsystem;
	use polkadot_availability_recovery::AvailabilityRecoverySubsystem;
	use polkadot_approval_distribution::ApprovalDistribution as ApprovalDistributionSubsystem;
	use polkadot_node_core_approval_voting::ApprovalVotingSubsystem;
	use polkadot_gossip_support::GossipSupport as GossipSupportSubsystem;

	let all_subsystems = AllSubsystems {
		availability_distribution: AvailabilityDistributionSubsystem::new(
			keystore.clone(),
			Metrics::register(registry)?,
		),
		availability_recovery: AvailabilityRecoverySubsystem::with_chunks_only(),
		availability_store: AvailabilityStoreSubsystem::new_on_disk(
			availability_config,
			Metrics::register(registry)?,
		)?,
		bitfield_distribution: BitfieldDistributionSubsystem::new(
			Metrics::register(registry)?,
		),
		bitfield_signing: BitfieldSigningSubsystem::new(
			spawner.clone(),
			keystore.clone(),
			Metrics::register(registry)?,
		),
		candidate_backing: CandidateBackingSubsystem::new(
			spawner.clone(),
			keystore.clone(),
			Metrics::register(registry)?,
		),
		candidate_selection: CandidateSelectionSubsystem::new(
			spawner.clone(),
			keystore.clone(),
			Metrics::register(registry)?,
		),
		candidate_validation: CandidateValidationSubsystem::new(
			spawner.clone(),
			Metrics::register(registry)?,
			isolation_strategy,
		),
		chain_api: ChainApiSubsystem::new(
			runtime_client.clone(),
			Metrics::register(registry)?,
		),
		collation_generation: CollationGenerationSubsystem::new(
			Metrics::register(registry)?,
		),
		collator_protocol: {
			let side = match is_collator {
				IsCollator::Yes(collator_pair) => ProtocolSide::Collator(
					network_service.local_peer_id().clone(),
					collator_pair,
					Metrics::register(registry)?,
				),
				IsCollator::No => ProtocolSide::Validator(Default::default(), Metrics::register(registry)?),
			};
			CollatorProtocolSubsystem::new(
				side,
			)
		},
		network_bridge: NetworkBridgeSubsystem::new(
			network_service,
			authority_discovery,
			request_multiplexer,
		),
		provisioner: ProvisionerSubsystem::new(
			spawner.clone(),
			(),
			Metrics::register(registry)?,
		),
		runtime_api: RuntimeApiSubsystem::new(
			runtime_client.clone(),
			Metrics::register(registry)?,
			spawner.clone(),
		),
		statement_distribution: StatementDistributionSubsystem::new(
			Metrics::register(registry)?,
		),
		approval_distribution: ApprovalDistributionSubsystem::new(
			Metrics::register(registry)?,
		),
		approval_voting: ApprovalVotingSubsystem::with_config(
			approval_voting_config,
			keystore.clone(),
			Metrics::register(registry)?,
		)?,
		gossip_support: GossipSupportSubsystem::new(),
	};

	Overseer::new(
		leaves,
		all_subsystems,
		registry,
		spawner,
	).map_err(|e| e.into())
}

#[cfg(feature = "full-node")]
pub struct NewFull<C> {
	pub task_manager: TaskManager,
	pub client: C,
	pub overseer_handler: Option<OverseerHandler>,
	pub network: Arc<sc_network::NetworkService<Block, <Block as BlockT>::Hash>>,
	pub network_status_sinks: service::NetworkStatusSinks<Block>,
	pub rpc_handlers: RpcHandlers,
	pub backend: Arc<FullBackend>,
}
#[cfg(feature = "full-node")]
impl<C> NewFull<C> {
	/// Convert the client type using the given `func`.
	pub fn with_client<NC>(self, func: impl FnOnce(C) -> NC) -> NewFull<NC> {
		NewFull {
			client: func(self.client),
			task_manager: self.task_manager,
			overseer_handler: self.overseer_handler,
			network: self.network,
			network_status_sinks: self.network_status_sinks,
			rpc_handlers: self.rpc_handlers,
			backend: self.backend,
		}
	}
}

/// Is this node a collator?
#[cfg(feature = "full-node")]
#[derive(Clone)]
pub enum IsCollator {
	/// This node is a collator.
	Yes(CollatorPair),
	/// This node is not a collator.
	No,
}

#[cfg(feature = "full-node")]
impl std::fmt::Debug for IsCollator {
	fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
		use sp_core::Pair;
		match self {
			IsCollator::Yes(pair) => write!(fmt, "Yes({})", pair.public()),
			IsCollator::No => write!(fmt, "No"),
		}
	}
}

#[cfg(feature = "full-node")]
impl IsCollator {
	/// Is this a collator?
	fn is_collator(&self) -> bool {
		matches!(self, Self::Yes(_))
	}
}

/// Returns the active leaves the overseer should start with.
#[cfg(feature = "full-node")]
fn active_leaves<RuntimeApi, Executor>(
	select_chain: &sc_consensus::LongestChain<FullBackend, Block>,
	client: &FullClient<RuntimeApi, Executor>,
) -> Result<Vec<BlockInfo>, Error>
	where
		RuntimeApi: ConstructRuntimeApi<Block, FullClient<RuntimeApi, Executor>> + Send + Sync + 'static,
		RuntimeApi::RuntimeApi:
			RuntimeApiCollection<StateBackend = sc_client_api::StateBackendFor<FullBackend, Block>>,
		Executor: NativeExecutionDispatch + 'static,
{
	let best_block = select_chain.best_chain()?;

	let mut leaves = select_chain
		.leaves()
		.unwrap_or_default()
		.into_iter()
		.filter_map(|hash| {
			let number = client.number(hash).ok()??;

			// Only consider leaves that are in maximum an uncle of the best block.
			if number < best_block.number().saturating_sub(1) {
				return None
			} else if hash == best_block.hash() {
				return None
			};

			let parent_hash = client.header(&BlockId::Hash(hash)).ok()??.parent_hash;

			Some(BlockInfo {
				hash,
				parent_hash,
				number,
			})
		})
		.collect::<Vec<_>>();

	// Sort by block number and get the maximum number of leaves
	leaves.sort_by_key(|b| b.number);

	leaves.push(BlockInfo {
		hash: best_block.hash(),
		parent_hash: *best_block.parent_hash(),
		number: *best_block.number(),
	});

	Ok(leaves.into_iter().rev().take(MAX_ACTIVE_LEAVES).collect())
}

/// Create a new full node of arbitrary runtime and executor.
///
/// This is an advanced feature and not recommended for general use. Generally, `build_full` is
/// a better choice.
#[cfg(feature = "full-node")]
pub fn new_full<RuntimeApi, Executor>(
	mut config: Configuration,
	is_collator: IsCollator,
	grandpa_pause: Option<(u32, u32)>,
	jaeger_agent: Option<std::net::SocketAddr>,
	isolation_strategy: IsolationStrategy,
	telemetry_worker_handle: Option<TelemetryWorkerHandle>,
) -> Result<NewFull<Arc<FullClient<RuntimeApi, Executor>>>, Error>
	where
		RuntimeApi: ConstructRuntimeApi<Block, FullClient<RuntimeApi, Executor>> + Send + Sync + 'static,
		RuntimeApi::RuntimeApi:
			RuntimeApiCollection<StateBackend = sc_client_api::StateBackendFor<FullBackend, Block>>,
		Executor: NativeExecutionDispatch + 'static,
{
	#[cfg(feature = "real-overseer")]
	info!("real-overseer feature is ENABLED");

	let role = config.role.clone();
	let force_authoring = config.force_authoring;
	let backoff_authoring_blocks = {
		let mut backoff = sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging {
			#[cfg(feature = "real-overseer")]
			unfinalized_slack: 100,
			..Default::default()
		};

		if config.chain_spec.is_rococo() {
			// it's a testnet that's in flux, finality has stalled sometimes due
			// to operational issues and it's annoying to slow down block
			// production to 1 block per hour.
			backoff.max_interval = 10;
		}

		Some(backoff)
	};

	let disable_grandpa = config.disable_grandpa;
	let name = config.network.node_name.clone();

	let service::PartialComponents {
		client,
		backend,
		mut task_manager,
		keystore_container,
		select_chain,
		import_queue,
		transaction_pool,
		inherent_data_providers,
		other: (rpc_extensions_builder, import_setup, rpc_setup, slot_duration, mut telemetry)
	} = new_partial::<RuntimeApi, Executor>(&mut config, jaeger_agent, telemetry_worker_handle)?;

	let prometheus_registry = config.prometheus_registry().cloned();

	let shared_voter_state = rpc_setup;

	// Note: GrandPa is pushed before the Polkadot-specific protocols. This doesn't change
	// anything in terms of behaviour, but makes the logs more consistent with the other
	// Substrate nodes.
	config.network.extra_sets.push(grandpa::grandpa_peers_set_config());

	#[cfg(feature = "real-overseer")]
	{
		use polkadot_network_bridge::{peer_sets_info, IsAuthority};
		let is_authority = if role.is_authority() {
			IsAuthority::Yes
		} else {
			IsAuthority::No
		};
		config.network.extra_sets.extend(peer_sets_info(is_authority));
	}

	// Add a dummy collation set with the intent of printing an error if one tries to connect a
	// collator to a node that isn't compiled with `--features real-overseer`.
	#[cfg(not(feature = "real-overseer"))]
	config.network.extra_sets.push(sc_network::config::NonDefaultSetConfig {
		notifications_protocol: "/polkadot/collation/1".into(),
		max_notification_size: 16,
		set_config: sc_network::config::SetConfig {
			in_peers: 25,
			out_peers: 0,
			reserved_nodes: Vec::new(),
			non_reserved_mode: sc_network::config::NonReservedPeerMode::Accept,
		},
	});

	// TODO: At the moment, the collator protocol uses notifications protocols to download
	// collations. Because of DoS-protection measures, notifications protocols have a very limited
	// bandwidth capacity, resulting in the collation download taking a long time.
	// The lines of code below considerably relax this DoS protection in order to circumvent
	// this problem. This configuration change should preferably not reach any live network, and
	// should be removed once the collation protocol is finished.
	// Tracking issue: https://github.com/paritytech/polkadot/issues/2283
	#[cfg(feature = "real-overseer")]
	fn adjust_yamux(cfg: &mut sc_network::config::NetworkConfiguration) {
		cfg.yamux_window_size = Some(5 * 1024 * 1024);
	}
	#[cfg(not(feature = "real-overseer"))]
	fn adjust_yamux(_: &mut sc_network::config::NetworkConfiguration) {}
	adjust_yamux(&mut config.network);

	config.network.request_response_protocols.push(sc_finality_grandpa_warp_sync::request_response_config_for_chain(
		&config, task_manager.spawn_handle(), backend.clone(),
		import_setup.1.shared_authority_set().clone(),
	));

	#[cfg(feature = "real-overseer")]
	fn register_request_response(config: &mut sc_network::config::NetworkConfiguration) -> RequestMultiplexer {
		let (multiplexer, configs) = RequestMultiplexer::new();
		config.request_response_protocols.extend(configs);
		multiplexer
	}
	#[cfg(not(feature = "real-overseer"))]
	fn register_request_response(_: &mut sc_network::config::NetworkConfiguration) {}
	let request_multiplexer = register_request_response(&mut config.network);

	let (network, network_status_sinks, system_rpc_tx, network_starter) =
		service::build_network(service::BuildNetworkParams {
			config: &config,
			client: client.clone(),
			transaction_pool: transaction_pool.clone(),
			spawn_handle: task_manager.spawn_handle(),
			import_queue,
			on_demand: None,
			block_announce_validator_builder: None,
		})?;

	// See above.
	// We have added a dummy collation set with the intent of printing an error if one
	// tries to connect a collator to a node that isn't compiled with `--features real-overseer`.
	#[cfg(not(feature = "real-overseer"))]
	task_manager.spawn_handle().spawn("dummy-collation-handler", {
		let mut network_events = network.event_stream("dummy-collation-handler");
		async move {
			use futures::prelude::*;
			while let Some(ev) = network_events.next().await {
				if let sc_network::Event::NotificationStreamOpened { protocol, .. } = ev {
					if protocol == "/polkadot/collation/1" {
						tracing::warn!(
							"Incoming collator on a node with parachains disabled. This warning \
							is harmless and is here to warn developers that they might have \
							accidentally compiled their node without the `real-overseer` feature \
							enabled."
						);
					}
				}
			}
		}
	});

	if config.offchain_worker.enabled {
		let _ = service::build_offchain_workers(
			&config,
			task_manager.spawn_handle(),
			client.clone(),
			network.clone(),
		);
	}

	let availability_config = config.database.clone().try_into().map_err(Error::Availability)?;

	let approval_voting_config = ApprovalVotingConfig {
		path: config.database.path()
			.ok_or(Error::DatabasePathRequired)?
			.join("parachains")
			.join("approval-voting"),
		slot_duration_millis: slot_duration.as_millis() as u64,
		cache_size: None, // default is fine.
	};

	let rpc_handlers = service::spawn_tasks(service::SpawnTasksParams {
		config,
		backend: backend.clone(),
		client: client.clone(),
		keystore: keystore_container.sync_keystore(),
		network: network.clone(),
		rpc_extensions_builder: Box::new(rpc_extensions_builder),
		transaction_pool: transaction_pool.clone(),
		task_manager: &mut task_manager,
		on_demand: None,
		remote_blockchain: None,
		network_status_sinks: network_status_sinks.clone(),
		system_rpc_tx,
		telemetry: telemetry.as_mut(),
	})?;

	let (block_import, link_half, babe_link) = import_setup;

	let overseer_client = client.clone();
	let spawner = task_manager.spawn_handle();
	let active_leaves = active_leaves(&select_chain, &*client)?;

	let authority_discovery_service = if role.is_authority() || is_collator.is_collator() {
		use sc_network::Event;
		use futures::StreamExt;

		let authority_discovery_role = if role.is_authority() {
			sc_authority_discovery::Role::PublishAndDiscover(
				keystore_container.keystore(),
			)
		} else {
			// don't publish our addresses when we're only a collator
			sc_authority_discovery::Role::Discover
		};
		let dht_event_stream = network.event_stream("authority-discovery")
			.filter_map(|e| async move { match e {
				Event::Dht(e) => Some(e),
				_ => None,
			}});
		let (worker, service) = sc_authority_discovery::new_worker_and_service(
			client.clone(),
			network.clone(),
			Box::pin(dht_event_stream),
			authority_discovery_role,
			prometheus_registry.clone(),
		);

		task_manager.spawn_handle().spawn("authority-discovery-worker", worker.run());
		Some(service)
	} else {
		None
	};

	// We'd like to write `let overseer_handler = authority_discovery_service.map(|authority_discovery_service| ...)`,
	// but in that case we couldn't use `?`
	// to propagate errors.
	let local_keystore = keystore_container.local_keystore();
	if local_keystore.is_none() {
		tracing::info!("Cannot run as validator without local keystore.");
	}

	let maybe_params = local_keystore
		.and_then(move |k| authority_discovery_service.map(|a| (a, k)));

	let overseer_handler = if let Some((authority_discovery_service, keystore)) = maybe_params {
		let (overseer, overseer_handler) = real_overseer(
			active_leaves,
			keystore,
			overseer_client.clone(),
			availability_config,
			network.clone(),
			authority_discovery_service,
			request_multiplexer,
			prometheus_registry.as_ref(),
			spawner,
			is_collator,
			isolation_strategy,
			approval_voting_config,
		)?;
		let overseer_handler_clone = overseer_handler.clone();

		task_manager.spawn_essential_handle().spawn_blocking("overseer", Box::pin(async move {
			use futures::{pin_mut, select, FutureExt};

			let forward = polkadot_overseer::forward_events(overseer_client, overseer_handler_clone);

			let forward = forward.fuse();
			let overseer_fut = overseer.run().fuse();

			pin_mut!(overseer_fut);
			pin_mut!(forward);

			select! {
				_ = forward => (),
				_ = overseer_fut => (),
				complete => (),
			}
		}));

		Some(overseer_handler)
	} else {
		None
	};

	if role.is_authority() {
		let can_author_with =
			consensus_common::CanAuthorWithNativeVersion::new(client.executor().clone());

		let proposer = ProposerFactory::new(
			task_manager.spawn_handle(),
			client.clone(),
			transaction_pool,
			overseer_handler.as_ref().ok_or(Error::AuthoritiesRequireRealOverseer)?.clone(),
			prometheus_registry.as_ref(),
			telemetry.as_ref().map(|x| x.handle()),
		);

		let babe_config = babe::BabeParams {
			keystore: keystore_container.sync_keystore(),
			client: client.clone(),
			select_chain,
			block_import,
			env: proposer,
			sync_oracle: network.clone(),
			inherent_data_providers: inherent_data_providers.clone(),
			force_authoring,
			backoff_authoring_blocks,
			babe_link,
			can_author_with,
			block_proposal_slot_portion: babe::SlotProportion::new(2f32 / 3f32),
			telemetry: telemetry.as_ref().map(|x| x.handle()),
		};

		let babe = babe::start_babe(babe_config)?;
		task_manager.spawn_essential_handle().spawn_blocking("babe", babe);
	}

	// if the node isn't actively participating in consensus then it doesn't
	// need a keystore, regardless of which protocol we use below.
	let keystore_opt = if role.is_authority() {
		Some(keystore_container.sync_keystore())
	} else {
		None
	};

	let config = grandpa::Config {
		// FIXME substrate#1578 make this available through chainspec
		gossip_duration: Duration::from_millis(1000),
		justification_period: 512,
		name: Some(name),
		observer_enabled: false,
		keystore: keystore_opt,
		is_authority: role.is_authority(),
		telemetry: telemetry.as_ref().map(|x| x.handle()),
	};

	let enable_grandpa = !disable_grandpa;
	if enable_grandpa {
		// start the full GRANDPA voter
		// NOTE: unlike in substrate we are currently running the full
		// GRANDPA voter protocol for all full nodes (regardless of whether
		// they're validators or not). at this point the full voter should
		// provide better guarantees of block and vote data availability than
		// the observer.

		// add a custom voting rule to temporarily stop voting for new blocks
		// after the given pause block is finalized and restarting after the
		// given delay.
		let builder = grandpa::VotingRulesBuilder::default();

		#[cfg(feature = "real-overseer")]
		let builder = if let Some(ref overseer) = overseer_handler {
			builder.add(grandpa_support::ApprovalCheckingDiagnostic::new(
				overseer.clone(),
				prometheus_registry.as_ref(),
			)?)
		} else {
			builder
		};

		let voting_rule = match grandpa_pause {
			Some((block, delay)) => {
				info!(
					block_number = %block,
					delay = %delay,
					"GRANDPA scheduled voting pause set for block #{} with a duration of {} blocks.",
					block,
					delay,
				);

				builder
					.add(grandpa_support::PauseAfterBlockFor(block, delay))
					.build()
			}
			None => builder.build(),
		};

		let grandpa_config = grandpa::GrandpaParams {
			config,
			link: link_half,
			network: network.clone(),
			voting_rule,
			prometheus_registry: prometheus_registry.clone(),
			shared_voter_state,
			telemetry: telemetry.as_ref().map(|x| x.handle()),
		};

		task_manager.spawn_essential_handle().spawn_blocking(
			"grandpa-voter",
			grandpa::run_grandpa_voter(grandpa_config)?
		);
	}

	network_starter.start_network();

	Ok(NewFull {
		task_manager,
		client,
		overseer_handler,
		network,
		network_status_sinks,
		rpc_handlers,
		backend,
	})
}

/// Builds a new service for a light client.
fn new_light<Runtime, Dispatch>(mut config: Configuration) -> Result<(
	TaskManager,
	RpcHandlers,
), Error>
	where
		Runtime: 'static + Send + Sync + ConstructRuntimeApi<Block, LightClient<Runtime, Dispatch>>,
		<Runtime as ConstructRuntimeApi<Block, LightClient<Runtime, Dispatch>>>::RuntimeApi:
			RuntimeApiCollection<StateBackend = sc_client_api::StateBackendFor<LightBackend, Block>>,
		Dispatch: NativeExecutionDispatch + 'static,
{
	set_prometheus_registry(&mut config)?;
	use sc_client_api::backend::RemoteBackend;

	let telemetry = config.telemetry_endpoints.clone()
		.filter(|x| !x.is_empty())
		.map(|endpoints| -> Result<_, telemetry::Error> {
			let worker = TelemetryWorker::new(16)?;
			let telemetry = worker.handle().new_telemetry(endpoints);
			Ok((worker, telemetry))
		})
		.transpose()?;

	let (client, backend, keystore_container, mut task_manager, on_demand) =
		service::new_light_parts::<Block, Runtime, Dispatch>(
			&config,
			telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
		)?;

	let mut telemetry = telemetry
		.map(|(worker, telemetry)| {
			task_manager.spawn_handle().spawn("telemetry", worker.run());
			telemetry
		});

	let select_chain = sc_consensus::LongestChain::new(backend.clone());

	let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light(
		config.transaction_pool.clone(),
		config.prometheus_registry(),
		task_manager.spawn_handle(),
		client.clone(),
		on_demand.clone(),
	));

	let (grandpa_block_import, _) = grandpa::block_import(
		client.clone(),
		&(client.clone() as Arc<_>),
		select_chain.clone(),
		telemetry.as_ref().map(|x| x.handle()),
	)?;
	let justification_import = grandpa_block_import.clone();

	let (babe_block_import, babe_link) = babe::block_import(
		babe::Config::get_or_compute(&*client)?,
		grandpa_block_import,
		client.clone(),
	)?;

	let inherent_data_providers = inherents::InherentDataProviders::new();

	// FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`.
	let import_queue = babe::import_queue(
		babe_link,
		babe_block_import,
		Some(Box::new(justification_import)),
		client.clone(),
		select_chain.clone(),
		inherent_data_providers.clone(),
		&task_manager.spawn_essential_handle(),
		config.prometheus_registry(),
		consensus_common::NeverCanAuthor,
		telemetry.as_ref().map(|x| x.handle()),
	)?;

	let (network, network_status_sinks, system_rpc_tx, network_starter) =
		service::build_network(service::BuildNetworkParams {
			config: &config,
			client: client.clone(),
			transaction_pool: transaction_pool.clone(),
			spawn_handle: task_manager.spawn_handle(),
			import_queue,
			on_demand: Some(on_demand.clone()),
			block_announce_validator_builder: None,
		})?;

	if config.offchain_worker.enabled {
		let _ = service::build_offchain_workers(
			&config,
			task_manager.spawn_handle(),
			client.clone(),
			network.clone(),
		);
	}

	let light_deps = polkadot_rpc::LightDeps {
		remote_blockchain: backend.remote_blockchain(),
		fetcher: on_demand.clone(),
		client: client.clone(),
		pool: transaction_pool.clone(),
	};

	let rpc_extensions = polkadot_rpc::create_light(light_deps);

	let rpc_handlers = service::spawn_tasks(service::SpawnTasksParams {
		on_demand: Some(on_demand),
		remote_blockchain: Some(backend.remote_blockchain()),
		rpc_extensions_builder: Box::new(service::NoopRpcExtensionBuilder(rpc_extensions)),
		task_manager: &mut task_manager,
		config,
		keystore: keystore_container.sync_keystore(),
		backend,
		transaction_pool,
		client,
		network,
		network_status_sinks,
		system_rpc_tx,
		telemetry: telemetry.as_mut(),
	})?;

	network_starter.start_network();

	Ok((task_manager, rpc_handlers))
}

/// Builds a new object suitable for chain operations.
#[cfg(feature = "full-node")]
pub fn new_chain_ops(
	mut config: &mut Configuration,
	jaeger_agent: Option<std::net::SocketAddr>,
) -> Result<
	(
		Arc<Client>,
		Arc<FullBackend>,
		consensus_common::import_queue::BasicQueue<Block, PrefixedMemoryDB<BlakeTwo256>>,
		TaskManager,
	),
	Error
>
{
	config.keystore = service::config::KeystoreConfig::InMemory;
	if config.chain_spec.is_rococo() {
		let service::PartialComponents { client, backend, import_queue, task_manager, .. }
			= new_partial::<rococo_runtime::RuntimeApi, RococoExecutor>(config, jaeger_agent, None)?;
		Ok((Arc::new(Client::Rococo(client)), backend, import_queue, task_manager))
	} else if config.chain_spec.is_kusama() {
		let service::PartialComponents { client, backend, import_queue, task_manager, .. }
			= new_partial::<kusama_runtime::RuntimeApi, KusamaExecutor>(config, jaeger_agent, None)?;
		Ok((Arc::new(Client::Kusama(client)), backend, import_queue, task_manager))
	} else if config.chain_spec.is_westend() {
		let service::PartialComponents { client, backend, import_queue, task_manager, .. }
			= new_partial::<westend_runtime::RuntimeApi, WestendExecutor>(config, jaeger_agent, None)?;
		Ok((Arc::new(Client::Westend(client)), backend, import_queue, task_manager))
	} else {
		let service::PartialComponents { client, backend, import_queue, task_manager, .. }
			= new_partial::<polkadot_runtime::RuntimeApi, PolkadotExecutor>(config, jaeger_agent, None)?;
		Ok((Arc::new(Client::Polkadot(client)), backend, import_queue, task_manager))
	}
}

/// Build a new light node.
pub fn build_light(config: Configuration) -> Result<(
	TaskManager,
	RpcHandlers,
), Error> {
	if config.chain_spec.is_rococo() {
		new_light::<rococo_runtime::RuntimeApi, RococoExecutor>(config)
	} else if config.chain_spec.is_kusama() {
		new_light::<kusama_runtime::RuntimeApi, KusamaExecutor>(config)
	} else if config.chain_spec.is_westend() {
		new_light::<westend_runtime::RuntimeApi, WestendExecutor>(config)
	} else {
		new_light::<polkadot_runtime::RuntimeApi, PolkadotExecutor>(config)
	}
}

#[cfg(feature = "full-node")]
pub fn build_full(
	config: Configuration,
	is_collator: IsCollator,
	grandpa_pause: Option<(u32, u32)>,
	jaeger_agent: Option<std::net::SocketAddr>,
	telemetry_worker_handle: Option<TelemetryWorkerHandle>,
) -> Result<NewFull<Client>, Error> {
	let isolation_strategy = {
		#[cfg(not(any(target_os = "android", target_os = "unknown")))]
		{
			let cache_base_path = config.database.path();
			IsolationStrategy::external_process_with_caching(cache_base_path)
		}

		#[cfg(any(target_os = "android", target_os = "unknown"))]
		{
			IsolationStrategy::InProcess
		}
	};

	if config.chain_spec.is_rococo() {
		new_full::<rococo_runtime::RuntimeApi, RococoExecutor>(
			config,
			is_collator,
			grandpa_pause,
			jaeger_agent,
			isolation_strategy,
			telemetry_worker_handle,
		).map(|full| full.with_client(Client::Rococo))
	} else if config.chain_spec.is_kusama() {
		new_full::<kusama_runtime::RuntimeApi, KusamaExecutor>(
			config,
			is_collator,
			grandpa_pause,
			jaeger_agent,
			isolation_strategy,
			telemetry_worker_handle,
		).map(|full| full.with_client(Client::Kusama))
	} else if config.chain_spec.is_westend() {
		new_full::<westend_runtime::RuntimeApi, WestendExecutor>(
			config,
			is_collator,
			grandpa_pause,
			jaeger_agent,
			isolation_strategy,
			telemetry_worker_handle,
		).map(|full| full.with_client(Client::Westend))
	} else {
		new_full::<polkadot_runtime::RuntimeApi, PolkadotExecutor>(
			config,
			is_collator,
			grandpa_pause,
			jaeger_agent,
			isolation_strategy,
			telemetry_worker_handle,
		).map(|full| full.with_client(Client::Polkadot))
	}
}