// Copyright 2017-2020 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.

// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.

//! Polkadot service. Specialized wrapper over substrate service.

#![deny(unused_results)]

pub mod chain_spec;
mod grandpa_support;
mod client;

use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider};
#[cfg(feature = "full-node")]
use {
	std::convert::TryInto,
	std::time::Duration,

	log::info,
	polkadot_node_core_av_store::Config as AvailabilityConfig,
	polkadot_node_core_proposer::ProposerFactory,
	polkadot_overseer::{AllSubsystems, BlockInfo, Overseer, OverseerHandler},
	polkadot_primitives::v1::ParachainHost,
	authority_discovery::Service as AuthorityDiscoveryService,
	sp_blockchain::HeaderBackend,
	sp_core::traits::SpawnNamed,
	sp_keystore::SyncCryptoStorePtr,
	sp_trie::PrefixedMemoryDB,
	sc_client_api::ExecutorProvider,
};

use std::sync::Arc;

use prometheus_endpoint::Registry;
use sc_executor::native_executor_instance;
use service::RpcHandlers;

pub use self::client::{AbstractClient, Client, ClientHandle, ExecuteWithClient, RuntimeApiCollection};
pub use chain_spec::{PolkadotChainSpec, KusamaChainSpec, WestendChainSpec, RococoChainSpec};
pub use consensus_common::{Proposal, SelectChain, BlockImport, RecordProof, block_validation::Chain};
pub use polkadot_parachain::wasm_executor::run_worker as run_validation_worker;
pub use polkadot_primitives::v1::{Block, BlockId, CollatorId, Hash, Id as ParaId};
pub use sc_client_api::{Backend, ExecutionStrategy, CallExecutor};
pub use sc_consensus::LongestChain;
pub use sc_executor::NativeExecutionDispatch;
pub use service::{
	Role, PruningMode, TransactionPoolOptions, Error, RuntimeGenesis,
	TFullClient, TLightClient, TFullBackend, TLightBackend, TFullCallExecutor, TLightCallExecutor,
	Configuration, ChainSpec, TaskManager,
};
pub use service::config::{DatabaseConfig, PrometheusConfig};
pub use sp_api::{ApiRef, Core as CoreApi, ConstructRuntimeApi, ProvideRuntimeApi, StateBackend};
pub use sp_runtime::traits::{DigestFor, HashFor, NumberFor, Block as BlockT, self as runtime_traits, BlakeTwo256};

pub use kusama_runtime;
pub use polkadot_runtime;
pub use rococo_runtime;
pub use westend_runtime;

native_executor_instance!(
	pub PolkadotExecutor,
	polkadot_runtime::api::dispatch,
	polkadot_runtime::native_version,
	frame_benchmarking::benchmarking::HostFunctions,
);

native_executor_instance!(
	pub KusamaExecutor,
	kusama_runtime::api::dispatch,
	kusama_runtime::native_version,
	frame_benchmarking::benchmarking::HostFunctions,
);

native_executor_instance!(
	pub WestendExecutor,
	westend_runtime::api::dispatch,
	westend_runtime::native_version,
	frame_benchmarking::benchmarking::HostFunctions,
);

native_executor_instance!(
	pub RococoExecutor,
	rococo_runtime::api::dispatch,
	rococo_runtime::native_version,
	frame_benchmarking::benchmarking::HostFunctions,
);

/// Can be called for a `Configuration` to check which network the configuration targets.
pub trait IdentifyVariant {
	/// Returns `true` if this is a configuration for the `Kusama` network.
	fn is_kusama(&self) -> bool;

	/// Returns `true` if this is a configuration for the `Westend` network.
	fn is_westend(&self) -> bool;

	/// Returns `true` if this is a configuration for the `Rococo` network.
	fn is_rococo(&self) -> bool;
}

impl IdentifyVariant for Box<dyn ChainSpec> {
	fn is_kusama(&self) -> bool {
		self.id().starts_with("kusama") || self.id().starts_with("ksm")
	}
	fn is_westend(&self) -> bool {
		self.id().starts_with("westend") || self.id().starts_with("wnd")
	}
	fn is_rococo(&self) -> bool {
		self.id().starts_with("rococo") || self.id().starts_with("rco")
	}
}
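
// A minimal sketch of how these helpers drive chain selection (the same
// pattern as `build_full`/`build_light` at the bottom of this file); `spec`
// here is a hypothetical `Box<dyn ChainSpec>` loaded elsewhere:
//
// 	let runtime_name = if spec.is_kusama() {
// 		"kusama"
// 	} else if spec.is_westend() {
// 		"westend"
// 	} else if spec.is_rococo() {
// 		"rococo"
// 	} else {
// 		"polkadot"
// 	};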

// If we're using prometheus, use a registry with a prefix of `polkadot`.
fn set_prometheus_registry(config: &mut Configuration) -> Result<(), Error> {
	if let Some(PrometheusConfig { registry, .. }) = config.prometheus_config.as_mut() {
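		// Note: `Registry::new_custom` applies the given prefix to every metric
		// registered with it, so e.g. a metric named `block_height` is exported
		// as `polkadot_block_height`.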
		*registry = Registry::new_custom(Some("polkadot".into()), None)?;
	}

	Ok(())
}

pub type FullBackend = service::TFullBackend<Block>;
#[cfg(feature = "full-node")]
type FullSelectChain = sc_consensus::LongestChain<FullBackend, Block>;
pub type FullClient<RuntimeApi, Executor> = service::TFullClient<Block, RuntimeApi, Executor>;
#[cfg(feature = "full-node")]
type FullGrandpaBlockImport<RuntimeApi, Executor> = grandpa::GrandpaBlockImport<
	FullBackend, Block, FullClient<RuntimeApi, Executor>, FullSelectChain
>;

type LightBackend = service::TLightBackendWithHash<Block, sp_runtime::traits::BlakeTwo256>;

type LightClient<RuntimeApi, Executor> =
	service::TLightClientWithBackend<Block, RuntimeApi, Executor, LightBackend>;

#[cfg(feature = "full-node")]
fn new_partial<RuntimeApi, Executor>(config: &mut Configuration) -> Result<
	service::PartialComponents<
		FullClient<RuntimeApi, Executor>, FullBackend, FullSelectChain,
		consensus_common::DefaultImportQueue<Block, FullClient<RuntimeApi, Executor>>,
		sc_transaction_pool::FullPool<Block, FullClient<RuntimeApi, Executor>>,
		(
			impl Fn(
				polkadot_rpc::DenyUnsafe,
				polkadot_rpc::SubscriptionTaskExecutor,
			) -> polkadot_rpc::RpcExtension,
			(
				babe::BabeBlockImport<
					Block, FullClient<RuntimeApi, Executor>, FullGrandpaBlockImport<RuntimeApi, Executor>
				>,
				grandpa::LinkHalf<Block, FullClient<RuntimeApi, Executor>, FullSelectChain>,
				babe::BabeLink<Block>
			),
			(
				grandpa::SharedVoterState,
				Arc<GrandpaFinalityProofProvider<FullBackend, Block>>,
			),
		)
	>,
	Error
>
	where
		RuntimeApi: ConstructRuntimeApi<Block, FullClient<RuntimeApi, Executor>> + Send + Sync + 'static,
		RuntimeApi::RuntimeApi:
		RuntimeApiCollection<StateBackend = sc_client_api::StateBackendFor<FullBackend, Block>>,
		Executor: NativeExecutionDispatch + 'static,
{
	set_prometheus_registry(config)?;

	let inherent_data_providers = inherents::InherentDataProviders::new();

	let (client, backend, keystore_container, task_manager) =
		service::new_full_parts::<Block, RuntimeApi, Executor>(&config)?;
	let client = Arc::new(client);

	let select_chain = sc_consensus::LongestChain::new(backend.clone());

	let transaction_pool = sc_transaction_pool::BasicPool::new_full(
		config.transaction_pool.clone(),
		config.prometheus_registry(),
		task_manager.spawn_handle(),
		client.clone(),
	);

	let grandpa_hard_forks = if config.chain_spec.is_kusama() {
		grandpa_support::kusama_hard_forks()
	} else {
		Vec::new()
	};

	let (grandpa_block_import, grandpa_link) =
		grandpa::block_import_with_authority_set_hard_forks(
			client.clone(),
			&(client.clone() as Arc<_>),
			select_chain.clone(),
			grandpa_hard_forks,
		)?;

	let justification_import = grandpa_block_import.clone();

	let (block_import, babe_link) = babe::block_import(
		babe::Config::get_or_compute(&*client)?,
		grandpa_block_import,
		client.clone(),
	)?;

	let import_queue = babe::import_queue(
		babe_link.clone(),
		block_import.clone(),
		Some(Box::new(justification_import)),
		None,
		client.clone(),
		select_chain.clone(),
		inherent_data_providers.clone(),
		&task_manager.spawn_handle(),
		config.prometheus_registry(),
		consensus_common::CanAuthorWithNativeVersion::new(client.executor().clone()),
	)?;

	let justification_stream = grandpa_link.justification_stream();
	let shared_authority_set = grandpa_link.shared_authority_set().clone();
	let shared_voter_state = grandpa::SharedVoterState::empty();
	let finality_proof_provider =
		GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone());

	let import_setup = (block_import.clone(), grandpa_link, babe_link.clone());
	let rpc_setup = (shared_voter_state.clone(), finality_proof_provider.clone());

	let babe_config = babe_link.config().clone();
	let shared_epoch_changes = babe_link.epoch_changes().clone();

	let rpc_extensions_builder = {
		let client = client.clone();
		let keystore = keystore_container.sync_keystore();
		let transaction_pool = transaction_pool.clone();
		let select_chain = select_chain.clone();
		let chain_spec = config.chain_spec.cloned_box();

		move |deny_unsafe, subscription_executor| -> polkadot_rpc::RpcExtension {
			let deps = polkadot_rpc::FullDeps {
				client: client.clone(),
				pool: transaction_pool.clone(),
				select_chain: select_chain.clone(),
				chain_spec: chain_spec.cloned_box(),
				deny_unsafe,
				babe: polkadot_rpc::BabeDeps {
					babe_config: babe_config.clone(),
					shared_epoch_changes: shared_epoch_changes.clone(),
					keystore: keystore.clone(),
				},
				grandpa: polkadot_rpc::GrandpaDeps {
					shared_voter_state: shared_voter_state.clone(),
					shared_authority_set: shared_authority_set.clone(),
					justification_stream: justification_stream.clone(),
					subscription_executor,
					finality_provider: finality_proof_provider.clone(),
				},
			};

			polkadot_rpc::create_full(deps)
		}
	};

	Ok(service::PartialComponents {
		client,
		backend,
		task_manager,
		keystore_container,
		select_chain,
		import_queue,
		transaction_pool,
		inherent_data_providers,
		other: (rpc_extensions_builder, import_setup, rpc_setup)
	})
}
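
// `new_partial` is consumed by `new_full` and `new_chain_ops` below; the
// `other` tuple carries everything full-node setup still needs: the RPC
// extensions builder, the BABE/GRANDPA import setup, and the shared voter
// state plus finality proof provider handed to the RPC layer.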

#[cfg(feature = "full-node")]
fn real_overseer<Spawner, RuntimeClient>(
	leaves: impl IntoIterator<Item = BlockInfo>,
	keystore: SyncCryptoStorePtr,
	runtime_client: Arc<RuntimeClient>,
	availability_config: AvailabilityConfig,
	network_service: Arc<sc_network::NetworkService<Block, Hash>>,
	authority_discovery: AuthorityDiscoveryService,
	registry: Option<&Registry>,
	spawner: Spawner,
	is_collator: IsCollator,
) -> Result<(Overseer<Spawner>, OverseerHandler), Error>
where
	RuntimeClient: 'static + ProvideRuntimeApi<Block> + HeaderBackend<Block>,
	RuntimeClient::Api: ParachainHost<Block>,
	Spawner: 'static + SpawnNamed + Clone + Unpin,
{
	use polkadot_node_subsystem_util::metrics::Metrics;

	use polkadot_availability_distribution::AvailabilityDistributionSubsystem;
	use polkadot_node_core_av_store::AvailabilityStoreSubsystem;
	use polkadot_availability_bitfield_distribution::BitfieldDistribution as BitfieldDistributionSubsystem;
	use polkadot_node_core_bitfield_signing::BitfieldSigningSubsystem;
	use polkadot_node_core_backing::CandidateBackingSubsystem;
	use polkadot_node_core_candidate_selection::CandidateSelectionSubsystem;
	use polkadot_node_core_candidate_validation::CandidateValidationSubsystem;
	use polkadot_node_core_chain_api::ChainApiSubsystem;
	use polkadot_node_collation_generation::CollationGenerationSubsystem;
	use polkadot_collator_protocol::{CollatorProtocolSubsystem, ProtocolSide};
	use polkadot_network_bridge::NetworkBridge as NetworkBridgeSubsystem;
	use polkadot_pov_distribution::PoVDistribution as PoVDistributionSubsystem;
	use polkadot_node_core_provisioner::ProvisioningSubsystem as ProvisionerSubsystem;
	use polkadot_node_core_runtime_api::RuntimeApiSubsystem;
	use polkadot_statement_distribution::StatementDistribution as StatementDistributionSubsystem;

	let all_subsystems = AllSubsystems {
		availability_distribution: AvailabilityDistributionSubsystem::new(
			keystore.clone(),
			Metrics::register(registry)?,
		),
		availability_store: AvailabilityStoreSubsystem::new_on_disk(
			availability_config,
			Metrics::register(registry)?,
		)?,
		bitfield_distribution: BitfieldDistributionSubsystem::new(
			Metrics::register(registry)?,
		),
		bitfield_signing: BitfieldSigningSubsystem::new(
			spawner.clone(),
			keystore.clone(),
			Metrics::register(registry)?,
		),
		candidate_backing: CandidateBackingSubsystem::new(
			spawner.clone(),
			keystore.clone(),
			Metrics::register(registry)?,
		),
		candidate_selection: CandidateSelectionSubsystem::new(
			spawner.clone(),
			(),
			Metrics::register(registry)?,
		),
		candidate_validation: CandidateValidationSubsystem::new(
			spawner.clone(),
			Metrics::register(registry)?,
		),
		chain_api: ChainApiSubsystem::new(
			runtime_client.clone(),
			Metrics::register(registry)?,
		),
		collation_generation: CollationGenerationSubsystem::new(
			Metrics::register(registry)?,
		),
		collator_protocol: {
			let side = match is_collator {
			IsCollator::Yes(id) => ProtocolSide::Collator(id, Metrics::register(registry)?),
			IsCollator::No => ProtocolSide::Validator(Metrics::register(registry)?),
			};
			CollatorProtocolSubsystem::new(
				side,
			)
		},
		network_bridge: NetworkBridgeSubsystem::new(
			network_service,
			authority_discovery,
		),
		pov_distribution: PoVDistributionSubsystem::new(
			Metrics::register(registry)?,
		),
		provisioner: ProvisionerSubsystem::new(
			spawner.clone(),
			(),
			Metrics::register(registry)?,
		),
		runtime_api: RuntimeApiSubsystem::new(
			runtime_client,
			Metrics::register(registry)?,
		),
		statement_distribution: StatementDistributionSubsystem::new(
			Metrics::register(registry)?,
		),
	};

	Overseer::new(
		leaves,
		all_subsystems,
		registry,
		spawner,
	).map_err(|e| Error::Other(format!("Failed to create an Overseer: {:?}", e)))
}

#[cfg(feature = "full-node")]
pub struct NewFull<C> {
	pub task_manager: TaskManager,
	pub client: C,
	pub overseer_handler: Option<OverseerHandler>,
	pub network: Arc<sc_network::NetworkService<Block, <Block as BlockT>::Hash>>,
	pub network_status_sinks: service::NetworkStatusSinks<Block>,
	pub rpc_handlers: RpcHandlers,
	pub backend: Arc<FullBackend>,
}

#[cfg(feature = "full-node")]
impl<C> NewFull<C> {
	/// Convert the client type using the given `func`.
	pub fn with_client<NC>(self, func: impl FnOnce(C) -> NC) -> NewFull<NC> {
		NewFull {
			client: func(self.client),
			task_manager: self.task_manager,
			overseer_handler: self.overseer_handler,
			network: self.network,
			network_status_sinks: self.network_status_sinks,
			rpc_handlers: self.rpc_handlers,
			backend: self.backend,
		}
	}
}
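
// A minimal usage sketch: `with_client` is how the chain-specific entry
// points below erase the concrete client type, e.g.
//
// 	new_full::<polkadot_runtime::RuntimeApi, PolkadotExecutor>(config, is_collator, grandpa_pause)
// 		.map(|full| full.with_client(Client::Polkadot))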

/// Is this node a collator?
#[cfg(feature = "full-node")]
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum IsCollator {
	/// This node is a collator.
	Yes(CollatorId),
	/// This node is not a collator.
	No,
}

#[cfg(feature = "full-node")]
impl IsCollator {
	/// Is this a collator?
	fn is_collator(&self) -> bool {
		matches!(self, Self::Yes(_))
	}
}
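
// Construction is straightforward; `collator_id` is a hypothetical
// `CollatorId`, e.g. derived from the node's collator keys:
//
// 	let as_collator = IsCollator::Yes(collator_id);
// 	assert!(as_collator.is_collator());
// 	assert!(!IsCollator::No.is_collator());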

/// Create a new full node of arbitrary runtime and executor.
///
/// This is an advanced feature and not recommended for general use. Generally, `build_full` is
/// a better choice.
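///
/// `is_collator` selects the collator or validator side of the collator
/// protocol, and `grandpa_pause` optionally takes a `(block, delay)` pair:
/// once the given block is finalized, GRANDPA voting pauses for `delay`
/// blocks (see the voting rule installed further down).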
#[cfg(feature = "full-node")]
pub fn new_full<RuntimeApi, Executor>(
	mut config: Configuration,
	is_collator: IsCollator,
	grandpa_pause: Option<(u32, u32)>,
) -> Result<NewFull<Arc<FullClient<RuntimeApi, Executor>>>, Error>
	where
		RuntimeApi: ConstructRuntimeApi<Block, FullClient<RuntimeApi, Executor>> + Send + Sync + 'static,
		RuntimeApi::RuntimeApi:
		RuntimeApiCollection<StateBackend = sc_client_api::StateBackendFor<FullBackend, Block>>,
		Executor: NativeExecutionDispatch + 'static,
{
	let role = config.role.clone();
	let force_authoring = config.force_authoring;
	let disable_grandpa = config.disable_grandpa;
	let name = config.network.node_name.clone();

	let service::PartialComponents {
		client,
		backend,
		mut task_manager,
		keystore_container,
		select_chain,
		import_queue,
		transaction_pool,
		inherent_data_providers,
		other: (rpc_extensions_builder, import_setup, rpc_setup)
	} = new_partial::<RuntimeApi, Executor>(&mut config)?;

	let prometheus_registry = config.prometheus_registry().cloned();

	let (shared_voter_state, finality_proof_provider) = rpc_setup;

	let (network, network_status_sinks, system_rpc_tx, network_starter) =
		service::build_network(service::BuildNetworkParams {
			config: &config,
			client: client.clone(),
			transaction_pool: transaction_pool.clone(),
			spawn_handle: task_manager.spawn_handle(),
			import_queue,
			on_demand: None,
			block_announce_validator_builder: None,
			finality_proof_request_builder: None,
			finality_proof_provider: Some(finality_proof_provider.clone()),
		})?;

	if config.offchain_worker.enabled {
		let _ = service::build_offchain_workers(
			&config, backend.clone(), task_manager.spawn_handle(), client.clone(), network.clone(),
		);
	}

	let telemetry_connection_sinks = service::TelemetryConnectionSinks::default();

	let availability_config = config.database.clone().try_into();

	let rpc_handlers = service::spawn_tasks(service::SpawnTasksParams {
		config,
		backend: backend.clone(),
		client: client.clone(),
		keystore: keystore_container.sync_keystore(),
		network: network.clone(),
		rpc_extensions_builder: Box::new(rpc_extensions_builder),
		transaction_pool: transaction_pool.clone(),
		task_manager: &mut task_manager,
		on_demand: None,
		remote_blockchain: None,
		telemetry_connection_sinks: telemetry_connection_sinks.clone(),
		network_status_sinks: network_status_sinks.clone(),
		system_rpc_tx,
	})?;

	let (block_import, link_half, babe_link) = import_setup;

	let overseer_client = client.clone();
	let spawner = task_manager.spawn_handle();
	let leaves: Vec<_> = select_chain.clone()
		.leaves()
		.unwrap_or_else(|_| vec![])
		.into_iter()
		.filter_map(|hash| {
			let number = client.number(hash).ok()??;
			let parent_hash = client.header(&BlockId::Hash(hash)).ok()??.parent_hash;

			Some(BlockInfo {
				hash,
				parent_hash,
				number,
			})
		})
		.collect();

	let authority_discovery_service = if role.is_authority() || is_collator.is_collator() {
		use sc_network::Event;
		use futures::StreamExt;

		let authority_discovery_role = if role.is_authority() {
			authority_discovery::Role::PublishAndDiscover(
				keystore_container.keystore(),
			)
		} else {
			// don't publish our addresses when we're only a collator
			authority_discovery::Role::Discover
		};
		let dht_event_stream = network.event_stream("authority-discovery")
			.filter_map(|e| async move { match e {
				Event::Dht(e) => Some(e),
				_ => None,
			}});
		let (worker, service) = authority_discovery::new_worker_and_service(
			client.clone(),
			network.clone(),
			Box::pin(dht_event_stream),
			authority_discovery_role,
			prometheus_registry.clone(),
		);

		task_manager.spawn_handle().spawn("authority-discovery-worker", worker.run());
		Some(service)
	} else {
		None
	};

	// This would read more naturally as
	// `let overseer_handler = authority_discovery_service.map(|service| ...)`,
	// but then we could not use `?` inside the closure to propagate errors.
	let overseer_handler = if let Some(authority_discovery_service) = authority_discovery_service {
		let (overseer, overseer_handler) = real_overseer(
			leaves,
			keystore_container.sync_keystore(),
			overseer_client.clone(),
			availability_config?,
			network.clone(),
			authority_discovery_service,
			prometheus_registry.as_ref(),
			spawner,
			is_collator,
		)?;
		let overseer_handler_clone = overseer_handler.clone();

		task_manager.spawn_essential_handle().spawn_blocking("overseer", Box::pin(async move {
			use futures::{pin_mut, select, FutureExt};

			let forward = polkadot_overseer::forward_events(overseer_client, overseer_handler_clone);

			let forward = forward.fuse();
			let overseer_fut = overseer.run().fuse();

			pin_mut!(overseer_fut);
			pin_mut!(forward);

			select! {
				_ = forward => (),
				_ = overseer_fut => (),
				complete => (),
			}
		}));

		Some(overseer_handler)
	} else { None };

	if role.is_authority() {
		let can_author_with =
			consensus_common::CanAuthorWithNativeVersion::new(client.executor().clone());

		let proposer = ProposerFactory::new(
			task_manager.spawn_handle(),
			client.clone(),
			transaction_pool,
			overseer_handler.as_ref().ok_or("authorities require real overseer handlers")?.clone(),
			prometheus_registry.as_ref(),
		);

		let babe_config = babe::BabeParams {
			keystore: keystore_container.sync_keystore(),
			client: client.clone(),
			select_chain,
			block_import,
			env: proposer,
			sync_oracle: network.clone(),
			inherent_data_providers: inherent_data_providers.clone(),
			force_authoring,
			babe_link,
			can_author_with,
		};

		let babe = babe::start_babe(babe_config)?;
		task_manager.spawn_essential_handle().spawn_blocking("babe", babe);
	}

	// if the node isn't actively participating in consensus then it doesn't
	// need a keystore, regardless of which protocol we use below.
	let keystore_opt = if role.is_authority() {
		Some(keystore_container.sync_keystore())
	} else {
		None
	};

	let config = grandpa::Config {
		// FIXME substrate#1578 make this available through chainspec
		gossip_duration: Duration::from_millis(1000),
		justification_period: 512,
		name: Some(name),
		observer_enabled: false,
		keystore: keystore_opt,
		is_authority: role.is_network_authority(),
	};

	let enable_grandpa = !disable_grandpa;
	if enable_grandpa {
		// start the full GRANDPA voter
		// NOTE: unlike in substrate we are currently running the full
		// GRANDPA voter protocol for all full nodes (regardless of whether
		// they're validators or not). At this point the full voter should
		// provide better guarantees of block and vote data availability than
		// the observer.

		// add a custom voting rule to temporarily stop voting for new blocks
		// after the given pause block is finalized and restarting after the
		// given delay.
		let voting_rule = match grandpa_pause {
			Some((block, delay)) => {
				info!("GRANDPA scheduled voting pause set for block #{} with a duration of {} blocks.",
					block,
					delay,
				);

				grandpa::VotingRulesBuilder::default()
					.add(grandpa_support::PauseAfterBlockFor(block, delay))
					.build()
			}
			None => grandpa::VotingRulesBuilder::default().build(),
		};

		let grandpa_config = grandpa::GrandpaParams {
			config,
			link: link_half,
			network: network.clone(),
			telemetry_on_connect: Some(telemetry_connection_sinks.on_connect_stream()),
			voting_rule,
			prometheus_registry: prometheus_registry.clone(),
			shared_voter_state,
		};

		task_manager.spawn_essential_handle().spawn_blocking(
			"grandpa-voter",
			grandpa::run_grandpa_voter(grandpa_config)?
		);
	} else {
		grandpa::setup_disabled_grandpa(network.clone())?;
	}

	network_starter.start_network();

	Ok(NewFull {
		task_manager,
		client,
		overseer_handler,
		network,
		network_status_sinks,
		rpc_handlers,
		backend,
	})
}

/// Builds a new service for a light client.
fn new_light<Runtime, Dispatch>(mut config: Configuration) -> Result<(TaskManager, RpcHandlers), Error>
	where
		Runtime: 'static + Send + Sync + ConstructRuntimeApi<Block, LightClient<Runtime, Dispatch>>,
		<Runtime as ConstructRuntimeApi<Block, LightClient<Runtime, Dispatch>>>::RuntimeApi:
		RuntimeApiCollection<StateBackend = sc_client_api::StateBackendFor<LightBackend, Block>>,
		Dispatch: NativeExecutionDispatch + 'static,
{
	set_prometheus_registry(&mut config)?;
	use sc_client_api::backend::RemoteBackend;

	let (client, backend, keystore_container, mut task_manager, on_demand) =
		service::new_light_parts::<Block, Runtime, Dispatch>(&config)?;

	let select_chain = sc_consensus::LongestChain::new(backend.clone());

	let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light(
		config.transaction_pool.clone(),
		config.prometheus_registry(),
		task_manager.spawn_handle(),
		client.clone(),
		on_demand.clone(),
	));

	let grandpa_block_import = grandpa::light_block_import(
		client.clone(),
		backend.clone(),
		&(client.clone() as Arc<_>),
		Arc::new(on_demand.checker().clone()),
	)?;

	let finality_proof_import = grandpa_block_import.clone();
	let finality_proof_request_builder =
		finality_proof_import.create_finality_proof_request_builder();

	let (babe_block_import, babe_link) = babe::block_import(
		babe::Config::get_or_compute(&*client)?,
		grandpa_block_import,
		client.clone(),
	)?;

	let inherent_data_providers = inherents::InherentDataProviders::new();

	// FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`.
	let import_queue = babe::import_queue(
		babe_link,
		babe_block_import,
		None,
		Some(Box::new(finality_proof_import)),
		client.clone(),
		select_chain.clone(),
		inherent_data_providers.clone(),
		&task_manager.spawn_handle(),
		config.prometheus_registry(),
		consensus_common::NeverCanAuthor,
	)?;

	let finality_proof_provider =
		GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone());

	let (network, network_status_sinks, system_rpc_tx, network_starter) =
		service::build_network(service::BuildNetworkParams {
			config: &config,
			client: client.clone(),
			transaction_pool: transaction_pool.clone(),
			spawn_handle: task_manager.spawn_handle(),
			import_queue,
			on_demand: Some(on_demand.clone()),
			block_announce_validator_builder: None,
			finality_proof_request_builder: Some(finality_proof_request_builder),
			finality_proof_provider: Some(finality_proof_provider),
		})?;

	if config.offchain_worker.enabled {
		let _ = service::build_offchain_workers(
			&config,
			backend.clone(),
			task_manager.spawn_handle(),
			client.clone(),
			network.clone(),
		);
	}

	let light_deps = polkadot_rpc::LightDeps {
		remote_blockchain: backend.remote_blockchain(),
		fetcher: on_demand.clone(),
		client: client.clone(),
		pool: transaction_pool.clone(),
	};

	let rpc_extensions = polkadot_rpc::create_light(light_deps);

	let rpc_handlers = service::spawn_tasks(service::SpawnTasksParams {
		on_demand: Some(on_demand),
		remote_blockchain: Some(backend.remote_blockchain()),
		rpc_extensions_builder: Box::new(service::NoopRpcExtensionBuilder(rpc_extensions)),
		task_manager: &mut task_manager,
		telemetry_connection_sinks: service::TelemetryConnectionSinks::default(),
		config,
		keystore: keystore_container.sync_keystore(),
		backend,
		transaction_pool,
		client,
		network,
		network_status_sinks,
		system_rpc_tx,
	})?;

	network_starter.start_network();

	Ok((task_manager, rpc_handlers))
}

/// Builds a new object suitable for chain operations.
#[cfg(feature = "full-node")]
pub fn new_chain_ops(mut config: &mut Configuration) -> Result<
	(
		Arc<Client>,
		Arc<FullBackend>,
		consensus_common::import_queue::BasicQueue<Block, PrefixedMemoryDB<BlakeTwo256>>,
		TaskManager,
	),
	Error
>
{
	config.keystore = service::config::KeystoreConfig::InMemory;
	if config.chain_spec.is_rococo() {
		let service::PartialComponents { client, backend, import_queue, task_manager, .. }
			= new_partial::<rococo_runtime::RuntimeApi, RococoExecutor>(config)?;
		Ok((Arc::new(Client::Rococo(client)), backend, import_queue, task_manager))
	} else if config.chain_spec.is_kusama() {
		let service::PartialComponents { client, backend, import_queue, task_manager, .. }
			= new_partial::<kusama_runtime::RuntimeApi, KusamaExecutor>(config)?;
		Ok((Arc::new(Client::Kusama(client)), backend, import_queue, task_manager))
	} else if config.chain_spec.is_westend() {
		let service::PartialComponents { client, backend, import_queue, task_manager, .. }
			= new_partial::<westend_runtime::RuntimeApi, WestendExecutor>(config)?;
		Ok((Arc::new(Client::Westend(client)), backend, import_queue, task_manager))
	} else {
		let service::PartialComponents { client, backend, import_queue, task_manager, .. }
			= new_partial::<polkadot_runtime::RuntimeApi, PolkadotExecutor>(config)?;
		Ok((Arc::new(Client::Polkadot(client)), backend, import_queue, task_manager))
	}
}
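
// A minimal sketch of driving `new_chain_ops`, as the CLI does for
// import/export/check style commands; `config` is a hypothetical mutable
// full-node `Configuration`:
//
// 	let (client, backend, import_queue, task_manager) = new_chain_ops(&mut config)?;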

/// Builds a new light node.
pub fn build_light(config: Configuration) -> Result<(TaskManager, RpcHandlers), Error> {
	if config.chain_spec.is_rococo() {
		new_light::<rococo_runtime::RuntimeApi, RococoExecutor>(config)
	} else if config.chain_spec.is_kusama() {
		new_light::<kusama_runtime::RuntimeApi, KusamaExecutor>(config)
	} else if config.chain_spec.is_westend() {
		new_light::<westend_runtime::RuntimeApi, WestendExecutor>(config)
	} else {
		new_light::<polkadot_runtime::RuntimeApi, PolkadotExecutor>(config)
	}
}
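
// Minimal usage sketch, assuming a light-client `Configuration` named
// `config`:
//
// 	let (task_manager, rpc_handlers) = build_light(config)?;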

#[cfg(feature = "full-node")]
pub fn build_full(
	config: Configuration,
	is_collator: IsCollator,
	grandpa_pause: Option<(u32, u32)>,
) -> Result<NewFull<Client>, Error> {
	if config.chain_spec.is_rococo() {
		new_full::<rococo_runtime::RuntimeApi, RococoExecutor>(
			config,
			is_collator,
			grandpa_pause,
		).map(|full| full.with_client(Client::Rococo))
	} else if config.chain_spec.is_kusama() {
		new_full::<kusama_runtime::RuntimeApi, KusamaExecutor>(
			config,
			is_collator,
			grandpa_pause,
		).map(|full| full.with_client(Client::Kusama))
	} else if config.chain_spec.is_westend() {
		new_full::<westend_runtime::RuntimeApi, WestendExecutor>(
			config,
			is_collator,
			grandpa_pause,
		).map(|full| full.with_client(Client::Westend))
	} else {
		new_full::<polkadot_runtime::RuntimeApi, PolkadotExecutor>(
			config,
			is_collator,
			grandpa_pause,
		).map(|full| full.with_client(Client::Polkadot))
	}
}
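
// A minimal sketch of starting a full node from a hypothetical `config`
// (error handling elided); the returned `NewFull` bundles the task manager,
// the type-erased client, networking handles and, when running with an
// authority or collator role, the overseer handler:
//
// 	let node = build_full(config, IsCollator::No, None)?;
// 	let task_manager = node.task_manager;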