// Copyright 2017-2020 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.

// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.

//! Polkadot service. Specialized wrapper over substrate service.

#![deny(unused_results)]

pub mod chain_spec;
mod grandpa_support;
mod client;

#[cfg(feature = "full-node")]
use {
	std::convert::TryInto,
	std::time::Duration,
	tracing::info,
	polkadot_node_core_av_store::Config as AvailabilityConfig,
	polkadot_node_core_av_store::Error as AvailabilityError,
	polkadot_node_core_approval_voting::Config as ApprovalVotingConfig,
	polkadot_node_core_proposer::ProposerFactory,
	polkadot_overseer::{AllSubsystems, BlockInfo, Overseer, OverseerHandler},
	polkadot_primitives::v1::ParachainHost,
	sc_authority_discovery::Service as AuthorityDiscoveryService,
	sp_blockchain::HeaderBackend,
	sp_trie::PrefixedMemoryDB,
	sc_client_api::{AuxStore, ExecutorProvider},
	sc_keystore::LocalKeystore,
	babe_primitives::BabeApi,
	grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider},
};

#[cfg(feature = "real-overseer")]
use polkadot_network_bridge::RequestMultiplexer;

use sp_core::traits::SpawnNamed;


use polkadot_subsystem::jaeger;

use std::sync::Arc;

use prometheus_endpoint::Registry;
use sc_executor::native_executor_instance;
use service::RpcHandlers;
use telemetry::{TelemetryConnectionNotifier, TelemetrySpan};

pub use self::client::{AbstractClient, Client, ClientHandle, ExecuteWithClient, RuntimeApiCollection};
pub use chain_spec::{PolkadotChainSpec, KusamaChainSpec, WestendChainSpec, RococoChainSpec};
pub use consensus_common::{Proposal, SelectChain, BlockImport, RecordProof, block_validation::Chain};
pub use polkadot_parachain::wasm_executor::IsolationStrategy;
pub use polkadot_primitives::v1::{Block, BlockId, CollatorId, Hash, Id as ParaId};
pub use sc_client_api::{Backend, ExecutionStrategy, CallExecutor};
pub use sc_consensus::LongestChain;
pub use sc_executor::NativeExecutionDispatch;
pub use service::{
	Role, PruningMode, TransactionPoolOptions, Error as SubstrateServiceError, RuntimeGenesis,
	TFullClient, TLightClient, TFullBackend, TLightBackend, TFullCallExecutor, TLightCallExecutor,
	Configuration, ChainSpec, TaskManager,
};
pub use service::config::{DatabaseConfig, PrometheusConfig};
pub use sp_api::{ApiRef, Core as CoreApi, ConstructRuntimeApi, ProvideRuntimeApi, StateBackend};
pub use sp_runtime::traits::{DigestFor, HashFor, NumberFor, Block as BlockT, self as runtime_traits, BlakeTwo256};

pub use kusama_runtime;
pub use polkadot_runtime;
pub use rococo_runtime;
pub use westend_runtime;

native_executor_instance!(
	pub PolkadotExecutor,
	polkadot_runtime::api::dispatch,
	polkadot_runtime::native_version,
	frame_benchmarking::benchmarking::HostFunctions,
);

native_executor_instance!(
	pub KusamaExecutor,
	kusama_runtime::api::dispatch,
	kusama_runtime::native_version,
	frame_benchmarking::benchmarking::HostFunctions,
);

native_executor_instance!(
	pub WestendExecutor,
	westend_runtime::api::dispatch,
	westend_runtime::native_version,
	frame_benchmarking::benchmarking::HostFunctions,
);

native_executor_instance!(
	pub RococoExecutor,
	rococo_runtime::api::dispatch,
	rococo_runtime::native_version,
	frame_benchmarking::benchmarking::HostFunctions,
);

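/// Errors that can occur while setting up or running a Polkadot service.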
#[derive(thiserror::Error, Debug)]
pub enum Error {
	#[error(transparent)]
	Io(#[from] std::io::Error),

	#[error(transparent)]
	AddrFormatInvalid(#[from] std::net::AddrParseError),

	#[error(transparent)]
	Sub(#[from] SubstrateServiceError),

	#[error(transparent)]
	Blockchain(#[from] sp_blockchain::Error),

	#[error(transparent)]
	Consensus(#[from] consensus_common::Error),

	#[error("Failed to create an overseer")]
	Overseer(#[from] polkadot_overseer::SubsystemError),

	#[error(transparent)]
	Prometheus(#[from] prometheus_endpoint::PrometheusError),

	#[error(transparent)]
	Jaeger(#[from] polkadot_subsystem::jaeger::JaegerError),

	#[cfg(feature = "full-node")]
	#[error(transparent)]
	Availability(#[from] AvailabilityError),

	#[error("Authorities require the real overseer implementation")]
	AuthoritiesRequireRealOverseer,

	#[cfg(feature = "full-node")]
	#[error("Creating a custom database is required for validators")]
	DatabasePathRequired,
}

/// Can be called for a `Configuration` to check which of the well-known networks (`Kusama`,
/// `Westend` or `Rococo`) it is a configuration for.
pub trait IdentifyVariant {
	/// Returns if this is a configuration for the `Kusama` network.
	fn is_kusama(&self) -> bool;

	/// Returns if this is a configuration for the `Westend` network.
	fn is_westend(&self) -> bool;

	/// Returns if this is a configuration for the `Rococo` network.
	fn is_rococo(&self) -> bool;
}

impl IdentifyVariant for Box<dyn ChainSpec> {
	fn is_kusama(&self) -> bool {
		self.id().starts_with("kusama") || self.id().starts_with("ksm")
	}
	fn is_westend(&self) -> bool {
		self.id().starts_with("westend") || self.id().starts_with("wnd")
	}
	fn is_rococo(&self) -> bool {
		self.id().starts_with("rococo") || self.id().starts_with("rco")
	}
}

// If we're using prometheus, use a registry with a prefix of `polkadot`.
fn set_prometheus_registry(config: &mut Configuration) -> Result<(), Error> {
	if let Some(PrometheusConfig { registry, .. }) = config.prometheus_config.as_mut() {
		*registry = Registry::new_custom(Some("polkadot".into()), None)?;
	}

	Ok(())
}

/// Initialize the `Jaeger` collector. The destination must listen
/// on the given address and port for `UDP` packets.
fn jaeger_launch_collector_with_agent(spawner: impl SpawnNamed, config: &Configuration, agent: Option<std::net::SocketAddr>) -> Result<(), Error> {
	if let Some(agent) = agent {
		let cfg = jaeger::JaegerConfig::builder()
			.agent(agent)
			.named(&config.network.node_name)
			.build();

		jaeger::Jaeger::new(cfg).launch(spawner)?;
	}
	Ok(())
}

pub type FullBackend = service::TFullBackend<Block>;
#[cfg(feature = "full-node")]
type FullSelectChain = sc_consensus::LongestChain<FullBackend, Block>;
pub type FullClient<RuntimeApi, Executor> = service::TFullClient<Block, RuntimeApi, Executor>;
#[cfg(feature = "full-node")]
type FullGrandpaBlockImport<RuntimeApi, Executor> = grandpa::GrandpaBlockImport<
	FullBackend, Block, FullClient<RuntimeApi, Executor>, FullSelectChain
>;

type LightBackend = service::TLightBackendWithHash<Block, sp_runtime::traits::BlakeTwo256>;

type LightClient<RuntimeApi, Executor> =
	service::TLightClientWithBackend<Block, RuntimeApi, Executor, LightBackend>;

#[cfg(feature = "full-node")]
fn new_partial<RuntimeApi, Executor>(config: &mut Configuration, jaeger_agent: Option<std::net::SocketAddr>) -> Result<
	service::PartialComponents<
		FullClient<RuntimeApi, Executor>, FullBackend, FullSelectChain,
		consensus_common::DefaultImportQueue<Block, FullClient<RuntimeApi, Executor>>,
		sc_transaction_pool::FullPool<Block, FullClient<RuntimeApi, Executor>>,
		(
			impl Fn(
				polkadot_rpc::DenyUnsafe,
				polkadot_rpc::SubscriptionTaskExecutor,
			) -> polkadot_rpc::RpcExtension,
			(
				babe::BabeBlockImport<
					Block, FullClient<RuntimeApi, Executor>, FullGrandpaBlockImport<RuntimeApi, Executor>
				>,
				grandpa::LinkHalf<Block, FullClient<RuntimeApi, Executor>, FullSelectChain>,
				babe::BabeLink<Block>
			),
			grandpa::SharedVoterState,
			u64, // slot-duration
		)
	>,
	Error
>
	where
		RuntimeApi: ConstructRuntimeApi<Block, FullClient<RuntimeApi, Executor>> + Send + Sync + 'static,
		RuntimeApi::RuntimeApi:
		RuntimeApiCollection<StateBackend = sc_client_api::StateBackendFor<FullBackend, Block>>,
		Executor: NativeExecutionDispatch + 'static,
{
	set_prometheus_registry(config)?;

	let inherent_data_providers = inherents::InherentDataProviders::new();

	let (client, backend, keystore_container, task_manager) =
		service::new_full_parts::<Block, RuntimeApi, Executor>(&config)?;
	let client = Arc::new(client);

	jaeger_launch_collector_with_agent(task_manager.spawn_handle(), &*config, jaeger_agent)?;

	let select_chain = sc_consensus::LongestChain::new(backend.clone());

	let transaction_pool = sc_transaction_pool::BasicPool::new_full(
		config.transaction_pool.clone(),
		config.role.is_authority().into(),
		config.prometheus_registry(),
		task_manager.spawn_handle(),
		client.clone(),
	);

	let grandpa_hard_forks = if config.chain_spec.is_kusama() {
		grandpa_support::kusama_hard_forks()
	} else {
		Vec::new()
	};

	let (grandpa_block_import, grandpa_link) =
		grandpa::block_import_with_authority_set_hard_forks(
			client.clone(),
			&(client.clone() as Arc<_>),
			select_chain.clone(),
			grandpa_hard_forks,
		)?;

	let justification_import = grandpa_block_import.clone();

	let babe_config = babe::Config::get_or_compute(&*client)?;
	let (block_import, babe_link) = babe::block_import(
		babe_config.clone(),
		grandpa_block_import,
		client.clone(),
	)?;

	let import_queue = babe::import_queue(
		babe_link.clone(),
		block_import.clone(),
		Some(Box::new(justification_import)),
		client.clone(),
		select_chain.clone(),
		inherent_data_providers.clone(),
		&task_manager.spawn_essential_handle(),
		config.prometheus_registry(),
		consensus_common::CanAuthorWithNativeVersion::new(client.executor().clone()),
	)?;

	let justification_stream = grandpa_link.justification_stream();
	let shared_authority_set = grandpa_link.shared_authority_set().clone();
	let shared_voter_state = grandpa::SharedVoterState::empty();
	let finality_proof_provider = GrandpaFinalityProofProvider::new_for_service(
		backend.clone(),
		Some(shared_authority_set.clone()),
	);

	let import_setup = (block_import.clone(), grandpa_link, babe_link.clone());
	let rpc_setup = shared_voter_state.clone();

	let shared_epoch_changes = babe_link.epoch_changes().clone();
	let slot_duration = babe_config.slot_duration();

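	// Build the closure that constructs the node's full RPC extensions; `spawn_tasks` invokes it
	// later with the appropriate `DenyUnsafe` policy and subscription task executor.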
	let rpc_extensions_builder = {
		let client = client.clone();
		let keystore = keystore_container.sync_keystore();
		let transaction_pool = transaction_pool.clone();
		let select_chain = select_chain.clone();
		let chain_spec = config.chain_spec.cloned_box();

		move |deny_unsafe, subscription_executor| -> polkadot_rpc::RpcExtension {
			let deps = polkadot_rpc::FullDeps {
				client: client.clone(),
				pool: transaction_pool.clone(),
				select_chain: select_chain.clone(),
				chain_spec: chain_spec.cloned_box(),
				deny_unsafe,
				babe: polkadot_rpc::BabeDeps {
					babe_config: babe_config.clone(),
					shared_epoch_changes: shared_epoch_changes.clone(),
					keystore: keystore.clone(),
				},
				grandpa: polkadot_rpc::GrandpaDeps {
					shared_voter_state: shared_voter_state.clone(),
					shared_authority_set: shared_authority_set.clone(),
					justification_stream: justification_stream.clone(),
					subscription_executor,
					finality_provider: finality_proof_provider.clone(),
				},
			};

			polkadot_rpc::create_full(deps)
		}
	};

340
	Ok(service::PartialComponents {
		client,
		backend,
		task_manager,
		keystore_container,
		select_chain,
		import_queue,
		transaction_pool,
		inherent_data_providers,
		other: (rpc_extensions_builder, import_setup, rpc_setup, slot_duration)
	})
}

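// When the `real-overseer` feature is disabled, the overseer is built from dummy subsystems
// only: the node compiles and runs, but performs no parachain-related work.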
#[cfg(all(feature = "full-node", not(feature = "real-overseer")))]
fn real_overseer<Spawner, RuntimeClient>(
	leaves: impl IntoIterator<Item = BlockInfo>,
	_: Arc<LocalKeystore>,
	_: Arc<RuntimeClient>,
	_: AvailabilityConfig,
	_: Arc<sc_network::NetworkService<Block, Hash>>,
	_: AuthorityDiscoveryService,
	_request_multiplexer: (),
	registry: Option<&Registry>,
	spawner: Spawner,
	_: IsCollator,
	_: IsolationStrategy,
	_: ApprovalVotingConfig,
) -> Result<(Overseer<Spawner>, OverseerHandler), Error>
where
	RuntimeClient: 'static + ProvideRuntimeApi<Block> + HeaderBackend<Block> + AuxStore,
	RuntimeClient::Api: ParachainHost<Block> + BabeApi<Block>,
	Spawner: 'static + SpawnNamed + Clone + Unpin,
{
	Overseer::new(
		leaves,
		AllSubsystems::<()>::dummy(),
		registry,
		spawner,
	).map_err(|e| e.into())
}

#[cfg(all(feature = "full-node", feature = "real-overseer"))]
fn real_overseer<Spawner, RuntimeClient>(
	leaves: impl IntoIterator<Item = BlockInfo>,
	keystore: Arc<LocalKeystore>,
	runtime_client: Arc<RuntimeClient>,
	availability_config: AvailabilityConfig,
	network_service: Arc<sc_network::NetworkService<Block, Hash>>,
	authority_discovery: AuthorityDiscoveryService,
	request_multiplexer: RequestMultiplexer,
	registry: Option<&Registry>,
	spawner: Spawner,
	is_collator: IsCollator,
	isolation_strategy: IsolationStrategy,
	approval_voting_config: ApprovalVotingConfig,
) -> Result<(Overseer<Spawner>, OverseerHandler), Error>
where
	RuntimeClient: 'static + ProvideRuntimeApi<Block> + HeaderBackend<Block> + AuxStore,
	RuntimeClient::Api: ParachainHost<Block> + BabeApi<Block>,
	Spawner: 'static + SpawnNamed + Clone + Unpin,
{
	use polkadot_node_subsystem_util::metrics::Metrics;

	use polkadot_availability_distribution::AvailabilityDistributionSubsystem;
	use polkadot_node_core_av_store::AvailabilityStoreSubsystem;
	use polkadot_availability_bitfield_distribution::BitfieldDistribution as BitfieldDistributionSubsystem;
	use polkadot_node_core_bitfield_signing::BitfieldSigningSubsystem;
	use polkadot_node_core_backing::CandidateBackingSubsystem;
	use polkadot_node_core_candidate_selection::CandidateSelectionSubsystem;
	use polkadot_node_core_candidate_validation::CandidateValidationSubsystem;
	use polkadot_node_core_chain_api::ChainApiSubsystem;
	use polkadot_node_collation_generation::CollationGenerationSubsystem;
	use polkadot_collator_protocol::{CollatorProtocolSubsystem, ProtocolSide};
	use polkadot_network_bridge::NetworkBridge as NetworkBridgeSubsystem;
	use polkadot_pov_distribution::PoVDistribution as PoVDistributionSubsystem;
	use polkadot_node_core_provisioner::ProvisioningSubsystem as ProvisionerSubsystem;
	use polkadot_node_core_runtime_api::RuntimeApiSubsystem;
	use polkadot_statement_distribution::StatementDistribution as StatementDistributionSubsystem;
	use polkadot_availability_recovery::AvailabilityRecoverySubsystem;
	use polkadot_approval_distribution::ApprovalDistribution as ApprovalDistributionSubsystem;

	#[cfg(feature = "real-overseer")]
	use polkadot_node_core_approval_voting::ApprovalVotingSubsystem;

	#[cfg(not(feature = "real-overseer"))]
	let _ = approval_voting_config; // silence the unused-variable warning.

	let all_subsystems = AllSubsystems {
		availability_distribution: AvailabilityDistributionSubsystem::new(
			keystore.clone(),
			Metrics::register(registry)?,
		),
		availability_recovery: AvailabilityRecoverySubsystem::with_chunks_only(),
		availability_store: AvailabilityStoreSubsystem::new_on_disk(
			availability_config,
			Metrics::register(registry)?,
		)?,
		bitfield_distribution: BitfieldDistributionSubsystem::new(
			Metrics::register(registry)?,
		),
		bitfield_signing: BitfieldSigningSubsystem::new(
			spawner.clone(),
			keystore.clone(),
			Metrics::register(registry)?,
		),
		candidate_backing: CandidateBackingSubsystem::new(
			spawner.clone(),
			keystore.clone(),
			Metrics::register(registry)?,
		),
		candidate_selection: CandidateSelectionSubsystem::new(
			spawner.clone(),
			keystore.clone(),
			Metrics::register(registry)?,
		),
		candidate_validation: CandidateValidationSubsystem::new(
			spawner.clone(),
			Metrics::register(registry)?,
			isolation_strategy,
		),
		chain_api: ChainApiSubsystem::new(
			runtime_client.clone(),
			Metrics::register(registry)?,
		),
		collation_generation: CollationGenerationSubsystem::new(
			Metrics::register(registry)?,
		),
		collator_protocol: {
			let side = match is_collator {
				IsCollator::Yes(id) => ProtocolSide::Collator(id, Metrics::register(registry)?),
				IsCollator::No => ProtocolSide::Validator(Metrics::register(registry)?),
			};
			CollatorProtocolSubsystem::new(
				side,
			)
		},
		network_bridge: NetworkBridgeSubsystem::new(
			network_service,
			authority_discovery,
			request_multiplexer,
		),
		pov_distribution: PoVDistributionSubsystem::new(
			Metrics::register(registry)?,
		),
		provisioner: ProvisionerSubsystem::new(
			spawner.clone(),
			(),
			Metrics::register(registry)?,
		),
		runtime_api: RuntimeApiSubsystem::new(
			runtime_client.clone(),
			Metrics::register(registry)?,
			spawner.clone(),
		),
		statement_distribution: StatementDistributionSubsystem::new(
			Metrics::register(registry)?,
		),
		approval_distribution: ApprovalDistributionSubsystem::new(
			Metrics::register(registry)?,
		),
		#[cfg(feature = "real-overseer")]
		approval_voting: ApprovalVotingSubsystem::with_config(
			approval_voting_config,
			keystore.clone(),
		)?,
		#[cfg(not(feature = "real-overseer"))]
		approval_voting: polkadot_subsystem::DummySubsystem,
	};

	Overseer::new(
		leaves,
		all_subsystems,
		registry,
		spawner,
	).map_err(|e| e.into())
}

#[cfg(feature = "full-node")]
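/// Handles returned by `new_full`, for interacting with the newly-started full node.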
pub struct NewFull<C> {
	pub task_manager: TaskManager,
	pub client: C,
	pub overseer_handler: Option<OverseerHandler>,
	pub network: Arc<sc_network::NetworkService<Block, <Block as BlockT>::Hash>>,
	pub network_status_sinks: service::NetworkStatusSinks<Block>,
	pub rpc_handlers: RpcHandlers,
	pub backend: Arc<FullBackend>,
}

#[cfg(feature = "full-node")]
impl<C> NewFull<C> {
	/// Convert the client type using the given `func`.
	pub fn with_client<NC>(self, func: impl FnOnce(C) -> NC) -> NewFull<NC> {
		NewFull {
			client: func(self.client),
			task_manager: self.task_manager,
			overseer_handler: self.overseer_handler,
			network: self.network,
			network_status_sinks: self.network_status_sinks,
			rpc_handlers: self.rpc_handlers,
			backend: self.backend,
		}
	}
}

/// Is this node a collator?
#[cfg(feature = "full-node")]
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum IsCollator {
	/// This node is a collator.
	Yes(CollatorId),
	/// This node is not a collator.
	No,
}

#[cfg(feature = "full-node")]
impl IsCollator {
	/// Is this a collator?
	fn is_collator(&self) -> bool {
		matches!(self, Self::Yes(_))
	}
}

/// Create a new full node of arbitrary runtime and executor.
///
/// This is an advanced feature and not recommended for general use. Generally, `build_full` is
/// a better choice.
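///
/// A minimal calling sketch, for illustration only (it assumes a prepared `config` and an
/// `isolation_strategy` obtained elsewhere):
///
/// ```ignore
/// let node = new_full::<polkadot_runtime::RuntimeApi, PolkadotExecutor>(
/// 	config,
/// 	IsCollator::No,
/// 	None, // no scheduled GRANDPA pause
/// 	None, // no Jaeger agent
/// 	isolation_strategy,
/// )?;
/// // `node.task_manager`, `node.client` etc. are then used to drive and inspect the node.
/// ```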
#[cfg(feature = "full-node")]
pub fn new_full<RuntimeApi, Executor>(
	mut config: Configuration,
	is_collator: IsCollator,
	grandpa_pause: Option<(u32, u32)>,
	jaeger_agent: Option<std::net::SocketAddr>,
	isolation_strategy: IsolationStrategy,
) -> Result<NewFull<Arc<FullClient<RuntimeApi, Executor>>>, Error>
	where
		RuntimeApi: ConstructRuntimeApi<Block, FullClient<RuntimeApi, Executor>> + Send + Sync + 'static,
		RuntimeApi::RuntimeApi:
		RuntimeApiCollection<StateBackend = sc_client_api::StateBackendFor<FullBackend, Block>>,
		Executor: NativeExecutionDispatch + 'static,
{
	let role = config.role.clone();
	let force_authoring = config.force_authoring;
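	// Back off block authoring when finality lags too far behind the best block; with the real
	// overseer the permitted unfinalized slack is relaxed to 100 blocks.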
	let backoff_authoring_blocks =
		Some(sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging {
			#[cfg(feature = "real-overseer")]
			unfinalized_slack: 100,
			..Default::default()
		});

	let disable_grandpa = config.disable_grandpa;
	let name = config.network.node_name.clone();

	let service::PartialComponents {
		client,
		backend,
		mut task_manager,
		keystore_container,
		select_chain,
		import_queue,
		transaction_pool,
		inherent_data_providers,
		other: (rpc_extensions_builder, import_setup, rpc_setup, slot_duration)
	} = new_partial::<RuntimeApi, Executor>(&mut config, jaeger_agent)?;

	let prometheus_registry = config.prometheus_registry().cloned();

	let shared_voter_state = rpc_setup;

	// Note: GrandPa is pushed before the Polkadot-specific protocols. This doesn't change
	// anything in terms of behaviour, but makes the logs more consistent with the other
	// Substrate nodes.
	config.network.extra_sets.push(grandpa::grandpa_peers_set_config());
	#[cfg(feature = "real-overseer")]
	config.network.extra_sets.extend(polkadot_network_bridge::peer_sets_info());

	// TODO: At the moment, the collator protocol uses notifications protocols to download
	// collations. Because of DoS-protection measures, notifications protocols have a very limited
	// bandwidth capacity, resulting in the collation download taking a long time.
	// The lines of code below considerably relax this DoS protection in order to circumvent
	// this problem. This configuration change should preferably not reach any live network, and
	// should be removed once the collation protocol is finished.
	// Tracking issue: https://github.com/paritytech/polkadot/issues/2283
	#[cfg(feature = "real-overseer")]
	fn adjust_yamux(cfg: &mut sc_network::config::NetworkConfiguration) {
		cfg.yamux_window_size = Some(5 * 1024 * 1024);
	}
	#[cfg(not(feature = "real-overseer"))]
	fn adjust_yamux(_: &mut sc_network::config::NetworkConfiguration) {}
	adjust_yamux(&mut config.network);

	config.network.request_response_protocols.push(sc_finality_grandpa_warp_sync::request_response_config_for_chain(
		&config, task_manager.spawn_handle(), backend.clone(),
	));
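
	// Register the request/response protocols used by the overseer's network bridge and keep the
	// multiplexer that dispatches incoming requests to the right subsystem.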
	#[cfg(feature = "real-overseer")]
	fn register_request_response(config: &mut sc_network::config::NetworkConfiguration) -> RequestMultiplexer {
		let (multiplexer, configs) = RequestMultiplexer::new();
		config.request_response_protocols.extend(configs);
		multiplexer
	}
	#[cfg(not(feature = "real-overseer"))]
	fn register_request_response(_: &mut sc_network::config::NetworkConfiguration) {}
	let request_multiplexer = register_request_response(&mut config.network);

	let (network, network_status_sinks, system_rpc_tx, network_starter) =
		service::build_network(service::BuildNetworkParams {
			config: &config,
			client: client.clone(),
			transaction_pool: transaction_pool.clone(),
			spawn_handle: task_manager.spawn_handle(),
			import_queue,
			on_demand: None,
			block_announce_validator_builder: None,
		})?;

	if config.offchain_worker.enabled {
		let _ = service::build_offchain_workers(
			&config, backend.clone(), task_manager.spawn_handle(), client.clone(), network.clone(),
		);
	}

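	// The availability store needs an on-disk database; deriving its configuration from the
	// node's database settings fails for purely in-memory configurations.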
	let availability_config = config.database.clone().try_into().map_err(Error::Availability)?;

	let approval_voting_config = ApprovalVotingConfig {
		path: config.database.path()
			.ok_or(Error::DatabasePathRequired)?
			.join("parachains").join("approval-voting"),
		slot_duration_millis: slot_duration,
		cache_size: None, // default is fine.
	};

	let telemetry_span = TelemetrySpan::new();
	let _telemetry_span_entered = telemetry_span.enter();

	let (rpc_handlers, telemetry_connection_notifier) = service::spawn_tasks(service::SpawnTasksParams {
		config,
		backend: backend.clone(),
		client: client.clone(),
		keystore: keystore_container.sync_keystore(),
		network: network.clone(),
		rpc_extensions_builder: Box::new(rpc_extensions_builder),
		transaction_pool: transaction_pool.clone(),
		task_manager: &mut task_manager,
		on_demand: None,
		remote_blockchain: None,
		network_status_sinks: network_status_sinks.clone(),
		system_rpc_tx,
		telemetry_span: Some(telemetry_span.clone()),
	})?;

	let (block_import, link_half, babe_link) = import_setup;

	let overseer_client = client.clone();
	let spawner = task_manager.spawn_handle();
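	// Collect the current chain leaves; they are handed to the overseer as the initially
	// activated heads so that subsystems start working from the live forks.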
	let leaves: Vec<_> = select_chain.clone()
		.leaves()
		.unwrap_or_else(|_| vec![])
		.into_iter()
		.filter_map(|hash| {
			let number = client.number(hash).ok()??;
			let parent_hash = client.header(&BlockId::Hash(hash)).ok()??.parent_hash;

			Some(BlockInfo {
				hash,
				parent_hash,
				number,
			})
		})
		.collect();

	let authority_discovery_service = if role.is_authority() || is_collator.is_collator() {
		use sc_network::Event;
		use futures::StreamExt;

		let authority_discovery_role = if role.is_authority() {
			sc_authority_discovery::Role::PublishAndDiscover(
				keystore_container.keystore(),
			)
		} else {
			// don't publish our addresses when we're only a collator
			sc_authority_discovery::Role::Discover
		};
		let dht_event_stream = network.event_stream("authority-discovery")
			.filter_map(|e| async move { match e {
				Event::Dht(e) => Some(e),
				_ => None,
			}});
		let (worker, service) = sc_authority_discovery::new_worker_and_service(
			client.clone(),
			network.clone(),
			Box::pin(dht_event_stream),
			authority_discovery_role,
			prometheus_registry.clone(),
		);

		task_manager.spawn_handle().spawn("authority-discovery-worker", worker.run());
		Some(service)
	} else {
		None
	};

	// We would prefer `let overseer_handler = authority_discovery_service.map(|authority_discovery_service| ...)`,
	// but then we could not use `?` inside the closure to propagate errors.
	let local_keystore = keystore_container.local_keystore();
	if local_keystore.is_none() {
		tracing::info!("Cannot run as validator without local keystore.");
	}

	let maybe_params = local_keystore
		.and_then(move |k| authority_discovery_service.map(|a| (a, k)));

	let overseer_handler = if let Some((authority_discovery_service, keystore)) = maybe_params {
		let (overseer, overseer_handler) = real_overseer(
			leaves,
			keystore,
			overseer_client.clone(),
			availability_config,
			network.clone(),
			authority_discovery_service,
			request_multiplexer,
			prometheus_registry.as_ref(),
			spawner,
			is_collator,
			isolation_strategy,
			approval_voting_config,
		)?;
		let overseer_handler_clone = overseer_handler.clone();

		task_manager.spawn_essential_handle().spawn_blocking("overseer", Box::pin(async move {
			use futures::{pin_mut, select, FutureExt};

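			// Forward block import and finality notifications from the client into the overseer and
			// run the overseer itself; when either future completes this essential task ends.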
			let forward = polkadot_overseer::forward_events(overseer_client, overseer_handler_clone);

			let forward = forward.fuse();
			let overseer_fut = overseer.run().fuse();

			pin_mut!(overseer_fut);
			pin_mut!(forward);

			select! {
				_ = forward => (),
				_ = overseer_fut => (),
				complete => (),
			}
		}));

		Some(overseer_handler)
	} else { None };

	if role.is_authority() {
		let can_author_with =
			consensus_common::CanAuthorWithNativeVersion::new(client.executor().clone());

		let proposer = ProposerFactory::new(
			task_manager.spawn_handle(),
			client.clone(),
			transaction_pool,
			overseer_handler.as_ref().ok_or(Error::AuthoritiesRequireRealOverseer)?.clone(),
			prometheus_registry.as_ref(),
		);

		let babe_config = babe::BabeParams {
			keystore: keystore_container.sync_keystore(),
			client: client.clone(),
			select_chain,
			block_import,
			env: proposer,
			sync_oracle: network.clone(),
			inherent_data_providers: inherent_data_providers.clone(),
			force_authoring,
			backoff_authoring_blocks,
			babe_link,
			can_author_with,
		};

		let babe = babe::start_babe(babe_config)?;
		task_manager.spawn_essential_handle().spawn_blocking("babe", babe);
	}

	// if the node isn't actively participating in consensus then it doesn't
	// need a keystore, regardless of which protocol we use below.
	let keystore_opt = if role.is_authority() {
		Some(keystore_container.sync_keystore())
	} else {
		None
	};

	let config = grandpa::Config {
		// FIXME substrate#1578 make this available through chainspec
		gossip_duration: Duration::from_millis(1000),
		justification_period: 512,
		name: Some(name),
		observer_enabled: false,
		keystore: keystore_opt,
		is_authority: role.is_authority(),
	};

	let enable_grandpa = !disable_grandpa;
	if enable_grandpa {
		// start the full GRANDPA voter
		// NOTE: unlike in substrate we are currently running the full
		// GRANDPA voter protocol for all full nodes (regardless of whether
		// they're validators or not). at this point the full voter should
		// provide better guarantees of block and vote data availability than
		// the observer.

		// add a custom voting rule to temporarily stop voting for new blocks
		// after the given pause block is finalized and restarting after the
		// given delay.
		let builder = grandpa::VotingRulesBuilder::default();

		#[cfg(feature = "real-overseer")]
		let builder = if let Some(ref overseer) = overseer_handler {
			builder.add(grandpa_support::ApprovalCheckingDiagnostic::new(
				overseer.clone(),
				prometheus_registry.as_ref(),
			)?)
		} else {
			builder
		};

		let voting_rule = match grandpa_pause {
			Some((block, delay)) => {
				info!(
					block_number = %block,
					delay = %delay,
					"GRANDPA scheduled voting pause set for block #{} with a duration of {} blocks.",
					block,
					delay,
				);

				builder
					.add(grandpa_support::PauseAfterBlockFor(block, delay))
					.build()
			}
			None => builder.build(),
		};

		let grandpa_config = grandpa::GrandpaParams {
			config,
			link: link_half,
			network: network.clone(),
			telemetry_on_connect: telemetry_connection_notifier.map(|x| x.on_connect_stream()),
			voting_rule,
			prometheus_registry: prometheus_registry.clone(),
			shared_voter_state,
		};

		task_manager.spawn_essential_handle().spawn_blocking(
			"grandpa-voter",
			grandpa::run_grandpa_voter(grandpa_config)?
		);
	}

	network_starter.start_network();

	Ok(NewFull {
		task_manager,
		client,
		overseer_handler,
		network,
		network_status_sinks,
		rpc_handlers,
		backend,
	})
}

/// Builds a new service for a light client.
fn new_light<Runtime, Dispatch>(mut config: Configuration) -> Result<(
	TaskManager,
	RpcHandlers,
	Option<TelemetryConnectionNotifier>,
), Error>
	where
		Runtime: 'static + Send + Sync + ConstructRuntimeApi<Block, LightClient<Runtime, Dispatch>>,
		<Runtime as ConstructRuntimeApi<Block, LightClient<Runtime, Dispatch>>>::RuntimeApi:
		RuntimeApiCollection<StateBackend = sc_client_api::StateBackendFor<LightBackend, Block>>,
		Dispatch: NativeExecutionDispatch + 'static,
{
	set_prometheus_registry(&mut config)?;
	use sc_client_api::backend::RemoteBackend;

	let (client, backend, keystore_container, mut task_manager, on_demand) =
		service::new_light_parts::<Block, Runtime, Dispatch>(&config)?;

	let select_chain = sc_consensus::LongestChain::new(backend.clone());

	let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light(
		config.transaction_pool.clone(),
		config.prometheus_registry(),
		task_manager.spawn_handle(),
		client.clone(),
		on_demand.clone(),
	));

	let (grandpa_block_import, _) = grandpa::block_import(
		client.clone(),
		&(client.clone() as Arc<_>),
		select_chain.clone(),
	)?;
	let justification_import = grandpa_block_import.clone();

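	// The light client still verifies BABE blocks and GRANDPA justifications but never authors,
	// hence the `NeverCanAuthor` policy passed to the import queue below.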
	let (babe_block_import, babe_link) = babe::block_import(
		babe::Config::get_or_compute(&*client)?,
		grandpa_block_import,
		client.clone(),
	)?;

	let inherent_data_providers = inherents::InherentDataProviders::new();

	// FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`.
	let import_queue = babe::import_queue(
		babe_link,
		babe_block_import,
		Some(Box::new(justification_import)),
		client.clone(),
		select_chain.clone(),
		inherent_data_providers.clone(),
		&task_manager.spawn_essential_handle(),
		config.prometheus_registry(),
		consensus_common::NeverCanAuthor,
	)?;

	let (network, network_status_sinks, system_rpc_tx, network_starter) =
		service::build_network(service::BuildNetworkParams {
			config: &config,
			client: client.clone(),
			transaction_pool: transaction_pool.clone(),
			spawn_handle: task_manager.spawn_handle(),
			import_queue,
			on_demand: Some(on_demand.clone()),
			block_announce_validator_builder: None,
		})?;

	if config.offchain_worker.enabled {
		let _ = service::build_offchain_workers(
			&config,
			backend.clone(),
			task_manager.spawn_handle(),
			client.clone(),
			network.clone(),
		);
	}

	let light_deps = polkadot_rpc::LightDeps {
		remote_blockchain: backend.remote_blockchain(),
		fetcher: on_demand.clone(),
		client: client.clone(),
		pool: transaction_pool.clone(),
	};

	let rpc_extensions = polkadot_rpc::create_light(light_deps);

	let telemetry_span = TelemetrySpan::new();
	let _telemetry_span_entered = telemetry_span.enter();

	let (rpc_handlers, telemetry_connection_notifier) = service::spawn_tasks(service::SpawnTasksParams {
		on_demand: Some(on_demand),
		remote_blockchain: Some(backend.remote_blockchain()),
		rpc_extensions_builder: Box::new(service::NoopRpcExtensionBuilder(rpc_extensions)),
		task_manager: &mut task_manager,