// Copyright 2017-2020 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.

// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.

//! Polkadot service. Specialized wrapper over substrate service.

#![deny(unused_results)]

pub mod chain_spec;
mod grandpa_support;
mod client;
mod parachains_db;

#[cfg(feature = "full-node")]
use {
	tracing::info,
	polkadot_network_bridge::RequestMultiplexer,
	polkadot_node_core_av_store::Config as AvailabilityConfig,
	polkadot_node_core_av_store::Error as AvailabilityError,
	polkadot_node_core_approval_voting::Config as ApprovalVotingConfig,
	polkadot_node_core_candidate_validation::Config as CandidateValidationConfig,
	polkadot_overseer::{AllSubsystems, BlockInfo, Overseer, OverseerHandler},
	polkadot_primitives::v1::ParachainHost,
	sc_authority_discovery::Service as AuthorityDiscoveryService,
	sp_authority_discovery::AuthorityDiscoveryApi,
	sp_blockchain::HeaderBackend,
	sp_trie::PrefixedMemoryDB,
	sc_client_api::{AuxStore, ExecutorProvider},
	sc_keystore::LocalKeystore,
	sp_consensus_babe::BabeApi,
	grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider},
	beefy_primitives::ecdsa::AuthoritySignature as BeefySignature,
	sp_runtime::traits::Header as HeaderT,
};

use sp_core::traits::SpawnNamed;

use polkadot_subsystem::jaeger;

use std::sync::Arc;
use std::time::Duration;

use prometheus_endpoint::Registry;
use sc_executor::native_executor_instance;
use service::RpcHandlers;
use telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle};

pub use self::client::{AbstractClient, Client, ClientHandle, ExecuteWithClient, RuntimeApiCollection};
pub use chain_spec::{PolkadotChainSpec, KusamaChainSpec, WestendChainSpec, RococoChainSpec};
pub use consensus_common::{Proposal, SelectChain, BlockImport, block_validation::Chain};
pub use polkadot_primitives::v1::{Block, BlockId, CollatorPair, Hash, Id as ParaId};
pub use sc_client_api::{Backend, ExecutionStrategy, CallExecutor};
pub use sc_consensus::LongestChain;
pub use sc_executor::NativeExecutionDispatch;
pub use service::{
	Role, PruningMode, TransactionPoolOptions, Error as SubstrateServiceError, RuntimeGenesis,
	TFullClient, TLightClient, TFullBackend, TLightBackend, TFullCallExecutor, TLightCallExecutor,
	Configuration, ChainSpec, TaskManager,
};
pub use service::config::{DatabaseConfig, PrometheusConfig};
pub use sp_api::{ApiRef, Core as CoreApi, ConstructRuntimeApi, ProvideRuntimeApi, StateBackend};
pub use sp_runtime::traits::{DigestFor, HashFor, NumberFor, Block as BlockT, self as runtime_traits, BlakeTwo256};

pub use kusama_runtime;
pub use polkadot_runtime;
pub use rococo_runtime;
pub use westend_runtime;
/// The maximum number of active leaves we forward to the [`Overseer`] on startup.
const MAX_ACTIVE_LEAVES: usize = 4;

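// Declare the native executor instances, one per supported runtime. Each
// instance dispatches runtime calls to the named runtime's native code and
// registers the benchmarking host functions.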
native_executor_instance!(
	pub PolkadotExecutor,
	polkadot_runtime::api::dispatch,
	polkadot_runtime::native_version,
	frame_benchmarking::benchmarking::HostFunctions,
);

native_executor_instance!(
	pub KusamaExecutor,
	kusama_runtime::api::dispatch,
	kusama_runtime::native_version,
	frame_benchmarking::benchmarking::HostFunctions,
);

native_executor_instance!(
	pub WestendExecutor,
	westend_runtime::api::dispatch,
	westend_runtime::native_version,
	frame_benchmarking::benchmarking::HostFunctions,
);

native_executor_instance!(
	pub RococoExecutor,
	rococo_runtime::api::dispatch,
	rococo_runtime::native_version,
	frame_benchmarking::benchmarking::HostFunctions,
);

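/// Top-level error type for building and running the Polkadot service.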
#[derive(thiserror::Error, Debug)]
pub enum Error {
	#[error(transparent)]
	Io(#[from] std::io::Error),

	#[error(transparent)]
	AddrFormatInvalid(#[from] std::net::AddrParseError),

	#[error(transparent)]
	Sub(#[from] SubstrateServiceError),

	#[error(transparent)]
	Blockchain(#[from] sp_blockchain::Error),

	#[error(transparent)]
	Consensus(#[from] consensus_common::Error),

	#[error("Failed to create an overseer")]
	Overseer(#[from] polkadot_overseer::SubsystemError),

	#[error(transparent)]
	Prometheus(#[from] prometheus_endpoint::PrometheusError),

	#[error(transparent)]
	Telemetry(#[from] telemetry::Error),

	#[error(transparent)]
	Jaeger(#[from] polkadot_subsystem::jaeger::JaegerError),

	#[cfg(feature = "full-node")]
	#[error(transparent)]
	Availability(#[from] AvailabilityError),

	#[error("Authorities require the real overseer implementation")]
	AuthoritiesRequireRealOverseer,

	#[cfg(feature = "full-node")]
	#[error("Creating a custom database is required for validators")]
	DatabasePathRequired,
}

/// Can be called for a `Configuration` to identify which network the configuration targets.
pub trait IdentifyVariant {
	/// Returns if this is a configuration for the `Kusama` network.
	fn is_kusama(&self) -> bool;

	/// Returns if this is a configuration for the `Westend` network.
	fn is_westend(&self) -> bool;

	/// Returns if this is a configuration for the `Rococo` network.
	fn is_rococo(&self) -> bool;

	/// Returns if this is a configuration for the `Wococo` test network.
	fn is_wococo(&self) -> bool;

	/// Returns true if this configuration is for a development network.
	fn is_dev(&self) -> bool;
}

impl IdentifyVariant for Box<dyn ChainSpec> {
	fn is_kusama(&self) -> bool {
		self.id().starts_with("kusama") || self.id().starts_with("ksm")
	}
	fn is_westend(&self) -> bool {
		self.id().starts_with("westend") || self.id().starts_with("wnd")
	}
	fn is_rococo(&self) -> bool {
		self.id().starts_with("rococo") || self.id().starts_with("rco")
	}
	fn is_wococo(&self) -> bool {
		self.id().starts_with("wococo") || self.id().starts_with("wco")
	}
	fn is_dev(&self) -> bool {
		self.id().ends_with("dev")
	}
}

// If we're using prometheus, use a registry with a prefix of `polkadot`.
fn set_prometheus_registry(config: &mut Configuration) -> Result<(), Error> {
	if let Some(PrometheusConfig { registry, .. }) = config.prometheus_config.as_mut() {
		*registry = Registry::new_custom(Some("polkadot".into()), None)?;
	}

	Ok(())
}

/// Initialize the `Jaeger` collector. The destination must listen
/// on the given address and port for `UDP` packets.
fn jaeger_launch_collector_with_agent(spawner: impl SpawnNamed, config: &Configuration, agent: Option<std::net::SocketAddr>) -> Result<(), Error> {
	if let Some(agent) = agent {
		let cfg = jaeger::JaegerConfig::builder()
			.agent(agent)
			.named(&config.network.node_name)
			.build();

		jaeger::Jaeger::new(cfg).launch(spawner)?;
	}
	Ok(())
}

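// Convenience aliases for the concrete full- and light-client types used below.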
pub type FullBackend = service::TFullBackend<Block>;
#[cfg(feature = "full-node")]
type FullSelectChain = sc_consensus::LongestChain<FullBackend, Block>;
pub type FullClient<RuntimeApi, Executor> = service::TFullClient<Block, RuntimeApi, Executor>;
#[cfg(feature = "full-node")]
type FullGrandpaBlockImport<RuntimeApi, Executor> = grandpa::GrandpaBlockImport<
	FullBackend, Block, FullClient<RuntimeApi, Executor>, FullSelectChain
>;

type LightBackend = service::TLightBackendWithHash<Block, sp_runtime::traits::BlakeTwo256>;

type LightClient<RuntimeApi, Executor> =
	service::TLightClientWithBackend<Block, RuntimeApi, Executor, LightBackend>;

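/// Construct the partial node components shared by every full node: client,
/// backend, keystore, task manager, BABE import queue and transaction pool,
/// plus the BABE/GRANDPA/BEEFY import setup and the RPC extensions builder.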
#[cfg(feature = "full-node")]
fn new_partial<RuntimeApi, Executor>(
	config: &mut Configuration,
	jaeger_agent: Option<std::net::SocketAddr>,
	telemetry_worker_handle: Option<TelemetryWorkerHandle>,
) -> Result<
	service::PartialComponents<
		FullClient<RuntimeApi, Executor>, FullBackend, FullSelectChain,
		consensus_common::DefaultImportQueue<Block, FullClient<RuntimeApi, Executor>>,
		sc_transaction_pool::FullPool<Block, FullClient<RuntimeApi, Executor>>,
		(
			impl Fn(
				polkadot_rpc::DenyUnsafe,
				polkadot_rpc::SubscriptionTaskExecutor,
			) -> polkadot_rpc::RpcExtension,
			(
				babe::BabeBlockImport<
					Block, FullClient<RuntimeApi, Executor>, FullGrandpaBlockImport<RuntimeApi, Executor>
				>,
				grandpa::LinkHalf<Block, FullClient<RuntimeApi, Executor>, FullSelectChain>,
				babe::BabeLink<Block>,
				beefy_gadget::notification::BeefySignedCommitmentSender<Block, BeefySignature>,
			),
			grandpa::SharedVoterState,
			std::time::Duration, // slot-duration
			Option<Telemetry>,
		)
	>,
	Error
>
	where
		RuntimeApi: ConstructRuntimeApi<Block, FullClient<RuntimeApi, Executor>> + Send + Sync + 'static,
		RuntimeApi::RuntimeApi:
		RuntimeApiCollection<StateBackend = sc_client_api::StateBackendFor<FullBackend, Block>>,
		Executor: NativeExecutionDispatch + 'static,
{
	set_prometheus_registry(config)?;

	let telemetry = config.telemetry_endpoints.clone()
		.filter(|x| !x.is_empty())
		.map(move |endpoints| -> Result<_, telemetry::Error> {
			let (worker, mut worker_handle) = if let Some(worker_handle) = telemetry_worker_handle {
				(None, worker_handle)
			} else {
				let worker = TelemetryWorker::new(16)?;
				let worker_handle = worker.handle();
				(Some(worker), worker_handle)
			};
			let telemetry = worker_handle.new_telemetry(endpoints);
			Ok((worker, telemetry))
		})
		.transpose()?;

	let (client, backend, keystore_container, task_manager) =
		service::new_full_parts::<Block, RuntimeApi, Executor>(
			&config,
			telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
		)?;
	let client = Arc::new(client);

	let telemetry = telemetry
		.map(|(worker, telemetry)| {
			if let Some(worker) = worker {
				task_manager.spawn_handle().spawn("telemetry", worker.run());
			}
			telemetry
		});

	jaeger_launch_collector_with_agent(task_manager.spawn_handle(), &*config, jaeger_agent)?;

	let select_chain = sc_consensus::LongestChain::new(backend.clone());

	let transaction_pool = sc_transaction_pool::BasicPool::new_full(
		config.transaction_pool.clone(),
		config.role.is_authority().into(),
		config.prometheus_registry(),
		task_manager.spawn_handle(),
		client.clone(),
	);

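	// Kusama nodes apply a set of hard-coded GRANDPA authority-set hard forks
	// on top of the regular block import.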
	let grandpa_hard_forks = if config.chain_spec.is_kusama() {
		grandpa_support::kusama_hard_forks()
	} else {
		Vec::new()
	};

	let (grandpa_block_import, grandpa_link) =
		grandpa::block_import_with_authority_set_hard_forks(
			client.clone(),
			&(client.clone() as Arc<_>),
			select_chain.clone(),
			grandpa_hard_forks,
			telemetry.as_ref().map(|x| x.handle()),
		)?;

	let justification_import = grandpa_block_import.clone();

	let babe_config = babe::Config::get_or_compute(&*client)?;
	let (block_import, babe_link) = babe::block_import(
		babe_config.clone(),
		grandpa_block_import,
		client.clone(),
	)?;

	let slot_duration = babe_link.config().slot_duration();
	let import_queue = babe::import_queue(
		babe_link.clone(),
		block_import.clone(),
		Some(Box::new(justification_import)),
		client.clone(),
		select_chain.clone(),
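		// Provide the inherent data the BABE verifier needs: the current
		// timestamp and the slot derived from it.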
		move |_, ()| async move {
			let timestamp = sp_timestamp::InherentDataProvider::from_system_time();

			let slot =
				sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_duration(
					*timestamp,
					slot_duration,
				);

			Ok((timestamp, slot))
		},
		&task_manager.spawn_essential_handle(),
		config.prometheus_registry(),
		consensus_common::CanAuthorWithNativeVersion::new(client.executor().clone()),
		telemetry.as_ref().map(|x| x.handle()),
	)?;

	let (beefy_link, beefy_commitment_stream) =
		beefy_gadget::notification::BeefySignedCommitmentStream::channel();

	let justification_stream = grandpa_link.justification_stream();
	let shared_authority_set = grandpa_link.shared_authority_set().clone();
	let shared_voter_state = grandpa::SharedVoterState::empty();
	let finality_proof_provider = GrandpaFinalityProofProvider::new_for_service(
		backend.clone(),
		Some(shared_authority_set.clone()),
	);

	let import_setup = (block_import.clone(), grandpa_link, babe_link.clone(), beefy_link);
	let rpc_setup = shared_voter_state.clone();

	let shared_epoch_changes = babe_link.epoch_changes().clone();
	let slot_duration = babe_config.slot_duration();

	let rpc_extensions_builder = {
		let client = client.clone();
		let keystore = keystore_container.sync_keystore();
		let transaction_pool = transaction_pool.clone();
		let select_chain = select_chain.clone();
		let chain_spec = config.chain_spec.cloned_box();

		move |deny_unsafe, subscription_executor: polkadot_rpc::SubscriptionTaskExecutor|
			-> polkadot_rpc::RpcExtension
		{
			let deps = polkadot_rpc::FullDeps {
				client: client.clone(),
				pool: transaction_pool.clone(),
				select_chain: select_chain.clone(),
				chain_spec: chain_spec.cloned_box(),
				deny_unsafe,
				babe: polkadot_rpc::BabeDeps {
					babe_config: babe_config.clone(),
					shared_epoch_changes: shared_epoch_changes.clone(),
					keystore: keystore.clone(),
				},
				grandpa: polkadot_rpc::GrandpaDeps {
					shared_voter_state: shared_voter_state.clone(),
					shared_authority_set: shared_authority_set.clone(),
					justification_stream: justification_stream.clone(),
					subscription_executor: subscription_executor.clone(),
					finality_provider: finality_proof_provider.clone(),
				},
				beefy: polkadot_rpc::BeefyDeps {
					beefy_commitment_stream: beefy_commitment_stream.clone(),
					subscription_executor,
				},
			};

			polkadot_rpc::create_full(deps)
		}
	};

	Ok(service::PartialComponents {
		client,
		backend,
		task_manager,
		keystore_container,
		select_chain,
		import_queue,
		transaction_pool,
		other: (rpc_extensions_builder, import_setup, rpc_setup, slot_duration, telemetry)
	})
}

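/// Assemble the "real" overseer: instantiate every subsystem, wire them to the
/// network service, the parachains database and the keystore, and return the
/// overseer together with a handler for communicating with it.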
#[cfg(feature = "full-node")]
fn real_overseer<Spawner, RuntimeClient>(
	leaves: impl IntoIterator<Item = BlockInfo>,
	keystore: Arc<LocalKeystore>,
	runtime_client: Arc<RuntimeClient>,
	parachains_db: Arc<dyn kvdb::KeyValueDB>,
	availability_config: AvailabilityConfig,
	approval_voting_config: ApprovalVotingConfig,
	network_service: Arc<sc_network::NetworkService<Block, Hash>>,
	authority_discovery: AuthorityDiscoveryService,
	request_multiplexer: RequestMultiplexer,
	registry: Option<&Registry>,
	spawner: Spawner,
	is_collator: IsCollator,
	candidate_validation_config: CandidateValidationConfig,
) -> Result<(Overseer<Spawner, Arc<RuntimeClient>>, OverseerHandler), Error>
where
	RuntimeClient: 'static + ProvideRuntimeApi<Block> + HeaderBackend<Block> + AuxStore,
	RuntimeClient::Api: ParachainHost<Block> + BabeApi<Block> + AuthorityDiscoveryApi<Block>,
	Spawner: 'static + SpawnNamed + Clone + Unpin,
{
	use polkadot_node_subsystem_util::metrics::Metrics;

	use polkadot_availability_distribution::AvailabilityDistributionSubsystem;
	use polkadot_node_core_av_store::AvailabilityStoreSubsystem;
	use polkadot_availability_bitfield_distribution::BitfieldDistribution as BitfieldDistributionSubsystem;
	use polkadot_node_core_bitfield_signing::BitfieldSigningSubsystem;
	use polkadot_node_core_backing::CandidateBackingSubsystem;
	use polkadot_node_core_candidate_selection::CandidateSelectionSubsystem;
	use polkadot_node_core_candidate_validation::CandidateValidationSubsystem;
	use polkadot_node_core_chain_api::ChainApiSubsystem;
	use polkadot_node_collation_generation::CollationGenerationSubsystem;
	use polkadot_collator_protocol::{CollatorProtocolSubsystem, ProtocolSide};
	use polkadot_network_bridge::NetworkBridge as NetworkBridgeSubsystem;
	use polkadot_node_core_provisioner::ProvisioningSubsystem as ProvisionerSubsystem;
	use polkadot_node_core_runtime_api::RuntimeApiSubsystem;
	use polkadot_statement_distribution::StatementDistribution as StatementDistributionSubsystem;
	use polkadot_availability_recovery::AvailabilityRecoverySubsystem;
	use polkadot_approval_distribution::ApprovalDistribution as ApprovalDistributionSubsystem;
	use polkadot_node_core_approval_voting::ApprovalVotingSubsystem;
	use polkadot_gossip_support::GossipSupport as GossipSupportSubsystem;

	let all_subsystems = AllSubsystems {
		availability_distribution: AvailabilityDistributionSubsystem::new(
			keystore.clone(),
			Metrics::register(registry)?,
		),
		availability_recovery: AvailabilityRecoverySubsystem::with_chunks_only(),
		availability_store: AvailabilityStoreSubsystem::new(
			parachains_db.clone(),
			availability_config,
			Metrics::register(registry)?,
		),
		bitfield_distribution: BitfieldDistributionSubsystem::new(
			Metrics::register(registry)?,
		),
		bitfield_signing: BitfieldSigningSubsystem::new(
			spawner.clone(),
			keystore.clone(),
			Metrics::register(registry)?,
		),
		candidate_backing: CandidateBackingSubsystem::new(
			spawner.clone(),
			keystore.clone(),
			Metrics::register(registry)?,
		),
		candidate_selection: CandidateSelectionSubsystem::new(
			spawner.clone(),
			keystore.clone(),
			Metrics::register(registry)?,
		),
		candidate_validation: CandidateValidationSubsystem::with_config(
			candidate_validation_config,
			Metrics::register(registry)?,
		),
		chain_api: ChainApiSubsystem::new(
			runtime_client.clone(),
			Metrics::register(registry)?,
		),
		collation_generation: CollationGenerationSubsystem::new(
			Metrics::register(registry)?,
		),
		collator_protocol: {
			let side = match is_collator {
				IsCollator::Yes(collator_pair) => ProtocolSide::Collator(
					network_service.local_peer_id().clone(),
					collator_pair,
					Metrics::register(registry)?,
				),
				IsCollator::No => ProtocolSide::Validator {
					keystore: keystore.clone(),
					eviction_policy: Default::default(),
					metrics: Metrics::register(registry)?,
				},
			};
			CollatorProtocolSubsystem::new(
				side,
			)
		},
		network_bridge: NetworkBridgeSubsystem::new(
			network_service.clone(),
			authority_discovery,
			request_multiplexer,
			Box::new(network_service.clone()),
			Metrics::register(registry)?,
		),
		provisioner: ProvisionerSubsystem::new(
			spawner.clone(),
			(),
			Metrics::register(registry)?,
		),
		runtime_api: RuntimeApiSubsystem::new(
			runtime_client.clone(),
			Metrics::register(registry)?,
			spawner.clone(),
		),
		statement_distribution: StatementDistributionSubsystem::new(
			keystore.clone(),
			Metrics::register(registry)?,
		),
		approval_distribution: ApprovalDistributionSubsystem::new(
			Metrics::register(registry)?,
		),
		approval_voting: ApprovalVotingSubsystem::with_config(
			approval_voting_config,
			parachains_db,
			keystore.clone(),
			Box::new(network_service.clone()),
			Metrics::register(registry)?,
		),
		gossip_support: GossipSupportSubsystem::new(
			keystore.clone(),
		),
	};

	Overseer::new(
		leaves,
		all_subsystems,
		registry,
		runtime_client.clone(),
		spawner,
	).map_err(|e| e.into())
}

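/// The components of a freshly-built full node, as returned by [`new_full`].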
#[cfg(feature = "full-node")]
pub struct NewFull<C> {
	pub task_manager: TaskManager,
	pub client: C,
	pub overseer_handler: Option<OverseerHandler>,
	pub network: Arc<sc_network::NetworkService<Block, <Block as BlockT>::Hash>>,
	pub rpc_handlers: RpcHandlers,
	pub backend: Arc<FullBackend>,
}

#[cfg(feature = "full-node")]
impl<C> NewFull<C> {
	/// Convert the client type using the given `func`.
	pub fn with_client<NC>(self, func: impl FnOnce(C) -> NC) -> NewFull<NC> {
		NewFull {
			client: func(self.client),
			task_manager: self.task_manager,
			overseer_handler: self.overseer_handler,
			network: self.network,
			rpc_handlers: self.rpc_handlers,
			backend: self.backend,
		}
	}
}

/// Is this node a collator?
#[cfg(feature = "full-node")]
#[derive(Clone)]
pub enum IsCollator {
	/// This node is a collator.
	Yes(CollatorPair),
	/// This node is not a collator.
	No,
}

#[cfg(feature = "full-node")]
impl std::fmt::Debug for IsCollator {
	fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
		use sp_core::Pair;
		match self {
			IsCollator::Yes(pair) => write!(fmt, "Yes({})", pair.public()),
			IsCollator::No => write!(fmt, "No"),
		}
	}
}

#[cfg(feature = "full-node")]
impl IsCollator {
	/// Is this a collator?
	fn is_collator(&self) -> bool {
		matches!(self, Self::Yes(_))
	}
}

/// Returns the active leaves the overseer should start with.
#[cfg(feature = "full-node")]
fn active_leaves<RuntimeApi, Executor>(
	select_chain: &sc_consensus::LongestChain<FullBackend, Block>,
	client: &FullClient<RuntimeApi, Executor>,
) -> Result<Vec<BlockInfo>, Error>
where
		RuntimeApi: ConstructRuntimeApi<Block, FullClient<RuntimeApi, Executor>> + Send + Sync + 'static,
		RuntimeApi::RuntimeApi:
		RuntimeApiCollection<StateBackend = sc_client_api::StateBackendFor<FullBackend, Block>>,
		Executor: NativeExecutionDispatch + 'static,
{
	let best_block = select_chain.best_chain()?;

	let mut leaves = select_chain
		.leaves()
		.unwrap_or_default()
		.into_iter()
		.filter_map(|hash| {
			let number = client.number(hash).ok()??;

			// Only consider leaves that are at most one block behind the best
			// block, i.e. potential uncles of it.
			if number < best_block.number().saturating_sub(1) {
				return None
			} else if hash == best_block.hash() {
				return None
			};

			let parent_hash = client.header(&BlockId::Hash(hash)).ok()??.parent_hash;

			Some(BlockInfo {
				hash,
				parent_hash,
				number,
			})
		})
		.collect::<Vec<_>>();

	// Sort the leaves by block number, append the best block, and keep the
	// newest `MAX_ACTIVE_LEAVES` from the end.
	leaves.sort_by_key(|b| b.number);

	leaves.push(BlockInfo {
		hash: best_block.hash(),
		parent_hash: *best_block.parent_hash(),
		number: *best_block.number(),
	});

	Ok(leaves.into_iter().rev().take(MAX_ACTIVE_LEAVES).collect())
}

/// Create a new full node of arbitrary runtime and executor.
///
/// This is an advanced feature and not recommended for general use. Generally, `build_full` is
/// a better choice.
#[cfg(feature = "full-node")]
pub fn new_full<RuntimeApi, Executor>(
	mut config: Configuration,
	is_collator: IsCollator,
	grandpa_pause: Option<(u32, u32)>,
	disable_beefy: bool,
	jaeger_agent: Option<std::net::SocketAddr>,
	telemetry_worker_handle: Option<TelemetryWorkerHandle>,
	program_path: Option<std::path::PathBuf>,
) -> Result<NewFull<Arc<FullClient<RuntimeApi, Executor>>>, Error>
	where
		RuntimeApi: ConstructRuntimeApi<Block, FullClient<RuntimeApi, Executor>> + Send + Sync + 'static,
		RuntimeApi::RuntimeApi:
		RuntimeApiCollection<StateBackend = sc_client_api::StateBackendFor<FullBackend, Block>>,
		Executor: NativeExecutionDispatch + 'static,
{
	let role = config.role.clone();
	let force_authoring = config.force_authoring;
	let backoff_authoring_blocks = {
		let mut backoff = sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging::default();

		if config.chain_spec.is_rococo() || config.chain_spec.is_wococo() {
			// it's a testnet that's in flux, finality has stalled sometimes due
			// to operational issues and it's annoying to slow down block
			// production to 1 block per hour.
			backoff.max_interval = 10;
		}

		Some(backoff)
	};

	let disable_grandpa = config.disable_grandpa;
	let name = config.network.node_name.clone();

	let service::PartialComponents {
		client,
		backend,
		mut task_manager,
		keystore_container,
		select_chain,
		import_queue,
		transaction_pool,
		other: (rpc_extensions_builder, import_setup, rpc_setup, slot_duration, mut telemetry)
	} = new_partial::<RuntimeApi, Executor>(&mut config, jaeger_agent, telemetry_worker_handle)?;

	let prometheus_registry = config.prometheus_registry().cloned();

	let shared_voter_state = rpc_setup;
	let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht;

	// Note: GrandPa is pushed before the Polkadot-specific protocols. This doesn't change
	// anything in terms of behaviour, but makes the logs more consistent with the other
	// Substrate nodes.
	config.network.extra_sets.push(grandpa::grandpa_peers_set_config());

	if config.chain_spec.is_rococo() || config.chain_spec.is_wococo() {
		config.network.extra_sets.push(beefy_gadget::beefy_peers_set_config());
	}

	{
		use polkadot_network_bridge::{peer_sets_info, IsAuthority};
		let is_authority = if role.is_authority() {
			IsAuthority::Yes
		} else {
			IsAuthority::No
		};
		config.network.extra_sets.extend(peer_sets_info(is_authority));
	}

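	// Register the GRANDPA warp sync request-response protocol, and gather all
	// parachain-related request-response protocols behind a single multiplexer.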
	config.network.request_response_protocols.push(sc_finality_grandpa_warp_sync::request_response_config_for_chain(
		&config, task_manager.spawn_handle(), backend.clone(), import_setup.1.shared_authority_set().clone(),
	));
	let request_multiplexer = {
		let (multiplexer, configs) = RequestMultiplexer::new();
		config.network.request_response_protocols.extend(configs);
		multiplexer
	};

	let (network, system_rpc_tx, network_starter) =
		service::build_network(service::BuildNetworkParams {
			config: &config,
			client: client.clone(),
			transaction_pool: transaction_pool.clone(),
			spawn_handle: task_manager.spawn_handle(),
			import_queue,
			on_demand: None,
			block_announce_validator_builder: None,
		})?;

	if config.offchain_worker.enabled {
		let _ = service::build_offchain_workers(
			&config, task_manager.spawn_handle(), client.clone(), network.clone(),
		);
	}

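	// The parachains subsystems keep their data in a database separate from the
	// relay-chain client's; this requires a real database path (see
	// `Error::DatabasePathRequired`).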
	let parachains_db = crate::parachains_db::open_creating(
		config.database.path().ok_or(Error::DatabasePathRequired)?.into(),
		crate::parachains_db::CacheSizes::default(),
	)?;

	let availability_config = AvailabilityConfig {
		col_data: crate::parachains_db::REAL_COLUMNS.col_availability_data,
		col_meta: crate::parachains_db::REAL_COLUMNS.col_availability_meta,
	};

	let approval_voting_config = ApprovalVotingConfig {
		col_data: crate::parachains_db::REAL_COLUMNS.col_approval_data,
		slot_duration_millis: slot_duration.as_millis() as u64,
	};

	let candidate_validation_config = CandidateValidationConfig {
		artifacts_cache_path: config.database
			.path()
			.ok_or(Error::DatabasePathRequired)?
			.join("pvf-artifacts"),
		program_path: match program_path {
			None => std::env::current_exe()?,
			Some(p) => p,
		},
	};

	let chain_spec = config.chain_spec.cloned_box();
	let rpc_handlers = service::spawn_tasks(service::SpawnTasksParams {
		config,
		backend: backend.clone(),
		client: client.clone(),
		keystore: keystore_container.sync_keystore(),
		network: network.clone(),
		rpc_extensions_builder: Box::new(rpc_extensions_builder),
		transaction_pool: transaction_pool.clone(),
		task_manager: &mut task_manager,
		on_demand: None,
		remote_blockchain: None,
		system_rpc_tx,
		telemetry: telemetry.as_mut(),
	})?;

	let (block_import, link_half, babe_link, beefy_link) = import_setup;

	let overseer_client = client.clone();
	let spawner = task_manager.spawn_handle();
	let active_leaves = active_leaves(&select_chain, &*client)?;

	let authority_discovery_service = if role.is_authority() || is_collator.is_collator() {
		use sc_network::Event;
		use futures::StreamExt;

		let authority_discovery_role = if role.is_authority() {
			sc_authority_discovery::Role::PublishAndDiscover(
				keystore_container.keystore(),
			)
		} else {
			// don't publish our addresses when we're only a collator
			sc_authority_discovery::Role::Discover
		};
		let dht_event_stream = network.event_stream("authority-discovery")
			.filter_map(|e| async move { match e {
				Event::Dht(e) => Some(e),
				_ => None,
			}});
		let (worker, service) = sc_authority_discovery::new_worker_and_service_with_config(
			sc_authority_discovery::WorkerConfig {
				publish_non_global_ips: auth_disc_publish_non_global_ips,
				..Default::default()
			},
			client.clone(),
			network.clone(),
			Box::pin(dht_event_stream),
			authority_discovery_role,
			prometheus_registry.clone(),
		);

		task_manager.spawn_handle().spawn("authority-discovery-worker", worker.run());
		Some(service)
	} else {
		None
	};

	// We would write `let overseer_handler = authority_discovery_service.map(...)` here,
	// but then we could not use `?` to propagate errors from within the closure.
	let local_keystore = keystore_container.local_keystore();
	if local_keystore.is_none() {
		tracing::info!("Cannot run as validator without local keystore.");
	}

	let maybe_params = local_keystore
		.and_then(move |k| authority_discovery_service.map(|a| (a, k)));

	let overseer_handler = if let Some((authority_discovery_service, keystore)) = maybe_params {
		let (overseer, overseer_handler) = real_overseer(
			active_leaves,
			keystore,
			overseer_client.clone(),
			parachains_db,
			availability_config,
			approval_voting_config,
			network.clone(),
			authority_discovery_service,
			request_multiplexer,
			prometheus_registry.as_ref(),
			spawner,
			is_collator,
			candidate_validation_config,
		)?;
		let overseer_handler_clone = overseer_handler.clone();

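		// Run the overseer next to a forwarder of block import and finality
		// events; if either future completes, the whole (essential) task ends.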
		task_manager.spawn_essential_handle().spawn_blocking("overseer", Box::pin(async move {
			use futures::{pin_mut, select, FutureExt};

			let forward = polkadot_overseer::forward_events(overseer_client, overseer_handler_clone);

			let forward = forward.fuse();
			let overseer_fut = overseer.run().fuse();

			pin_mut!(overseer_fut);
			pin_mut!(forward);

			select! {
				_ = forward => (),
				_ = overseer_fut => (),
				complete => (),
			}
		}));

		Some(overseer_handler)
	} else {
		None
	};

	if role.is_authority() {
		let can_author_with =
			consensus_common::CanAuthorWithNativeVersion::new(client.executor().clone());

		let proposer = sc_basic_authorship::ProposerFactory::new(
			task_manager.spawn_handle(),
			client.clone(),
			transaction_pool,
			prometheus_registry.as_ref(),
			telemetry.as_ref().map(|x| x.handle()),
		);

		let client_clone = client.clone();
		let overseer_handler = overseer_handler.as_ref().ok_or(Error::AuthoritiesRequireRealOverseer)?.clone();
		let slot_duration = babe_link.config().slot_duration();
		let babe_config = babe::BabeParams {
			keystore: keystore_container.sync_keystore(),
			client: client.clone(),
			select_chain,
			block_import,
			env: proposer,
			sync_oracle: network.clone(),
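			// Besides the timestamp and slot, block authorship needs the uncles
			// inherent and the parachains inherent, the latter assembled via
			// the overseer.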
			create_inherent_data_providers: move |parent, ()| {
				let client_clone = client_clone.clone();
				let overseer_handler = overseer_handler.clone();
				async move {
					let parachain = polkadot_node_core_parachains_inherent::ParachainsInherentDataProvider::create(
						&*client_clone,
						overseer_handler,
						parent,
					).await.map_err(|e| Box::new(e))?;

					let uncles = sc_consensus_uncles::create_uncles_inherent_data_provider(
						&*client_clone,
						parent,
					)?;

					let timestamp = sp_timestamp::InherentDataProvider::from_system_time();

					let slot =
						sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_duration(
							*timestamp,
							slot_duration,
						);

					Ok((timestamp, slot, uncles, parachain))
				}
			},
			force_authoring,
			backoff_authoring_blocks,
			babe_link,
			can_author_with,
			block_proposal_slot_portion: babe::SlotProportion::new(2f32 / 3f32),
			telemetry: telemetry.as_ref().map(|x| x.handle()),
		};

		let babe = babe::start_babe(babe_config)?;
		task_manager.spawn_essential_handle().spawn_blocking("babe", babe);
	}

	// if the node isn't actively participating in consensus then it doesn't
	// need a keystore, regardless of which protocol we use below.
	let keystore_opt = if role.is_authority() {
		Some(keystore_container.sync_keystore())
	} else {
		None
	};

	// We currently only run the BEEFY gadget on the Rococo and Wococo testnets.
	if !disable_beefy && (chain_spec.is_rococo() || chain_spec.is_wococo()) {
		let beefy_params = beefy_gadget::BeefyParams {
			client: client.clone(),
			backend: backend.clone(),
			key_store: keystore_opt.clone(),
			network: network.clone(),
			signed_commitment_sender: beefy_link,
			min_block_delta: if chain_spec.is_wococo() { 4 } else { 8 },
			prometheus_registry: prometheus_registry.clone(),
		};
	
		let gadget = beefy_gadget::start_beefy_gadget::<_, beefy_primitives::ecdsa::AuthorityPair, _, _, _>(
			beefy_params
		);

		// Wococo's purpose is to be a testbed for BEEFY, so if it fails we'll
		// bring the node down with it to make sure it is noticed.
		if chain_spec.is_wococo() {
			task_manager.spawn_essential_handle().spawn_blocking("beefy-gadget", gadget);
		} else {
			task_manager.spawn_handle().spawn_blocking("beefy-gadget", gadget);
		}
	}

	let config = grandpa::Config {
		// FIXME substrate#1578 make this available through chainspec
		gossip_duration: Duration::from_millis(1000),
		justification_period: 512,