let config = grandpa::Config {
// FIXME substrate#1578 make this available through chainspec
gossip_duration: Duration::from_millis(1000),
justification_period: 512,
name: Some(name),
observer_enabled: false,
keystore: keystore_opt,
local_role: role.clone(),
telemetry: telemetry.as_ref().map(|x| x.handle()),
};
let enable_grandpa = !disable_grandpa;
if enable_grandpa {
// start the full GRANDPA voter
// NOTE: unlike in substrate we are currently running the full
// GRANDPA voter protocol for all full nodes (regardless of whether
// they're validators or not). at this point the full voter should
// provide better guarantees of block and vote data availability than
// the observer.
// add a custom voting rule to temporarily stop voting for new blocks
// after the given pause block is finalized and restarting after the
// given delay.
let builder = grandpa::VotingRulesBuilder::default();
let voting_rule = match grandpa_pause {
Some((block, delay)) => {
info!(
block_number = %block,
delay = %delay,
"GRANDPA scheduled voting pause set for block #{} with a duration of {} blocks.",
block,
delay,
);
builder.add(grandpa_support::PauseAfterBlockFor(block, delay)).build()
},
None => builder.build(),
};
let grandpa_config = grandpa::GrandpaParams {
config,
link: link_half,
network: network.clone(),
voting_rule,
prometheus_registry: prometheus_registry.clone(),
shared_voter_state: grandpa::SharedVoterState::empty(),
telemetry: telemetry.as_ref().map(|x| x.handle()),
};
task_manager
.spawn_essential_handle()
.spawn_blocking("grandpa-voter", grandpa::run_grandpa_voter(grandpa_config)?);
network_starter.start_network();
Ok(NewFull { task_manager, client, overseer_handle, network, rpc_handlers, backend })
}
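// Aside (illustrative sketch, not part of the original source): the
// `PauseAfterBlockFor(block, delay)` rule installed above holds GRANDPA votes
// at the pause block until the chain has grown `delay` blocks past it. The
// window check reduces to roughly the following, assuming plain `u32` block
// numbers in place of the real `NumberFor<Block>`:
fn pause_vote_target(pause_block: u32, delay: u32, current_target: u32, best_target: u32) -> Option<u32> {
	// Only votes targeting blocks beyond the pause block are restricted.
	if current_target > pause_block {
		// Once the best target clears the pause window, vote freely again.
		if best_target > pause_block + delay {
			return None
		}
		// Inside the window: keep the vote pinned to the pause block.
		return Some(pause_block)
	}
	None
}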
/// Builds a new service for a light client.
#[cfg(feature = "light-node")]
fn new_light<Runtime, Dispatch>(
mut config: Configuration,
) -> Result<(TaskManager, RpcHandlers), Error>
where
Runtime: 'static + Send + Sync + ConstructRuntimeApi<Block, LightClient<Runtime, Dispatch>>,
<Runtime as ConstructRuntimeApi<Block, LightClient<Runtime, Dispatch>>>::RuntimeApi:
RuntimeApiCollection<StateBackend = sc_client_api::StateBackendFor<LightBackend, Block>>,
Dispatch: NativeExecutionDispatch + 'static,
{
use sc_client_api::backend::RemoteBackend;
let telemetry = config
.telemetry_endpoints
.clone()
.filter(|x| !x.is_empty())
.map(|endpoints| -> Result<_, telemetry::Error> {
let worker = TelemetryWorker::new(16)?;
let telemetry = worker.handle().new_telemetry(endpoints);
Ok((worker, telemetry))
})
.transpose()?;
let (client, backend, keystore_container, mut task_manager, on_demand) =
service::new_light_parts::<Block, Runtime, Dispatch>(
&config,
telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
)?;
let mut telemetry = telemetry.map(|(worker, telemetry)| {
task_manager.spawn_handle().spawn("telemetry", worker.run());
telemetry
});
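// Aside (not part of the original source): the `.filter(...).map(...).transpose()?`
// chain above turns an `Option<Result<_, _>>` into a `Result<Option<_>, _>`, so
// `?` can surface telemetry setup errors while telemetry itself stays optional.
// The same pattern in miniature:
fn transpose_demo() -> Result<Option<u32>, std::num::ParseIntError> {
	let raw: Option<&str> = Some("42");
	// `map` yields Option<Result<u32, _>>; `transpose` flips it so `?` applies.
	let parsed: Option<u32> = raw.map(|s| s.parse::<u32>()).transpose()?;
	Ok(parsed)
}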
config.network.extra_sets.push(grandpa::grandpa_peers_set_config());
let select_chain = sc_consensus::LongestChain::new(backend.clone());
let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light(
config.transaction_pool.clone(),
config.prometheus_registry(),
task_manager.spawn_essential_handle(),
client.clone(),
on_demand.clone(),
));
let (grandpa_block_import, grandpa_link) = grandpa::block_import(
client.clone(),
&(client.clone() as Arc<_>),
select_chain.clone(),
telemetry.as_ref().map(|x| x.handle()),
)?;
let justification_import = grandpa_block_import.clone();
let (babe_block_import, babe_link) = babe::block_import(
babe::Config::get_or_compute(&*client)?,
grandpa_block_import,
client.clone(),
)?;
// FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`.
let slot_duration = babe_link.config().slot_duration();
let import_queue = babe::import_queue(
babe_link,
babe_block_import,
Some(Box::new(justification_import)),
client.clone(),
select_chain.clone(),
move |_, ()| async move {
let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
let slot =
sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_duration(
*timestamp,
slot_duration,
);
Ok((timestamp, slot))
},
&task_manager.spawn_essential_handle(),
config.prometheus_registry(),
sp_consensus::NeverCanAuthor,
telemetry.as_ref().map(|x| x.handle()),
)?;
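// Aside (sketch, not part of the original source): the BABE slot inherent built
// in the closure above is simply the wall-clock timestamp divided by the slot
// duration, both in milliseconds. With a hypothetical 6_000 ms slot:
fn slot_from_timestamp(timestamp_ms: u64, slot_duration_ms: u64) -> u64 {
	// e.g. 1_630_000_000_000 / 6_000 = slot 271_666_666
	timestamp_ms / slot_duration_ms
}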
let warp_sync = Arc::new(grandpa::warp_proof::NetworkProvider::new(
backend.clone(),
grandpa_link.shared_authority_set().clone(),
));
let (network, system_rpc_tx, network_starter) =
service::build_network(service::BuildNetworkParams {
config: &config,
client: client.clone(),
transaction_pool: transaction_pool.clone(),
spawn_handle: task_manager.spawn_handle(),
import_queue,
on_demand: Some(on_demand.clone()),
block_announce_validator_builder: None,
warp_sync: Some(warp_sync),
})?;
let enable_grandpa = !config.disable_grandpa;
if enable_grandpa {
let name = config.network.node_name.clone();
let config = grandpa::Config {
gossip_duration: Duration::from_millis(1000),
justification_period: 512,
name: Some(name),
observer_enabled: false,
keystore: None,
local_role: config.role.clone(),
telemetry: telemetry.as_ref().map(|x| x.handle()),
};
task_manager.spawn_handle().spawn_blocking(
"grandpa-observer",
grandpa::run_grandpa_observer(config, grandpa_link, network.clone())?,
);
}
if config.offchain_worker.enabled {
let _ = service::build_offchain_workers(
&config,
task_manager.spawn_handle(),
client.clone(),
network.clone(),
);
}
let light_deps = polkadot_rpc::LightDeps {
remote_blockchain: backend.remote_blockchain(),
fetcher: on_demand.clone(),
client: client.clone(),
pool: transaction_pool.clone(),
};
let rpc_extensions = polkadot_rpc::create_light(light_deps);
let rpc_handlers = service::spawn_tasks(service::SpawnTasksParams {
on_demand: Some(on_demand),
remote_blockchain: Some(backend.remote_blockchain()),
rpc_extensions_builder: Box::new(service::NoopRpcExtensionBuilder(rpc_extensions)),
task_manager: &mut task_manager,
config,
keystore: keystore_container.sync_keystore(),
backend,
transaction_pool,
client,
network,
system_rpc_tx,
telemetry: telemetry.as_mut(),
})?;
network_starter.start_network();
Ok((task_manager, rpc_handlers))
}
/// Builds a new object suitable for chain operations.
#[cfg(feature = "full-node")]
pub fn new_chain_ops(
mut config: &mut Configuration,
jaeger_agent: Option<std::net::SocketAddr>,
) -> Result<
(
Arc<Client>,
Arc<FullBackend>,
sc_consensus::BasicQueue<Block, PrefixedMemoryDB<BlakeTwo256>>,
TaskManager,
),
Error,
> {
config.keystore = service::config::KeystoreConfig::InMemory;
#[cfg(feature = "rococo-native")]
if config.chain_spec.is_rococo() || config.chain_spec.is_wococo() {
let service::PartialComponents { client, backend, import_queue, task_manager, .. } =
new_partial::<rococo_runtime::RuntimeApi, RococoExecutorDispatch>(
config,
jaeger_agent,
None,
)?;
return Ok((Arc::new(Client::Rococo(client)), backend, import_queue, task_manager))
}
#[cfg(feature = "kusama-native")]
if config.chain_spec.is_kusama() {
let service::PartialComponents { client, backend, import_queue, task_manager, .. } =
new_partial::<kusama_runtime::RuntimeApi, KusamaExecutorDispatch>(
config,
jaeger_agent,
None,
)?;
return Ok((Arc::new(Client::Kusama(client)), backend, import_queue, task_manager))
}
#[cfg(feature = "westend-native")]
if config.chain_spec.is_westend() {
let service::PartialComponents { client, backend, import_queue, task_manager, .. } =
new_partial::<westend_runtime::RuntimeApi, WestendExecutorDispatch>(
config,
jaeger_agent,
None,
)?;
return Ok((Arc::new(Client::Westend(client)), backend, import_queue, task_manager))
}
let service::PartialComponents { client, backend, import_queue, task_manager, .. } =
new_partial::<polkadot_runtime::RuntimeApi, PolkadotExecutorDispatch>(
config,
jaeger_agent,
None,
)?;
Ok((Arc::new(Client::Polkadot(client)), backend, import_queue, task_manager))
}
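// Usage sketch (hypothetical caller, not part of the original source): CLI
// subcommands such as export-blocks or check-block obtain runtime-specific
// components through this dispatcher, roughly:
//
//     let (client, backend, import_queue, task_manager) =
//         new_chain_ops(&mut config, None)?;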
/// Build a new light node.
#[cfg(feature = "light-node")]
pub fn build_light(config: Configuration) -> Result<(TaskManager, RpcHandlers), Error> {
#[cfg(feature = "rococo-native")]
if config.chain_spec.is_rococo() || config.chain_spec.is_wococo() {
return new_light::<rococo_runtime::RuntimeApi, RococoExecutorDispatch>(config)
}
#[cfg(feature = "kusama-native")]
if config.chain_spec.is_kusama() {
return new_light::<kusama_runtime::RuntimeApi, KusamaExecutorDispatch>(config)
}
#[cfg(feature = "westend-native")]
if config.chain_spec.is_westend() {
return new_light::<westend_runtime::RuntimeApi, WestendExecutorDispatch>(config)
}
new_light::<polkadot_runtime::RuntimeApi, PolkadotExecutorDispatch>(config)
}
#[cfg(feature = "full-node")]
is_collator: IsCollator,
jaeger_agent: Option<std::net::SocketAddr>,
telemetry_worker_handle: Option<TelemetryWorkerHandle>,
overseer_gen: impl OverseerGen,
) -> Result<NewFull<Client>, Error> {
#[cfg(feature = "rococo-native")]
if config.chain_spec.is_rococo() || config.chain_spec.is_wococo() {
return new_full::<rococo_runtime::RuntimeApi, RococoExecutorDispatch, _>(
config,
is_collator,
grandpa_pause,
disable_beefy,
jaeger_agent,
telemetry_worker_handle,
None,
overseer_gen,
)
.map(|full| full.with_client(Client::Rococo))
}
#[cfg(feature = "kusama-native")]
if config.chain_spec.is_kusama() {
return new_full::<kusama_runtime::RuntimeApi, KusamaExecutorDispatch, _>(
config,
is_collator,
grandpa_pause,
disable_beefy,
jaeger_agent,
telemetry_worker_handle,
None,
overseer_gen,
)
.map(|full| full.with_client(Client::Kusama))
}
#[cfg(feature = "westend-native")]
if config.chain_spec.is_westend() {
return new_full::<westend_runtime::RuntimeApi, WestendExecutorDispatch, _>(
config,
is_collator,
grandpa_pause,
disable_beefy,
jaeger_agent,
telemetry_worker_handle,
None,
overseer_gen,
)
.map(|full| full.with_client(Client::Westend))
}
new_full::<polkadot_runtime::RuntimeApi, PolkadotExecutorDispatch, _>(
config,
is_collator,
grandpa_pause,
disable_beefy,
jaeger_agent,
telemetry_worker_handle,
None,
overseer_gen,
)
.map(|full| full.with_client(Client::Polkadot))
}
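// Usage sketch (hypothetical caller, not part of the original source): a plain
// full node with no scheduled GRANDPA pause and BEEFY left disabled, assuming
// the `RealOverseerGen` overseer builder:
//
//     let full = build_full(config, IsCollator::No, None, true, None, None, RealOverseerGen)?;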