block_proposal_slot_portion: babe::SlotProportion::new(2f32 / 3f32),
telemetry: telemetry.as_ref().map(|x| x.handle()),
};
let babe = babe::start_babe(babe_config)?;
task_manager.spawn_essential_handle().spawn_blocking("babe", babe);
}
// We currently only run the BEEFY gadget on the Rococo and Westend test
// networks. On Rococo we start the BEEFY gadget as a normal (non-essential)
// task for now, since BEEFY is still experimental and we don't want a
// failure to bring down the whole node. The Westend test network is less used
// than Rococo, so a failure there is less problematic; it will be the main
// testing target for BEEFY for now.
if chain_spec.is_westend() || chain_spec.is_rococo() {
let gadget = beefy_gadget::start_beefy_gadget::<_, beefy_primitives::ecdsa::AuthorityPair, _, _, _, _>(
client.clone(),
keystore_container.sync_keystore(),
network.clone(),
beefy_link,
network.clone(),
prometheus_registry.clone()
);
if chain_spec.is_westend() {
task_manager.spawn_essential_handle().spawn_blocking("beefy-gadget", gadget);
} else {
task_manager.spawn_handle().spawn_blocking("beefy-gadget", gadget);
}
}
// if the node isn't actively participating in consensus then it doesn't
// need a keystore, regardless of which protocol we use below.
let keystore_opt = if role.is_authority() {
Some(keystore_container.sync_keystore())
} else {
None
};
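// GRANDPA voter configuration. The keystore is only handed over when running
// as an authority, so non-authority nodes run the voter protocol below without
// ever being able to sign votes.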
let config = grandpa::Config {
// FIXME substrate#1578 make this available through chainspec
gossip_duration: Duration::from_millis(1000),
justification_period: 512,
name: Some(name),
observer_enabled: false,
keystore: keystore_opt,
is_authority: role.is_authority(),
telemetry: telemetry.as_ref().map(|x| x.handle()),
};
let enable_grandpa = !disable_grandpa;
if enable_grandpa {
// start the full GRANDPA voter
// NOTE: unlike in substrate we are currently running the full
// GRANDPA voter protocol for all full nodes (regardless of whether
// they're validators or not). at this point the full voter should
// provide better guarantees of block and vote data availability than
// the observer.
// add a custom voting rule to temporarily stop voting for new blocks
// after the given pause block is finalized and restarting after the
// given delay.
let builder = grandpa::VotingRulesBuilder::default();
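// When built with the `real-overseer` feature, an approval-checking voting
// rule is layered on top so that GRANDPA only votes on blocks that have passed
// the parachain approval-voting process (as suggested by the rule's name).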
#[cfg(feature = "real-overseer")]
let builder = if let Some(ref overseer) = overseer_handler {
builder.add(grandpa_support::ApprovalCheckingVotingRule::new(
overseer.clone(),
prometheus_registry.as_ref(),
)?)
} else {
builder
};
let voting_rule = match grandpa_pause {
Some((block, delay)) => {
info!(
block_number = %block,
delay = %delay,
"GRANDPA scheduled voting pause set for block #{} with a duration of {} blocks.",
block,
delay,
);
builder
.add(grandpa_support::PauseAfterBlockFor(block, delay))
.build()
},
None => builder.build(),
};
let grandpa_config = grandpa::GrandpaParams {
config,
link: link_half,
network: network.clone(),
voting_rule,
prometheus_registry: prometheus_registry.clone(),
telemetry: telemetry.as_ref().map(|x| x.handle()),
};
task_manager.spawn_essential_handle().spawn_blocking(
"grandpa-voter",
grandpa::run_grandpa_voter(grandpa_config)?
);
}
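// Only start the networking once every task that registers network protocols
// has been spawned above.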
network_starter.start_network();
Ok(NewFull {
task_manager,
client,
network,
network_status_sinks,
rpc_handlers,
})
}
/// Builds a new service for a light client.
fn new_light<Runtime, Dispatch>(mut config: Configuration) -> Result<(
TaskManager,
RpcHandlers,
), Error>
where
Runtime: 'static + Send + Sync + ConstructRuntimeApi<Block, LightClient<Runtime, Dispatch>>,
<Runtime as ConstructRuntimeApi<Block, LightClient<Runtime, Dispatch>>>::RuntimeApi:
RuntimeApiCollection<StateBackend = sc_client_api::StateBackendFor<LightBackend, Block>>,
Dispatch: NativeExecutionDispatch + 'static,
{
use sc_client_api::backend::RemoteBackend;
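// Telemetry is optional: a worker is only created when the configuration lists
// at least one telemetry endpoint.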
let telemetry = config.telemetry_endpoints.clone()
.filter(|x| !x.is_empty())
.map(|endpoints| -> Result<_, telemetry::Error> {
let worker = TelemetryWorker::new(16)?;
let telemetry = worker.handle().new_telemetry(endpoints);
Ok((worker, telemetry))
})
.transpose()?;
let (client, backend, keystore_container, mut task_manager, on_demand) =
service::new_light_parts::<Block, Runtime, Dispatch>(
&config,
telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
)?;
let mut telemetry = telemetry
.map(|(worker, telemetry)| {
task_manager.spawn_handle().spawn("telemetry", worker.run());
telemetry
});
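// Light clients fork-choose with the longest-chain rule and use a light
// transaction pool that relies on the on-demand fetcher rather than local
// block and state data.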
let select_chain = sc_consensus::LongestChain::new(backend.clone());
let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light(
config.transaction_pool.clone(),
config.prometheus_registry(),
task_manager.spawn_handle(),
client.clone(),
on_demand.clone(),
));
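// Import pipeline: the GRANDPA block import wraps the client and is itself
// wrapped by the BABE block import below, so both consensus engines see every
// imported block.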
let (grandpa_block_import, _) = grandpa::block_import(
client.clone(),
&(client.clone() as Arc<_>),
telemetry.as_ref().map(|x| x.handle()),
)?;
let justification_import = grandpa_block_import.clone();
let (babe_block_import, babe_link) = babe::block_import(
babe::Config::get_or_compute(&*client)?,
grandpa_block_import,
client.clone(),
)?;
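// The import queue verifies BABE slot claims on incoming headers and routes
// GRANDPA justifications through the justification import; the inherent data
// providers supply data (e.g. timestamps) needed during verification.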
let inherent_data_providers = inherents::InherentDataProviders::new();
// FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`.
let import_queue = babe::import_queue(
babe_link,
babe_block_import,
Some(Box::new(justification_import)),
client.clone(),
select_chain.clone(),
inherent_data_providers.clone(),
&task_manager.spawn_essential_handle(),
config.prometheus_registry(),
telemetry.as_ref().map(|x| x.handle()),
)?;
let (network, network_status_sinks, system_rpc_tx, network_starter) =
service::build_network(service::BuildNetworkParams {
config: &config,
client: client.clone(),
transaction_pool: transaction_pool.clone(),
spawn_handle: task_manager.spawn_handle(),
import_queue,
on_demand: Some(on_demand.clone()),
block_announce_validator_builder: None,
})?;
if config.offchain_worker.enabled {
let _ = service::build_offchain_workers(
&config,
task_manager.spawn_handle(),
client.clone(),
network.clone(),
);
}
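// RPC dependencies for a light client: state and body queries are served via
// the remote blockchain backend and the on-demand fetcher.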
let light_deps = polkadot_rpc::LightDeps {
remote_blockchain: backend.remote_blockchain(),
fetcher: on_demand.clone(),
client: client.clone(),
pool: transaction_pool.clone(),
};
let rpc_extensions = polkadot_rpc::create_light(light_deps);
let rpc_handlers = service::spawn_tasks(service::SpawnTasksParams {
on_demand: Some(on_demand),
remote_blockchain: Some(backend.remote_blockchain()),
rpc_extensions_builder: Box::new(service::NoopRpcExtensionBuilder(rpc_extensions)),
task_manager: &mut task_manager,
keystore: keystore_container.sync_keystore(),
backend,
transaction_pool,
client,
network,
network_status_sinks,
system_rpc_tx,
})?;
network_starter.start_network();
Ok((task_manager, rpc_handlers))
}
/// Builds a new object suitable for chain operations.
#[cfg(feature = "full-node")]
pub fn new_chain_ops(
mut config: &mut Configuration,
jaeger_agent: Option<std::net::SocketAddr>,
) -> Result<
(
Arc<Client>,
Arc<FullBackend>,
consensus_common::import_queue::BasicQueue<Block, PrefixedMemoryDB<BlakeTwo256>>,
TaskManager,
),
Error,
>
{
config.keystore = service::config::KeystoreConfig::InMemory;
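// An in-memory keystore is enough here: chain operations (import/export/check
// blocks) never author or sign anything. The chain spec then selects which
// runtime's `new_partial` is instantiated below.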
if config.chain_spec.is_rococo() {
let service::PartialComponents { client, backend, import_queue, task_manager, .. }
= new_partial::<rococo_runtime::RuntimeApi, RococoExecutor>(config, jaeger_agent, None)?;
Ok((Arc::new(Client::Rococo(client)), backend, import_queue, task_manager))
} else if config.chain_spec.is_kusama() {
let service::PartialComponents { client, backend, import_queue, task_manager, .. }
= new_partial::<kusama_runtime::RuntimeApi, KusamaExecutor>(config, jaeger_agent, None)?;
Ok((Arc::new(Client::Kusama(client)), backend, import_queue, task_manager))
} else if config.chain_spec.is_westend() {
let service::PartialComponents { client, backend, import_queue, task_manager, .. }
= new_partial::<westend_runtime::RuntimeApi, WestendExecutor>(config, jaeger_agent, None)?;
Ok((Arc::new(Client::Westend(client)), backend, import_queue, task_manager))
} else {
let service::PartialComponents { client, backend, import_queue, task_manager, .. }
= new_partial::<polkadot_runtime::RuntimeApi, PolkadotExecutor>(config, jaeger_agent, None)?;
Ok((Arc::new(Client::Polkadot(client)), backend, import_queue, task_manager))
}
}
/// Build a new light node.
pub fn build_light(config: Configuration) -> Result<(
TaskManager,
RpcHandlers,
), Error> {
if config.chain_spec.is_rococo() {
new_light::<rococo_runtime::RuntimeApi, RococoExecutor>(config)
} else if config.chain_spec.is_kusama() {
new_light::<kusama_runtime::RuntimeApi, KusamaExecutor>(config)
} else if config.chain_spec.is_westend() {
new_light::<westend_runtime::RuntimeApi, WestendExecutor>(config)
} else {
new_light::<polkadot_runtime::RuntimeApi, PolkadotExecutor>(config)
}
}
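// Example usage of `build_light` above (a sketch only; the surrounding CLI and
// async-runtime plumbing are omitted and the variable names are illustrative):
//
//     let (mut task_manager, _rpc_handlers) = build_light(config)?;
//     task_manager.future().await?;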
/// Build a new full node.
#[cfg(feature = "full-node")]
pub fn build_full(
config: Configuration,
is_collator: IsCollator,
jaeger_agent: Option<std::net::SocketAddr>,
telemetry_worker_handle: Option<TelemetryWorkerHandle>,
) -> Result<NewFull<Client>, Error> {
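// Choose how parachain candidate validation is isolated: an external worker
// process with a cache under the database path where supported, falling back
// to in-process execution on android/wasm targets.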
let isolation_strategy = {
#[cfg(not(any(target_os = "android", target_os = "unknown")))]
{
let cache_base_path = config.database.path();
IsolationStrategy::external_process_with_caching(cache_base_path)
}
#[cfg(any(target_os = "android", target_os = "unknown"))]
{
IsolationStrategy::InProcess
}
};
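// Dispatch on the chain spec so the node is built with the matching runtime
// and native executor.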
if config.chain_spec.is_rococo() {
new_full::<rococo_runtime::RuntimeApi, RococoExecutor>(
config,
is_collator,
isolation_strategy,
).map(|full| full.with_client(Client::Rococo))
} else if config.chain_spec.is_kusama() {
new_full::<kusama_runtime::RuntimeApi, KusamaExecutor>(
config,
is_collator,
isolation_strategy,
).map(|full| full.with_client(Client::Kusama))
} else if config.chain_spec.is_westend() {
new_full::<westend_runtime::RuntimeApi, WestendExecutor>(
config,
is_collator,
isolation_strategy,
).map(|full| full.with_client(Client::Westend))
} else {
new_full::<polkadot_runtime::RuntimeApi, PolkadotExecutor>(
config,
is_collator,
isolation_strategy,
).map(|full| full.with_client(Client::Polkadot))