Unverified Commit 51ce2b31 authored by Ashley, committed by GitHub

Companion PR for `Remove the service, replacing it with a struct of individual chain components` (#1288)
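
For readers skimming the diff below: the upstream change swaps the opaque `impl AbstractService` handle, which was queried through accessor methods such as `service.client()` and `service.spawn_task_handle()`, for a plain `ServiceComponents` struct that callers destructure once, with the `TaskManager` kept alive to own the spawned tasks. A minimal, self-contained Rust sketch of that shape (the `Client`, `Network`, `SpawnHandle` types and the `new_full` constructor here are hypothetical stand-ins, not the real `sc-service` API):

```rust
use std::sync::Arc;

// Hypothetical stand-ins for the real sc-service types.
struct Client;
struct Network;
struct SpawnHandle;
struct TaskManager;

impl SpawnHandle {
    fn spawn(&self, name: &str) {
        println!("spawned task: {}", name);
    }
}

impl TaskManager {
    fn spawn_handle(&self) -> SpawnHandle {
        SpawnHandle
    }
}

// A struct of individual chain components, replacing the old opaque
// `impl AbstractService` with its accessor methods.
struct ServiceComponents {
    client: Arc<Client>,
    network: Arc<Network>,
    task_manager: TaskManager,
}

// Hypothetical constructor standing in for e.g. `kusama_new_full`.
fn new_full() -> ServiceComponents {
    ServiceComponents {
        client: Arc::new(Client),
        network: Arc::new(Network),
        task_manager: TaskManager,
    }
}

fn main() {
    // Destructure once up front; components are then moved or cloned
    // where needed instead of being re-fetched through the service.
    let ServiceComponents { client, network, task_manager } = new_full();
    let _ = (client, network);
    task_manager.spawn_handle().spawn("authority-discovery");
}
```

The practical consequence, visible throughout the diff, is that call sites stop holding the whole service and instead thread individual components (and `task_manager.spawn_handle()`) to wherever they are needed.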

* Switch branch

* Fix service things

* Fix browser node compilation

* Update branch

* fixed new service

* Update for new branch

* Fix browser node

* Update branch

* Revert "Switch branch"

This reverts commit 3623adff.

* Update cargo.lock
Co-authored-by: Bastian Köcher <git@kchr.de>
parent 0762de59
Pipeline #98757 passed with stages in 26 minutes and 18 seconds
@@ -46,8 +46,8 @@ async fn start_inner(chain_spec: String, log_level: String) -> Result<Client, Bo
info!("👤 Role: {}", config.display_role());
// Create the service. This is the most heavy initialization step.
-let service = service::kusama_new_light(config)
+let (task_manager, rpc_handlers) = service::kusama_new_light(config)
.map_err(|e| format!("{:?}", e))?;
-Ok(browser_utils::start_client(service))
+Ok(browser_utils::start_client(task_manager, rpc_handlers))
}
@@ -19,8 +19,7 @@ use log::info;
use service::{IdentifyVariant, self};
#[cfg(feature = "service-rewr")]
use service_new::{IdentifyVariant, self as service};
-use sc_executor::NativeExecutionDispatch;
-use sc_cli::{SubstrateCli, Result};
+use sc_cli::{SubstrateCli, Result, RuntimeVersion, Role};
use crate::cli::{Cli, Subcommand};
fn get_exec_name() -> Option<String> {
@@ -75,6 +74,16 @@ impl SubstrateCli for Cli {
path => Box::new(service::PolkadotChainSpec::from_json_file(std::path::PathBuf::from(path))?),
})
}
+fn native_runtime_version(spec: &Box<dyn service::ChainSpec>) -> &'static RuntimeVersion {
+if spec.is_kusama() {
+&service::kusama_runtime::VERSION
+} else if spec.is_westend() {
+&service::westend_runtime::VERSION
+} else {
+&service::polkadot_runtime::VERSION
+}
+}
}
/// Parses polkadot specific CLI arguments and run the service.
@@ -116,56 +125,44 @@ pub fn run() -> Result<()> {
info!(" KUSAMA FOUNDATION ");
info!("----------------------------");
-runtime.run_node(
-|config| {
-service::kusama_new_light(config)
-},
-|config| {
-service::kusama_new_full(
-config,
-None,
-None,
-authority_discovery_enabled,
-6000,
-grandpa_pause,
-).map(|(s, _, _)| s)
-},
-service::KusamaExecutor::native_version().runtime_version
-)
+runtime.run_node_until_exit(|config| match config.role {
+Role::Light => service::kusama_new_light(config)
+.map(|(components, _)| components),
+_ => service::kusama_new_full(
+config,
+None,
+None,
+authority_discovery_enabled,
+6000,
+grandpa_pause,
+).map(|(components, _, _)| components)
+})
} else if chain_spec.is_westend() {
-runtime.run_node(
-|config| {
-service::westend_new_light(config)
-},
-|config| {
-service::westend_new_full(
-config,
-None,
-None,
-authority_discovery_enabled,
-6000,
-grandpa_pause,
-).map(|(s, _, _)| s)
-},
-service::WestendExecutor::native_version().runtime_version
-)
+runtime.run_node_until_exit(|config| match config.role {
+Role::Light => service::westend_new_light(config)
+.map(|(components, _)| components),
+_ => service::westend_new_full(
+config,
+None,
+None,
+authority_discovery_enabled,
+6000,
+grandpa_pause,
+).map(|(components, _, _)| components)
+})
} else {
-runtime.run_node(
-|config| {
-service::polkadot_new_light(config)
-},
-|config| {
-service::polkadot_new_full(
-config,
-None,
-None,
-authority_discovery_enabled,
-6000,
-grandpa_pause,
-).map(|(s, _, _)| s)
-},
-service::PolkadotExecutor::native_version().runtime_version
-)
+runtime.run_node_until_exit(|config| match config.role {
+Role::Light => service::polkadot_new_light(config)
+.map(|(components, _)| components),
+_ => service::polkadot_new_full(
+config,
+None,
+None,
+authority_discovery_enabled,
+6000,
+grandpa_pause,
+).map(|(components, _, _)| components)
+})
}
},
Some(Subcommand::Base(subcommand)) => {
@@ -28,14 +28,14 @@ mod command;
#[cfg(not(feature = "service-rewr"))]
pub use service::{
-AbstractService, ProvideRuntimeApi, CoreApi, ParachainHost, IdentifyVariant,
+ProvideRuntimeApi, CoreApi, ParachainHost, IdentifyVariant,
Block, self, RuntimeApiCollection, TFullClient
};
#[cfg(feature = "service-rewr")]
pub use service_new::{
self as service,
-AbstractService, ProvideRuntimeApi, CoreApi, ParachainHost, IdentifyVariant,
+ProvideRuntimeApi, CoreApi, ParachainHost, IdentifyVariant,
Block, self, RuntimeApiCollection, TFullClient
};
@@ -63,7 +63,7 @@ use polkadot_primitives::{
}
};
use polkadot_cli::{
-ProvideRuntimeApi, AbstractService, ParachainHost, IdentifyVariant,
+ProvideRuntimeApi, ParachainHost, IdentifyVariant,
service::{self, Role}
};
pub use polkadot_cli::service::Configuration;
@@ -386,7 +386,7 @@
}
if config.chain_spec.is_kusama() {
-let (service, client, handlers) = service::kusama_new_full(
+let (task_manager, client, handlers) = service::kusama_new_full(
config,
Some((key.public(), para_id)),
None,
@@ -394,7 +394,7 @@
6000,
None,
)?;
-let spawn_handle = service.spawn_task_handle();
+let spawn_handle = task_manager.spawn_handle();
build_collator_service(
spawn_handle,
handlers,
@@ -404,7 +404,7 @@
build_parachain_context
)?.await;
} else if config.chain_spec.is_westend() {
-let (service, client, handlers) = service::westend_new_full(
+let (task_manager, client, handlers) = service::westend_new_full(
config,
Some((key.public(), para_id)),
None,
@@ -412,7 +412,7 @@
6000,
None,
)?;
-let spawn_handle = service.spawn_task_handle();
+let spawn_handle = task_manager.spawn_handle();
build_collator_service(
spawn_handle,
handlers,
@@ -422,7 +422,7 @@
build_parachain_context
)?.await;
} else {
-let (service, client, handles) = service::polkadot_new_full(
+let (task_manager, client, handles) = service::polkadot_new_full(
config,
Some((key.public(), para_id)),
None,
@@ -430,7 +430,7 @@
6000,
None,
)?;
-let spawn_handle = service.spawn_task_handle();
+let spawn_handle = task_manager.spawn_handle();
build_collator_service(
spawn_handle,
handles,
@@ -35,9 +35,9 @@ use polkadot_overseer::{
CandidateValidationMessage, CandidateBackingMessage,
};
pub use service::{
-AbstractService, Role, PruningMode, TransactionPoolOptions, Error, RuntimeGenesis,
+Role, PruningMode, TransactionPoolOptions, Error, RuntimeGenesis,
TFullClient, TLightClient, TFullBackend, TLightBackend, TFullCallExecutor, TLightCallExecutor,
-Configuration, ChainSpec, ServiceBuilderCommand,
+Configuration, ChainSpec, ServiceBuilderCommand, ServiceComponents, TaskManager,
};
pub use service::config::{DatabaseConfig, PrometheusConfig};
pub use sc_executor::NativeExecutionDispatch;
@@ -321,7 +321,10 @@ macro_rules! new_full {
let (builder, mut import_setup, inherent_data_providers, mut rpc_setup) =
new_full_start!($config, $runtime, $dispatch);
-let service = builder
+let ServiceComponents {
+client, network, select_chain, keystore, transaction_pool, prometheus_registry,
+task_manager, telemetry_on_connect_sinks, ..
+} = builder
.with_finality_proof_provider(|client, backend| {
let provider = client as Arc<dyn grandpa::StorageAndProofProvider<_, _>>;
Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, provider)) as _)
@@ -334,11 +337,9 @@
let shared_voter_state = rpc_setup.take()
.expect("The SharedVoterState is present for Full Services or setup failed before. qed");
-let client = service.client();
-let overseer_client = service.client();
-let spawner = service.spawn_task_handle();
-let leaves: Vec<_> = service.select_chain().ok_or(ServiceError::SelectChainRequired)?
+let overseer_client = client.clone();
+let spawner = task_manager.spawn_handle();
+let leaves: Vec<_> = select_chain.clone().ok_or(ServiceError::SelectChainRequired)?
.leaves()
.unwrap_or_else(|_| vec![])
.into_iter()
@@ -356,7 +357,7 @@
let (overseer, handler) = real_overseer(leaves, spawner)?;
-service.spawn_essential_task_handle().spawn("overseer", Box::pin(async move {
+task_manager.spawn_essential_handle().spawn_blocking("overseer", Box::pin(async move {
use futures::{pin_mut, select, FutureExt};
let forward = overseer::forward_events(overseer_client, handler);
@@ -377,24 +378,24 @@
}));
if role.is_authority() {
-let select_chain = service.select_chain().ok_or(ServiceError::SelectChainRequired)?;
+let select_chain = select_chain.ok_or(ServiceError::SelectChainRequired)?;
let can_author_with =
consensus_common::CanAuthorWithNativeVersion::new(client.executor().clone());
// TODO: custom proposer (https://github.com/paritytech/polkadot/issues/1248)
let proposer = sc_basic_authorship::ProposerFactory::new(
client.clone(),
-service.transaction_pool(),
+transaction_pool,
None,
);
let babe_config = babe::BabeParams {
-keystore: service.keystore(),
+keystore: keystore.clone(),
client: client.clone(),
select_chain,
block_import,
env: proposer,
-sync_oracle: service.network(),
+sync_oracle: network.clone(),
inherent_data_providers: inherent_data_providers.clone(),
force_authoring,
babe_link,
@@ -402,13 +403,13 @@
};
let babe = babe::start_babe(babe_config)?;
-service.spawn_essential_task_handle().spawn_blocking("babe", babe);
+task_manager.spawn_essential_handle().spawn_blocking("babe", babe);
}
// if the node isn't actively participating in consensus then it doesn't
// need a keystore, regardless of which protocol we use below.
let keystore = if is_authority {
-Some(service.keystore() as BareCryptoStorePtr)
+Some(keystore.clone() as BareCryptoStorePtr)
} else {
None
};
@@ -454,15 +455,15 @@
let grandpa_config = grandpa::GrandpaParams {
config,
link: link_half,
-network: service.network(),
+network: network.clone(),
inherent_data_providers: inherent_data_providers.clone(),
-telemetry_on_connect: Some(service.telemetry_on_connect_stream()),
+telemetry_on_connect: Some(telemetry_on_connect_sinks.on_connect_stream()),
voting_rule,
-prometheus_registry: service.prometheus_registry(),
+prometheus_registry: prometheus_registry,
shared_voter_state,
};
-service.spawn_essential_task_handle().spawn_blocking(
+task_manager.spawn_essential_handle().spawn_blocking(
"grandpa-voter",
grandpa::run_grandpa_voter(grandpa_config)?
);
@@ -470,11 +471,11 @@
grandpa::setup_disabled_grandpa(
client.clone(),
&inherent_data_providers,
-service.network(),
+network.clone(),
)?;
}
-(service, client)
+(task_manager, client)
}}
}
@@ -566,6 +567,7 @@ macro_rules! new_light {
Ok(polkadot_rpc::create_light(light_deps))
})?
.build_light()
+.map(|ServiceComponents { task_manager, .. }| task_manager)
}}
}
@@ -595,7 +597,7 @@ pub fn polkadot_new_full(
grandpa_pause: Option<(u32, u32)>,
)
-> Result<(
-impl AbstractService,
+TaskManager,
Arc<impl PolkadotClient<
Block,
TFullBackend<Block>,
@@ -604,7 +606,7 @@
FullNodeHandles,
), ServiceError>
{
-let (service, client) = new_full!(
+let (components, client) = new_full!(
config,
collating_for,
authority_discovery_enabled,
@@ -613,7 +615,7 @@
PolkadotExecutor,
);
-Ok((service, client, FullNodeHandles))
+Ok((components, client, FullNodeHandles))
}
/// Create a new Kusama service for a full node.
@@ -626,7 +628,7 @@ pub fn kusama_new_full(
_slot_duration: u64,
grandpa_pause: Option<(u32, u32)>,
) -> Result<(
-impl AbstractService,
+TaskManager,
Arc<impl PolkadotClient<
Block,
TFullBackend<Block>,
@@ -636,7 +638,7 @@
FullNodeHandles,
), ServiceError>
{
-let (service, client) = new_full!(
+let (components, client) = new_full!(
config,
collating_for,
authority_discovery_enabled,
@@ -645,7 +647,7 @@
KusamaExecutor,
);
-Ok((service, client, FullNodeHandles))
+Ok((components, client, FullNodeHandles))
}
/// Create a new Kusama service for a full node.
@@ -659,7 +661,7 @@ pub fn westend_new_full(
grandpa_pause: Option<(u32, u32)>,
)
-> Result<(
-impl AbstractService,
+TaskManager,
Arc<impl PolkadotClient<
Block,
TFullBackend<Block>,
@@ -668,7 +670,7 @@
FullNodeHandles,
), ServiceError>
{
-let (service, client) = new_full!(
+let (components, client) = new_full!(
config,
collating_for,
authority_discovery_enabled,
@@ -677,45 +679,23 @@
WestendExecutor,
);
-Ok((service, client, FullNodeHandles))
+Ok((components, client, FullNodeHandles))
}
/// Create a new Polkadot service for a light client.
-pub fn polkadot_new_light(mut config: Configuration) -> Result<
-impl AbstractService<
-Block = Block,
-RuntimeApi = polkadot_runtime::RuntimeApi,
-Backend = TLightBackend<Block>,
-SelectChain = LongestChain<TLightBackend<Block>, Block>,
-CallExecutor = TLightCallExecutor<Block, PolkadotExecutor>,
->, ServiceError>
+pub fn polkadot_new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
{
new_light!(config, polkadot_runtime::RuntimeApi, PolkadotExecutor)
}
/// Create a new Kusama service for a light client.
-pub fn kusama_new_light(mut config: Configuration) -> Result<
-impl AbstractService<
-Block = Block,
-RuntimeApi = kusama_runtime::RuntimeApi,
-Backend = TLightBackend<Block>,
-SelectChain = LongestChain<TLightBackend<Block>, Block>,
-CallExecutor = TLightCallExecutor<Block, KusamaExecutor>,
->, ServiceError>
+pub fn kusama_new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
{
new_light!(config, kusama_runtime::RuntimeApi, KusamaExecutor)
}
/// Create a new Westend service for a light client.
-pub fn westend_new_light(mut config: Configuration, ) -> Result<
-impl AbstractService<
-Block = Block,
-RuntimeApi = westend_runtime::RuntimeApi,
-Backend = TLightBackend<Block>,
-SelectChain = LongestChain<TLightBackend<Block>, Block>,
-CallExecutor = TLightCallExecutor<Block, KusamaExecutor>
->,
-ServiceError>
+pub fn westend_new_light(mut config: Configuration, ) -> Result<TaskManager, ServiceError>
{
new_light!(config, westend_runtime::RuntimeApi, KusamaExecutor)
}
@@ -30,9 +30,9 @@ use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider};
use sc_executor::native_executor_instance;
use log::info;
pub use service::{
-AbstractService, Role, PruningMode, TransactionPoolOptions, Error, RuntimeGenesis,
+Role, PruningMode, TransactionPoolOptions, Error, RuntimeGenesis, RpcHandlers,
TFullClient, TLightClient, TFullBackend, TLightBackend, TFullCallExecutor, TLightCallExecutor,
-Configuration, ChainSpec, ServiceBuilderCommand,
+Configuration, ChainSpec, ServiceBuilderCommand, ServiceComponents, TaskManager,
};
pub use service::config::{DatabaseConfig, PrometheusConfig};
pub use sc_executor::NativeExecutionDispatch;
@@ -298,7 +298,10 @@ macro_rules! new_full {
let (builder, mut import_setup, inherent_data_providers, mut rpc_setup) =
new_full_start!($config, $runtime, $dispatch);
-let service = builder
+let ServiceComponents {
+client, network, select_chain, keystore, transaction_pool, prometheus_registry,
+task_manager, telemetry_on_connect_sinks, ..
+} = builder
.with_finality_proof_provider(|client, backend| {
let provider = client as Arc<dyn grandpa::StorageAndProofProvider<_, _>>;
Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, provider)) as _)
@@ -311,16 +314,10 @@
let shared_voter_state = rpc_setup.take()
.expect("The SharedVoterState is present for Full Services or setup failed before. qed");
-let client = service.client();
let known_oracle = client.clone();
let mut handles = FullNodeHandles::default();
-let select_chain = if let Some(select_chain) = service.select_chain() {
-select_chain
-} else {
-info!("The node cannot start as an authority because it can't select chain.");
-return Ok((service, client, handles));
-};
+let select_chain = select_chain.ok_or(ServiceError::SelectChainRequired)?;
let gossip_validator_select_chain = select_chain.clone();
let is_known = move |block_hash: &Hash| {
@@ -343,13 +340,13 @@
};
let polkadot_network_service = network_protocol::start(
-service.network(),
+network.clone(),
network_protocol::Config {
collating_for: $collating_for,
},
(is_known, client.clone()),
client.clone(),
-service.spawn_task_handle(),
+task_manager.spawn_handle(),
).map_err(|e| format!("Could not spawn network worker: {:?}", e))?;
let authority_handles = if is_collator || role.is_authority() {
@@ -380,14 +377,14 @@
client: client.clone(),
network: polkadot_network_service.clone(),
collators: polkadot_network_service.clone(),
-spawner: service.spawn_task_handle(),
+spawner: task_manager.spawn_handle(),
availability_store: availability_store.clone(),
select_chain: select_chain.clone(),
-keystore: service.keystore(),
+keystore: keystore.clone(),
max_block_data_size,
}.build();
-service.spawn_essential_task_handle().spawn("validation-service", Box::pin(validation_service));
+task_manager.spawn_essential_handle().spawn("validation-service", Box::pin(validation_service));
handles.validation_service_handle = Some(validation_service_handle.clone());
@@ -403,30 +400,29 @@
let proposer = consensus::ProposerFactory::new(
client.clone(),
-service.transaction_pool(),
+transaction_pool,
validation_service_handle,
slot_duration,
-service.prometheus_registry().as_ref(),
+prometheus_registry.as_ref(),
);
-let select_chain = service.select_chain().ok_or(ServiceError::SelectChainRequired)?;
let can_author_with =
consensus_common::CanAuthorWithNativeVersion::new(client.executor().clone());
let block_import = availability_store.block_import(
block_import,
client.clone(),
-service.spawn_task_handle(),
-service.keystore(),
+task_manager.spawn_handle(),
+keystore.clone(),
)?;
let babe_config = babe::BabeParams {
-keystore: service.keystore(),
+keystore: keystore.clone(),
client: client.clone(),
select_chain,
block_import,
env: proposer,
-sync_oracle: service.network(),
+sync_oracle: network.clone(),
inherent_data_providers: inherent_data_providers.clone(),
force_authoring,
babe_link,
@@ -434,7 +430,7 @@
};
let babe = babe::start_babe(babe_config)?;
-service.spawn_essential_task_handle().spawn_blocking("babe", babe);
+task_manager.spawn_essential_handle().spawn_blocking("babe", babe);
}
if matches!(role, Role::Authority{..} | Role::Sentry{..}) {
@@ -443,7 +439,7 @@
Role::Authority { ref sentry_nodes } => (
sentry_nodes.clone(),
authority_discovery::Role::Authority (
-service.keystore(),
+keystore.clone(),
),
),
Role::Sentry {..} => (
@@ -453,29 +449,28 @@
_ => unreachable!("Due to outer matches! constraint; qed."),
};
-let network = service.network();
let network_event_stream = network.event_stream("authority-discovery");
let dht_event_stream = network_event_stream.filter_map(|e| async move { match e {
Event::Dht(e) => Some(e),
_ => None,
}}).boxed();
let authority_discovery = authority_discovery::AuthorityDiscovery::new(
-service.client(),
-network,
+client.clone(),
+network.clone(),
sentries,
dht_event_stream,
authority_discovery_role,
-service.prometheus_registry(),
+prometheus_registry.clone(),
);
-service.spawn_task_handle().spawn("authority-discovery", authority_discovery);
+task_manager.spawn_handle().spawn("authority-discovery", authority_discovery);
}
}
// if the node isn't actively participating in consensus then it doesn't
// need a keystore, regardless of which protocol we use below.
let keystore = if is_authority {
-Some(service.keystore() as BareCryptoStorePtr)
+Some(keystore as BareCryptoStorePtr)
} else {
None
};