Unverified Commit b86b5174 authored by Squirrel, committed by Zeke Mostov

Companion PR to the removal of the light client (#4105)

* not needed

* cargo fmt

* remove light-node

* make full-node feature mandatory?

* cargo fmt

* Revert "cargo fmt"

This reverts commit 5a2c7d85.

* Revert "make full-node feature mandatory?"

This reverts commit c35eb4fa.

* bump version numbers

* Update bridges/primitives/chain-westend/src/lib.rs

* fix typo + expand dictionary

* update lockfile for substrate
Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>
Co-authored-by: parity-processbot <>
parent 6e2404d8
@@ -18,6 +18,7 @@ BFT/M
 bitfield/MS
 blake2/MS
 blockchain/MS
+boolean
 borked
 BridgeStorage
 BlockNumber
@@ -30,6 +31,7 @@ ChainTime
 chain_getBlock
 choosen
 config/MS
+cooldown
 crypto/MS
 customizable/B
 Debian/M
......
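(Note: the entries in this wordlist appear to use Hunspell-style affix flags. The letters after the slash reference suffix rules defined in a companion .aff file; in the stock en_US affix set, S permits the plural and M the possessive, so an entry like blockchain/MS would also accept "blockchains" and "blockchain's". The bare entries added here, boolean and cooldown, match only the exact word. This reading of the flags is inferred from the file format, not stated anywhere in the diff.)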
@@ -103,7 +103,7 @@ struct RunningTask {
     /// Index of validator group to fetch the chunk from.
     ///
-    /// Needef for reporting bad validators.
+    /// Needed for reporting bad validators.
     group_index: GroupIndex,
     /// Validators to request the chunk from.
......
@@ -152,8 +152,6 @@ full-node = [
     "kvdb-rocksdb"
 ]
 
-light-node = []
-
 # Configure the native runtimes to use. Polkadot is enabled by default.
 #
 # Validators require the native runtime currently
......
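(Note: with light-node deleted, full-node, which pulls in kvdb-rocksdb per the context lines above, is the only node-mode feature left in this manifest. A minimal sketch of the gating pattern that remains on the Rust side, mirroring the cfg attributes visible in the service hunks below; the function name here is hypothetical:)

    // Compiled only when building with `--features full-node`; without the
    // feature the item simply does not exist, so there is no light fallback.
    #[cfg(feature = "full-node")]
    pub fn spawn_full_node() { /* hypothetical illustration */ }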
@@ -101,8 +101,7 @@ pub use sc_executor::NativeExecutionDispatch;
 pub use service::{
     config::{DatabaseSource, PrometheusConfig},
     ChainSpec, Configuration, Error as SubstrateServiceError, PruningMode, Role, RuntimeGenesis,
-    TFullBackend, TFullCallExecutor, TFullClient, TLightBackend, TLightCallExecutor, TLightClient,
-    TaskManager, TransactionPoolOptions,
+    TFullBackend, TFullCallExecutor, TFullClient, TaskManager, TransactionPoolOptions,
 };
 pub use sp_api::{ApiRef, ConstructRuntimeApi, Core as CoreApi, ProvideRuntimeApi, StateBackend};
 pub use sp_runtime::{
@@ -313,13 +312,6 @@ type FullGrandpaBlockImport<RuntimeApi, ExecutorDispatch, ChainSelection = FullS
     ChainSelection,
 >;
 
-#[cfg(feature = "light-node")]
-type LightBackend = service::TLightBackendWithHash<Block, sp_runtime::traits::BlakeTwo256>;
-
-#[cfg(feature = "light-node")]
-type LightClient<RuntimeApi, ExecutorDispatch> =
-    service::TLightClientWithBackend<Block, RuntimeApi, ExecutorDispatch, LightBackend>;
-
 #[cfg(feature = "full-node")]
 struct Basics<RuntimeApi, ExecutorDispatch>
 where
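(For contrast, a sketch of the full-client aliases this file keeps relying on, assumed from the TFullBackend and TFullClient re-exports in the earlier hunk rather than quoted from this diff:)

    // Full-client counterparts of the deleted light aliases.
    type FullBackend = service::TFullBackend<Block>;
    type FullClient<RuntimeApi, ExecutorDispatch> =
        service::TFullClient<Block, RuntimeApi, ExecutorDispatch>;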
@@ -1127,168 +1119,6 @@ where
     Ok(NewFull { task_manager, client, overseer_handle, network, rpc_handlers, backend })
 }
 
-/// Builds a new service for a light client.
-#[cfg(feature = "light-node")]
-fn new_light<Runtime, Dispatch>(
-    mut config: Configuration,
-) -> Result<(TaskManager, RpcHandlers), Error>
-where
-    Runtime: 'static + Send + Sync + ConstructRuntimeApi<Block, LightClient<Runtime, Dispatch>>,
-    <Runtime as ConstructRuntimeApi<Block, LightClient<Runtime, Dispatch>>>::RuntimeApi:
-        RuntimeApiCollection<StateBackend = sc_client_api::StateBackendFor<LightBackend, Block>>,
-    Dispatch: NativeExecutionDispatch + 'static,
-{
-    set_prometheus_registry(&mut config)?;
-    use sc_client_api::backend::RemoteBackend;
-
-    let telemetry = config
-        .telemetry_endpoints
-        .clone()
-        .filter(|x| !x.is_empty())
-        .map(|endpoints| -> Result<_, telemetry::Error> {
-            let worker = TelemetryWorker::new(16)?;
-            let telemetry = worker.handle().new_telemetry(endpoints);
-            Ok((worker, telemetry))
-        })
-        .transpose()?;
-
-    let (client, backend, keystore_container, mut task_manager, on_demand) =
-        service::new_light_parts::<Block, Runtime, Dispatch>(
-            &config,
-            telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
-        )?;
-
-    let mut telemetry = telemetry.map(|(worker, telemetry)| {
-        task_manager.spawn_handle().spawn("telemetry", worker.run());
-        telemetry
-    });
-
-    config.network.extra_sets.push(grandpa::grandpa_peers_set_config());
-
-    let select_chain = sc_consensus::LongestChain::new(backend.clone());
-
-    let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light(
-        config.transaction_pool.clone(),
-        config.prometheus_registry(),
-        task_manager.spawn_essential_handle(),
-        client.clone(),
-        on_demand.clone(),
-    ));
-
-    let (grandpa_block_import, grandpa_link) = grandpa::block_import(
-        client.clone(),
-        &(client.clone() as Arc<_>),
-        select_chain.clone(),
-        telemetry.as_ref().map(|x| x.handle()),
-    )?;
-    let justification_import = grandpa_block_import.clone();
-
-    let (babe_block_import, babe_link) = babe::block_import(
-        babe::Config::get_or_compute(&*client)?,
-        grandpa_block_import,
-        client.clone(),
-    )?;
-
-    // FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`.
-    let slot_duration = babe_link.config().slot_duration();
-    let import_queue = babe::import_queue(
-        babe_link,
-        babe_block_import,
-        Some(Box::new(justification_import)),
-        client.clone(),
-        select_chain.clone(),
-        move |_, ()| async move {
-            let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
-
-            let slot =
-                sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_duration(
-                    *timestamp,
-                    slot_duration,
-                );
-
-            Ok((timestamp, slot))
-        },
-        &task_manager.spawn_essential_handle(),
-        config.prometheus_registry(),
-        consensus_common::NeverCanAuthor,
-        telemetry.as_ref().map(|x| x.handle()),
-    )?;
-
-    let warp_sync = Arc::new(grandpa::warp_proof::NetworkProvider::new(
-        backend.clone(),
-        grandpa_link.shared_authority_set().clone(),
-    ));
-
-    let (network, system_rpc_tx, network_starter) =
-        service::build_network(service::BuildNetworkParams {
-            config: &config,
-            client: client.clone(),
-            transaction_pool: transaction_pool.clone(),
-            spawn_handle: task_manager.spawn_handle(),
-            import_queue,
-            on_demand: Some(on_demand.clone()),
-            block_announce_validator_builder: None,
-            warp_sync: Some(warp_sync),
-        })?;
-
-    let enable_grandpa = !config.disable_grandpa;
-    if enable_grandpa {
-        let name = config.network.node_name.clone();
-
-        let config = grandpa::Config {
-            gossip_duration: Duration::from_millis(1000),
-            justification_period: 512,
-            name: Some(name),
-            observer_enabled: false,
-            keystore: None,
-            local_role: config.role.clone(),
-            telemetry: telemetry.as_ref().map(|x| x.handle()),
-        };
-
-        task_manager.spawn_handle().spawn_blocking(
-            "grandpa-observer",
-            grandpa::run_grandpa_observer(config, grandpa_link, network.clone())?,
-        );
-    }
-
-    if config.offchain_worker.enabled {
-        let _ = service::build_offchain_workers(
-            &config,
-            task_manager.spawn_handle(),
-            client.clone(),
-            network.clone(),
-        );
-    }
-
-    let light_deps = polkadot_rpc::LightDeps {
-        remote_blockchain: backend.remote_blockchain(),
-        fetcher: on_demand.clone(),
-        client: client.clone(),
-        pool: transaction_pool.clone(),
-    };
-
-    let rpc_extensions = polkadot_rpc::create_light(light_deps);
-
-    let rpc_handlers = service::spawn_tasks(service::SpawnTasksParams {
-        on_demand: Some(on_demand),
-        remote_blockchain: Some(backend.remote_blockchain()),
-        rpc_extensions_builder: Box::new(service::NoopRpcExtensionBuilder(rpc_extensions)),
-        task_manager: &mut task_manager,
-        config,
-        keystore: keystore_container.sync_keystore(),
-        backend,
-        transaction_pool,
-        client,
-        network,
-        system_rpc_tx,
-        telemetry: telemetry.as_mut(),
-    })?;
-
-    network_starter.start_network();
-
-    Ok((task_manager, rpc_handlers))
-}
-
 #[cfg(feature = "full-node")]
 macro_rules! chain_ops {
     ($config:expr, $jaeger_agent:expr, $telemetry_worker_handle:expr; $scope:ident, $executor:ident, $variant:ident) => {{
@@ -1356,33 +1186,6 @@ pub fn new_chain_ops(
     Err(Error::NoRuntime)
 }
 
-/// Build a new light node.
-#[cfg(feature = "light-node")]
-pub fn build_light(config: Configuration) -> Result<(TaskManager, RpcHandlers), Error> {
-    #[cfg(feature = "rococo-native")]
-    if config.chain_spec.is_rococo() || config.chain_spec.is_wococo() {
-        return new_light::<rococo_runtime::RuntimeApi, RococoExecutorDispatch>(config)
-    }
-
-    #[cfg(feature = "kusama-native")]
-    if config.chain_spec.is_kusama() {
-        return new_light::<kusama_runtime::RuntimeApi, KusamaExecutorDispatch>(config)
-    }
-
-    #[cfg(feature = "westend-native")]
-    if config.chain_spec.is_westend() {
-        return new_light::<westend_runtime::RuntimeApi, WestendExecutorDispatch>(config)
-    }
-
-    #[cfg(feature = "polkadot-native")]
-    {
-        return new_light::<polkadot_runtime::RuntimeApi, PolkadotExecutorDispatch>(config)
-    }
-
-    #[cfg(not(feature = "polkadot-native"))]
-    Err(Error::NoRuntime)
-}
-
 #[cfg(feature = "full-node")]
 pub fn build_full(
     config: Configuration,
......
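(The surviving build_full, whose body is collapsed above, dispatches on the chain spec the same way the deleted build_light did. A hedged sketch of that pattern follows; the parameter list, return type, and remaining arms are simplified assumptions, not the actual signature:)

    // Rough sketch only: mirrors the deleted light dispatch, with new_full.
    #[cfg(feature = "full-node")]
    pub fn build_full(config: Configuration /* , ... */) -> Result<TaskManager, Error> {
        #[cfg(feature = "rococo-native")]
        if config.chain_spec.is_rococo() || config.chain_spec.is_wococo() {
            return new_full::<rococo_runtime::RuntimeApi, RococoExecutorDispatch>(config /* , ... */)
        }
        // ... the kusama-native, westend-native and polkadot-native arms
        // mirror the deleted light ones above, calling new_full instead ...
        #[cfg(not(feature = "polkadot-native"))]
        Err(Error::NoRuntime)
    }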