Unverified Commit dca93d01 authored by Bernhard Schuster's avatar Bernhard Schuster Committed by GitHub
Browse files

additional error definitions (#2107)

* remove low information density error doc comments

* another round of error dancing

* fix compilation

* remove stale `None` argument

* adjust test, minor slip in command

* only add AvailabilityError for full node features

* another `None` where none should be
parent 381cae23
Pipeline #116769 failed with stages
in 31 minutes and 38 seconds
......@@ -5564,6 +5564,7 @@ dependencies = [
"sp-transaction-pool",
"sp-trie",
"substrate-prometheus-endpoint",
"thiserror",
"tracing",
"tracing-futures",
"westend-runtime",
......
......@@ -16,9 +16,32 @@
use log::info;
use service::{IdentifyVariant, self};
use sc_cli::{SubstrateCli, Result, RuntimeVersion, Role};
use sc_cli::{SubstrateCli, RuntimeVersion, Role};
use crate::cli::{Cli, Subcommand};
/// Top-level error type for the Polkadot CLI.
///
/// Wraps the errors of the layers the CLI drives — the Polkadot service,
/// the Substrate CLI and the Substrate service — so `run()` can propagate
/// all of them with `?`, plus a free-form `Other` variant for plain
/// string messages.
#[derive(thiserror::Error, Debug)]
pub enum Error {
    /// Error originating in the Polkadot service layer.
    #[error(transparent)]
    PolkadotService(#[from] service::Error),

    /// Error originating in the Substrate CLI layer.
    #[error(transparent)]
    SubstrateCli(#[from] sc_cli::Error),

    /// Error originating in the Substrate service layer.
    #[error(transparent)]
    SubstrateService(#[from] sc_service::Error),

    /// Any other error, carried as a plain string.
    #[error("Other: {0}")]
    Other(String),
}
impl std::convert::From<String> for Error {
fn from(s: String) -> Self {
Self::Other(s)
}
}
type Result<T> = std::result::Result<T, Error>;
fn get_exec_name() -> Option<String> {
std::env::current_exe()
.ok()
......@@ -139,22 +162,27 @@ pub fn run() -> Result<()> {
info!("----------------------------");
}
runner.run_node_until_exit(|config| async move {
Ok(runner.run_node_until_exit(move |config| async move {
let role = config.role.clone();
match role {
Role::Light => service::build_light(config).map(|(task_manager, _)| task_manager),
let task_manager = match role {
Role::Light => service::build_light(config).map(|(task_manager, _)| task_manager)
.map_err(|e| sc_service::Error::Other(e.to_string()) ),
_ => service::build_full(
config,
service::IsCollator::No,
grandpa_pause,
).map(|full| full.task_manager),
}
})
).map(|full| full.task_manager)
.map_err(|e| sc_service::Error::Other(e.to_string()) )
};
task_manager
}).map_err(|e| -> sc_cli::Error { e.into() })?)
},
Some(Subcommand::BuildSpec(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.sync_run(|config| cmd.run(config.chain_spec, config.network))
Ok(runner.sync_run(|config| cmd.run(config.chain_spec, config.network))?)
},
Some(Subcommand::CheckBlock(cmd)) => {
let runner = cli.create_runner(cmd)?;
......@@ -163,7 +191,8 @@ pub fn run() -> Result<()> {
set_default_ss58_version(chain_spec);
runner.async_run(|mut config| {
let (client, _, import_queue, task_manager) = service::new_chain_ops(&mut config)?;
let (client, _, import_queue, task_manager) = service::new_chain_ops(&mut config)
.map_err(|e| sc_service::Error::Other(e.to_string()))?;
Ok((cmd.run(client, import_queue), task_manager))
})
},
......@@ -174,7 +203,8 @@ pub fn run() -> Result<()> {
set_default_ss58_version(chain_spec);
runner.async_run(|mut config| {
let (client, _, _, task_manager) = service::new_chain_ops(&mut config)?;
let (client, _, _, task_manager) = service::new_chain_ops(&mut config)
.map_err(|e| sc_service::Error::Other(e.to_string()))?;
Ok((cmd.run(client, config.database), task_manager))
})
},
......@@ -185,7 +215,8 @@ pub fn run() -> Result<()> {
set_default_ss58_version(chain_spec);
runner.async_run(|mut config| {
let (client, _, _, task_manager) = service::new_chain_ops(&mut config)?;
let (client, _, _, task_manager) = service::new_chain_ops(&mut config)
.map_err(|e| sc_service::Error::Other(e.to_string()))?;
Ok((cmd.run(client, config.chain_spec), task_manager))
})
},
......@@ -196,13 +227,15 @@ pub fn run() -> Result<()> {
set_default_ss58_version(chain_spec);
runner.async_run(|mut config| {
let (client, _, import_queue, task_manager) = service::new_chain_ops(&mut config)?;
let (client, _, import_queue, task_manager) = service::new_chain_ops(&mut config)
.map_err(|e| sc_service::Error::Other(e.to_string()))?;
Ok((cmd.run(client, import_queue), task_manager))
})
},
Some(Subcommand::PurgeChain(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.sync_run(|config| cmd.run(config.database))
Ok(runner.sync_run(|config| cmd.run(config.database))
.map_err(|e| sc_service::Error::Other(e.to_string()))?)
},
Some(Subcommand::Revert(cmd)) => {
let runner = cli.create_runner(cmd)?;
......@@ -211,7 +244,8 @@ pub fn run() -> Result<()> {
set_default_ss58_version(chain_spec);
runner.async_run(|mut config| {
let (client, backend, _, task_manager) = service::new_chain_ops(&mut config)?;
let (client, backend, _, task_manager) = service::new_chain_ops(&mut config)
.map_err(|e| sc_service::Error::Other(e.to_string()))?;
Ok((cmd.run(client, backend), task_manager))
})
},
......@@ -237,5 +271,6 @@ pub fn run() -> Result<()> {
})
},
Some(Subcommand::Key(cmd)) => cmd.run(),
}
}?;
Ok(())
}
......@@ -44,7 +44,6 @@ use polkadot_node_subsystem_util::metrics::{self, prometheus};
use polkadot_subsystem::messages::{
AllMessages, AvailabilityStoreMessage, ChainApiMessage, RuntimeApiMessage, RuntimeApiRequest,
};
use thiserror::Error;
const LOG_TARGET: &str = "availability";
......@@ -54,22 +53,32 @@ mod columns {
pub const NUM_COLUMNS: u32 = 2;
}
#[derive(Debug, Error)]
enum Error {
/// Errors raised by the availability-store subsystem.
#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
pub enum Error {
    /// A runtime API call failed.
    #[error(transparent)]
    RuntimeApi(#[from] RuntimeApiError),

    /// A chain API call failed.
    #[error(transparent)]
    ChainApi(#[from] ChainApiError),

    /// Erasure coding/decoding failed.
    #[error(transparent)]
    Erasure(#[from] erasure::Error),

    /// Underlying I/O failed.
    #[error(transparent)]
    Io(#[from] io::Error),

    /// A oneshot channel was canceled before a reply arrived.
    #[error(transparent)]
    Oneshot(#[from] oneshot::Canceled),

    /// Error bubbled up from the subsystem layer.
    #[error(transparent)]
    Subsystem(#[from] SubsystemError),

    /// Reading the system clock failed.
    #[error(transparent)]
    Time(#[from] SystemTimeError),

    /// Returned when building a `Config` from a database configuration
    /// that has no filesystem path — only path-backed databases are supported.
    #[error("Custom databases are not supported")]
    CustomDatabase,
}
impl Error {
......@@ -418,10 +427,10 @@ pub struct Config {
}
impl std::convert::TryFrom<sc_service::config::DatabaseConfig> for Config {
type Error = &'static str;
type Error = Error;
fn try_from(config: sc_service::config::DatabaseConfig) -> Result<Self, Self::Error> {
let path = config.path().ok_or("custom databases are not supported")?;
let path = config.path().ok_or(Error::CustomDatabase)?;
Ok(Self {
// substrate cache size is improper here; just use the default
......
......@@ -35,7 +35,6 @@ use polkadot_node_subsystem_util::{
use polkadot_primitives::v1::{AvailabilityBitfield, CoreState, Hash, ValidatorIndex};
use std::{pin::Pin, time::Duration, iter::FromIterator};
use wasm_timer::{Delay, Instant};
use thiserror::Error;
/// Delay between starting a bitfield signing job and its attempting to create a bitfield.
const JOB_DELAY: Duration = Duration::from_millis(1500);
......@@ -45,24 +44,24 @@ const LOG_TARGET: &str = "bitfield_signing";
pub struct BitfieldSigningJob;
/// Errors we may encounter in the course of executing the `BitfieldSigningSubsystem`.
#[derive(Debug, Error)]
#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
pub enum Error {
    /// Error propagated from the utility subsystem.
    #[error(transparent)]
    Util(#[from] util::Error),

    /// I/O error.
    #[error(transparent)]
    Io(#[from] std::io::Error),

    /// A oneshot channel was canceled.
    #[error(transparent)]
    Oneshot(#[from] oneshot::Canceled),

    /// An mpsc channel failed to send.
    #[error(transparent)]
    MpscSend(#[from] mpsc::SendError),

    /// The runtime API failed to return what we wanted.
    #[error(transparent)]
    Runtime(#[from] RuntimeApiError),

    /// The keystore failed to process a signing request.
    #[error("Keystore failed: {0:?}")]
    Keystore(KeystoreError),
}
......
......@@ -57,6 +57,7 @@ hex-literal = "0.3.1"
tracing = "0.1.22"
tracing-futures = "0.2.4"
serde = { version = "1.0.118", features = ["derive"] }
thiserror = "1.0.21"
# Polkadot
polkadot-node-core-proposer = { path = "../core/proposer" }
......
......@@ -27,9 +27,9 @@ use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider};
use {
std::convert::TryInto,
std::time::Duration,
tracing::info,
polkadot_node_core_av_store::Config as AvailabilityConfig,
polkadot_node_core_av_store::Error as AvailabilityError,
polkadot_node_core_proposer::ProposerFactory,
polkadot_overseer::{AllSubsystems, BlockInfo, Overseer, OverseerHandler},
polkadot_primitives::v1::ParachainHost,
......@@ -56,7 +56,7 @@ pub use sc_client_api::{Backend, ExecutionStrategy, CallExecutor};
pub use sc_consensus::LongestChain;
pub use sc_executor::NativeExecutionDispatch;
pub use service::{
Role, PruningMode, TransactionPoolOptions, Error, RuntimeGenesis,
Role, PruningMode, TransactionPoolOptions, Error as SubstrateServiceError, RuntimeGenesis,
TFullClient, TLightClient, TFullBackend, TLightBackend, TFullCallExecutor, TLightCallExecutor,
Configuration, ChainSpec, TaskManager,
};
......@@ -97,6 +97,38 @@ native_executor_instance!(
frame_benchmarking::benchmarking::HostFunctions,
);
/// Errors that can occur while assembling and running the Polkadot service.
#[derive(thiserror::Error, Debug)]
pub enum Error {
    /// I/O failed.
    #[error(transparent)]
    Io(#[from] std::io::Error),

    /// A network address string could not be parsed.
    #[error(transparent)]
    AddrFormatInvalid(#[from] std::net::AddrParseError),

    /// Error bubbled up from the underlying Substrate service.
    #[error(transparent)]
    Sub(#[from] SubstrateServiceError),

    /// Blockchain backend error.
    #[error(transparent)]
    Blockchain(#[from] sp_blockchain::Error),

    /// Consensus error.
    #[error(transparent)]
    Consensus(#[from] consensus_common::Error),

    /// The overseer could not be created; the subsystem error is kept as
    /// the error source for the chain.
    #[error("Failed to create an overseer")]
    Overseer(#[from] polkadot_overseer::SubsystemError),

    /// Prometheus metrics endpoint error.
    #[error(transparent)]
    Prometheus(#[from] prometheus_endpoint::PrometheusError),

    /// Availability store error — only present for full-node builds.
    #[cfg(feature = "full-node")]
    #[error(transparent)]
    Availability(#[from] AvailabilityError),

    /// Block authoring was requested without the real overseer implementation.
    #[error("Authorities require the real overseer implementation")]
    AuthoritiesRequireRealOverseer,
}
/// Can be called for a `Configuration` to check if it is a configuration for the `Kusama` network.
pub trait IdentifyVariant {
/// Returns if this is a configuration for the `Kusama` network.
......@@ -304,7 +336,7 @@ where
AllSubsystems::<()>::dummy(),
registry,
spawner,
).map_err(|e| Error::Other(format!("Failed to create an Overseer: {:?}", e)))
).map_err(|e| e.into())
}
#[cfg(all(feature = "full-node", feature = "real-overseer"))]
......@@ -418,7 +450,7 @@ where
all_subsystems,
registry,
spawner,
).map_err(|e| Error::Other(format!("Failed to create an Overseer: {:?}", e)))
).map_err(|e| e.into())
}
#[cfg(feature = "full-node")]
......@@ -529,7 +561,7 @@ pub fn new_full<RuntimeApi, Executor>(
let telemetry_connection_sinks = service::TelemetryConnectionSinks::default();
let availability_config = config.database.clone().try_into();
let availability_config = config.database.clone().try_into().map_err(Error::Availability)?;
let rpc_handlers = service::spawn_tasks(service::SpawnTasksParams {
config,
......@@ -605,7 +637,7 @@ pub fn new_full<RuntimeApi, Executor>(
leaves,
keystore_container.sync_keystore(),
overseer_client.clone(),
availability_config?,
availability_config,
network.clone(),
authority_discovery_service,
prometheus_registry.as_ref(),
......@@ -644,7 +676,7 @@ pub fn new_full<RuntimeApi, Executor>(
task_manager.spawn_handle(),
client.clone(),
transaction_pool,
overseer_handler.as_ref().ok_or("authorities require real overseer handlers")?.clone(),
overseer_handler.as_ref().ok_or_else(|| Error::AuthoritiesRequireRealOverseer)?.clone(),
prometheus_registry.as_ref(),
);
......
......@@ -77,7 +77,7 @@ impl PartialEq for ActiveLeavesUpdate {
///
/// Instead, it means equality when `activated` and `deactivated` are considered as sets.
fn eq(&self, other: &Self) -> bool {
self.activated.len() == other.activated.len() && self.deactivated.len() == other.deactivated.len()
self.activated.len() == other.activated.len() && self.deactivated.len() == other.deactivated.len()
&& self.activated.iter().all(|a| other.activated.contains(a))
&& self.deactivated.iter().all(|a| other.deactivated.contains(a))
}
......@@ -151,13 +151,13 @@ pub enum SubsystemError {
/// An additional annotation tag for the origin of `source`.
origin: &'static str,
/// The wrapped error. Marked as source for tracking the error chain.
#[source] source: Box<dyn std::error::Error + Send>
#[source] source: Box<dyn 'static + std::error::Error + Send + Sync>
},
}
impl SubsystemError {
/// Adds a `str` as `origin` to the given error `err`.
pub fn with_origin<E: 'static + Send + std::error::Error>(origin: &'static str, err: E) -> Self {
pub fn with_origin<E: 'static + Send + Sync + std::error::Error>(origin: &'static str, err: E) -> Self {
Self::FromOrigin { origin, source: Box::new(err) }
}
}
......
......@@ -28,7 +28,7 @@ use polkadot_primitives::v1::{
};
use polkadot_runtime_common::BlockHashCount;
use polkadot_service::{
NewFull, FullClient, ClientHandle, ExecuteWithClient, IsCollator,
Error, NewFull, FullClient, ClientHandle, ExecuteWithClient, IsCollator,
};
use polkadot_node_subsystem::messages::{CollatorProtocolMessage, CollationGenerationMessage};
use polkadot_test_runtime::{
......@@ -45,7 +45,6 @@ use sc_network::{
};
use service::{
config::{DatabaseConfig, KeystoreConfig, MultiaddrWithPeerId, WasmExecutionMethod},
error::Error as ServiceError,
RpcHandlers, TaskExecutor, TaskManager,
};
use service::{BasePath, Configuration, Role};
......@@ -76,14 +75,14 @@ pub fn new_full(
is_collator: IsCollator,
) -> Result<
NewFull<Arc<Client>>,
ServiceError,
Error,
> {
polkadot_service::new_full::<polkadot_test_runtime::RuntimeApi, PolkadotTestExecutor>(
config,
is_collator,
None,
polkadot_parachain::wasm_executor::IsolationStrategy::InProcess,
).map_err(Into::into)
)
}
/// A wrapper for the test client that implements `ClientHandle`.
......
......@@ -19,7 +19,7 @@
use polkadot_node_primitives::CollationGenerationConfig;
use polkadot_node_subsystem::messages::{CollationGenerationMessage, CollatorProtocolMessage};
use polkadot_primitives::v1::Id as ParaId;
use sc_cli::{Result, Role, SubstrateCli};
use sc_cli::{Result, Error as SubstrateCliError, Role, SubstrateCli};
use sp_core::hexdisplay::HexDisplay;
use test_parachain_adder_collator::Collator;
......@@ -46,7 +46,8 @@ fn main() -> Result<()> {
Ok(())
}
None => {
let runner = cli.create_runner(&cli.run.base)?;
let runner = cli.create_runner(&cli.run.base)
.map_err(|e| SubstrateCliError::Application(Box::new(e) as Box::<(dyn 'static + Send + Sync + std::error::Error)>))?;
runner.run_node_until_exit(|config| async move {
let role = config.role.clone();
......@@ -60,7 +61,7 @@ fn main() -> Result<()> {
config,
polkadot_service::IsCollator::Yes(collator.collator_id()),
None,
)?;
).map_err(|e| e.to_string())?;
let mut overseer_handler = full_node
.overseer_handler
.expect("Overseer handler should be initialized for collators");
......@@ -94,5 +95,6 @@ fn main() -> Result<()> {
}
})
}
}
}?;
Ok(())
}
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment