"memory" => memory,
"finalized_height" => finalized_number,
"finalized_hash" => ?info.chain.finalized_hash,
"bandwidth_download" => bandwidth_download,
"bandwidth_upload" => bandwidth_upload,
"used_state_cache_size" => used_state_cache_size,
);
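// Record the same interval metrics with the Grafana data source.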
"peers".to_owned() => num_peers,
"height".to_owned() => best_number,
"txcount".to_owned() => txpool_status.ready,
"cpu".to_owned() => cpu_usage,
"memory".to_owned() => memory,
"finalized_height".to_owned() => finalized_number,
"bandwidth_download".to_owned() => bandwidth_download,
"bandwidth_upload".to_owned() => bandwidth_upload,
"used_state_cache_size".to_owned() => used_state_cache_size
Ok(())
}).select(exit.clone()).then(|_| Ok(()));
let _ = to_spawn_tx.unbounded_send(Box::new(tel_task));
// Periodically send the network state to the telemetry.
let (netstat_tx, netstat_rx) = mpsc::unbounded::<(NetworkStatus<_>, NetworkState)>();
network_status_sinks.lock().push(std::time::Duration::from_secs(30), netstat_tx);
let tel_task_2 = netstat_rx.for_each(move |(_, network_state)| {
telemetry!(
SUBSTRATE_INFO;
"system.network_state";
"state" => network_state,
);
Ok(())
}).select(exit.clone()).then(|_| Ok(()));
let _ = to_spawn_tx.unbounded_send(Box::new(tel_task_2));
// RPC
let (system_rpc_tx, system_rpc_rx) = futures03::channel::mpsc::unbounded();
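// The RPC handler is built through a closure so that both the in-process handlers and the RPC servers below can create their own instances.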
let gen_handler = || {
use rpc::{chain, state, author, system};
let system_info = rpc::system::SystemInfo {
chain_name: config.chain_spec.name().into(),
impl_name: config.impl_name.into(),
impl_version: config.impl_version.into(),
properties: config.chain_spec.properties().clone(),
};
let subscriptions = rpc::Subscriptions::new(Arc::new(SpawnTaskHandle {
sender: to_spawn_tx.clone(),
on_exit: exit.clone()
}));
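// Light clients route chain and state queries through the remote backend and on-demand fetcher; full nodes answer them from the local client.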
let (chain, state) = if let (Some(remote_backend), Some(on_demand)) =
(remote_backend.as_ref(), on_demand.as_ref()) {
// Light clients
let chain = rpc::chain::new_light(
client.clone(),
subscriptions.clone(),
remote_backend.clone(),
on_demand.clone()
);
let state = rpc::state::new_light(
client.clone(),
subscriptions.clone(),
remote_backend.clone(),
on_demand.clone()
);
(chain, state)
} else {
// Full nodes
let chain = rpc::chain::new_full(client.clone(), subscriptions.clone());
let state = rpc::state::new_full(client.clone(), subscriptions.clone());
(chain, state)
};
let author = rpc::author::Author::new(
client.clone(),
transaction_pool.clone(),
subscriptions,
keystore.clone(),
);
let system = system::System::new(system_info, system_rpc_tx.clone());
rpc_servers::rpc_handler((
state::StateApi::to_delegate(state),
chain::ChainApi::to_delegate(chain),
author::AuthorApi::to_delegate(author),
system::SystemApi::to_delegate(system),
rpc_extensions.clone(),
))
};
let rpc_handlers = gen_handler();
let rpc = start_rpc_servers(&config, gen_handler)?;
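// Spawn the main network future; it drives `network_mut`, answers system RPC requests from `system_rpc_rx` and publishes status updates into `network_status_sinks`.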
let _ = to_spawn_tx.unbounded_send(Box::new(build_network_future(
config.roles,
network_mut,
client.clone(),
network_status_sinks.clone(),
system_rpc_rx,
has_bootnodes,
dht_event_tx,
)
.map_err(|_| ())
.select(exit.clone())
.then(|_| Ok(()))));
let telemetry_connection_sinks: Arc<Mutex<Vec<mpsc::UnboundedSender<()>>>> = Default::default();
// Telemetry
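// When telemetry endpoints are configured, connect and re-send the `system.connected` record (and notify the connection sinks) on every reconnection.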
let telemetry = config.telemetry_endpoints.clone().map(|endpoints| {
let is_authority = config.roles.is_authority();
let network_id = network.local_peer_id().to_base58();
let name = config.name.clone();
let impl_name = config.impl_name.to_owned();
let version = version.clone();
let chain_name = config.chain_spec.name().to_owned();
let telemetry_connection_sinks_ = telemetry_connection_sinks.clone();
let telemetry = tel::init_telemetry(tel::TelemetryConfig {
endpoints,
wasm_external_transport: config.telemetry_external_transport.take(),
});
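// Node start time in milliseconds since the Unix epoch, reported with the connection record.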
let startup_time = SystemTime::UNIX_EPOCH.elapsed()
.map(|dur| dur.as_millis())
.unwrap_or(0);
let future = telemetry.clone()
.map(|ev| Ok::<_, ()>(ev))
.compat()
.for_each(move |event| {
// Safe-guard in case we add more events in the future.
let tel::TelemetryEvent::Connected = event;
telemetry!(SUBSTRATE_INFO; "system.connected";
"name" => name.clone(),
"implementation" => impl_name.clone(),
"version" => version.clone(),
"config" => "",
"chain" => chain_name.clone(),
"authority" => is_authority,
"startup_time" => startup_time,
"network_id" => network_id.clone()
);
telemetry_connection_sinks_.lock().retain(|sink| {
sink.unbounded_send(()).is_ok()
});
Ok(())
});
let _ = to_spawn_tx.unbounded_send(Box::new(future
.select(exit.clone())
.then(|_| Ok(()))));
telemetry
});
// Grafana data source
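// Serve the recorded metrics to Grafana on the configured port until `exit` resolves.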
if let Some(port) = config.grafana_port {
let future = select(
grafana_data_source::run_server(port).boxed(),
exit.clone().compat()
).map(|either| match either {
Either::Left((result, _)) => result.map_err(|_| ()),
Either::Right(_) => Ok(())
}).compat();
let _ = to_spawn_tx.unbounded_send(Box::new(future));
}
// Instrumentation
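// Install the profiling subscriber as the global `tracing` subscriber for the configured targets; an installation failure is logged but not fatal.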
if let Some(tracing_targets) = config.tracing_targets.as_ref() {
let subscriber = substrate_tracing::ProfilingSubscriber::new(
config.tracing_receiver, tracing_targets
);
match tracing::subscriber::set_global_default(subscriber) {
Ok(_) => (),
Err(e) => error!(target: "tracing", "Unable to set global default subscriber {}", e),
}
}
Ok(Service {
client,
network,
network_status_sinks,
select_chain,
transaction_pool,
exit,
signal: Some(signal),
essential_failed_tx,
essential_failed_rx,
to_spawn_tx,
to_spawn_rx,
to_poll: Vec::new(),
rpc_handlers,
_rpc: rpc,
_telemetry: telemetry,
_offchain_workers: offchain_workers,
_telemetry_on_connect_sinks: telemetry_connection_sinks.clone(),
keystore,
marker: PhantomData::<TBl>,
})
}
}
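/// Builds a future that re-submits transactions from the `retracted` blocks and, unless the
/// pool is already empty, prunes the transactions included in the block identified by `id`.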
pub(crate) fn maintain_transaction_pool<Api, Backend, Block, Executor, PoolApi>(
id: &BlockId<Block>,
client: &Arc<Client<Backend, Executor, Block, Api>>,
transaction_pool: &TransactionPool<PoolApi>,
retracted: &[Block::Hash],
) -> error::Result<Box<dyn Future<Item = (), Error = ()> + Send>> where
Block: BlockT<Hash = <Blake2Hasher as primitives::Hasher>::Out>,
Backend: 'static + client_api::backend::Backend<Block, Blake2Hasher>,
Client<Backend, Executor, Block, Api>: ProvideRuntimeApi,
<Client<Backend, Executor, Block, Api> as ProvideRuntimeApi>::Api:
tx_pool_api::TaggedTransactionQueue<Block>,
Executor: 'static + client::CallExecutor<Block, Blake2Hasher>,
PoolApi: 'static + txpool::ChainApi<Hash = Block::Hash, Block = Block>,
Api: 'static,
{
// Put transactions from retracted blocks back into the pool.
let client_copy = client.clone();
let retracted_transactions = retracted.to_vec().into_iter()
.filter_map(move |hash| client_copy.block(&BlockId::hash(hash)).ok().unwrap_or(None))
.flat_map(|block| block.block.deconstruct().1.into_iter())
.filter(|tx| tx.is_signed().unwrap_or(false));
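// Re-submit them to the pool; failures are logged and otherwise ignored so maintenance continues.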
let resubmit_future = transaction_pool
.submit_at(id, retracted_transactions, true)
.then(|resubmit_result| ready(match resubmit_result {
Ok(_) => Ok(()),
Err(e) => {
warn!("Error re-submitting transactions: {:?}", e);
Ok(())
}
}))
.compat();
// Avoid calling into runtime if there is nothing to prune from the pool anyway.
if transaction_pool.status().is_empty() {
return Ok(Box::new(resubmit_future))
}
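// Prune the extrinsics of the newly imported block from the pool, after the re-submission above.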
let block = client.block(id)?;
Ok(match block {
Some(block) => {
let parent_id = BlockId::hash(*block.block.header().parent_hash());
let prune_future = transaction_pool
.prune(id, &parent_id, block.block.extrinsics())
.boxed()
.compat()
.map_err(|e| { format!("{:?}", e); });
Box::new(resubmit_future.and_then(|_| prune_future))
},
None => Box::new(resubmit_future),
})
}
#[cfg(test)]
mod tests {
use super::*;
use futures03::executor::block_on;
use consensus_common::{BlockOrigin, SelectChain};
use substrate_test_runtime_client::{prelude::*, runtime::Transfer};
#[test]
fn should_remove_transactions_from_the_pool() {
let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain();
let client = Arc::new(client);
let pool = TransactionPool::new(Default::default(), ::transaction_pool::FullChainApi::new(client.clone()));
let transaction = Transfer {
amount: 5,
nonce: 0,
from: AccountKeyring::Alice.into(),
to: Default::default(),
}.into_signed_tx();
let best = longest_chain.best_chain().unwrap();
// store the transaction in the pool
block_on(pool.submit_one(&BlockId::hash(best.hash()), transaction.clone())).unwrap();
// import the block
let mut builder = client.new_block(Default::default()).unwrap();
builder.push(transaction.clone()).unwrap();
let block = builder.bake().unwrap();
let id = BlockId::hash(block.header().hash());
client.import(BlockOrigin::Own, block).unwrap();
// fire notification - this should clean up the queue
assert_eq!(pool.status().ready, 1);
maintain_transaction_pool(
&id,
&client,
&pool,
&[]
).unwrap().wait().unwrap();
// then
assert_eq!(pool.status().ready, 0);
assert_eq!(pool.status().future, 0);
}
#[test]
fn should_add_reverted_transactions_to_the_pool() {
let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain();
let client = Arc::new(client);
let pool = TransactionPool::new(Default::default(), ::transaction_pool::FullChainApi::new(client.clone()));
let transaction = Transfer {
amount: 5,
nonce: 0,
from: AccountKeyring::Alice.into(),
to: Default::default(),
}.into_signed_tx();
let best = longest_chain.best_chain().unwrap();
// store the transaction in the pool
block_on(pool.submit_one(&BlockId::hash(best.hash()), transaction.clone())).unwrap();
// import the block
let mut builder = client.new_block(Default::default()).unwrap();
builder.push(transaction.clone()).unwrap();
let block = builder.bake().unwrap();
let block1_hash = block.header().hash();
let id = BlockId::hash(block1_hash.clone());
client.import(BlockOrigin::Own, block).unwrap();
// fire notification - this should clean up the queue
assert_eq!(pool.status().ready, 1);
maintain_transaction_pool(
&id,
&client,
&pool,
&[]
).unwrap().wait().unwrap();
// then
assert_eq!(pool.status().ready, 0);
assert_eq!(pool.status().future, 0);
// import second block
let builder = client.new_block_at(&BlockId::hash(best.hash()), Default::default()).unwrap();
let block = builder.bake().unwrap();
let id = BlockId::hash(block.header().hash());
client.import(BlockOrigin::Own, block).unwrap();
// fire notification - this should add the transaction back to the pool.
maintain_transaction_pool(
&id,
&client,
&pool,
&[block1_hash]
).unwrap().wait().unwrap();
// then
assert_eq!(pool.status().ready, 1);
assert_eq!(pool.status().future, 0);
}