Commit 686cbb78 authored by Cecile Tonglet

Merge commit dddf83f7 (conflicts)

Parent branch: origin/master
Forked at: 77de8b91
parents 2ddf7f5c dddf83f7
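
Most of the conflicts resolved here stem from the upstream Substrate refactor that replaces the `Roles` bitflags with two enums: `Role` (the node's own role, held in the service `Configuration` as `config.role`) and `ObservedRole` (the role a remote peer advertises on the network). A rough sketch of the shapes this merge adapts to, using simplified, assumed definitions rather than the exact upstream types:

```rust
// Illustrative, simplified stand-ins -- not the exact upstream definitions.
// Only the variants exercised in this diff are shown.

/// The node's own role, carried in the service configuration (`config.role`).
#[allow(dead_code)]
#[derive(Clone)]
enum Role {
    Full,
    Light,
    // The sentry address type is assumed here; upstream uses its own multiaddr type.
    Authority { sentry_nodes: Vec<String> },
}

/// The role a remote peer claims over the network.
#[allow(dead_code)]
enum ObservedRole {
    Full,
    Light,
    Authority,
    OurSentry,
    OurGuardedAuthority,
}

fn main() {
    let role = Role::Authority { sentry_nodes: Vec::new() };

    // Old style (bitflags): `config.roles.is_authority()` or
    // `roles.contains(Roles::AUTHORITY)`.
    // New style (enum): match on the variant instead.
    let is_authority = matches!(role, Role::Authority { .. });
    println!("authority: {}", is_authority);
}
```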
......@@ -170,7 +170,6 @@ where
grandpa_pause,
).map(|(s, _)| s),
)
-//
}
// We can't simply use `service::TLightClient` due to a
......
......@@ -64,7 +64,7 @@ use polkadot_primitives::{
};
use polkadot_cli::{
ProvideRuntimeApi, AbstractService, ParachainHost, IsKusama,
-service::{self, Roles}
+service::{self, Role}
};
pub use polkadot_cli::service::Configuration;
pub use polkadot_cli::Cli;
......@@ -346,8 +346,8 @@ where
<P::ParachainContext as ParachainContext>::ProduceCandidate: Send,
{
let is_kusama = config.chain_spec.is_kusama();
-match (is_kusama, config.roles) {
-(_, Roles::LIGHT) => return Err(
+match (is_kusama, &config.role) {
+(_, Role::Light) => return Err(
polkadot_service::Error::Other("light nodes are unsupported as collator".into())
).into(),
(true, _) =>
......
......@@ -51,7 +51,7 @@
use sp_runtime::traits::{BlakeTwo256, Hash as HashT};
use sp_blockchain::Error as ClientError;
-use sc_network::{config::Roles, PeerId, ReputationChange};
+use sc_network::{ObservedRole, PeerId, ReputationChange};
use sc_network::NetworkService;
use sc_network_gossip::{
ValidationResult as GossipValidationResult,
......@@ -635,7 +635,7 @@ impl<C: ChainContext + ?Sized> MessageValidator<C> {
}
impl<C: ChainContext + ?Sized> sc_network_gossip::Validator<Block> for MessageValidator<C> {
-fn new_peer(&self, _context: &mut dyn ValidatorContext<Block>, who: &PeerId, _roles: Roles) {
+fn new_peer(&self, _context: &mut dyn ValidatorContext<Block>, who: &PeerId, _roles: ObservedRole) {
let mut inner = self.inner.write();
inner.peers.insert(who.clone(), PeerData::default());
}
......@@ -833,7 +833,7 @@ mod tests {
let peer_a = PeerId::random();
let mut validator_context = MockValidatorContext::default();
-validator.new_peer(&mut validator_context, &peer_a, Roles::FULL);
+validator.new_peer(&mut validator_context, &peer_a, ObservedRole::Full);
assert!(validator_context.events.is_empty());
validator_context.clear();
......@@ -911,7 +911,7 @@ mod tests {
let peer_a = PeerId::random();
let mut validator_context = MockValidatorContext::default();
-validator.new_peer(&mut validator_context, &peer_a, Roles::FULL);
+validator.new_peer(&mut validator_context, &peer_a, ObservedRole::Full);
assert!(validator_context.events.is_empty());
validator_context.clear();
......@@ -953,7 +953,7 @@ mod tests {
let peer_a = PeerId::random();
let mut validator_context = MockValidatorContext::default();
-validator.new_peer(&mut validator_context, &peer_a, Roles::FULL);
+validator.new_peer(&mut validator_context, &peer_a, ObservedRole::Full);
assert!(validator_context.events.is_empty());
validator_context.clear();
......@@ -1007,7 +1007,7 @@ mod tests {
let peer_a = PeerId::random();
let mut validator_context = MockValidatorContext::default();
-validator.new_peer(&mut validator_context, &peer_a, Roles::FULL);
+validator.new_peer(&mut validator_context, &peer_a, ObservedRole::Full);
assert!(validator_context.events.is_empty());
validator_context.clear();
......@@ -1099,7 +1099,7 @@ mod tests {
let peer_a = PeerId::random();
let mut validator_context = MockValidatorContext::default();
-validator.new_peer(&mut validator_context, &peer_a, Roles::FULL);
+validator.new_peer(&mut validator_context, &peer_a, ObservedRole::Full);
assert!(validator_context.events.is_empty());
validator_context.clear();
......
......@@ -41,7 +41,7 @@ use polkadot_validation::{
SharedTable, TableRouter, Network as ParachainNetwork, Validated, GenericStatement, Collators,
SignedStatement,
};
-use sc_network::{config::Roles, Event, PeerId};
+use sc_network::{ObservedRole, Event, PeerId};
use sp_api::ProvideRuntimeApi;
use sp_runtime::ConsensusEngineId;
......@@ -72,7 +72,7 @@ mod tests;
// Messages from the service API or network adapter.
enum ServiceToWorkerMsg {
// basic peer messages.
-PeerConnected(PeerId, Roles),
+PeerConnected(PeerId, ObservedRole),
PeerMessage(PeerId, Vec<bytes::Bytes>),
PeerDisconnected(PeerId),
......@@ -255,11 +255,11 @@ pub fn start<C, Api, SP>(
Event::NotificationStreamOpened {
remote,
engine_id,
-roles,
+role,
} => {
if engine_id != POLKADOT_ENGINE_ID { continue }
-worker_sender.send(ServiceToWorkerMsg::PeerConnected(remote, roles)).await
+worker_sender.send(ServiceToWorkerMsg::PeerConnected(remote, role)).await
},
Event::NotificationStreamClosed {
remote,
......@@ -496,8 +496,8 @@ impl ProtocolHandler {
}
}
-fn on_connect(&mut self, peer: PeerId, roles: Roles) {
-let claimed_validator = roles.contains(Roles::AUTHORITY);
+fn on_connect(&mut self, peer: PeerId, role: ObservedRole) {
+let claimed_validator = matches!(role, ObservedRole::OurSentry | ObservedRole::OurGuardedAuthority | ObservedRole::Authority);
self.peers.insert(peer.clone(), PeerData {
claimed_validator,
......
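
The gossip validator and the protocol handler above now receive the peer's advertised role as an `ObservedRole` value, so the old bitflag test `roles.contains(Roles::AUTHORITY)` becomes a structural match. A minimal, self-contained sketch of that check, using a local enum that mirrors only the variants appearing in this diff:

```rust
// Local stand-in for `sc_network::ObservedRole`, limited to the variants used
// in this diff (illustrative only, not the full upstream enum).
#[allow(dead_code)]
enum ObservedRole {
    Full,
    Light,
    Authority,
    OurSentry,
    OurGuardedAuthority,
}

// Our own sentries and guarded authorities count as validators alongside
// plain authorities; everything else does not.
fn claims_validator(role: &ObservedRole) -> bool {
    matches!(
        role,
        ObservedRole::OurSentry | ObservedRole::OurGuardedAuthority | ObservedRole::Authority
    )
}

fn main() {
    assert!(claims_validator(&ObservedRole::Authority));
    assert!(claims_validator(&ObservedRole::OurSentry));
    assert!(!claims_validator(&ObservedRole::Full));
}
```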
......@@ -189,7 +189,7 @@ sp_api::mock_impl_runtime_apis! {
}
impl super::Service<MockNetworkOps> {
-async fn connect_peer(&mut self, peer: PeerId, roles: Roles) {
+async fn connect_peer(&mut self, peer: PeerId, roles: ObservedRole) {
self.sender.send(ServiceToWorkerMsg::PeerConnected(peer, roles)).await.unwrap();
}
......@@ -373,7 +373,7 @@ fn validator_peer_cleaned_up() {
pool.spawner().spawn_local(worker_task).unwrap();
pool.run_until(async move {
-service.connect_peer(peer.clone(), Roles::AUTHORITY).await;
+service.connect_peer(peer.clone(), ObservedRole::Authority).await;
service.peer_message(peer.clone(), Message::Status(Status {
version: VERSION,
collating_for: None,
......@@ -433,7 +433,7 @@ fn validator_key_spillover_cleaned() {
pool.spawner().spawn_local(worker_task).unwrap();
pool.run_until(async move {
-service.connect_peer(peer.clone(), Roles::AUTHORITY).await;
+service.connect_peer(peer.clone(), ObservedRole::Authority).await;
service.peer_message(peer.clone(), Message::Status(Status {
version: VERSION,
collating_for: None,
......
......@@ -22,7 +22,7 @@ mod block_import;
use std::{collections::HashMap, pin::Pin, sync::Arc, marker::PhantomData, task::{Poll, Context as FutureContext}};
use log::trace;
-use sc_network::config::{build_multiaddr, FinalityProofProvider};
+use sc_network::config::{build_multiaddr, FinalityProofProvider, Role};
use sp_blockchain::{
Result as ClientResult, well_known_cache_keys::{self, Id as CacheKeyId}, Info as BlockchainInfo,
};
......@@ -35,7 +35,6 @@ use sc_client_api::{
};
use sc_block_builder::{BlockBuilder, BlockBuilderProvider};
use sc_client::LongestChain;
-use sc_network::config::Roles;
use sp_consensus::block_validation::DefaultBlockAnnounceValidator;
use sp_consensus::import_queue::{
BasicQueue, BoxJustificationImport, Verifier, BoxFinalityProofImport,
......@@ -524,22 +523,21 @@ pub trait TestNetFactory: Sized {
/// Create new test network with this many peers.
fn new(n: usize) -> Self {
trace!(target: "test_network", "Creating test network");
-let config = Self::default_config();
-let mut net = Self::from_config(&config);
+let mut net = Self::from_config(&Default::default());
for i in 0..n {
trace!(target: "test_network", "Adding peer {}", i);
-net.add_full_peer(&config);
+net.add_full_peer();
}
net
}
-fn add_full_peer(&mut self, config: &ProtocolConfig) {
-self.add_full_peer_with_states(config, None)
+fn add_full_peer(&mut self,) {
+self.add_full_peer_with_states(None)
}
/// Add a full peer.
-fn add_full_peer_with_states(&mut self, config: &ProtocolConfig, keep_blocks: Option<u32>) {
+fn add_full_peer_with_states(&mut self, keep_blocks: Option<u32>) {
let test_client_builder = match keep_blocks {
Some(keep_blocks) => TestClientBuilder::with_pruning_window(keep_blocks),
None => TestClientBuilder::with_default_backend(),
......@@ -558,7 +556,7 @@ pub trait TestNetFactory: Sized {
let verifier = self.make_verifier(
PeersClient::Full(client.clone(), backend.clone()),
-config,
+&Default::default(),
&data,
);
let verifier = VerifierAdapter::new(Arc::new(Mutex::new(Box::new(verifier) as Box<_>)));
......@@ -581,7 +579,7 @@ pub trait TestNetFactory: Sized {
network_config.listen_addresses = vec![listen_addr.clone()];
network_config.transport = TransportConfig::MemoryOnly;
let network = NetworkWorker::new(sc_network::config::Params {
-roles: config.roles,
+role: Role::Full,
executor: None,
network_config,
chain: client.clone(),
......@@ -620,10 +618,7 @@ pub trait TestNetFactory: Sized {
}
/// Add a light peer.
-fn add_light_peer(&mut self, config: &ProtocolConfig) {
-let mut config = config.clone();
-config.roles = Roles::LIGHT;
+fn add_light_peer(&mut self) {
let (c, backend) = polkadot_test_runtime_client::new_light();
let client = Arc::new(c);
let (
......@@ -636,7 +631,7 @@ pub trait TestNetFactory: Sized {
let verifier = self.make_verifier(
PeersClient::Light(client.clone(), backend.clone()),
-&config,
+&Default::default(),
&data,
);
let verifier = VerifierAdapter::new(Arc::new(Mutex::new(Box::new(verifier) as Box<_>)));
......@@ -659,7 +654,7 @@ pub trait TestNetFactory: Sized {
network_config.listen_addresses = vec![listen_addr.clone()];
network_config.transport = TransportConfig::MemoryOnly;
let network = NetworkWorker::new(sc_network::config::Params {
-roles: config.roles,
+role: Role::Full,
executor: None,
network_config,
chain: client.clone(),
......
......@@ -31,7 +31,7 @@ use inherents::InherentDataProviders;
use sc_executor::native_executor_instance;
use log::info;
pub use service::{
-AbstractService, Roles, PruningMode, TransactionPoolOptions, Error, RuntimeGenesis, ServiceBuilderCommand,
+AbstractService, Role, PruningMode, TransactionPoolOptions, Error, RuntimeGenesis, ServiceBuilderCommand,
TFullClient, TLightClient, TFullBackend, TLightBackend, TFullCallExecutor, TLightCallExecutor,
Configuration, ChainSpec,
};
......@@ -314,7 +314,8 @@ pub fn new_full<Runtime, Dispatch, Extrinsic>(
use futures::stream::StreamExt;
let is_collator = collating_for.is_some();
-let is_authority = config.roles.is_authority() && !is_collator;
+let role = config.role.clone();
+let is_authority = role.is_authority() && !is_collator;
let force_authoring = config.force_authoring;
let max_block_data_size = max_block_data_size;
let db_path = if let DatabaseConfig::Path { ref path, .. } = config.database {
......@@ -325,14 +326,8 @@ pub fn new_full<Runtime, Dispatch, Extrinsic>(
let disable_grandpa = config.disable_grandpa;
let name = config.network.node_name.clone();
let authority_discovery_enabled = authority_discovery_enabled;
-let sentry_nodes = config.network.sentry_nodes.clone();
let slot_duration = slot_duration;
-// sentry nodes announce themselves as authorities to the network
-// and should run the same protocols authorities do, but it should
-// never actively participate in any consensus process.
-let participates_in_consensus = is_authority && !config.sentry_mode;
let (builder, mut import_setup, inherent_data_providers) = new_full_start!(config, Runtime, Dispatch);
let backend = builder.backend().clone();
......@@ -388,7 +383,7 @@ pub fn new_full<Runtime, Dispatch, Extrinsic>(
service.spawn_task_handle(),
).map_err(|e| format!("Could not spawn network worker: {:?}", e))?;
-if participates_in_consensus {
+if let Role::Authority { sentry_nodes } = &role {
let availability_store = {
use std::path::PathBuf;
......@@ -470,7 +465,7 @@ pub fn new_full<Runtime, Dispatch, Extrinsic>(
let authority_discovery = authority_discovery::AuthorityDiscovery::new(
service.client(),
network,
-sentry_nodes,
+sentry_nodes.clone(),
service.keystore(),
dht_event_stream,
service.prometheus_registry(),
......@@ -481,7 +476,7 @@ pub fn new_full<Runtime, Dispatch, Extrinsic>(
// if the node isn't actively participating in consensus then it doesn't
// need a keystore, regardless of which protocol we use below.
-let keystore = if participates_in_consensus {
+let keystore = if is_authority {
Some(service.keystore())
} else {
None
......@@ -494,7 +489,7 @@ pub fn new_full<Runtime, Dispatch, Extrinsic>(
name: Some(name),
observer_enabled: false,
keystore,
-is_authority,
+is_authority: role.is_network_authority(),
};
let enable_grandpa = !disable_grandpa;
......
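
In the full-service setup, the standalone `participates_in_consensus` flag and the separately cloned `sentry_nodes` list are folded into the configured role: the authority-only branch destructures `Role::Authority { sentry_nodes }`, the keystore is created only when `role.is_authority()`, and GRANDPA consults `role.is_network_authority()`. A condensed sketch of that control flow, with a hypothetical local `Role` stand-in (the real type and its helpers live in the upstream service crates):

```rust
// Hypothetical, reduced stand-in for the service `Role`; the sentry address
// type is simplified to `String` for illustration.
#[allow(dead_code)]
enum Role {
    Full,
    Light,
    Authority { sentry_nodes: Vec<String> },
}

impl Role {
    fn is_authority(&self) -> bool {
        matches!(self, Role::Authority { .. })
    }
}

// Placeholder for the validation / authority-discovery tasks spawned above.
fn start_authority_tasks(sentry_nodes: &[String]) {
    println!("spawning authority tasks with {} sentry node(s)", sentry_nodes.len());
}

fn main() {
    let role = Role::Authority { sentry_nodes: vec!["/dns4/sentry.example".to_string()] };

    // Replaces `if participates_in_consensus { ... }`: the branch both gates the
    // authority-only work and hands over the sentry node list, so the separate
    // `config.sentry_mode` check is no longer consulted here.
    if let Role::Authority { sentry_nodes } = &role {
        start_authority_tasks(sentry_nodes);
    }

    // The keystore is only needed when the node is an authority.
    let _keystore = if role.is_authority() { Some("keystore handle") } else { None };
}
```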