Commit adba1d58 authored by Bastian Köcher, committed by asynchronous rob

Update to latest substrate-master and polkadot v0.3 (#195)



* Rebuild runtime

* Remove invalid value from chainspec (#68)

* service: use grandpa block import for locally sealed aura blocks (#85)

* bump version to v0.3.1

* Update lock file.

* limit number of transactions when building blocks (#91)

* Update to latest Substrate

* Bump to 0.3.2

* Actually bump.

* v0.3.2 (#98)

* bump substrate version

* fix polkadot-collator

* point to alexander-backports of substrate

* bump version

* cli: fix node shutdown (#100)

* update to latest substrate, change to v0.3.4

* update to latest substrate, bump version to 0.3.5

* v0.3.6

* try to build on every v0.3 commit and update alexander-backports

* bump to v0.3.7

* bump to 0.3.8

* Bump to 0.3.9: network and pruning improvements

* Bump to 0.3.10: reduce network bandwidth usage

* Use libp2p-kad 0.3.2 (#122)

* Bump libp2p-identify to 0.3.1 (#123)

* Bump to 0.3.12 (#127)

* Update Substrate again (#128)

* update substrate and bump version to v0.3.13

* bump version to v0.3.14: fix --reserved-nodes

* add a manually curated grandpa module (#136)

* updating v0.3 to use substrate v0.10 (#146)

* updating to latest substrate v0.10

* better handling of outer poll

* nit

* fix tests

* remove comment

* reduce indentation

* use self.poll

* bring oneshot into scope

* spaces

* wrap

* remove match

* wrap

* Update primitives/Cargo.toml
Co-Authored-By: gterzian <2792687+gterzian@users.noreply.github.com>

* Update runtime/wasm/Cargo.toml
Co-Authored-By: gterzian <2792687+gterzian@users.noreply.github.com>

* Update runtime/wasm/Cargo.toml
Co-Authored-By: gterzian <2792687+gterzian@users.noreply.github.com>

* Update test-parachains/adder/collator/src/main.rs
Co-Authored-By: gterzian <2792687+gterzian@users.noreply.github.com>

* indent

* add parentheses

* config: fix wrong ip for alexander bootnode (#161)

* fix curated-grandpa and rebuild wasm (#162)

* [v0.3] Integrates new gossip system into Polkadot (#166)

* new gossip validation in network

* integrate new gossip into service

* network: guard validation network future under exit signal (#168)

* bump version to v0.3.15: substrate v0.10

* [v0.3] update to substrate master (#175)

* update to substrate master

* fix test

* service: fix telemetry endpoints on alexander chainspec (#169) (#178)

* Update v0.3 to latest Substrate master (#177)

* update substrate v0.3 to latest master

* bump spec version

* update to latest master: remove fees module

* update runtime blobs

* bump version to 0.3.16

* replace sr25519 accountid with anysigner

* bump version to v0.3.17

* Some PoC-3 GRANDPA tweaks (#181)

* call on_finalise after triggering curated_grandpa change

* make grandpa rounds shorter for faster finalization

* use authorities when calculating duty roster (#185)

* [v0.3] Update to substrate master (#183)

* update to latest substrate master

* bump version to 0.3.18

* update to latest substrate master

* bump spec version

* update runtime wasm blobs

* remove current_offline_slash from chain spec

* update to substrate master: bump version to v0.3.19 (#188)

* update to substrate master: bump version to v0.3.19

libp2p network improvements

* network: replace NodeIndex with PeerId

* network: fix tests

* polkadot v0.3.20 (#190)

* update to substrate master: bump version to 0.3.20

* runtime: add offchain worker trait

* runtime: rebuild wasm blobs

* bump spec version (#191)

* Fix compilation

* Update version to 0.4.0

* Switch to use `polkadot-master` branch from substrate

* Remove unused struct

* Remove `grandpa::SyncedAuthorities` from `OnSessionChange`
parent 6696c8ef
Pipeline #34310 passed in 19 minutes and 2 seconds
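The largest change in the network diff below is the switch from substrate-network's integer NodeIndex to PeerId as the handle for connected peers. A PeerId is a structured identity (it wraps the libp2p peer id) and is Clone but not Copy, which is why the diff turns `*id` and plain `who` arguments into `id.clone()` and `who.clone()` at each call site. A minimal sketch of that pattern, using a hypothetical stand-in PeerId rather than the real substrate-network type:

    use std::collections::HashMap;

    // Hypothetical stand-in for substrate-network's PeerId: a structured identity
    // rather than the old integer NodeIndex. Clone, but deliberately not Copy.
    #[derive(Clone, PartialEq, Eq, Hash, Debug)]
    struct PeerId(Vec<u8>);

    #[derive(Default)]
    struct PeerInfo {
        collations_sent: u32,
    }

    // Takes the id by value, like send_polkadot_message / send_message in the diff.
    fn send_message(_to: PeerId, _msg: &str) {}

    fn notify(peers: &mut HashMap<PeerId, PeerInfo>, who: &PeerId) {
        if let Some(info) = peers.get_mut(who) {
            info.collations_sent += 1;
            // `who` is only borrowed here, so handing it to the network layer by
            // value needs an explicit clone -- the pattern the diff adds at each
            // call site that previously copied a NodeIndex.
            send_message(who.clone(), "collation");
        }
    }

The tests at the end of the diff follow suit, minting peer handles with PeerId::random() instead of small integers.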
@@ -235,7 +235,7 @@ publish-s3-release:
- kubectl get nodes -l node=polkadot
-o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{range @.status.addresses[?(@.type=="ExternalIP")]}{.address}{"\n"}{end}'
- echo "# polkadots' nodes"
- kubectl -n polkadot get pods
- kubectl -n polkadot get pods
-o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.nodeName}{"\n"}{end}'
This source diff could not be displayed because it is too large.
@@ -4,7 +4,7 @@ path = "src/main.rs"
[package]
name = "polkadot"
version = "0.3.0"
version = "0.4.0"
authors = ["Parity Technologies <admin@parity.io>"]
build = "build.rs"
@@ -9,7 +9,7 @@ polkadot-primitives = { path = "../primitives" }
parking_lot = "0.7.1"
log = "0.4.6"
parity-codec = "3.0"
substrate-primitives = { git = "https://github.com/paritytech/substrate" }
substrate-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
kvdb = { git = "https://github.com/paritytech/parity-common", rev="616b40150ded71f57f650067fcbc5c99d7c343e6" }
kvdb-rocksdb = { git = "https://github.com/paritytech/parity-common", rev="616b40150ded71f57f650067fcbc5c99d7c343e6" }
kvdb-memorydb = { git = "https://github.com/paritytech/parity-common", rev="616b40150ded71f57f650067fcbc5c99d7c343e6" }
[package]
name = "polkadot-cli"
version = "0.3.0"
version = "0.4.0"
authors = ["Parity Technologies <admin@parity.io>"]
description = "Polkadot node implementation in Rust."
@@ -9,5 +9,5 @@ log = "0.4.6"
tokio = "0.1.7"
futures = "0.1.17"
exit-future = "0.1"
substrate-cli = { git = "https://github.com/paritytech/substrate" }
substrate-cli = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
polkadot-service = { path = "../service" }
@@ -36,14 +36,13 @@ use chain_spec::ChainSpec;
use futures::Future;
use tokio::runtime::Runtime;
use service::Service as BareService;
use cli::NoCustom;
pub use service::{
Components as ServiceComponents, PolkadotService, CustomConfiguration, ServiceFactory, Factory,
ProvideRuntimeApi, CoreApi, ParachainHost,
};
pub use cli::{VersionInfo, IntoExit};
pub use cli::{VersionInfo, IntoExit, NoCustom};
pub use cli::error;
pub use tokio::runtime::TaskExecutor;
@@ -6,9 +6,9 @@ description = "Collator node implementation"
[dependencies]
futures = "0.1.17"
substrate-client = { git = "https://github.com/paritytech/substrate" }
substrate-client = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
parity-codec = "3.0"
substrate-primitives = { git = "https://github.com/paritytech/substrate" }
substrate-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
polkadot-runtime = { path = "../runtime", version = "0.1" }
polkadot-primitives = { path = "../primitives", version = "0.1" }
polkadot-cli = { path = "../cli" }
@@ -18,4 +18,4 @@ log = "0.4"
tokio = "0.1.7"
[dev-dependencies]
substrate-keyring = { git = "https://github.com/paritytech/substrate" }
substrate-keyring = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
@@ -259,14 +259,15 @@ impl<P, E> Worker for CollationNode<P, E> where
match known_oracle.block_status(&BlockId::hash(*block_hash)) {
Err(_) | Ok(BlockStatus::Unknown) | Ok(BlockStatus::Queued) => None,
Ok(BlockStatus::KnownBad) => Some(Known::Bad),
Ok(BlockStatus::InChain) => match known_oracle.leaves() {
Err(_) => None,
Ok(leaves) => if leaves.contains(block_hash) {
Some(Known::Leaf)
} else {
Some(Known::Old)
},
}
Ok(BlockStatus::InChainWithState) | Ok(BlockStatus::InChainPruned) =>
match known_oracle.leaves() {
Err(_) => None,
Ok(leaves) => if leaves.contains(block_hash) {
Some(Known::Leaf)
} else {
Some(Known::Old)
},
}
}
},
);
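The hunk above follows a substrate-client change that split BlockStatus::InChain into InChainWithState and InChainPruned. For the collator's ancestry check both still mean "known and in chain", so both variants map to Known::Leaf or Known::Old depending on whether the hash is a current leaf. A condensed sketch of that match, with toy enums standing in for the real substrate-client and collator types:

    // Toy types standing in for substrate-client's BlockStatus and the collator's
    // Known enum; only the shape of the match matters here.
    enum BlockStatus { Unknown, Queued, KnownBad, InChainWithState, InChainPruned }
    enum Known { Bad, Leaf, Old }

    fn classify(status: BlockStatus, is_leaf: bool) -> Option<Known> {
        match status {
            // Not yet imported: nothing can be said about the block.
            BlockStatus::Unknown | BlockStatus::Queued => None,
            BlockStatus::KnownBad => Some(Known::Bad),
            // In chain, whether or not its state survived pruning.
            BlockStatus::InChainWithState | BlockStatus::InChainPruned =>
                Some(if is_leaf { Known::Leaf } else { Known::Old }),
        }
    }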
@@ -481,3 +482,4 @@ mod tests {
assert_eq!(collation.receipt.egress_queue_roots, vec![(a, root_a), (b, root_b)]);
}
}
@@ -8,5 +8,5 @@ edition = "2018"
polkadot-primitives = { path = "../primitives" }
reed-solomon-erasure = { git = "https://github.com/paritytech/reed-solomon-erasure" }
parity-codec = "3.0"
substrate-primitives = { git = "https://github.com/paritytech/substrate" }
substrate-trie = { git = "https://github.com/paritytech/substrate" }
substrate-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
substrate-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
@@ -5,6 +5,6 @@ authors = ["Parity Technologies <admin@parity.io>"]
description = "Polkadot node implementation in Rust."
[dependencies]
substrate-executor = { git = "https://github.com/paritytech/substrate" }
substrate-primitives = { git = "https://github.com/paritytech/substrate" }
substrate-executor = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
substrate-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
polkadot-runtime = { path = "../runtime" }
@@ -12,9 +12,9 @@ polkadot-validation = { path = "../validation" }
polkadot-primitives = { path = "../primitives" }
parity-codec = "3.0"
parity-codec-derive = "3.0"
substrate-network = { git = "https://github.com/paritytech/substrate" }
substrate-primitives = { git = "https://github.com/paritytech/substrate" }
sr-primitives = { git = "https://github.com/paritytech/substrate" }
substrate-network = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
substrate-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
sr-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
futures = "0.1"
tokio = "0.1.7"
log = "0.4"
@@ -22,5 +22,5 @@ slice-group-by = "0.2.2"
exit-future = "0.1.4"
[dev-dependencies]
substrate-client = { git = "https://github.com/paritytech/substrate" }
substrate-keyring = { git = "https://github.com/paritytech/substrate" }
substrate-client = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
substrate-keyring = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
@@ -16,8 +16,8 @@
//! Bridge between the network and consensus service for getting collations to it.
use polkadot_primitives::{parachain::CollatorId, Hash};
use polkadot_primitives::parachain::{Id as ParaId, Collation};
use polkadot_primitives::Hash;
use polkadot_primitives::parachain::{CollatorId, Id as ParaId, Collation};
use futures::sync::oneshot;
use std::collections::hash_map::{HashMap, Entry};
@@ -55,9 +55,9 @@ pub mod gossip;
use codec::{Decode, Encode};
use futures::sync::oneshot;
use polkadot_primitives::{Block, SessionKey, Hash, Header, parachain::CollatorId};
use polkadot_primitives::parachain::{Id as ParaId, BlockData, CandidateReceipt, Collation};
use substrate_network::{NodeIndex, RequestId, Context, Severity};
use polkadot_primitives::{Block, SessionKey, Hash, Header};
use polkadot_primitives::parachain::{Id as ParaId, CollatorId, BlockData, CandidateReceipt, Collation};
use substrate_network::{PeerId, RequestId, Context, Severity};
use substrate_network::{message, generic_message};
use substrate_network::specialization::NetworkSpecialization as Specialization;
use substrate_network::StatusMessage as GenericFullStatus;
@@ -156,7 +156,7 @@ pub enum Message {
Collation(Hash, Collation),
}
fn send_polkadot_message(ctx: &mut Context<Block>, to: NodeIndex, message: Message) {
fn send_polkadot_message(ctx: &mut Context<Block>, to: PeerId, message: Message) {
trace!(target: "p_net", "Sending polkadot message to {}: {:?}", to, message);
let encoded = message.encode();
ctx.send_message(to, generic_message::Message::ChainSpecific(encoded))
@@ -164,13 +164,13 @@ fn send_polkadot_message(ctx: &mut Context<Block>, to: NodeIndex, message: Messa
/// Polkadot protocol attachment for substrate.
pub struct PolkadotProtocol {
peers: HashMap<NodeIndex, PeerInfo>,
peers: HashMap<PeerId, PeerInfo>,
collating_for: Option<(CollatorId, ParaId)>,
collators: CollatorPool,
validators: HashMap<SessionKey, NodeIndex>,
validators: HashMap<SessionKey, PeerId>,
local_collations: LocalCollations<Collation>,
live_validation_sessions: LiveValidationSessions,
in_flight: HashMap<(RequestId, NodeIndex), BlockDataRequest>,
in_flight: HashMap<(RequestId, PeerId), BlockDataRequest>,
pending: Vec<BlockDataRequest>,
extrinsic_store: Option<::av_store::Store>,
next_req_id: u64,
@@ -225,7 +225,7 @@ impl PolkadotProtocol {
{
peer_data.collator_state.send_key(new_local.clone(), |msg| send_polkadot_message(
ctx,
*id,
id.clone(),
msg
));
}
@@ -257,7 +257,7 @@ impl PolkadotProtocol {
}
Err(Some(known_keys)) => {
let next_peer = known_keys.iter()
.filter_map(|x| validator_keys.get(x).map(|id| (x.clone(), *id)))
.filter_map(|x| validator_keys.get(x).map(|id| (x.clone(), id.clone())))
.find(|&(ref key, _)| pending.attempted_peers.insert(key.clone()))
.map(|(_, id)| id);
@@ -268,7 +268,7 @@ impl PolkadotProtocol {
send_polkadot_message(
ctx,
who,
who.clone(),
Message::RequestBlockData(req_id, parent, c_hash),
);
@@ -290,7 +290,7 @@ impl PolkadotProtocol {
self.pending = new_pending;
}
fn on_polkadot_message(&mut self, ctx: &mut Context<Block>, who: NodeIndex, msg: Message) {
fn on_polkadot_message(&mut self, ctx: &mut Context<Block>, who: PeerId, msg: Message) {
trace!(target: "p_net", "Polkadot message from {}: {:?}", who, msg);
match msg {
Message::SessionKey(key) => self.on_session_key(ctx, who, key),
@@ -313,7 +313,7 @@ impl PolkadotProtocol {
}
}
fn on_session_key(&mut self, ctx: &mut Context<Block>, who: NodeIndex, key: SessionKey) {
fn on_session_key(&mut self, ctx: &mut Context<Block>, who: PeerId, key: SessionKey) {
{
let info = match self.peers.get_mut(&who) {
Some(peer) => peer,
@@ -343,7 +343,7 @@ impl PolkadotProtocol {
for (relay_parent, collation) in new_collations {
send_polkadot_message(
ctx,
who,
who.clone(),
Message::Collation(relay_parent, collation),
)
}
@@ -354,8 +354,8 @@
self.dispatch_pending_requests(ctx);
}
fn on_block_data(&mut self, ctx: &mut Context<Block>, who: NodeIndex, req_id: RequestId, data: Option<BlockData>) {
match self.in_flight.remove(&(req_id, who)) {
fn on_block_data(&mut self, ctx: &mut Context<Block>, who: PeerId, req_id: RequestId, data: Option<BlockData>) {
match self.in_flight.remove(&(req_id, who.clone())) {
Some(req) => {
if let Some(data) = data {
if data.hash() == req.block_data_hash {
@@ -372,7 +372,7 @@
}
// when a validator sends us (a collator) a new role.
fn on_new_role(&mut self, ctx: &mut Context<Block>, who: NodeIndex, role: Role) {
fn on_new_role(&mut self, ctx: &mut Context<Block>, who: PeerId, role: Role) {
let info = match self.peers.get_mut(&who) {
Some(peer) => peer,
None => {
@@ -400,7 +400,7 @@
debug!(target: "p_net", "Broadcasting collation on relay parent {:?}", relay_parent);
send_polkadot_message(
ctx,
who,
who.clone(),
Message::Collation(relay_parent, collation),
)
}
@@ -413,7 +413,7 @@ impl Specialization<Block> for PolkadotProtocol {
Status { collating_for: self.collating_for.clone() }.encode()
}
fn on_connect(&mut self, ctx: &mut Context<Block>, who: NodeIndex, status: FullStatus) {
fn on_connect(&mut self, ctx: &mut Context<Block>, who: PeerId, status: FullStatus) {
let local_status = match Status::decode(&mut &status.chain_status[..]) {
Some(status) => status,
None => {
@@ -440,7 +440,7 @@ impl Specialization<Block> for PolkadotProtocol {
peer_info.collator_state.set_role(collator_role, |msg| send_polkadot_message(
ctx,
who,
who.clone(),
msg,
));
}
@@ -450,7 +450,7 @@ impl Specialization<Block> for PolkadotProtocol {
for local_session_key in self.live_validation_sessions.recent_keys() {
peer_info.collator_state.send_key(local_session_key.clone(), |msg| send_polkadot_message(
ctx,
who,
who.clone(),
msg,
));
}
@@ -460,7 +460,7 @@ impl Specialization<Block> for PolkadotProtocol {
self.dispatch_pending_requests(ctx);
}
fn on_disconnect(&mut self, ctx: &mut Context<Block>, who: NodeIndex) {
fn on_disconnect(&mut self, ctx: &mut Context<Block>, who: PeerId) {
if let Some(info) = self.peers.remove(&who) {
if let Some((acc_id, _)) = info.collating_for {
let new_primary = self.collators.on_disconnect(acc_id)
@@ -469,7 +469,7 @@ impl Specialization<Block> for PolkadotProtocol {
if let Some((new_primary, primary_info)) = new_primary {
primary_info.collator_state.set_role(Role::Primary, |msg| send_polkadot_message(
ctx,
new_primary,
new_primary.clone(),
msg,
));
}
@@ -502,7 +502,7 @@ impl Specialization<Block> for PolkadotProtocol {
}
}
fn on_message(&mut self, ctx: &mut Context<Block>, who: NodeIndex, message: &mut Option<message::Message<Block>>) {
fn on_message(&mut self, ctx: &mut Context<Block>, who: PeerId, message: &mut Option<message::Message<Block>>) {
match message.take() {
Some(generic_message::Message::ChainSpecific(raw)) => {
match Message::decode(&mut raw.as_slice()) {
@@ -532,7 +532,7 @@ impl Specialization<Block> for PolkadotProtocol {
Action::NewRole(account_id, role) => if let Some((collator, info)) = self.collator_peer(account_id) {
info.collator_state.set_role(role, |msg| send_polkadot_message(
ctx,
collator,
collator.clone(),
msg,
))
},
@@ -548,7 +548,7 @@
impl PolkadotProtocol {
// we received a collation from a peer
fn on_collation(&mut self, ctx: &mut Context<Block>, from: NodeIndex, relay_parent: Hash, collation: Collation) {
fn on_collation(&mut self, ctx: &mut Context<Block>, from: PeerId, relay_parent: Hash, collation: Collation) {
let collation_para = collation.receipt.parachain_index;
let collated_acc = collation.receipt.collator.clone();
@@ -577,7 +577,7 @@ impl PolkadotProtocol {
}
// get connected peer with given account ID for collation.
fn collator_peer(&mut self, collator_id: CollatorId) -> Option<(NodeIndex, &mut PeerInfo)> {
fn collator_peer(&mut self, collator_id: CollatorId) -> Option<(PeerId, &mut PeerInfo)> {
let check_info = |info: &PeerInfo| info
.collating_for
.as_ref()
@@ -586,7 +586,7 @@ impl PolkadotProtocol {
self.peers
.iter_mut()
.filter(|&(_, ref info)| check_info(&**info))
.map(|(who, info)| (*who, info))
.map(|(who, info)| (who.clone(), info))
.next()
}
@@ -616,7 +616,7 @@ impl PolkadotProtocol {
debug!(target: "p_net", "Sending local collation to {:?}", primary);
send_polkadot_message(
ctx,
*who,
who.clone(),
Message::Collation(relay_parent, cloned_collation),
)
},
@@ -25,13 +25,13 @@
use sr_primitives::traits::{ProvideRuntimeApi, BlakeTwo256, Hash as HashT};
use polkadot_validation::{
SharedTable, TableRouter, SignedStatement, GenericStatement, ParachainWork, Incoming,
Validated, Outgoing,
SharedTable, TableRouter, SignedStatement, GenericStatement, ParachainWork, Outgoing, Validated
};
use polkadot_primitives::{Block, Hash, SessionKey};
use polkadot_primitives::parachain::{
BlockData, Extrinsic, CandidateReceipt, ParachainHost, Id as ParaId, Message
};
use gossip::RegisteredMessageValidator;
use codec::{Encode, Decode};
use futures::prelude::*;
@@ -41,7 +41,7 @@ use std::collections::{HashMap, HashSet};
use std::io;
use std::sync::Arc;
use validation::{self, SessionDataFetcher, NetworkService, Executor};
use validation::{self, SessionDataFetcher, NetworkService, Executor, Incoming};
type IngressPairRef<'a> = (ParaId, &'a [Message]);
@@ -59,19 +59,22 @@ pub struct Router<P, E, N: NetworkService, T> {
attestation_topic: Hash,
fetcher: SessionDataFetcher<P, E, N, T>,
deferred_statements: Arc<Mutex<DeferredStatements>>,
message_validator: RegisteredMessageValidator,
}
impl<P, E, N: NetworkService, T> Router<P, E, N, T> {
pub(crate) fn new(
table: Arc<SharedTable>,
fetcher: SessionDataFetcher<P, E, N, T>,
message_validator: RegisteredMessageValidator,
) -> Self {
let parent_hash = fetcher.parent_hash();
Router {
table,
fetcher,
attestation_topic: attestation_topic(parent_hash),
deferred_statements: Arc::new(Mutex::new(DeferredStatements::new())),
fetcher,
message_validator,
}
}
@@ -105,6 +108,7 @@ impl<P, E: Clone, N: NetworkService, T: Clone> Clone for Router<P, E, N, T> {
fetcher: self.fetcher.clone(),
attestation_topic: self.attestation_topic.clone(),
deferred_statements: self.deferred_statements.clone(),
message_validator: self.message_validator.clone(),
}
}
}
@@ -213,6 +217,7 @@ impl<P: ProvideRuntimeApi + Send + Sync + 'static, E, N, T> Router<P, E, N, T> w
validated.extrinsic().cloned(),
);
// propagate the statement.
// consider something more targeted than gossip in the future.
let signed = table.import_validated(validated);
@@ -254,7 +259,9 @@ impl<P: ProvideRuntimeApi + Send, E, N, T> TableRouter for Router<P, E, N, T> wh
impl<P, E, N: NetworkService, T> Drop for Router<P, E, N, T> {
fn drop(&mut self) {
self.fetcher.network().drop_gossip(self.attestation_topic);
let parent_hash = self.parent_hash().clone();
self.message_validator.remove_session(&parent_hash);
self.network().with_spec(move |spec, _| { spec.remove_validation_session(parent_hash); });
}
}
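The Drop change above is the teardown half of the new gossip integration: when the Router for a relay parent is dropped, it drops its attestation gossip topic and removes the session's entries from the message validator and the network specialization. A minimal sketch of the same drop-time deregistration pattern, with a hypothetical SessionRegistry standing in for that shared state:

    use std::collections::HashSet;
    use std::sync::{Arc, Mutex};

    type Hash = [u8; 32];

    // Hypothetical stand-in for the gossip validator's per-session bookkeeping.
    #[derive(Default)]
    struct SessionRegistry {
        live_sessions: Mutex<HashSet<Hash>>,
    }

    // Hypothetical stand-in for a per-relay-parent router handle.
    struct SessionHandle {
        parent_hash: Hash,
        registry: Arc<SessionRegistry>,
    }

    impl Drop for SessionHandle {
        fn drop(&mut self) {
            // Deregistering in Drop means every exit path, including early
            // returns, clears the session's gossip state exactly once.
            self.registry.live_sessions.lock().unwrap().remove(&self.parent_hash);
        }
    }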
@@ -25,7 +25,7 @@ use polkadot_primitives::parachain::{CandidateReceipt, HeadData, BlockData, Coll
use substrate_primitives::crypto::UncheckedInto;
use codec::Encode;
use substrate_network::{
Severity, NodeIndex, PeerInfo, ClientHandle, Context, config::Roles,
Severity, PeerId, PeerInfo, ClientHandle, Context, config::Roles,
message::Message as SubstrateMessage, specialization::NetworkSpecialization,
generic_message::Message as GenericMessage
};
@@ -36,9 +36,9 @@ mod validation;
#[derive(Default)]
struct TestContext {
disabled: Vec<NodeIndex>,
disconnected: Vec<NodeIndex>,
messages: Vec<(NodeIndex, SubstrateMessage<Block>)>,
disabled: Vec<PeerId>,
disconnected: Vec<PeerId>,
messages: Vec<(PeerId, SubstrateMessage<Block>)>,
}
impl Context<Block> for TestContext {
@@ -46,24 +46,24 @@ impl Context<Block> for TestContext {
unimplemented!()
}
fn report_peer(&mut self, peer: NodeIndex, reason: Severity) {
fn report_peer(&mut self, peer: PeerId, reason: Severity) {
match reason {
Severity::Bad(_) => self.disabled.push(peer),
_ => self.disconnected.push(peer),
}
}
fn peer_info(&self, _peer: NodeIndex) -> Option<PeerInfo<Block>> {
fn peer_info(&self, _peer: &PeerId) -> Option<PeerInfo<Block>> {
unimplemented!()
}
fn send_message(&mut self, who: NodeIndex, data: SubstrateMessage<Block>) {
fn send_message(&mut self, who: PeerId, data: SubstrateMessage<Block>) {
self.messages.push((who, data))
}
}
impl TestContext {
fn has_message(&self, to: NodeIndex, message: Message) -> bool {
fn has_message(&self, to: PeerId, message: Message) -> bool {
use substrate_network::generic_message::Message as GenericMessage;
let encoded = message.encode();
@@ -94,7 +94,7 @@ fn make_validation_session(parent_hash: Hash, local_key: SessionKey) -> SessionP
}
}
fn on_message(protocol: &mut PolkadotProtocol, ctx: &mut TestContext, from: NodeIndex, message: Message) {
fn on_message(protocol: &mut PolkadotProtocol, ctx: &mut TestContext, from: PeerId, message: Message) {
let encoded = message.encode();
protocol.on_message(ctx, from, &mut Some(GenericMessage::ChainSpecific(encoded)));
}
@@ -103,8 +103,8 @@ fn on_message(protocol: &mut PolkadotProtocol, ctx: &mut TestContext, from: Node
fn sends_session_key() {
let mut protocol = PolkadotProtocol::new(None);
let peer_a = 1;
let peer_b = 2;
let peer_a = PeerId::random();
let peer_b = PeerId::random();
let parent_hash = [0; 32].into();
let local_key: ValidatorId = [1; 32].unchecked_into();
@@ -113,7 +113,7 @@
{
let mut ctx = TestContext::default();
protocol.on_connect(&mut ctx, peer_a, make_status(&validator_status, Roles::AUTHORITY));
protocol.on_connect(&mut ctx, peer_a.clone(), make_status(&validator_status, Roles::AUTHORITY));
assert!(ctx.messages.is_empty());
}
@@ -126,8 +126,8 @@ fn sends_session_key() {
{
let mut ctx = TestContext::default();
protocol.on_connect(&mut ctx, peer_b, make_status(&collator_status, Roles::NONE));
assert!(ctx.has_message(peer_b, Message::SessionKey(local_key)));
protocol.on_connect(&mut ctx, peer_b.clone(), make_status(&collator_status, Roles::NONE));
assert!(ctx.has_message(peer_b.clone(), Message::SessionKey(local_key)));
}
}
@@ -135,8 +135,8 @@ fn sends_session_key() {
fn fetches_from_those_with_knowledge() {
let mut protocol = PolkadotProtocol::new(None);
let peer_a = 1;
let peer_b = 2;
let peer_a = PeerId::random();
let peer_b = PeerId::random();
let parent_hash = [0; 32].into();
let local_key: ValidatorId = [1; 32].unchecked_into();
@@ -169,16 +169,16 @@ fn fetches_from_those_with_knowledge() {
// connect peer A
{
let mut ctx = TestContext::default();
protocol.on_connect(&mut ctx, peer_a, make_status(&status, Roles::AUTHORITY));
assert!(ctx.has_message(peer_a, Message::SessionKey(local_key)));
protocol.on_connect(&mut ctx, peer_a.clone(), make_status(&status, Roles::AUTHORITY));
assert!(ctx.has_message(peer_a.clone(), Message::SessionKey(local_key)));
}
// peer A gives session key and gets asked for data.
{
let mut ctx = TestContext::default();
on_message(&mut protocol, &mut ctx, peer_a, Message::SessionKey(a_key.clone()));
on_message(&mut protocol, &mut ctx, peer_a.clone(), Message::SessionKey(a_key.clone()));
assert!(protocol.validators.contains_key(&a_key));
assert!(ctx.has_message(peer_a, Message::RequestBlockData(1, parent_hash, candidate_hash)));
assert!(ctx.has_message(peer_a.clone(), Message::RequestBlockData(1, parent_hash, candidate_hash)));
}
knowledge.lock().note_statement(b_key.clone(), &GenericStatement::Valid(candidate_hash));
@@ -186,18 +186,18 @@ fn fetches_from_those_with_knowledge() {
// peer B connects and sends session key. request already assigned to A
{
let mut ctx = TestContext::default();
protocol.on_connect(&mut ctx, peer_b, make_status(&status, Roles::AUTHORITY));
on_message(&mut protocol, &mut ctx, peer_b, Message::SessionKey(b_key));
assert!(!ctx.has_message(peer_b, Message::RequestBlockData(2, parent_hash, candidate_hash)));
protocol.on_connect(&mut ctx, peer_b.clone(), make_status(&status, Roles::AUTHORITY));
on_message(&mut protocol, &mut ctx, peer_b.clone(), Message::SessionKey(b_key.clone()));
assert!(!ctx.has_message(peer_b.clone(), Message::RequestBlockData(2, parent_hash, candidate_hash)));
}
// peer A disconnects, triggering reassignment
{
let mut ctx = TestContext::default();
protocol.on_disconnect(&mut ctx, peer_a);
protocol.on_disconnect(&mut ctx, peer_a.clone());
assert!(!protocol.validators.contains_key(&a_key));
assert!(ctx.has_message(peer_b, Message::RequestBlockData(2, parent_hash, candidate_hash)));
assert!(ctx.has_message(peer_b.clone(), Message::RequestBlockData(2, parent_hash, candidate_hash)));
}
// peer B comes back with block data.
@@ -213,7 +213,7 @@ fn fetches_from_those_with_knowledge() {
fn fetches_available_block_data() {
let mut protocol = PolkadotProtocol::new(None);
let peer_a = 1;
let peer_a = PeerId::random();
let parent_hash = [0; 32].into();
let block_data = BlockData(vec![1, 2, 3, 4]);
...