Commit af0d87af authored by Kian Paimani, committed by Gavin Wood

Update to latest Substrate master. (#353)



* Integrate srml/im-online

* Fix all build errors with old aura.

* Fix most of the build errors.

* Builds and tests seem to pass (I will not trust this commit yet)

* Apply suggestions from code review

Co-Authored-By: Robert Habermeier <rphmeier@gmail.com>

* Kill some warnings.

* fix panics on 0 validators

* Fix dev chain.

* Fix author stuff

* fix im online integration.

* Some tweaks

* Introduce app-crypto

* Initial build work

* codec update / tweaks

* patch polkadot-erasure-coding input

* More fixes for new crypto

* More fixes

* Update parachains module

* revamp parachain crypto

* More crypto work.

* Chain spec and service.

* ChainSpec stuff

* Last bits for a clean build

* Tweak comment

* adapt polkadot-validation to the new keystore

* polkadot-network compiles, but tests don't

* Integrate the new parachain validation stuff

* delete message_routing file

* make polkadot-network tests compile and pass

* runtime tests compile and pass

* update substrate ref

* service compiles

* all tests pass

* Add TODO, change branch back to polkadot-master

* Lock file

* TODOs done

* Issue number

* Remove old TODO

* Remove commented code
parent 7c6390fe
This source diff could not be displayed because it is too large.
......@@ -9,7 +9,7 @@ edition = "2018"
polkadot-primitives = { path = "../primitives" }
parking_lot = "0.9.0"
log = "0.4.6"
parity-codec = "4.1"
codec = { package = "parity-scale-codec", version = "~1.0.0", default-features = false, features = ["derive"] }
substrate-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
kvdb = { git = "https://github.com/paritytech/parity-common", rev="616b40150ded71f57f650067fcbc5c99d7c343e6" }
kvdb-rocksdb = { git = "https://github.com/paritytech/parity-common", rev="616b40150ded71f57f650067fcbc5c99d7c343e6" }
......
......@@ -16,7 +16,7 @@
//! Persistent database for parachain data.
use parity_codec::{Encode, Decode};
use codec::{Encode, Decode};
use kvdb::{KeyValueDB, DBTransaction};
use kvdb_rocksdb::{Database, DatabaseConfig};
use polkadot_primitives::Hash;
......
......@@ -26,15 +26,15 @@ pub enum ChainSpec {
Development,
/// Whatever the current runtime is, with simple Alice/Bob auths.
LocalTestnet,
/// The PoC-3 era testnet.
Alexander,
/// The Kusama network.
Kusama,
/// Whatever the current runtime is with the "global testnet" defaults.
StagingTestnet,
}
impl Default for ChainSpec {
fn default() -> Self {
ChainSpec::Alexander
ChainSpec::Kusama
}
}
......@@ -42,7 +42,7 @@ impl Default for ChainSpec {
impl ChainSpec {
pub(crate) fn load(self) -> Result<service::ChainSpec, String> {
Ok(match self {
ChainSpec::Alexander => service::chain_spec::poc_3_testnet_config()?,
ChainSpec::Kusama => service::chain_spec::kusama_config()?,
ChainSpec::Development => service::chain_spec::development_config(),
ChainSpec::LocalTestnet => service::chain_spec::local_testnet_config(),
ChainSpec::StagingTestnet => service::chain_spec::staging_testnet_config(),
......@@ -53,7 +53,7 @@ impl ChainSpec {
match s {
"dev" => Some(ChainSpec::Development),
"local" => Some(ChainSpec::LocalTestnet),
"poc-3" | "alex" | "alexander" => Some(ChainSpec::Alexander),
"kusama" => Some(ChainSpec::Kusama),
"staging" => Some(ChainSpec::StagingTestnet),
"" => Some(ChainSpec::default()),
_ => None,
......
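The hunks above retire the Alexander (PoC-3) spec in favour of Kusama, which also becomes the default. A minimal sketch of what that implies for callers, using only the enum shown in the diff (hypothetical helper, not part of the commit):

// With Alexander gone, an empty chain-spec string falls through to
// `ChainSpec::default()`, which now resolves to the Kusama config via `load()`.
fn default_is_kusama() -> bool {
    match ChainSpec::default() {
        ChainSpec::Kusama => true,
        _ => false,
    }
}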
......@@ -9,11 +9,9 @@ edition = "2018"
futures = "0.1.17"
futures03 = { package = "futures-preview", version = "0.3.0-alpha.17", features = ["compat"] }
client = { package = "substrate-client", git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
parity-codec = "4.1"
primitives = { package = "substrate-primitives", git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
substrate-network = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
consensus_common = { package = "substrate-consensus-common", git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
aura = { package = "substrate-consensus-aura", git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
polkadot-runtime = { path = "../runtime", version = "0.1" }
polkadot-primitives = { path = "../primitives", version = "0.1" }
polkadot-cli = { path = "../cli" }
......
......@@ -53,12 +53,12 @@ use futures::{future, Stream, Future, IntoFuture};
use futures03::{TryStreamExt as _, StreamExt as _};
use log::{info, warn};
use client::BlockchainEvents;
use primitives::{ed25519, Pair};
use primitives::Pair;
use polkadot_primitives::{
BlockId, SessionKey, Hash, Block,
BlockId, Hash, Block,
parachain::{
self, BlockData, DutyRoster, HeadData, ConsolidatedIngress, Message, Id as ParaId, Extrinsic,
PoVBlock, Status as ParachainStatus,
PoVBlock, Status as ParachainStatus, ValidatorId, CollatorPair,
}
};
use polkadot_cli::{
......@@ -69,7 +69,6 @@ use polkadot_network::validation::{SessionParams, ValidationNetwork};
use polkadot_network::NetworkService;
use tokio::timer::Timeout;
use consensus_common::SelectChain;
use aura::AuraApi;
pub use polkadot_cli::VersionInfo;
pub use polkadot_network::validation::Incoming;
......@@ -177,7 +176,7 @@ pub fn collate<'a, R, P>(
parachain_status: ParachainStatus,
relay_context: R,
para_context: P,
key: Arc<ed25519::Pair>,
key: Arc<CollatorPair>,
)
-> impl Future<Item=parachain::Collation, Error=Error<R::Error>> + 'a
where
......@@ -230,7 +229,7 @@ pub fn collate<'a, R, P>(
struct ApiContext<P, E> {
network: Arc<ValidationNetwork<P, E, NetworkService, TaskExecutor>>,
parent_hash: Hash,
authorities: Vec<SessionKey>,
validators: Vec<ValidatorId>,
}
impl<P: 'static, E: 'static> RelayChainContext for ApiContext<P, E> where
......@@ -248,7 +247,7 @@ impl<P: 'static, E: 'static> RelayChainContext for ApiContext<P, E> where
let _session = self.network.instantiate_session(SessionParams {
local_session_key: None,
parent_hash: self.parent_hash,
authorities: self.authorities.clone(),
authorities: self.validators.clone(),
}).map_err(|e| format!("unable to instantiate validation session: {:?}", e));
Box::new(future::ok(ConsolidatedIngress(Vec::new())))
......@@ -259,7 +258,7 @@ struct CollationNode<P, E> {
build_parachain_context: P,
exit: E,
para_id: ParaId,
key: Arc<ed25519::Pair>,
key: Arc<CollatorPair>,
}
impl<P, E> IntoExit for CollationNode<P, E> where
......@@ -364,18 +363,18 @@ impl<P, E> Worker for CollationNode<P, E> where
None => return future::Either::A(future::ok(())),
};
let authorities = try_fr!(api.authorities(&id));
let validators = try_fr!(api.validators(&id));
let targets = compute_targets(
para_id,
authorities.as_slice(),
validators.as_slice(),
try_fr!(api.duty_roster(&id)),
);
let context = ApiContext {
network: validation_network,
parent_hash: relay_parent,
authorities,
validators,
};
let collation_work = collate(
......@@ -414,7 +413,7 @@ impl<P, E> Worker for CollationNode<P, E> where
}
}
fn compute_targets(para_id: ParaId, session_keys: &[SessionKey], roster: DutyRoster) -> HashSet<SessionKey> {
fn compute_targets(para_id: ParaId, session_keys: &[ValidatorId], roster: DutyRoster) -> HashSet<ValidatorId> {
use polkadot_primitives::parachain::Chain;
roster.validator_duty.iter().enumerate()
......@@ -433,7 +432,7 @@ pub fn run_collator<P, E>(
build_parachain_context: P,
para_id: ParaId,
exit: E,
key: Arc<ed25519::Pair>,
key: Arc<CollatorPair>,
version: VersionInfo,
) -> polkadot_cli::error::Result<()> where
P: BuildParachainContext + Send + 'static,
......@@ -450,7 +449,7 @@ pub fn run_collator<P, E>(
mod tests {
use std::collections::HashMap;
use polkadot_primitives::parachain::{OutgoingMessage, FeeSchedule};
use keyring::Ed25519Keyring;
use keyring::Sr25519Keyring;
use super::*;
#[derive(Default, Clone)]
......@@ -540,7 +539,7 @@ mod tests {
},
context.clone(),
DummyParachainContext,
Ed25519Keyring::Alice.pair().into(),
Arc::new(Sr25519Keyring::Alice.pair().into()),
).wait().unwrap();
// ascending order by root.
......
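As the test change above shows, collator keys move from a raw `ed25519::Pair` to the app-crypto `CollatorPair`. A hedged sketch of building a dev key under that change, assuming the same `From<sr25519::Pair>` conversion the test relies on:

use std::sync::Arc;
use keyring::Sr25519Keyring;
use polkadot_primitives::parachain::CollatorPair;

// Build an `Arc<CollatorPair>` from a well-known dev keyring pair,
// mirroring the `Sr25519Keyring::Alice.pair().into()` call in the test.
fn dev_collator_key() -> Arc<CollatorPair> {
    Arc::new(Sr25519Keyring::Alice.pair().into())
}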
......@@ -7,6 +7,6 @@ edition = "2018"
[dependencies]
primitives = { package = "polkadot-primitives", path = "../primitives" }
reed_solomon = { package = "reed-solomon-erasure", git = "https://github.com/paritytech/reed-solomon-erasure" }
parity-codec = "4.1"
codec = { package = "parity-scale-codec", version = "~1.0.0", default-features = false, features = ["derive"] }
substrate-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
trie = { package = "substrate-trie", git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
......@@ -24,12 +24,12 @@
//! f is the maximum number of faulty validators in the system.
//! The data is coded so any f+1 chunks can be used to reconstruct the full data.
use parity_codec::{Encode, Decode};
use codec::{Encode, Decode};
use reed_solomon::galois_16::{self, ReedSolomon};
use primitives::{Hash as H256, BlakeTwo256, HashT};
use primitives::parachain::{BlockData, Extrinsic};
use substrate_primitives::Blake2Hasher;
use trie::{MemoryDB, Trie, TrieMut, TrieDB, TrieDBMut};
use trie::{EMPTY_PREFIX, MemoryDB, Trie, TrieMut, trie_types::{TrieDBMut, TrieDB}};
use self::wrapped_shard::WrappedShard;
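The module docs above say that any f+1 of the erasure-coded chunks recover the full data, where f is the maximum number of faulty validators. A hedged arithmetic sketch of that threshold, assuming the usual n >= 3f + 1 relation (an assumption here, not something this hunk states):

// Chunks needed to reconstruct, for `n_validators` total chunks.
fn reconstruction_threshold(n_validators: usize) -> usize {
    let f = n_validators.saturating_sub(1) / 3; // assumed max faulty validators
    f + 1 // any f + 1 chunks suffice, per the module docs
}
// e.g. 10 validators => f = 3, so any 4 chunks reconstruct the data.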
......@@ -187,13 +187,14 @@ pub fn reconstruct<'a, I: 'a>(n_validators: usize, chunks: I)
// lazily decode from the data shards.
Decode::decode(&mut ShardInput {
remaining_len: shard_len.map(|s| s * params.data_shards).unwrap_or(0),
cur_shard: None,
shards: shards.iter()
.map(|x| x.as_ref())
.take(params.data_shards)
.map(|x| x.expect("all data shards have been recovered; qed"))
.map(|x| x.as_ref()),
}).ok_or_else(|| Error::BadPayload)
}).or_else(|_| Err(Error::BadPayload))
}
/// An iterator that yields merkle branches and chunk data for all chunks to
......@@ -269,7 +270,7 @@ pub fn branches<'a>(chunks: Vec<&'a [u8]>) -> Branches<'a> {
pub fn branch_hash(root: &H256, branch_nodes: &[Vec<u8>], index: usize) -> Result<H256, Error> {
let mut trie_storage: MemoryDB<Blake2Hasher> = MemoryDB::default();
for node in branch_nodes.iter() {
(&mut trie_storage as &mut trie::HashDB<_>).insert(&[], node.as_slice());
(&mut trie_storage as &mut trie::HashDB<_>).insert(EMPTY_PREFIX, node.as_slice());
}
let trie = TrieDB::new(&trie_storage, &root).map_err(|_| Error::InvalidBranchProof)?;
......@@ -278,21 +279,26 @@ pub fn branch_hash(root: &H256, branch_nodes: &[Vec<u8>], index: usize) -> Resul
);
match res {
Ok(Some(Some(hash))) => Ok(hash),
Ok(Some(None)) => Err(Error::InvalidBranchProof), // hash failed to decode
Ok(Some(Ok(hash))) => Ok(hash),
Ok(Some(Err(_))) => Err(Error::InvalidBranchProof), // hash failed to decode
Ok(None) => Err(Error::BranchOutOfBounds),
Err(_) => Err(Error::InvalidBranchProof),
}
}
// input for `parity_codec` which draws data from the data shards
// input for `codec` which draws data from the data shards
struct ShardInput<'a, I> {
remaining_len: usize,
shards: I,
cur_shard: Option<(&'a [u8], usize)>,
}
impl<'a, I: Iterator<Item=&'a [u8]>> parity_codec::Input for ShardInput<'a, I> {
fn read(&mut self, into: &mut [u8]) -> usize {
impl<'a, I: Iterator<Item=&'a [u8]>> codec::Input for ShardInput<'a, I> {
fn remaining_len(&mut self) -> Result<Option<usize>, codec::Error> {
Ok(Some(self.remaining_len))
}
fn read(&mut self, into: &mut [u8]) -> Result<(), codec::Error> {
let mut read_bytes = 0;
loop {
......@@ -320,7 +326,12 @@ impl<'a, I: Iterator<Item=&'a [u8]>> parity_codec::Input for ShardInput<'a, I> {
self.cur_shard = Some((active_shard, in_shard))
}
read_bytes
self.remaining_len -= read_bytes;
if read_bytes == into.len() {
Ok(())
} else {
Err("slice provided too big for input".into())
}
}
}
......
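The `ShardInput` changes above track the move from `parity-codec` 4.1 to `parity-scale-codec` 1.0, where `Input::read` became fallible and gained a `remaining_len` companion. A minimal sketch of the new contract over a plain byte slice (illustrative type, not part of the diff):

struct SliceInput<'a>(&'a [u8]);

impl<'a> codec::Input for SliceInput<'a> {
    // Report how many bytes are left, if known.
    fn remaining_len(&mut self) -> Result<Option<usize>, codec::Error> {
        Ok(Some(self.0.len()))
    }

    // Fill `into` completely or fail, instead of returning a byte count.
    fn read(&mut self, into: &mut [u8]) -> Result<(), codec::Error> {
        if into.len() > self.0.len() {
            return Err("not enough data to fill buffer".into());
        }
        let (head, tail) = self.0.split_at(into.len());
        into.copy_from_slice(head);
        self.0 = tail;
        Ok(())
    }
}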
......@@ -11,7 +11,7 @@ parking_lot = "0.9.0"
av_store = { package = "polkadot-availability-store", path = "../availability-store" }
polkadot-validation = { path = "../validation" }
polkadot-primitives = { path = "../primitives" }
parity-codec = { version = "4.1", features = ["derive"] }
codec = { package = "parity-scale-codec", version = "~1.0.0", default-features = false, features = ["derive"] }
substrate-network = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
substrate-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
sr-primitives = { git = "https://github.com/paritytech/substrate", branch = "polkadot-master" }
......
......@@ -16,7 +16,7 @@
//! Bridge between the network and consensus service for getting collations to it.
use parity_codec::{Encode, Decode};
use codec::{Encode, Decode};
use polkadot_primitives::Hash;
use polkadot_primitives::parachain::{CollatorId, Id as ParaId, Collation};
use substrate_network::PeerId;
......
......@@ -22,8 +22,8 @@ use substrate_network::consensus_gossip::{
ValidatorContext, MessageIntent, ConsensusMessage,
};
use polkadot_validation::{GenericStatement, SignedStatement};
use polkadot_primitives::{Block, Hash, SessionKey, parachain::ValidatorIndex};
use parity_codec::{Decode, Encode};
use polkadot_primitives::{Block, Hash, parachain::{ValidatorIndex, ValidatorId}};
use codec::{Decode, Encode};
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
......@@ -218,9 +218,9 @@ impl RegisteredMessageValidator {
#[derive(Default)]
pub(crate) struct MessageValidationData {
/// The authorities at a block.
pub(crate) authorities: Vec<SessionKey>,
/// Mapping from validator index to `SessionKey`.
pub(crate) index_mapping: HashMap<ValidatorIndex, SessionKey>,
pub(crate) authorities: Vec<ValidatorId>,
/// Mapping from validator index to `ValidatorId`.
pub(crate) index_mapping: HashMap<ValidatorIndex, ValidatorId>,
}
impl MessageValidationData {
......@@ -481,15 +481,15 @@ impl<O: KnownOracle + ?Sized> network_gossip::Validator<Block> for MessageValida
-> GossipValidationResult<Hash>
{
let (res, cost_benefit) = match GossipMessage::decode(&mut data) {
None => (GossipValidationResult::Discard, cost::MALFORMED_MESSAGE),
Some(GossipMessage::Neighbor(VersionedNeighborPacket::V1(packet))) => {
Err(_) => (GossipValidationResult::Discard, cost::MALFORMED_MESSAGE),
Ok(GossipMessage::Neighbor(VersionedNeighborPacket::V1(packet))) => {
let (res, cb, topics) = self.inner.write().validate_neighbor_packet(sender, packet);
for new_topic in topics {
context.send_topic(sender, new_topic, false);
}
(res, cb)
}
Some(GossipMessage::Statement(statement)) => {
Ok(GossipMessage::Statement(statement)) => {
let (res, cb) = self.inner.write().validate_statement(statement);
if let GossipValidationResult::ProcessAndKeep(ref topic) = res {
context.broadcast_message(topic.clone(), data.to_vec(), false);
......@@ -535,7 +535,7 @@ impl<O: KnownOracle + ?Sized> network_gossip::Validator<Block> for MessageValida
};
match GossipMessage::decode(&mut &data[..]) {
Some(GossipMessage::Statement(statement)) => {
Ok(GossipMessage::Statement(statement)) => {
let signed = statement.signed_statement;
match signed.statement {
......@@ -573,7 +573,7 @@ mod tests {
use parking_lot::Mutex;
use polkadot_primitives::parachain::{CandidateReceipt, HeadData};
use substrate_primitives::crypto::UncheckedInto;
use substrate_primitives::ed25519::Signature as Ed25519Signature;
use substrate_primitives::sr25519::Signature as Sr25519Signature;
#[derive(PartialEq, Clone, Debug)]
enum ContextEvent {
......@@ -669,7 +669,7 @@ mod tests {
relay_parent: hash_a,
signed_statement: SignedStatement {
statement: GenericStatement::Candidate(candidate_receipt),
signature: Ed25519Signature([255u8; 64]),
signature: Sr25519Signature([255u8; 64]).into(),
sender: 1,
}
});
......@@ -786,7 +786,7 @@ mod tests {
relay_parent: hash_a,
signed_statement: SignedStatement {
statement: GenericStatement::Valid(c_hash),
signature: Ed25519Signature([255u8; 64]),
signature: Sr25519Signature([255u8; 64]).into(),
sender: 1,
}
});
......
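The `Some`/`None` arms above become `Ok`/`Err` because `Decode::decode` in `parity-scale-codec` 1.0 returns `Result<T, codec::Error>` rather than `Option<T>`. A minimal standalone sketch of the new call shape (not from the diff):

use codec::{Decode, Encode};

fn decode_is_fallible() {
    let encoded = 42u32.encode();
    // Well-formed input decodes to `Ok(..)`.
    assert_eq!(u32::decode(&mut &encoded[..]).ok(), Some(42));
    // Truncated input now surfaces as `Err(..)` instead of `None`.
    assert!(u32::decode(&mut &encoded[..2]).is_err());
}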
// Copyright 2019 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! Data structures and synchronous logic for ICMP message gossip.
use sr_primitives::traits::{BlakeTwo256, Hash as HashT};
use polkadot_primitives::Hash;
use std::collections::{HashMap, HashSet};
use substrate_client::error::Error as ClientError;
use super::{MAX_CHAIN_HEADS, GossipValidationResult, LeavesVec, ChainContext};
/// Construct a topic for a message queue root deterministically.
pub fn queue_topic(queue_root: Hash) -> Hash {
let mut v = queue_root.as_ref().to_vec();
v.extend(b"message_queue");
BlakeTwo256::hash(&v[..])
}
/// A view of which queue roots are current for a given set of leaves.
#[derive(Default)]
pub struct View {
leaves: LeavesVec,
leaf_topics: HashMap<Hash, HashSet<Hash>>, // leaf_hash -> { topics }
expected_queues: HashMap<Hash, Hash>, // topic -> queue-root
}
impl View {
/// Update the set of current leaves.
pub fn update_leaves<T: ChainContext + ?Sized, I>(&mut self, context: &T, new_leaves: I)
-> Result<(), ClientError>
where I: Iterator<Item=Hash>
{
let new_leaves = new_leaves.take(MAX_CHAIN_HEADS);
let old_leaves = {
let mut new = LeavesVec::new();
for leaf in new_leaves {
new.push(leaf.clone());
}
std::mem::replace(&mut self.leaves, new)
};
let expected_queues = &mut self.expected_queues;
let leaves = &self.leaves;
self.leaf_topics.retain(|l, topics| {
if leaves.contains(l) { return true }
// prune out all data about old leaves we don't follow anymore.
for topic in topics.iter() {
expected_queues.remove(topic);
}
false
});
let mut res = Ok(());
// add in new data about fresh leaves.
for new_leaf in &self.leaves {
if old_leaves.contains(new_leaf) { continue }
let mut this_leaf_topics = HashSet::new();
let r = context.leaf_unrouted_roots(new_leaf, &mut |&queue_root| {
let topic = queue_topic(queue_root);
this_leaf_topics.insert(topic);
expected_queues.insert(topic, queue_root);
});
if r.is_err() {
res = r;
}
self.leaf_topics.insert(*new_leaf, this_leaf_topics);
}
res
}
/// Validate an incoming message queue against this view.
pub fn validate_queue(&self, messages: &super::GossipParachainMessages)
-> (GossipValidationResult<Hash>, i32)
{
let ostensible_topic = queue_topic(messages.queue_root);
if !self.is_topic_live(&ostensible_topic) {
(GossipValidationResult::Discard, super::cost::UNNEEDED_ICMP_MESSAGES)
} else if !messages.queue_root_is_correct() {
(
GossipValidationResult::Discard,
super::cost::icmp_messages_root_mismatch(messages.messages.len()),
)
} else {
(
GossipValidationResult::ProcessAndKeep(ostensible_topic),
super::benefit::NEW_ICMP_MESSAGES,
)
}
}
/// Whether a message with given topic is live.
pub fn is_topic_live(&self, topic: &Hash) -> bool {
self.expected_queues.get(topic).is_some()
}
/// Whether a message is allowed under the intersection of the given leaf-set
/// and our own.
pub fn allowed_intersecting(&self, other_leaves: &LeavesVec, topic: &Hash) -> bool {
for i in other_leaves {
for j in &self.leaves {
if i == j {
let leaf_topics = self.leaf_topics.get(i)
.expect("leaf_topics are mutated only in update_leaves; \
we have an entry for each item in self.leaves; \
i is in self.leaves; qed");
if leaf_topics.contains(topic) {
return true;
}
}
}
}
false
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::TestChainContext;
use crate::gossip::{Known, GossipParachainMessages};
use polkadot_primitives::parachain::Message as ParachainMessage;
fn hash(x: u8) -> Hash {
[x; 32].into()
}
fn message_queue(from: u8, to: u8) -> Option<[[u8; 2]; 1]> {
if from == to {
None
} else {
Some([[from, to]])
}
}
fn message_queue_root(from: u8, to: u8) -> Option<Hash> {
message_queue(from, to).map(
|q| polkadot_validation::message_queue_root(q.iter())
)
}
fn check_roots(view: &View, i: u8, max: u8) -> bool {
for j in 0..max {
if let Some(messages) = message_queue(i, j) {
let queue_root = message_queue_root(i, j).unwrap();
let messages = GossipParachainMessages {
queue_root,
messages: messages.iter().map(|m| ParachainMessage(m.to_vec())).collect(),
};
match view.validate_queue(&messages).0 {
GossipValidationResult::ProcessAndKeep(topic) => if topic != queue_topic(queue_root) {
return false
},
_ => return false,
}
}
}
true
}
#[test]
fn update_leaves_none_in_common() {
let mut ctx = TestChainContext::default();
let max = 5;
for i in 0..max {
ctx.known_map.insert(hash(i as u8), Known::Leaf);
let messages_out: Vec<_> = (0..max).filter_map(|j| message_queue_root(i, j)).collect();
if !messages_out.is_empty() {
ctx.ingress_roots.insert(hash(i as u8), messages_out);
}
}
let mut view = View::default();
view.update_leaves(
&ctx,
[hash(0), hash(1)].iter().cloned(),
).unwrap();
assert!(check_roots(&view, 0, max));
assert!(check_roots(&view, 1, max));
assert!(!check_roots(&view, 2, max));
assert!(!check_roots(&view, 3, max));
assert!(!check_roots(&view, 4, max));
assert!(!check_roots(&view, 5, max));
view.update_leaves(
&ctx,
[hash(2), hash(3), hash(4)].iter().cloned(),
).unwrap();
assert!(!check_roots(&view, 0, max));
assert!(!check_roots(&view, 1, max));