Unverified Commit 03cfa5e9 authored by asynchronous rob's avatar asynchronous rob Committed by GitHub
Browse files

ICMP message-routing gossip (#304)



* core logic for ICMP gossip

* refactor gossip to make more extension friendly

* move files around

* extract attestation-gossip logic to its own module

* message validation and broadcast logic

* fix upstream crates' compilation

* add a test

* another test for overlapping

* Some grammar and phrasing tweaks
Co-Authored-By: Luke Schoen <ltfschoen@users.noreply.github.com>

* add since parameter to ingress runtime API

* broadcast out known unrouted message queues

* fix compilation of service and collator

* remove useless index_mapping

* some tests for icmp propagation

* fix decoding bug and test icmp queue validation

* simplify engine-id definition
Co-Authored-By: Sergei Pepyakin <sergei@parity.io>

* address some grumbles

* some cleanup of old circulation code

* give network a handle to extrinsic store on startup

* an honest collator ensures data available as well

* address some grumbles

* add docs; rename the attestation session to "leaf work"

* module docs

* move gossip back to gossip.rs

* clean up and document attestation-gossip a bit

* some more docs on the availability store

* store all outgoing message queues in the availability store

* filter `Extrinsic` out of validation crate

* expunge Extrinsic from network

* expunge Extrinsic from erasure-coding

* expunge Extrinsic from collator

* expunge from adder-collator

* rename ExtrinsicStore to AvailabilityStore everywhere

* annotate and clean up message-routing tests
parent 67e1ba14
Pipeline #50607 canceled with stages
in 11 minutes and 9 seconds
...@@ -14,13 +14,17 @@ ...@@ -14,13 +14,17 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. // along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! Persistent database for parachain data. //! Persistent database for parachain data: PoV block data and outgoing messages.
//!
//! This will be written into during the block validation pipeline, and queried
//! by networking code in order to circulate required data and maintain availability
//! of it.
use codec::{Encode, Decode}; use codec::{Encode, Decode};
use kvdb::{KeyValueDB, DBTransaction}; use kvdb::{KeyValueDB, DBTransaction};
use kvdb_rocksdb::{Database, DatabaseConfig}; use kvdb_rocksdb::{Database, DatabaseConfig};
use polkadot_primitives::Hash; use polkadot_primitives::Hash;
use polkadot_primitives::parachain::{Id as ParaId, BlockData, Extrinsic}; use polkadot_primitives::parachain::{Id as ParaId, BlockData, Message};
use log::warn; use log::warn;
use std::collections::HashSet; use std::collections::HashSet;
...@@ -42,7 +46,7 @@ pub struct Config { ...@@ -42,7 +46,7 @@ pub struct Config {
pub path: PathBuf, pub path: PathBuf,
} }
/// Some data to keep available. /// Some data to keep available about a parachain block candidate.
pub struct Data { pub struct Data {
/// The relay chain parent hash this should be localized to. /// The relay chain parent hash this should be localized to.
pub relay_parent: Hash, pub relay_parent: Hash,
...@@ -52,18 +56,16 @@ pub struct Data { ...@@ -52,18 +56,16 @@ pub struct Data {
pub candidate_hash: Hash, pub candidate_hash: Hash,
/// Block data. /// Block data.
pub block_data: BlockData, pub block_data: BlockData,
/// Extrinsic data. /// Outgoing message queues from execution of the block, if any.
pub extrinsic: Option<Extrinsic>, ///
/// The tuple pairs the message queue root and the queue data.
pub outgoing_queues: Option<Vec<(Hash, Vec<Message>)>>,
} }
fn block_data_key(relay_parent: &Hash, candidate_hash: &Hash) -> Vec<u8> { fn block_data_key(relay_parent: &Hash, candidate_hash: &Hash) -> Vec<u8> {
(relay_parent, candidate_hash, 0i8).encode() (relay_parent, candidate_hash, 0i8).encode()
} }
fn extrinsic_key(relay_parent: &Hash, candidate_hash: &Hash) -> Vec<u8> {
(relay_parent, candidate_hash, 1i8).encode()
}
/// Handle to the availability store. /// Handle to the availability store.
#[derive(Clone)] #[derive(Clone)]
pub struct Store { pub struct Store {
...@@ -96,6 +98,16 @@ impl Store { ...@@ -96,6 +98,16 @@ impl Store {
} }
/// Make some data available provisionally. /// Make some data available provisionally.
///
/// Validators with the responsibility of maintaining availability
/// for a block or collators collating a block will call this function
/// in order to persist that data to disk and so it can be queried and provided
/// to other nodes in the network.
///
/// The message data of `Data` is optional but is expected
/// to be present with the exception of the case where there is no message data
/// due to the block's invalidity. Determination of invalidity is beyond the
/// scope of this function.
pub fn make_available(&self, data: Data) -> io::Result<()> { pub fn make_available(&self, data: Data) -> io::Result<()> {
let mut tx = DBTransaction::new(); let mut tx = DBTransaction::new();
...@@ -118,12 +130,16 @@ impl Store { ...@@ -118,12 +130,16 @@ impl Store {
data.block_data.encode() data.block_data.encode()
); );
if let Some(extrinsic) = data.extrinsic { if let Some(outgoing_queues) = data.outgoing_queues {
tx.put_vec( // This is kept forever and not pruned.
columns::DATA, for (root, messages) in outgoing_queues {
extrinsic_key(&data.relay_parent, &data.candidate_hash).as_slice(), tx.put_vec(
extrinsic.encode(), columns::DATA,
); root.as_ref(),
messages.encode(),
);
}
} }
self.inner.write(tx) self.inner.write(tx)
...@@ -146,7 +162,6 @@ impl Store { ...@@ -146,7 +162,6 @@ impl Store {
for candidate_hash in v { for candidate_hash in v {
if !finalized_candidates.contains(&candidate_hash) { if !finalized_candidates.contains(&candidate_hash) {
tx.delete(columns::DATA, block_data_key(&parent, &candidate_hash).as_slice()); tx.delete(columns::DATA, block_data_key(&parent, &candidate_hash).as_slice());
tx.delete(columns::DATA, extrinsic_key(&parent, &candidate_hash).as_slice());
} }
} }
...@@ -168,12 +183,11 @@ impl Store { ...@@ -168,12 +183,11 @@ impl Store {
} }
} }
/// Query extrinsic data. /// Query message queue data by message queue root hash.
pub fn extrinsic(&self, relay_parent: Hash, candidate_hash: Hash) -> Option<Extrinsic> { pub fn queue_by_root(&self, queue_root: &Hash) -> Option<Vec<Message>> {
let encoded_key = extrinsic_key(&relay_parent, &candidate_hash); match self.inner.get(columns::DATA, queue_root.as_ref()) {
match self.inner.get(columns::DATA, &encoded_key[..]) {
Ok(Some(raw)) => Some( Ok(Some(raw)) => Some(
Extrinsic::decode(&mut &raw[..]).expect("all stored data serialized correctly; qed") <_>::decode(&mut &raw[..]).expect("all stored data serialized correctly; qed")
), ),
Ok(None) => None, Ok(None) => None,
Err(e) => { Err(e) => {
...@@ -207,7 +221,7 @@ mod tests { ...@@ -207,7 +221,7 @@ mod tests {
parachain_id: para_id_1, parachain_id: para_id_1,
candidate_hash: candidate_1, candidate_hash: candidate_1,
block_data: block_data_1.clone(), block_data: block_data_1.clone(),
extrinsic: Some(Extrinsic { outgoing_messages: Vec::new() }), outgoing_queues: None,
}).unwrap(); }).unwrap();
store.make_available(Data { store.make_available(Data {
...@@ -215,21 +229,53 @@ mod tests { ...@@ -215,21 +229,53 @@ mod tests {
parachain_id: para_id_2, parachain_id: para_id_2,
candidate_hash: candidate_2, candidate_hash: candidate_2,
block_data: block_data_2.clone(), block_data: block_data_2.clone(),
extrinsic: Some(Extrinsic { outgoing_messages: Vec::new() }), outgoing_queues: None,
}).unwrap(); }).unwrap();
assert_eq!(store.block_data(relay_parent, candidate_1).unwrap(), block_data_1); assert_eq!(store.block_data(relay_parent, candidate_1).unwrap(), block_data_1);
assert_eq!(store.block_data(relay_parent, candidate_2).unwrap(), block_data_2); assert_eq!(store.block_data(relay_parent, candidate_2).unwrap(), block_data_2);
assert!(store.extrinsic(relay_parent, candidate_1).is_some());
assert!(store.extrinsic(relay_parent, candidate_2).is_some());
store.candidates_finalized(relay_parent, [candidate_1].iter().cloned().collect()).unwrap(); store.candidates_finalized(relay_parent, [candidate_1].iter().cloned().collect()).unwrap();
assert_eq!(store.block_data(relay_parent, candidate_1).unwrap(), block_data_1); assert_eq!(store.block_data(relay_parent, candidate_1).unwrap(), block_data_1);
assert!(store.block_data(relay_parent, candidate_2).is_none()); assert!(store.block_data(relay_parent, candidate_2).is_none());
}
#[test]
fn queues_available_by_queue_root() {
let relay_parent = [1; 32].into();
let para_id = 5.into();
let candidate = [2; 32].into();
let block_data = BlockData(vec![1, 2, 3]);
let message_queue_root_1 = [0x42; 32].into();
let message_queue_root_2 = [0x43; 32].into();
assert!(store.extrinsic(relay_parent, candidate_1).is_some()); let message_a = Message(vec![1, 2, 3, 4]);
assert!(store.extrinsic(relay_parent, candidate_2).is_none()); let message_b = Message(vec![4, 5, 6, 7]);
let outgoing_queues = vec![
(message_queue_root_1, vec![message_a.clone()]),
(message_queue_root_2, vec![message_b.clone()]),
];
let store = Store::new_in_memory();
store.make_available(Data {
relay_parent,
parachain_id: para_id,
candidate_hash: candidate,
block_data: block_data.clone(),
outgoing_queues: Some(outgoing_queues),
}).unwrap();
assert_eq!(
store.queue_by_root(&message_queue_root_1),
Some(vec![message_a]),
);
assert_eq!(
store.queue_by_root(&message_queue_root_2),
Some(vec![message_b]),
);
} }
} }
...@@ -57,7 +57,7 @@ use primitives::Pair; ...@@ -57,7 +57,7 @@ use primitives::Pair;
use polkadot_primitives::{ use polkadot_primitives::{
BlockId, Hash, Block, BlockId, Hash, Block,
parachain::{ parachain::{
self, BlockData, DutyRoster, HeadData, ConsolidatedIngress, Message, Id as ParaId, Extrinsic, self, BlockData, DutyRoster, HeadData, ConsolidatedIngress, Message, Id as ParaId, OutgoingMessages,
PoVBlock, Status as ParachainStatus, ValidatorId, CollatorPair, PoVBlock, Status as ParachainStatus, ValidatorId, CollatorPair,
} }
}; };
...@@ -65,8 +65,8 @@ use polkadot_cli::{ ...@@ -65,8 +65,8 @@ use polkadot_cli::{
Worker, IntoExit, ProvideRuntimeApi, TaskExecutor, AbstractService, Worker, IntoExit, ProvideRuntimeApi, TaskExecutor, AbstractService,
CustomConfiguration, ParachainHost, CustomConfiguration, ParachainHost,
}; };
use polkadot_network::validation::{SessionParams, ValidationNetwork}; use polkadot_network::validation::{LeafWorkParams, ValidationNetwork};
use polkadot_network::{NetworkService, PolkadotProtocol}; use polkadot_network::{PolkadotNetworkService, PolkadotProtocol};
use tokio::timer::Timeout; use tokio::timer::Timeout;
pub use polkadot_cli::VersionInfo; pub use polkadot_cli::VersionInfo;
...@@ -91,7 +91,7 @@ pub trait Network: Send + Sync { ...@@ -91,7 +91,7 @@ pub trait Network: Send + Sync {
fn checked_statements(&self, relay_parent: Hash) -> Box<dyn Stream<Item=SignedStatement, Error=()>>; fn checked_statements(&self, relay_parent: Hash) -> Box<dyn Stream<Item=SignedStatement, Error=()>>;
} }
impl<P, E> Network for ValidationNetwork<P, E, NetworkService, TaskExecutor> where impl<P, E> Network for ValidationNetwork<P, E, PolkadotNetworkService, TaskExecutor> where
P: 'static + Send + Sync, P: 'static + Send + Sync,
E: 'static + Send + Sync, E: 'static + Send + Sync,
{ {
...@@ -142,7 +142,7 @@ pub trait BuildParachainContext { ...@@ -142,7 +142,7 @@ pub trait BuildParachainContext {
/// This can be implemented through an externally attached service or a stub. /// This can be implemented through an externally attached service or a stub.
/// This is expected to be a lightweight, shared type like an Arc. /// This is expected to be a lightweight, shared type like an Arc.
pub trait ParachainContext: Clone { pub trait ParachainContext: Clone {
type ProduceCandidate: IntoFuture<Item=(BlockData, HeadData, Extrinsic), Error=InvalidHead>; type ProduceCandidate: IntoFuture<Item=(BlockData, HeadData, OutgoingMessages), Error=InvalidHead>;
/// Produce a candidate, given the relay parent hash, the latest ingress queue information /// Produce a candidate, given the relay parent hash, the latest ingress queue information
/// and the last parachain head. /// and the last parachain head.
...@@ -177,7 +177,7 @@ pub fn collate<'a, R, P>( ...@@ -177,7 +177,7 @@ pub fn collate<'a, R, P>(
para_context: P, para_context: P,
key: Arc<CollatorPair>, key: Arc<CollatorPair>,
) )
-> impl Future<Item=parachain::Collation, Error=Error<R::Error>> + 'a -> impl Future<Item=(parachain::Collation, OutgoingMessages), Error=Error<R::Error>> + 'a
where where
R: RelayChainContext, R: RelayChainContext,
R::Error: 'a, R::Error: 'a,
...@@ -197,11 +197,11 @@ pub fn collate<'a, R, P>( ...@@ -197,11 +197,11 @@ pub fn collate<'a, R, P>(
.map(move |x| (ingress, x)) .map(move |x| (ingress, x))
.map_err(Error::Collator) .map_err(Error::Collator)
}) })
.and_then(move |(ingress, (block_data, head_data, mut extrinsic))| { .and_then(move |(ingress, (block_data, head_data, mut outgoing))| {
let block_data_hash = block_data.hash(); let block_data_hash = block_data.hash();
let signature = key.sign(block_data_hash.as_ref()).into(); let signature = key.sign(block_data_hash.as_ref()).into();
let egress_queue_roots = let egress_queue_roots =
polkadot_validation::egress_roots(&mut extrinsic.outgoing_messages); polkadot_validation::egress_roots(&mut outgoing.outgoing_messages);
let receipt = parachain::CandidateReceipt { let receipt = parachain::CandidateReceipt {
parachain_index: local_id, parachain_index: local_id,
...@@ -214,19 +214,21 @@ pub fn collate<'a, R, P>( ...@@ -214,19 +214,21 @@ pub fn collate<'a, R, P>(
upward_messages: Vec::new(), upward_messages: Vec::new(),
}; };
Ok(parachain::Collation { let collation = parachain::Collation {
receipt, receipt,
pov: PoVBlock { pov: PoVBlock {
block_data, block_data,
ingress, ingress,
}, },
}) };
Ok((collation, outgoing))
}) })
} }
/// Polkadot-api context. /// Polkadot-api context.
struct ApiContext<P, E> { struct ApiContext<P, E> {
network: Arc<ValidationNetwork<P, E, NetworkService, TaskExecutor>>, network: Arc<ValidationNetwork<P, E, PolkadotNetworkService, TaskExecutor>>,
parent_hash: Hash, parent_hash: Hash,
validators: Vec<ValidatorId>, validators: Vec<ValidatorId>,
} }
...@@ -243,7 +245,7 @@ impl<P: 'static, E: 'static> RelayChainContext for ApiContext<P, E> where ...@@ -243,7 +245,7 @@ impl<P: 'static, E: 'static> RelayChainContext for ApiContext<P, E> where
// TODO: https://github.com/paritytech/polkadot/issues/253 // TODO: https://github.com/paritytech/polkadot/issues/253
// //
// Fetch ingress and accumulate all unrouted egress // Fetch ingress and accumulate all unrouted egress
let _session = self.network.instantiate_session(SessionParams { let _session = self.network.instantiate_leaf_work(LeafWorkParams {
local_session_key: None, local_session_key: None,
parent_hash: self.parent_hash, parent_hash: self.parent_hash,
authorities: self.validators.clone(), authorities: self.validators.clone(),
...@@ -303,26 +305,28 @@ impl<P, E> Worker for CollationNode<P, E> where ...@@ -303,26 +305,28 @@ impl<P, E> Worker for CollationNode<P, E> where
return Box::new(future::err(())); return Box::new(future::err(()));
}; };
let is_known = move |block_hash: &Hash| {
use client::BlockStatus;
use polkadot_network::gossip::Known;
match known_oracle.block_status(&BlockId::hash(*block_hash)) {
Err(_) | Ok(BlockStatus::Unknown) | Ok(BlockStatus::Queued) => None,
Ok(BlockStatus::KnownBad) => Some(Known::Bad),
Ok(BlockStatus::InChainWithState) | Ok(BlockStatus::InChainPruned) =>
match select_chain.leaves() {
Err(_) => None,
Ok(leaves) => if leaves.contains(block_hash) {
Some(Known::Leaf)
} else {
Some(Known::Old)
},
}
}
};
let message_validator = polkadot_network::gossip::register_validator( let message_validator = polkadot_network::gossip::register_validator(
network.clone(), network.clone(),
move |block_hash: &Hash| { (is_known, client.clone()),
use client::BlockStatus;
use polkadot_network::gossip::Known;
match known_oracle.block_status(&BlockId::hash(*block_hash)) {
Err(_) | Ok(BlockStatus::Unknown) | Ok(BlockStatus::Queued) => None,
Ok(BlockStatus::KnownBad) => Some(Known::Bad),
Ok(BlockStatus::InChainWithState) | Ok(BlockStatus::InChainPruned) =>
match select_chain.leaves() {
Err(_) => None,
Ok(leaves) => if leaves.contains(block_hash) {
Some(Known::Leaf)
} else {
Some(Known::Old)
},
}
}
},
); );
let validation_network = Arc::new(ValidationNetwork::new( let validation_network = Arc::new(ValidationNetwork::new(
...@@ -386,13 +390,20 @@ impl<P, E> Worker for CollationNode<P, E> where ...@@ -386,13 +390,20 @@ impl<P, E> Worker for CollationNode<P, E> where
context, context,
parachain_context, parachain_context,
key, key,
).map(move |collation| { ).map(move |(collation, outgoing)| {
network.with_spec(move |spec, ctx| spec.add_local_collation( network.with_spec(move |spec, ctx| {
ctx, let res = spec.add_local_collation(
relay_parent, ctx,
targets, relay_parent,
collation, targets,
)); collation,
outgoing,
);
if let Err(e) = res {
warn!("Unable to broadcast local collation: {:?}", e);
}
})
}); });
future::Either::B(collation_work) future::Either::B(collation_work)
...@@ -450,7 +461,7 @@ pub fn run_collator<P, E>( ...@@ -450,7 +461,7 @@ pub fn run_collator<P, E>(
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::collections::HashMap; use std::collections::HashMap;
use polkadot_primitives::parachain::{OutgoingMessage, FeeSchedule}; use polkadot_primitives::parachain::{TargetedMessage, FeeSchedule};
use keyring::Sr25519Keyring; use keyring::Sr25519Keyring;
use super::*; use super::*;
...@@ -475,20 +486,20 @@ mod tests { ...@@ -475,20 +486,20 @@ mod tests {
struct DummyParachainContext; struct DummyParachainContext;
impl ParachainContext for DummyParachainContext { impl ParachainContext for DummyParachainContext {
type ProduceCandidate = Result<(BlockData, HeadData, Extrinsic), InvalidHead>; type ProduceCandidate = Result<(BlockData, HeadData, OutgoingMessages), InvalidHead>;
fn produce_candidate<I: IntoIterator<Item=(ParaId, Message)>>( fn produce_candidate<I: IntoIterator<Item=(ParaId, Message)>>(
&self, &self,
_relay_parent: Hash, _relay_parent: Hash,
_status: ParachainStatus, _status: ParachainStatus,
ingress: I, ingress: I,
) -> Result<(BlockData, HeadData, Extrinsic), InvalidHead> { ) -> Result<(BlockData, HeadData, OutgoingMessages), InvalidHead> {
// send messages right back. // send messages right back.
Ok(( Ok((
BlockData(vec![1, 2, 3, 4, 5,]), BlockData(vec![1, 2, 3, 4, 5,]),
HeadData(vec![9, 9, 9]), HeadData(vec![9, 9, 9]),
Extrinsic { OutgoingMessages {
outgoing_messages: ingress.into_iter().map(|(id, msg)| OutgoingMessage { outgoing_messages: ingress.into_iter().map(|(id, msg)| TargetedMessage {
target: id, target: id,
data: msg.0, data: msg.0,
}).collect(), }).collect(),
...@@ -542,7 +553,7 @@ mod tests { ...@@ -542,7 +553,7 @@ mod tests {
context.clone(), context.clone(),
DummyParachainContext, DummyParachainContext,
Arc::new(Sr25519Keyring::Alice.pair().into()), Arc::new(Sr25519Keyring::Alice.pair().into()),
).wait().unwrap(); ).wait().unwrap().0;
// ascending order by root. // ascending order by root.
assert_eq!(collation.receipt.egress_queue_roots, vec![(a, root_a), (b, root_b)]); assert_eq!(collation.receipt.egress_queue_roots, vec![(a, root_a), (b, root_b)]);
......
...@@ -27,7 +27,7 @@ ...@@ -27,7 +27,7 @@
use codec::{Encode, Decode}; use codec::{Encode, Decode};
use reed_solomon::galois_16::{self, ReedSolomon}; use reed_solomon::galois_16::{self, ReedSolomon};
use primitives::{Hash as H256, BlakeTwo256, HashT}; use primitives::{Hash as H256, BlakeTwo256, HashT};
use primitives::parachain::{BlockData, Extrinsic}; use primitives::parachain::{BlockData, OutgoingMessages};
use substrate_primitives::Blake2Hasher; use substrate_primitives::Blake2Hasher;
use trie::{EMPTY_PREFIX, MemoryDB, Trie, TrieMut, trie_types::{TrieDBMut, TrieDB}}; use trie::{EMPTY_PREFIX, MemoryDB, Trie, TrieMut, trie_types::{TrieDBMut, TrieDB}};
...@@ -124,11 +124,11 @@ fn code_params(n_validators: usize) -> Result<CodeParams, Error> { ...@@ -124,11 +124,11 @@ fn code_params(n_validators: usize) -> Result<CodeParams, Error> {
/// Obtain erasure-coded chunks, one for each validator. /// Obtain erasure-coded chunks, one for each validator.
/// ///
/// Works only up to 65536 validators, and `n_validators` must be non-zero. /// Works only up to 65536 validators, and `n_validators` must be non-zero.
pub fn obtain_chunks(n_validators: usize, block_data: &BlockData, extrinsic: &Extrinsic) pub fn obtain_chunks(n_validators: usize, block_data: &BlockData, outgoing: &OutgoingMessages)
-> Result<Vec<Vec<u8>>, Error> -> Result<Vec<Vec<u8>>, Error>
{ {
let params = code_params(n_validators)?; let params = code_params(n_validators)?;
let encoded = (block_data, extrinsic).encode(); let encoded = (block_data, outgoing).encode();
if encoded.is_empty() { if encoded.is_empty() {
return Err(Error::BadPayload); return Err(Error::BadPayload);
...@@ -150,7 +150,7 @@ pub fn obtain_chunks(n_validators: usize, block_data: &BlockData, extrinsic: &Ex ...@@ -150,7 +150,7 @@ pub fn obtain_chunks(n_validators: usize, block_data: &BlockData, extrinsic: &Ex
/// ///
/// Works only up to 65536 validators, and `n_validators` must be non-zero. /// Works only up to 65536 validators, and `n_validators` must be non-zero.
pub fn reconstruct<'a, I: 'a>(n_validators: usize, chunks: I) pub fn reconstruct<'a, I: 'a>(n_validators: usize, chunks: I)
-> Result<(BlockData, Extrinsic), Error> -> Result<(BlockData, OutgoingMessages), Error>
where I: IntoIterator<Item=(&'a [u8], usize)> where I: IntoIterator<Item=(&'a [u8], usize)>
{ {
let params = code_params(n_validators)?; let params = code_params(n_validators)?;
...@@ -399,7 +399,7 @@ mod tests { ...@@ -399,7 +399,7 @@ mod tests {
#[test] #[test]
fn round_trip_block_data() { fn round_trip_block_data() {
let block_data = BlockData((0..255).collect()); let block_data = BlockData((0..255).collect());
let ex = Extrinsic { outgoing_messages: Vec::new() }; let ex = OutgoingMessages { outgoing_messages: Vec::new() };
let chunks = obtain_chunks( let chunks = obtain_chunks(
10, 10,
&block_data, &block_data,
...@@ -428,7 +428,7 @@ mod tests { ...@@ -428,7 +428,7 @@ mod tests {
let chunks = obtain_chunks( let chunks = obtain_chunks(
10, 10,
&block_data, &block_data,
&Extrinsic { outgoing_messages: Vec::new() }, &OutgoingMessages { outgoing_messages: Vec::new() },
).unwrap(); ).unwrap();
let chunks: Vec<_> = chunks.iter().map(|c| &c[..]).collect(); let chunks: Vec<_> = chunks.iter().map(|c| &c[..]).collect();
......
...@@ -18,7 +18,7 @@ sr-primitives = { git = "https://github.com/paritytech/substrate", branch = "pol ...@@ -18,7 +18,7 @@ sr-primitives = { git = "https://github.com/paritytech/substrate", branch = "pol
futures = "0.1" futures = "0.1"
log = "0.4"