Unverified commit 832f8054 authored by Ashley

Merge remote-tracking branch 'parity/master' into ashley-compile-to-wasm

parents 4e1da888 78e828d8
@@ -16,6 +16,7 @@ image: parity/rust-builder:latest
variables:
GIT_STRATEGY: fetch
GIT_DEPTH: 3
CARGO_HOME: "/ci-cache/${CI_PROJECT_NAME}/cargo/${CI_JOB_NAME}"
SCCACHE_DIR: "/ci-cache/${CI_PROJECT_NAME}/sccache"
CI_SERVER_NAME: "GitLab CI"
@@ -44,6 +45,8 @@ variables:
- runner_system_failure
- unknown_failure
- api_failure
interruptible: true
dependencies: []
tags:
- linux-docker
@@ -93,6 +96,7 @@ check-runtime:
GITHUB_API_PROJECT: "parity%2Finfrastructure%2Fgithub-api"
script:
- ./scripts/gitlab/check_runtime.sh
interruptible: true
allow_failure: true
@@ -104,6 +108,7 @@ check-line-width:
- /^[0-9]+$/
script:
- ./scripts/gitlab/check_line_width.sh
interruptible: true
allow_failure: true
@@ -139,7 +144,6 @@ check-web-wasm: &test
script:
# WASM support is in progress. As more and more crates support WASM, we
# should add entries here. See https://github.com/paritytech/polkadot/issues/625
- time cargo build --locked --target=wasm32-unknown-unknown --manifest-path availability-store/Cargo.toml
- time cargo build --locked --target=wasm32-unknown-unknown --manifest-path executor/Cargo.toml
- time cargo build --locked --target=wasm32-unknown-unknown --manifest-path erasure-coding/Cargo.toml
- time cargo build --locked --target=wasm32-unknown-unknown --manifest-path parachain/Cargo.toml
......
This source diff could not be displayed because it is too large.
@@ -4,7 +4,7 @@ path = "src/main.rs"
[package]
name = "polkadot"
version = "0.7.2"
version = "0.7.7"
authors = ["Parity Technologies <admin@parity.io>"]
build = "build.rs"
edition = "2018"
......
---
author: Polkadot developers
revision: 0.3.0
revision: 0.3.1
---
# Polkadot
@@ -15,7 +15,7 @@ We are actively building both Substrate and Polkadot, but things will be a littl
To connect on the "Kusama" canary network, you will want the `v0.7` code, which is in this **Polkadot** repo. To play on the "Alexander" testnet, you'll want the PoC-4 code instead. Note that PoC-3 uses the Alexander testnet, but will not be able to sync to the latest block.
* **Kusama CC-3** is in this [**Polkadot**][polkadot-v0.7] repo branch `v0.7`.
* **Kusama CC-3** is in this [**Polkadot**] repo `master` branch.
* **Kusama CC-2** is in this [**Polkadot**][polkadot-v0.6] repo branch `v0.6`.
@@ -28,7 +28,6 @@ To connect on the "Kusama" canary network, you will want the `v0.7` code, which
* **Polkadot PoC-2 "Krumme Lanke"** is in the [**Substrate**][substrate-v0.2] repo branch `v0.2`.
[substrate-repo]: https://github.com/paritytech/substrate
[polkadot-v0.7]: https://github.com/paritytech/polkadot/tree/v0.7
[polkadot-v0.6]: https://github.com/paritytech/polkadot/tree/v0.6
[polkadot-v0.5]: https://github.com/paritytech/polkadot/tree/v0.5
[polkadot-v0.4]: https://github.com/paritytech/polkadot/tree/v0.4
@@ -64,7 +63,7 @@ rustup update
Build Kusama by cloning this repository and running the following commands from the root directory of the repo:
```bash
git checkout v0.7
git checkout master
./scripts/init.sh
cargo build --release
```
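
If the build succeeds, the node binary lands in the standard cargo release directory. A quick, illustrative sanity check (the path assumes the usual cargo layout; the flag is the standard version switch):

```bash
# Confirm the freshly built binary runs and report its version.
./target/release/polkadot --version
```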
......
[package]
name = "polkadot-availability-store"
description = "Persistent database for parachain data"
version = "0.7.2"
version = "0.7.7"
authors = ["Parity Technologies <admin@parity.io>"]
edition = "2018"
[dependencies]
polkadot-primitives = { path = "../primitives" }
polkadot-erasure-coding = { path = "../erasure-coding" }
polkadot-runtime = { path = "../runtime" }
parking_lot = "0.9.0"
derive_more = "0.99"
log = "0.4.8"
futures01 = "0.1.17"
futures = { package = "futures", version = "0.3.1", features = ["compat"] }
tokio = { version = "0.2.1", features = ["rt-core"] }
exit-future = "0.2.0"
codec = { package = "parity-scale-codec", version = "1.1.0", default-features = false, features = ["derive"] }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
sc-network = { git = "https://github.com/paritytech/substrate", branch = "ashley-polkadot-wasm" }
consensus_common = { package = "sp-consensus", git = "https://github.com/paritytech/substrate", branch = "ashley-polkadot-wasm" }
client = { package = "sc-client-api", git = "https://github.com/paritytech/substrate", branch = "ashley-polkadot-wasm" }
sc-client = { git = "https://github.com/paritytech/substrate", branch = "ashley-polkadot-wasm" }
sp-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "ashley-polkadot-wasm" }
sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "ashley-polkadot-wasm" }
sp-api = { git = "https://github.com/paritytech/substrate", branch = "ashley-polkadot-wasm" }
keystore = { package = "sc-keystore", git = "https://github.com/paritytech/substrate", branch = "ashley-polkadot-wasm" }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "ashley-polkadot-wasm" }
kvdb = "0.1.1"
kvdb-memorydb = "0.1.2"
......
@@ -14,28 +14,53 @@
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! Persistent database for parachain data: PoV block data and outgoing messages.
//! Persistent database for parachain data: PoV block data, erasure-coding chunks and outgoing messages.
//!
//! This will be written into during the block validation pipeline, and queried
//! by networking code in order to circulate required data and maintain availability
//! of it.
use codec::{Encode, Decode};
use kvdb::{KeyValueDB, DBTransaction};
use polkadot_primitives::Hash;
use polkadot_primitives::parachain::{Id as ParaId, BlockData, Message};
#![warn(missing_docs)]
use futures::prelude::*;
use futures::{channel::{mpsc, oneshot}, task::{Spawn, SpawnExt}};
use keystore::KeyStorePtr;
use polkadot_primitives::{
Hash, Block,
parachain::{
Id as ParaId, BlockData, CandidateReceipt, Message, AvailableMessages, ErasureChunk,
ParachainHost,
},
};
use sp_runtime::traits::{BlakeTwo256, Hash as HashT, ProvideRuntimeApi};
use sp_blockchain::{Result as ClientResult};
use client::{
BlockchainEvents, BlockBody,
};
use sp_api::ApiExt;
use log::warn;
use std::sync::Arc;
use std::collections::HashSet;
use std::path::PathBuf;
use std::sync::Arc;
use std::io;
mod columns {
pub const DATA: Option<u32> = Some(0);
pub const META: Option<u32> = Some(1);
pub const NUM_COLUMNS: u32 = 2;
}
mod worker;
mod store;
pub use worker::AvailabilityBlockImport;
use worker::{
Worker, WorkerHandle, Chunks, ParachainBlocks, WorkerMsg, MakeAvailable,
};
use store::{Store as InnerStore};
/// Abstraction over an executor that lets you spawn tasks in the background.
pub(crate) type TaskExecutor = Arc<dyn Spawn + Send + Sync>;
const LOG_TARGET: &str = "availability";
/// Configuration for the availability store.
pub struct Config {
@@ -45,67 +70,153 @@ pub struct Config {
pub path: PathBuf,
}
/// Compute gossip topic for the erasure chunk messages given the relay parent,
/// root and the chunk index.
///
/// Since at this point we are not able to use [`network`] directly, but both
/// crates need to compute these topics, this lives here and not there.
///
/// [`network`]: ../polkadot_network/index.html
pub fn erasure_coding_topic(relay_parent: Hash, erasure_root: Hash, index: u32) -> Hash {
let mut v = relay_parent.as_ref().to_vec();
v.extend(erasure_root.as_ref());
v.extend(&index.to_le_bytes()[..]);
v.extend(b"erasure_chunks");
BlakeTwo256::hash(&v[..])
}
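
As a quick illustration (the hashes below are placeholders, not from the commit): the derivation is deterministic, so the availability store and the network code can independently arrive at the same topic without sharing state.

```rust
use polkadot_primitives::Hash;

// Hypothetical inputs; any two nodes computing the topic for the same
// (relay_parent, erasure_root, index) triple get the same hash.
let relay_parent = Hash::repeat_byte(1);
let erasure_root = Hash::repeat_byte(2);

let topic = erasure_coding_topic(relay_parent, erasure_root, 0);
assert_eq!(topic, erasure_coding_topic(relay_parent, erasure_root, 0));
// A different chunk index yields a different topic.
assert_ne!(topic, erasure_coding_topic(relay_parent, erasure_root, 1));
```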
/// A trait that provides a shim for the [`NetworkService`] trait.
///
/// Currently it is not possible to use the networking code in the availability store
/// core directly due to a number of dependency cycles it would require:
///
/// `availability-store` -> `network` -> `availability-store`
///
/// `availability-store` -> `network` -> `validation` -> `availability-store`
///
/// So we provide this shim trait that gets implemented for a wrapper newtype in
/// the [`network`] module.
///
/// [`NetworkService`]: ../polkadot_network/trait.NetworkService.html
/// [`network`]: ../polkadot_network/index.html
pub trait ProvideGossipMessages {
/// Get a stream of gossip erasure chunk messages for a given topic.
///
/// Each item is a tuple `(relay_parent, candidate_hash, erasure_chunk)`.
fn gossip_messages_for(
&self,
topic: Hash,
) -> Box<dyn Stream<Item = (Hash, Hash, ErasureChunk)> + Send + Unpin>;
/// Gossip an erasure chunk message.
fn gossip_erasure_chunk(
&self,
relay_parent: Hash,
candidate_hash: Hash,
erasure_root: Hash,
chunk: ErasureChunk,
);
}
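
For illustration, a minimal no-op implementation of this trait, e.g. for tests that exercise the store without real networking. `NullGossip` is a hypothetical helper, not part of this commit:

```rust
/// A gossip shim that never receives and silently drops outgoing chunks.
#[derive(Clone)]
struct NullGossip;

impl ProvideGossipMessages for NullGossip {
	fn gossip_messages_for(
		&self,
		_topic: Hash,
	) -> Box<dyn Stream<Item = (Hash, Hash, ErasureChunk)> + Send + Unpin> {
		// An empty stream: no chunks ever arrive over gossip.
		Box::new(futures::stream::empty())
	}

	fn gossip_erasure_chunk(
		&self,
		_relay_parent: Hash,
		_candidate_hash: Hash,
		_erasure_root: Hash,
		_chunk: ErasureChunk,
	) {
		// Drop outgoing chunks on the floor.
	}
}
```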
/// Some data to keep available about a parachain block candidate.
#[derive(Debug)]
pub struct Data {
/// The relay chain parent hash this should be localized to.
pub relay_parent: Hash,
/// The parachain index for this candidate.
pub parachain_id: ParaId,
/// Unique candidate receipt hash.
pub candidate_hash: Hash,
/// Block data.
pub block_data: BlockData,
	/// Outgoing message queues from execution of the block, if any.
	///
	/// The tuple pairs the message queue root and the queue data.
	pub outgoing_queues: Option<Vec<(Hash, Vec<Message>)>>,
	pub outgoing_queues: Option<AvailableMessages>,
}

fn block_data_key(relay_parent: &Hash, candidate_hash: &Hash) -> Vec<u8> {
	(relay_parent, candidate_hash, 0i8).encode()
}
/// Handle to the availability store.
///
/// This provides a proxying API that
/// * for write operations, exposes async methods that send data to
/// the background worker and resolve once that data is processed by the worker;
/// * for read operations, queries the underlying storage synchronously.
#[derive(Clone)]
pub struct Store {
inner: Arc<dyn KeyValueDB>,
inner: InnerStore,
worker: Arc<WorkerHandle>,
to_worker: mpsc::UnboundedSender<WorkerMsg>,
}
impl Store {
	/// Create a new `Store` with given config on disk.
	#[cfg(not(target_os = "unknown"))]
	pub fn new(config: Config) -> io::Result<Self> {
		use kvdb_rocksdb::{Database, DatabaseConfig};

		let mut db_config = DatabaseConfig::with_columns(Some(columns::NUM_COLUMNS));

		if let Some(cache_size) = config.cache_size {
			let mut memory_budget = std::collections::HashMap::new();
			for i in 0..columns::NUM_COLUMNS {
				memory_budget.insert(Some(i), cache_size / columns::NUM_COLUMNS as usize);
			}
			db_config.memory_budget = memory_budget;
		}

		let path = config.path.to_str().ok_or_else(|| io::Error::new(
			io::ErrorKind::Other,
			format!("Bad database path: {:?}", config.path),
		))?;

		let db = Database::open(&db_config, &path)?;

		Ok(Store {
			inner: Arc::new(db),
		})
	}

	/// Create a new `Store` with given config on disk.
	///
	/// Creating a store among other things starts a background worker thread which
	/// handles most of the write operations to the storage.
	pub fn new<PGM>(config: Config, gossip: PGM) -> io::Result<Self>
		where PGM: ProvideGossipMessages + Send + Sync + Clone + 'static
	{
		let inner = InnerStore::new(config)?;
		let worker = Arc::new(Worker::start(inner.clone(), gossip));
		let to_worker = worker.to_worker().clone();

		Ok(Self {
			inner,
			worker,
			to_worker,
		})
	}

	/// Create a new `Store` in-memory. Useful for tests.
	pub fn new_in_memory() -> Self {
		Store {
			inner: Arc::new(::kvdb_memorydb::create(columns::NUM_COLUMNS)),
		}
	}

	/// Create a new `Store` in-memory. Useful for tests.
	///
	/// Creating a store among other things starts a background worker thread
	/// which handles most of the write operations to the storage.
	pub fn new_in_memory<PGM>(gossip: PGM) -> Self
		where PGM: ProvideGossipMessages + Send + Sync + Clone + 'static
	{
		let inner = InnerStore::new_in_memory();
		let worker = Arc::new(Worker::start(inner.clone(), gossip));
		let to_worker = worker.to_worker().clone();

		Self {
			inner,
			worker,
			to_worker,
		}
	}
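
A hedged smoke-test sketch of the new constructor, reusing the hypothetical `NullGossip` shim from earlier (neither is part of this commit):

```rust
#[test]
fn in_memory_store_smoke() {
	// Creating the store also spins up its background worker thread.
	let store = Store::new_in_memory(NullGossip);
	drop(store);
}
```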
/// Obtain a [`BlockImport`] implementation to import blocks into this store.
///
/// This block import will act upon all newly imported blocks sending information
/// about parachain heads included in them to this `Store`'s background worker.
/// The user may create multiple instances of [`BlockImport`]s with this call.
///
/// [`BlockImport`]: https://substrate.dev/rustdocs/v1.0/substrate_consensus_common/trait.BlockImport.html
pub fn block_import<I, P>(
&self,
wrapped_block_import: I,
client: Arc<P>,
thread_pool: TaskExecutor,
keystore: KeyStorePtr,
) -> ClientResult<AvailabilityBlockImport<I, P>>
where
P: ProvideRuntimeApi + BlockchainEvents<Block> + BlockBody<Block> + Send + Sync + 'static,
P::Api: ParachainHost<Block>,
P::Api: ApiExt<Block, Error=sp_blockchain::Error>,
{
let to_worker = self.to_worker.clone();
let import = AvailabilityBlockImport::new(
self.inner.clone(),
client,
wrapped_block_import,
thread_pool,
keystore,
to_worker,
);
Ok(import)
}
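
A hedged wiring sketch showing where the pieces come from: it merely forwards to `block_import` under the same bounds as the method above, and assumes the node's service setup supplies the wrapped import, client, task executor and keystore.

```rust
// Wrap an existing block import so availability information is extracted
// from every imported block. All arguments are assumed to come from the
// node's service initialization; this is illustrative glue only.
fn wrap_import<I, P>(
	store: &Store,
	inner: I,
	client: Arc<P>,
	pool: TaskExecutor,
	keystore: KeyStorePtr,
) -> ClientResult<AvailabilityBlockImport<I, P>>
where
	P: ProvideRuntimeApi + BlockchainEvents<Block> + BlockBody<Block> + Send + Sync + 'static,
	P::Api: ParachainHost<Block> + ApiExt<Block, Error = sp_blockchain::Error>,
{
	store.block_import(inner, client, pool, keystore)
}
```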
/// Make some data available provisionally.
///
/// Validators with the responsibility of maintaining availability
@@ -117,174 +228,164 @@ impl Store {
	/// to be present with the exception of the case where there is no message data
	/// due to the block's invalidity. Determination of invalidity is beyond the
	/// scope of this function.
	pub fn make_available(&self, data: Data) -> io::Result<()> {
		let mut tx = DBTransaction::new();

		// note the meta key.
		let mut v = match self.inner.get(columns::META, data.relay_parent.as_ref()) {
			Ok(Some(raw)) => Vec::decode(&mut &raw[..]).expect("all stored data serialized correctly; qed"),
			Ok(None) => Vec::new(),
			Err(e) => {
				warn!(target: "availability", "Error reading from availability store: {:?}", e);
				Vec::new()
			}
		};
		v.push(data.candidate_hash);
		tx.put_vec(columns::META, &data.relay_parent[..], v.encode());

		tx.put_vec(
			columns::DATA,
			block_data_key(&data.relay_parent, &data.candidate_hash).as_slice(),
			data.block_data.encode()
		);

		if let Some(outgoing_queues) = data.outgoing_queues {
			// This is kept forever and not pruned.
			for (root, messages) in outgoing_queues {
				tx.put_vec(
					columns::DATA,
					root.as_ref(),
					messages.encode(),
				);
			}
		}

		self.inner.write(tx)
	}

	///
	/// This method will send the `Data` to the background worker, allowing the caller to
	/// asynchronously wait for the result.
	pub async fn make_available(&self, data: Data) -> io::Result<()> {
		let (s, r) = oneshot::channel();
		let msg = WorkerMsg::MakeAvailable(MakeAvailable {
			data,
			result: s,
		});

		let _ = self.to_worker.unbounded_send(msg);

		if let Ok(Ok(())) = r.await {
			Ok(())
		} else {
			Err(io::Error::new(io::ErrorKind::Other, "making data available failed"))
		}
	}
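
A hedged usage sketch of the new async API: persist a candidate's block data and wait for the background worker to acknowledge the write. All field values below are placeholders.

```rust
async fn store_block_data(store: &Store) -> io::Result<()> {
	let data = Data {
		relay_parent: Hash::repeat_byte(1),
		parachain_id: 100.into(),
		candidate_hash: Hash::repeat_byte(2),
		block_data: BlockData(vec![1, 2, 3]),
		outgoing_queues: None,
	};
	// Resolves once the worker has processed the message.
	store.make_available(data).await
}
```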
	/// Note that a set of candidates have been included in a finalized block with given hash and parent hash.
	pub fn candidates_finalized(&self, parent: Hash, finalized_candidates: HashSet<Hash>) -> io::Result<()> {
		let mut tx = DBTransaction::new();

		let v = match self.inner.get(columns::META, &parent[..]) {
			Ok(Some(raw)) => Vec::decode(&mut &raw[..]).expect("all stored data serialized correctly; qed"),
			Ok(None) => Vec::new(),
			Err(e) => {
				warn!(target: "availability", "Error reading from availability store: {:?}", e);
				Vec::new()
			}
		};
		tx.delete(columns::META, &parent[..]);

		for candidate_hash in v {
			if !finalized_candidates.contains(&candidate_hash) {
				tx.delete(columns::DATA, block_data_key(&parent, &candidate_hash).as_slice());
			}
		}

		self.inner.write(tx)
	}

	/// Get a set of all chunks we are waiting for, grouped by
	/// `(relay_parent, erasure_root, candidate_hash, our_id)`.
	pub fn awaited_chunks(&self) -> Option<HashSet<(Hash, Hash, Hash, u32)>> {
		self.inner.awaited_chunks()
	}

	/// Query which candidates were included in the relay chain block, by the block's parent.
	pub fn get_candidates_in_relay_block(&self, relay_block: &Hash) -> Option<Vec<Hash>> {
		self.inner.get_candidates_in_relay_block(relay_block)
	}

	/// Query block data.
	pub fn block_data(&self, relay_parent: Hash, candidate_hash: Hash) -> Option<BlockData> {
		let encoded_key = block_data_key(&relay_parent, &candidate_hash);
		match self.inner.get(columns::DATA, &encoded_key[..]) {
			Ok(Some(raw)) => Some(
				BlockData::decode(&mut &raw[..]).expect("all stored data serialized correctly; qed")
			),
			Ok(None) => None,
			Err(e) => {
				warn!(target: "availability", "Error reading from availability store: {:?}", e);
				None
			}
		}
	}

	/// Make a validator's index and the number of validators at a relay parent available.
	///
	/// This information is needed before `add_candidates_in_relay_block` is called,
	/// since that call forms the awaited frontier of chunks.
	/// In the current implementation this function is called by `get_or_instantiate` at
	/// the start of the parachain agreement process on top of some parent hash.
	pub fn add_validator_index_and_n_validators(
		&self,
		relay_parent: &Hash,
		validator_index: u32,
		n_validators: u32,
	) -> io::Result<()> {
		self.inner.add_validator_index_and_n_validators(
			relay_parent,
			validator_index,
			n_validators,
		)
	}

	/// Query message queue data by message queue root hash.
	pub fn queue_by_root(&self, queue_root: &Hash) -> Option<Vec<Message>> {
		match self.inner.get(columns::DATA, queue_root.as_ref()) {
			Ok(Some(raw)) => Some(
				<_>::decode(&mut &raw[..]).expect("all stored data serialized correctly; qed")
			),
			Ok(None) => None,
			Err(e) => {
				warn!(target: "availability", "Error reading from availability store: {:?}", e);
				None
			}
		}
	}

	/// Query a validator's index and n_validators by relay parent.
	pub fn get_validator_index_and_n_validators(&self, relay_parent: &Hash) -> Option<(u32, u32)> {
		self.inner.get_validator_index_and_n_validators(relay_parent)
	}
}
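
A hedged sketch of how these pieces could fit together at the start of a round: record our validator index and the validator-set size for a relay parent, then derive gossip topics for the chunks still awaited. It assumes, as the tuple grouping above suggests, that `our_id` doubles as the index of the chunk assigned to us; the concrete values are placeholders.

```rust
fn note_round_start(store: &Store, relay_parent: Hash) -> std::io::Result<()> {
	// Placeholder values: we are validator 3 out of 10 for this relay parent.
	store.add_validator_index_and_n_validators(&relay_parent, 3, 10)?;

	if let Some(awaited) = store.awaited_chunks() {
		for (relay_parent, erasure_root, _candidate_hash, our_id) in awaited {
			// The topic on which the missing chunk would be gossiped.
			let _topic = erasure_coding_topic(relay_parent, erasure_root, our_id);
		}
	}
	Ok(())
}
```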
#[cfg(test)]
mod tests {
	use super::*;

	#[test]
	fn finalization_removes_unneeded() {
		let relay_parent = [1; 32].into();

		let para_id_1 = 5.into();
		let para_id_2 = 6.into();

		let candidate_1 = [2; 32].into();
		let candidate_2 = [3; 32].into();

		let block_data_1 = BlockData(vec![1, 2, 3]);
		let block_data_2 = BlockData(vec![4, 5, 6]);

		let store = Store::new_in_memory();
		store.make_available(Data {
			relay_parent,
			parachain_id: para_id_1,
			candidate_hash: candidate_1,
			block_data: block_data_1.clone(),
			outgoing_queues: None,
		}).unwrap();

		store.make_available(Data {
			relay_parent,
			parachain_id: para_id_2,
			candidate_hash: candidate_2,
			block_data: block_data_2.clone(),
			outgoing_queues: None,
		}).unwrap();

		assert_eq!(store.block_data(relay_parent, candidate_1).unwrap(), block_data_1);
		assert_eq!(store.block_data(relay_parent, candidate_2).unwrap(), block_data_2);

	/// Adds an erasure chunk to storage.
	///
	/// The chunk should be checked for validity against the root of encoding
	/// and its proof prior to calling this.
	///
	/// This method will send the chunk to the background worker, allowing the caller to
	/// asynchronously wait for the result.
	pub async fn add_erasure_chunk(
		&self,
		relay_parent: Hash,
		receipt: CandidateReceipt,
		chunk: ErasureChunk,
	) -> io::Result<()> {
		self.add_erasure_chunks(relay_parent, receipt, vec![chunk]).await
	}

	/// Adds a set of erasure chunks to storage.
	///
	/// The chunks should be checked for validity against the root of encoding
	/// and its proof prior to calling this.
	///
	/// This method will send the chunks to the background worker, allowing the caller to
	/// asynchronously wait for the result.
	pub async fn add_erasure_chunks<I>(
		&self,
		relay_parent: Hash,
		receipt: CandidateReceipt,
		chunks: I,
	) -> io::Result<()>
		where I: IntoIterator<Item = ErasureChunk>
	{
		self.add_candidate(relay_parent, receipt.clone()).await?;
		let (s, r) = oneshot::channel();
		let chunks = chunks.into_iter().collect();
		let candidate_hash = receipt.hash();

		let msg = WorkerMsg::Chunks(Chunks {
			relay_parent,
			candidate_hash,
			chunks,
			result: s,
		});

		let _ = self.to_worker.unbounded_send(msg);

		if let Ok(Ok(())) = r.await {
			Ok(())
		} else {
			Err(io::Error::new(io::ErrorKind::Other, "adding erasure chunks failed"))
		}
	}

	/// Queries an erasure chunk by the relay parent, the block data hash and the chunk index.
	pub fn get_erasure_chunk(
		&self,
		relay_parent: &Hash,
		block_data_hash: Hash,
		index: usize,
	) -> Option<ErasureChunk> {
		self.inner.get_erasure_chunk(relay_parent, block_data_hash, index)
	}

	/// Stores a candidate receipt.
	pub async fn add_candidate(
		&self,
		relay_parent: Hash,
		receipt: CandidateReceipt,
	) -> io::Result<()> {
		let (s, r) = oneshot::channel();

		let msg = WorkerMsg::ParachainBlocks(ParachainBlocks {
			relay_parent,