Unverified commit 30ecd855, authored by Alin Dima, committed by GitHub

bump reed-solomon-novelpoly version (#3065)

Also remove some dead code and deduplicate some error handling.

The new release brings performance improvements and support for systematic
chunk recovery, needed in:
https://github.com/paritytech/polkadot-sdk/pull/1644
parent 66332531
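For background on the systematic-recovery feature mentioned above, an illustrative sketch (not part of this commit, and not the novelpoly API): in a systematic Reed-Solomon code the first k shards contain the payload verbatim, so a recoverer that holds exactly those shards can skip decoding entirely. The helper below is hypothetical.

// Hypothetical sketch only: with a systematic code, shards 0..k hold the
// payload bytes verbatim, so recovery from the first `k` shards (in order)
// is plain concatenation. Padding/length trimming is omitted for brevity.
fn reconstruct_from_systematic(k: usize, shards: &[Vec<u8>]) -> Option<Vec<u8>> {
	if shards.len() < k {
		return None // this fast path needs all `k` systematic shards
	}
	Some(shards[..k].concat())
}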
@@ -14467,14 +14467,13 @@ dependencies = [
 [[package]]
 name = "reed-solomon-novelpoly"
-version = "1.0.0"
+version = "2.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3bd8f48b2066e9f69ab192797d66da804d1935bf22763204ed3675740cb0f221"
+checksum = "87413ebb313323d431e85d0afc5a68222aaed972843537cbfe5f061cf1b4bcab"
 dependencies = [
  "derive_more",
  "fs-err",
- "itertools 0.10.5",
- "static_init 0.5.2",
+ "static_init",
  "thiserror",
 ]
@@ -16430,7 +16429,7 @@ dependencies = [
  "sp-transaction-storage-proof",
  "sp-trie",
  "sp-version",
- "static_init 1.0.3",
+ "static_init",
  "substrate-prometheus-endpoint",
  "substrate-test-runtime",
  "substrate-test-runtime-client",
@@ -19230,18 +19229,6 @@ version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
 
-[[package]]
-name = "static_init"
-version = "0.5.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "11b73400442027c4adedda20a9f9b7945234a5bd8d5f7e86da22bd5d0622369c"
-dependencies = [
- "cfg_aliases",
- "libc",
- "parking_lot 0.11.2",
- "static_init_macro 0.5.0",
-]
-
 [[package]]
 name = "static_init"
 version = "1.0.3"
@@ -19253,23 +19240,10 @@ dependencies = [
  "libc",
  "parking_lot 0.11.2",
  "parking_lot_core 0.8.6",
- "static_init_macro 1.0.2",
+ "static_init_macro",
  "winapi",
 ]
 
-[[package]]
-name = "static_init_macro"
-version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f2261c91034a1edc3fc4d1b80e89d82714faede0515c14a75da10cb941546bbf"
-dependencies = [
- "cfg_aliases",
- "memchr",
- "proc-macro2",
- "quote",
- "syn 1.0.109",
-]
-
 [[package]]
 name = "static_init_macro"
 version = "1.0.2"
......
@@ -12,7 +12,7 @@ workspace = true
 [dependencies]
 polkadot-primitives = { path = "../primitives" }
 polkadot-node-primitives = { package = "polkadot-node-primitives", path = "../node/primitives" }
-novelpoly = { package = "reed-solomon-novelpoly", version = "1.0.0" }
+novelpoly = { package = "reed-solomon-novelpoly", version = "2.0.0" }
 parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "std"] }
 sp-core = { path = "../../substrate/primitives/core" }
 sp-trie = { path = "../../substrate/primitives/trie" }
......
@@ -83,6 +83,20 @@ pub enum Error {
 	UnknownCodeParam,
 }
 
+impl From<novelpoly::Error> for Error {
+	fn from(error: novelpoly::Error) -> Self {
+		match error {
+			novelpoly::Error::NeedMoreShards { .. } => Self::NotEnoughChunks,
+			novelpoly::Error::ParamterMustBePowerOf2 { .. } => Self::UnevenLength,
+			novelpoly::Error::WantedShardCountTooHigh(_) => Self::TooManyValidators,
+			novelpoly::Error::WantedShardCountTooLow(_) => Self::NotEnoughValidators,
+			novelpoly::Error::PayloadSizeIsZero { .. } => Self::BadPayload,
+			novelpoly::Error::InconsistentShardLengths { .. } => Self::NonUniformChunks,
+			_ => Self::UnknownReconstruction,
+		}
+	}
+}
+
 /// Obtain a threshold of chunks that should be enough to recover the data.
 pub const fn recovery_threshold(n_validators: usize) -> Result<usize, Error> {
 	if n_validators > MAX_VALIDATORS {
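The body of recovery_threshold is truncated in the hunk above. As a point of reference only, a sketch of the arithmetic, assuming the usual Polkadot bound of f + 1 chunks with f = (n_validators - 1) / 3; the formula is an assumption here, not read from this diff.

// Hedged sketch of the assumed threshold formula; not taken from this diff.
// For n_validators = 10 it yields 4: any 4 of the 10 erasure-coded chunks
// would suffice to reconstruct the payload.
const fn recovery_threshold_sketch(n_validators: usize) -> usize {
	(n_validators - 1) / 3 + 1
}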
@@ -166,42 +180,17 @@ where
 {
 	let params = code_params(n_validators)?;
 	let mut received_shards: Vec<Option<WrappedShard>> = vec![None; n_validators];
-	let mut shard_len = None;
 	for (chunk_data, chunk_idx) in chunks.into_iter().take(n_validators) {
 		if chunk_idx >= n_validators {
 			return Err(Error::ChunkIndexOutOfBounds { chunk_index: chunk_idx, n_validators })
 		}
-
-		let shard_len = shard_len.get_or_insert_with(|| chunk_data.len());
-
-		if *shard_len % 2 != 0 {
+		if chunk_data.len() % 2 != 0 {
 			return Err(Error::UnevenLength)
 		}
-
-		if *shard_len != chunk_data.len() || *shard_len == 0 {
-			return Err(Error::NonUniformChunks)
-		}
-
 		received_shards[chunk_idx] = Some(WrappedShard::new(chunk_data.to_vec()));
 	}
 
-	let res = params.make_encoder().reconstruct(received_shards);
-
-	let payload_bytes = match res {
-		Err(e) => match e {
-			novelpoly::Error::NeedMoreShards { .. } => return Err(Error::NotEnoughChunks),
-			novelpoly::Error::ParamterMustBePowerOf2 { .. } => return Err(Error::UnevenLength),
-			novelpoly::Error::WantedShardCountTooHigh(_) => return Err(Error::TooManyValidators),
-			novelpoly::Error::WantedShardCountTooLow(_) => return Err(Error::NotEnoughValidators),
-			novelpoly::Error::PayloadSizeIsZero { .. } => return Err(Error::BadPayload),
-			novelpoly::Error::InconsistentShardLengths { .. } =>
-				return Err(Error::NonUniformChunks),
-			_ => return Err(Error::UnknownReconstruction),
-		},
-		Ok(payload_bytes) => payload_bytes,
-	};
-
-	Decode::decode(&mut &payload_bytes[..]).or_else(|_e| Err(Error::BadPayload))
+	let payload_bytes = params.make_encoder().reconstruct(received_shards)?;
+
+	Decode::decode(&mut &payload_bytes[..]).map_err(|_| Error::BadPayload)
 }
 
 /// An iterator that yields merkle branches and chunk data for all chunks to
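With the From<novelpoly::Error> conversion in place, the inline error match collapses into a single `?`. A hedged usage sketch follows; the enclosing function's name and signature are not shown in this diff, so `reconstruct`, `AvailableData`, and the chunk shapes below are assumptions for illustration.

// Hedged usage sketch: assumes the function shown above is the crate's generic
// `reconstruct<T: Decode>(n_validators, chunks)` over (chunk bytes, chunk index)
// pairs, matching the loop body in the diff. `AvailableData` stands in for any
// `Decode` payload type and is an assumption here.
fn recover_example(chunks: Vec<(Vec<u8>, usize)>) -> Result<AvailableData, Error> {
	let n_validators = 10;
	// Indices must be < n_validators and every chunk must have even length,
	// or the checks above return ChunkIndexOutOfBounds / UnevenLength.
	reconstruct(n_validators, chunks.iter().map(|(data, index)| (&data[..], *index)))
}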
@@ -294,56 +283,6 @@ pub fn branch_hash(root: &H256, branch_nodes: &Proof, index: usize) -> Result<H2
 	}
 }
 
-// input for `codec` which draws data from the data shards
-struct ShardInput<'a, I> {
-	remaining_len: usize,
-	shards: I,
-	cur_shard: Option<(&'a [u8], usize)>,
-}
-
-impl<'a, I: Iterator<Item = &'a [u8]>> parity_scale_codec::Input for ShardInput<'a, I> {
-	fn remaining_len(&mut self) -> Result<Option<usize>, parity_scale_codec::Error> {
-		Ok(Some(self.remaining_len))
-	}
-
-	fn read(&mut self, into: &mut [u8]) -> Result<(), parity_scale_codec::Error> {
-		let mut read_bytes = 0;
-
-		loop {
-			if read_bytes == into.len() {
-				break
-			}
-
-			let cur_shard = self.cur_shard.take().or_else(|| self.shards.next().map(|s| (s, 0)));
-			let (active_shard, mut in_shard) = match cur_shard {
-				Some((s, i)) => (s, i),
-				None => break,
-			};
-
-			if in_shard >= active_shard.len() {
-				continue
-			}
-
-			let remaining_len_out = into.len() - read_bytes;
-			let remaining_len_shard = active_shard.len() - in_shard;
-			let write_len = std::cmp::min(remaining_len_out, remaining_len_shard);
-
-			into[read_bytes..][..write_len].copy_from_slice(&active_shard[in_shard..][..write_len]);
-
-			in_shard += write_len;
-			read_bytes += write_len;
-
-			self.cur_shard = Some((active_shard, in_shard))
-		}
-
-		self.remaining_len -= read_bytes;
-
-		if read_bytes == into.len() {
-			Ok(())
-		} else {
-			Err("slice provided too big for input".into())
-		}
-	}
-}
-
 #[cfg(test)]
 mod tests {
 	use super::*;
......