diff --git a/polkadot/Cargo.lock b/polkadot/Cargo.lock index 2d13d3012fa049d016a3a1d7f187c35732ab575e..b98693c69c1aea66034c458f6e4cfbed1ed77fa2 100644 --- a/polkadot/Cargo.lock +++ b/polkadot/Cargo.lock @@ -5373,6 +5373,7 @@ dependencies = [ "polkadot-erasure-coding", "polkadot-node-core-runtime-api", "polkadot-node-network-protocol", + "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", @@ -5403,6 +5404,7 @@ dependencies = [ "parity-scale-codec", "polkadot-erasure-coding", "polkadot-node-network-protocol", + "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", @@ -5478,6 +5480,7 @@ name = "polkadot-erasure-coding" version = "0.8.30" dependencies = [ "parity-scale-codec", + "polkadot-node-primitives", "polkadot-primitives", "reed-solomon-novelpoly", "sp-core", @@ -5590,6 +5593,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.11.1", "polkadot-erasure-coding", + "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", @@ -5759,6 +5763,7 @@ dependencies = [ "mick-jaeger", "parity-scale-codec", "parking_lot 0.11.1", + "polkadot-node-primitives", "polkadot-primitives", "sc-network", "sp-core", @@ -5785,15 +5790,18 @@ version = "0.1.0" dependencies = [ "futures 0.3.13", "parity-scale-codec", + "polkadot-parachain", "polkadot-primitives", "polkadot-statement-table", "schnorrkel", + "serde", "sp-application-crypto", "sp-consensus-babe", "sp-consensus-vrf", "sp-core", "sp-runtime", "thiserror", + "zstd", ] [[package]] @@ -5953,7 +5961,6 @@ dependencies = [ "sp-trie", "sp-version", "thiserror", - "zstd", ] [[package]] @@ -6207,6 +6214,7 @@ dependencies = [ "polkadot-node-core-proposer", "polkadot-node-core-provisioner", "polkadot-node-core-runtime-api", + "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-util", "polkadot-overseer", diff --git a/polkadot/core-primitives/src/lib.rs b/polkadot/core-primitives/src/lib.rs index e02c3f51b62923393d036d59d63e6f13aaebb12c..0b2921f9c126d5d6bfa9e76bc9803356ce1b94a5 100644 --- a/polkadot/core-primitives/src/lib.rs +++ b/polkadot/core-primitives/src/lib.rs @@ -72,9 +72,9 @@ impl std::fmt::Display for CandidateHash { } impl sp_std::fmt::Debug for CandidateHash { - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { - write!(f, "{:?}", self.0) - } + fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { + write!(f, "{:?}", self.0) + } } /// Index of a transaction in the relay chain. 32-bit should be plenty. 
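The Cargo.lock churn above mirrors the substance of this patch: `PoV`, `CompressedPoV`, `AvailableData`, `ErasureChunk` and the PoV size constants move from `polkadot-primitives` into `polkadot-node-primitives` (with the `zstd` and `serde` dependencies following), so node-side crates now pull in the latter. Below is a minimal sketch of the new import path and the compression round-trip, based on the API added later in this diff; the function name and test data are illustrative, not part of the patch:

```rust
// Illustrative only. Assumes the node-primitives API introduced in this diff
// (`PoV`, `CompressedPoV`, `CompressedPoVError`, re-exported `BlockData`).
use polkadot_node_primitives::{BlockData, CompressedPoV, CompressedPoVError, PoV};

fn compress_round_trip() -> Result<(), CompressedPoVError> {
	// A PoV wraps the parachain block witness data.
	let pov = PoV { block_data: BlockData(vec![1, 2, 3]) };

	// Zstd-compresses the SCALE-encoded PoV; on `target_os = "unknown"` (wasm)
	// both `compress` and `decompress` return `CompressedPoVError::NotSupported`.
	let compressed = CompressedPoV::compress(&pov)?;
	assert!(compressed.len() > 0);

	// Decompression enforces the `MAX_POV_SIZE` bound while decoding.
	let decompressed = compressed.decompress()?;
	assert_eq!(decompressed, pov);
	Ok(())
}
```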
diff --git a/polkadot/erasure-coding/Cargo.toml b/polkadot/erasure-coding/Cargo.toml index 0ec1995726826191e199d4e5562a17cb9b0bd65f..a8a8df79c8776848be8399c4735f6e65018b7f79 100644 --- a/polkadot/erasure-coding/Cargo.toml +++ b/polkadot/erasure-coding/Cargo.toml @@ -5,7 +5,8 @@ authors = ["Parity Technologies <admin@parity.io>"] edition = "2018" [dependencies] -primitives = { package = "polkadot-primitives", path = "../primitives" } +polkadot-primitives = { package = "polkadot-primitives", path = "../primitives" } +polkadot-node-primitives = { package = "polkadot-node-primitives", path = "../node/primitives" } novelpoly = { package = "reed-solomon-novelpoly", version = "1.0.0" } parity-scale-codec = { version = "2.0.0", default-features = false, features = ["std", "derive"] } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/erasure-coding/src/lib.rs b/polkadot/erasure-coding/src/lib.rs index 314ec483e5ae46f015cb23e6b7a6e47cf79b0e3e..2cae7160443d4b8e0f6afc6021cf44d6d6531923 100644 --- a/polkadot/erasure-coding/src/lib.rs +++ b/polkadot/erasure-coding/src/lib.rs @@ -25,8 +25,8 @@ //! The data is coded so any f+1 chunks can be used to reconstruct the full data. use parity_scale_codec::{Encode, Decode}; -use primitives::v0::{self, Hash as H256, BlakeTwo256, HashT}; -use primitives::v1; +use polkadot_primitives::v0::{self, Hash as H256, BlakeTwo256, HashT}; +use polkadot_node_primitives::AvailableData; use sp_core::Blake2Hasher; use trie::{EMPTY_PREFIX, MemoryDB, Trie, TrieMut, trie_types::{TrieDBMut, TrieDB}}; use thiserror::Error; @@ -122,7 +122,7 @@ pub fn obtain_chunks_v0(n_validators: usize, data: &v0::AvailableData) /// Obtain erasure-coded chunks for v1 `AvailableData`, one for each validator. /// /// Works only up to 65536 validators, and `n_validators` must be non-zero. -pub fn obtain_chunks_v1(n_validators: usize, data: &v1::AvailableData) +pub fn obtain_chunks_v1(n_validators: usize, data: &AvailableData) -> Result<Vec<Vec<u8>>, Error> { obtain_chunks(n_validators, data) @@ -169,7 +169,7 @@ pub fn reconstruct_v0<'a, I: 'a>(n_validators: usize, chunks: I) /// /// Works only up to 65536 validators, and `n_validators` must be non-zero. 
pub fn reconstruct_v1<'a, I: 'a>(n_validators: usize, chunks: I) - -> Result<v1::AvailableData, Error> + -> Result<AvailableData, Error> where I: IntoIterator<Item=(&'a [u8], usize)> { reconstruct(n_validators, chunks) @@ -368,14 +368,14 @@ impl<'a, I: Iterator<Item=&'a [u8]>> parity_scale_codec::Input for ShardInput<'a #[cfg(test)] mod tests { use super::*; - use primitives::v0::{AvailableData, BlockData, PoVBlock}; + use polkadot_primitives::v0::{AvailableData, BlockData, PoVBlock}; #[test] fn field_order_is_right_size() { assert_eq!(MAX_VALIDATORS, 65536); } - #[test] + #[test] fn round_trip_works() { let pov_block = PoVBlock { block_data: BlockData((0..255).collect()), diff --git a/polkadot/node/collation-generation/src/lib.rs b/polkadot/node/collation-generation/src/lib.rs index fe7a448ac912a3adb0058486ea66b53abb4dd7e6..21b7f4f7ae221a74b29ca6abe736dcc153a608fc 100644 --- a/polkadot/node/collation-generation/src/lib.rs +++ b/polkadot/node/collation-generation/src/lib.rs @@ -26,7 +26,7 @@ use futures::{ sink::SinkExt, stream::StreamExt, }; -use polkadot_node_primitives::CollationGenerationConfig; +use polkadot_node_primitives::{CollationGenerationConfig, AvailableData, PoV}; use polkadot_node_subsystem::{ messages::{AllMessages, CollationGenerationMessage, CollatorProtocolMessage}, FromOverseer, SpawnedSubsystem, Subsystem, SubsystemContext, SubsystemResult, @@ -37,9 +37,9 @@ use polkadot_node_subsystem_util::{ metrics::{self, prometheus}, }; use polkadot_primitives::v1::{ - collator_signature_payload, AvailableData, CandidateCommitments, + collator_signature_payload, CandidateCommitments, CandidateDescriptor, CandidateReceipt, CoreState, Hash, OccupiedCoreAssumption, - PersistedValidationData, PoV, + PersistedValidationData, }; use sp_core::crypto::Pair; use std::sync::Arc; @@ -465,7 +465,7 @@ mod tests { task::{Context as FuturesContext, Poll}, Future, }; - use polkadot_node_primitives::{Collation, CollationResult}; + use polkadot_node_primitives::{Collation, CollationResult, BlockData, PoV}; use polkadot_node_subsystem::messages::{ AllMessages, RuntimeApiMessage, RuntimeApiRequest, }; @@ -473,8 +473,8 @@ mod tests { subsystem_test_harness, TestSubsystemContextHandle, }; use polkadot_primitives::v1::{ - BlockData, BlockNumber, CollatorPair, Id as ParaId, - PersistedValidationData, PoV, ScheduledCore, + BlockNumber, CollatorPair, Id as ParaId, + PersistedValidationData, ScheduledCore, }; use std::pin::Pin; diff --git a/polkadot/node/core/approval-voting/src/approval_checking.rs b/polkadot/node/core/approval-voting/src/approval_checking.rs index 4bdf3ffb6e1731a810f16a36f69d8a5c03480365..b5c2af36e73e3a8975df9a7c1484bc18df8cc90e 100644 --- a/polkadot/node/core/approval-voting/src/approval_checking.rs +++ b/polkadot/node/core/approval-voting/src/approval_checking.rs @@ -52,7 +52,7 @@ pub enum RequiredTranches { needed: DelayTranche, /// The amount of missing votes that should be tolerated. tolerated_missing: usize, - /// When the next no-show would be, if any. This is used to schedule the next wakeup in the + /// When the next no-show would be, if any. This is used to schedule the next wakeup in the /// event that there are some assignments that don't have corresponding approval votes. If this /// is `None`, all assignments have approvals. 
next_no_show: Option<Tick>, diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index 61168d378eb5802bfcdd426bf22d88e6ce419a35..9c9745541f8e47b4bf8ac3c0447d9cae8c044ab2 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -37,10 +37,10 @@ use polkadot_node_subsystem_util::{ use polkadot_primitives::v1::{ ValidatorIndex, Hash, SessionIndex, SessionInfo, CandidateHash, CandidateReceipt, BlockNumber, PersistedValidationData, - ValidationCode, CandidateDescriptor, PoV, ValidatorPair, ValidatorSignature, ValidatorId, + ValidationCode, CandidateDescriptor, ValidatorPair, ValidatorSignature, ValidatorId, CandidateIndex, GroupIndex, }; -use polkadot_node_primitives::ValidationResult; +use polkadot_node_primitives::{ValidationResult, PoV}; use polkadot_node_primitives::approval::{ IndirectAssignmentCert, IndirectSignedApprovalVote, ApprovalVote, DelayTranche, }; diff --git a/polkadot/node/core/av-store/Cargo.toml b/polkadot/node/core/av-store/Cargo.toml index 00572ca58bc2631ade3b36238f4e8f03175186b9..80a399d26014e57ffe9ba6b00d450209c482d12b 100644 --- a/polkadot/node/core/av-store/Cargo.toml +++ b/polkadot/node/core/av-store/Cargo.toml @@ -19,6 +19,7 @@ polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsys polkadot-node-subsystem-util = { path = "../../subsystem-util" } polkadot-overseer = { path = "../../overseer" } polkadot-primitives = { path = "../../../primitives" } +polkadot-node-primitives = { path = "../../primitives" } sc-service = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } diff --git a/polkadot/node/core/av-store/src/lib.rs b/polkadot/node/core/av-store/src/lib.rs index 18200b5ac7a81ce32286aca37529b60b4f30a111..6d3e07c9d3a6153182a03af514e2fb1425664db6 100644 --- a/polkadot/node/core/av-store/src/lib.rs +++ b/polkadot/node/core/av-store/src/lib.rs @@ -32,9 +32,12 @@ use kvdb_rocksdb::{Database, DatabaseConfig}; use kvdb::{KeyValueDB, DBTransaction}; use polkadot_primitives::v1::{ - Hash, AvailableData, BlockNumber, CandidateEvent, ErasureChunk, ValidatorIndex, CandidateHash, + Hash, BlockNumber, CandidateEvent, ValidatorIndex, CandidateHash, CandidateReceipt, }; +use polkadot_node_primitives::{ + ErasureChunk, AvailableData, +}; use polkadot_subsystem::{ FromOverseer, OverseerSignal, SubsystemError, Subsystem, SubsystemContext, SpawnedSubsystem, ActiveLeavesUpdate, diff --git a/polkadot/node/core/av-store/src/tests.rs b/polkadot/node/core/av-store/src/tests.rs index 465fae94a26789276414b88e76d8d302eb8f8a59..939ea4a8e03253ea0861165f2b47bea17bc394bf 100644 --- a/polkadot/node/core/av-store/src/tests.rs +++ b/polkadot/node/core/av-store/src/tests.rs @@ -25,10 +25,11 @@ use futures::{ }; use polkadot_primitives::v1::{ - AvailableData, BlockData, CandidateDescriptor, CandidateReceipt, HeadData, - PersistedValidationData, PoV, Id as ParaId, CandidateHash, Header, ValidatorId, + CandidateDescriptor, CandidateReceipt, HeadData, + PersistedValidationData, Id as ParaId, CandidateHash, Header, ValidatorId, CoreIndex, GroupIndex, }; +use polkadot_node_primitives::{AvailableData, BlockData, PoV}; use polkadot_node_subsystem_util::TimeoutExt; use polkadot_subsystem::{ ActiveLeavesUpdate, errors::RuntimeApiError, jaeger, messages::AllMessages, ActivatedLeaf, diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 
927ea955c2970b50bbd19af93e65383b0c57f2cc..dcd1008d2d2fe9757c8fd5bda3b47cc112b71be0 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -27,12 +27,12 @@ use futures::{channel::{mpsc, oneshot}, Future, FutureExt, SinkExt, StreamExt}; use sp_keystore::SyncCryptoStorePtr; use polkadot_primitives::v1::{ - AvailableData, BackedCandidate, CandidateCommitments, CandidateDescriptor, CandidateHash, + BackedCandidate, CandidateCommitments, CandidateDescriptor, CandidateHash, CandidateReceipt, CollatorId, CommittedCandidateReceipt, CoreIndex, CoreState, Hash, Id as ParaId, - PoV, SigningContext, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, + SigningContext, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, }; use polkadot_node_primitives::{ - Statement, SignedFullStatement, ValidationResult, + Statement, SignedFullStatement, ValidationResult, PoV, AvailableData, }; use polkadot_subsystem::{ PerLeafSpan, Stage, SubsystemSender, @@ -1348,12 +1348,12 @@ mod tests { use super::*; use assert_matches::assert_matches; use futures::{future, Future}; - use polkadot_primitives::v1::{BlockData, GroupRotationInfo, HeadData, PersistedValidationData, ScheduledCore}; + use polkadot_primitives::v1::{GroupRotationInfo, HeadData, PersistedValidationData, ScheduledCore}; use polkadot_subsystem::{ messages::{RuntimeApiRequest, RuntimeApiMessage}, ActiveLeavesUpdate, FromOverseer, OverseerSignal, ActivatedLeaf, }; - use polkadot_node_primitives::InvalidCandidate; + use polkadot_node_primitives::{InvalidCandidate, BlockData}; use sp_keyring::Sr25519Keyring; use sp_application_crypto::AppKey; use sp_keystore::{CryptoStore, SyncCryptoStore}; @@ -2763,7 +2763,7 @@ mod tests { virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await; // Not deterministic which message comes first: - for _ in 0..2 { + for _ in 0u32..2 { match virtual_overseer.recv().await { AllMessages::Provisioner( ProvisionerMessage::ProvisionableData( diff --git a/polkadot/node/core/candidate-selection/src/lib.rs b/polkadot/node/core/candidate-selection/src/lib.rs index 018a9aaa90fd63727683c5d7df42783aec91de8a..80cb8cfd3d404022be91dc703d5ffce378e0caa7 100644 --- a/polkadot/node/core/candidate-selection/src/lib.rs +++ b/polkadot/node/core/candidate-selection/src/lib.rs @@ -37,9 +37,9 @@ use polkadot_node_subsystem_util::{ JobTrait, JobSender, Validator, metrics::{self, prometheus}, }; use polkadot_primitives::v1::{ - CandidateReceipt, CollatorId, CoreState, CoreIndex, Hash, Id as ParaId, PoV, BlockNumber, + CandidateReceipt, CollatorId, CoreState, CoreIndex, Hash, Id as ParaId, BlockNumber, }; -use polkadot_node_primitives::SignedFullStatement; +use polkadot_node_primitives::{SignedFullStatement, PoV}; use std::{pin::Pin, sync::Arc}; use thiserror::Error; @@ -510,7 +510,7 @@ pub type CandidateSelectionSubsystem<Spawner> = JobSubsystem<CandidateSelectionJ mod tests { use super::*; use futures::lock::Mutex; - use polkadot_primitives::v1::BlockData; + use polkadot_node_primitives::BlockData; use polkadot_node_subsystem::messages::AllMessages; use sp_core::crypto::Public; use std::sync::Arc; diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index a57a2cb8ecfeacc68501b31899c209e4d04da613..5f6d857b0c764e220d33a6e2e4d39163e160a616 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -33,9 +33,9 @@ use polkadot_subsystem::{ 
}; use polkadot_node_subsystem_util::metrics::{self, prometheus}; use polkadot_subsystem::errors::RuntimeApiError; -use polkadot_node_primitives::{ValidationResult, InvalidCandidate}; +use polkadot_node_primitives::{ValidationResult, InvalidCandidate, PoV}; use polkadot_primitives::v1::{ - ValidationCode, PoV, CandidateDescriptor, PersistedValidationData, + ValidationCode, CandidateDescriptor, PersistedValidationData, OccupiedCoreAssumption, Hash, CandidateCommitments, }; use polkadot_parachain::wasm_executor::{ @@ -568,7 +568,8 @@ impl metrics::Metrics for Metrics { mod tests { use super::*; use polkadot_node_subsystem_test_helpers as test_helpers; - use polkadot_primitives::v1::{HeadData, BlockData, UpwardMessage}; + use polkadot_primitives::v1::{HeadData, UpwardMessage}; + use polkadot_node_primitives::BlockData; use sp_core::testing::TaskExecutor; use futures::executor; use assert_matches::assert_matches; diff --git a/polkadot/node/jaeger/Cargo.toml b/polkadot/node/jaeger/Cargo.toml index 32aa4491f980893551d93eb9be399ef35b341ca8..4bbcb05414730909dfbae458c26e46be53023448 100644 --- a/polkadot/node/jaeger/Cargo.toml +++ b/polkadot/node/jaeger/Cargo.toml @@ -11,6 +11,7 @@ mick-jaeger = "0.1.4" lazy_static = "1.4" parking_lot = "0.11.1" polkadot-primitives = { path = "../../primitives" } +polkadot-node-primitives = { path = "../primitives" } sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } thiserror = "1.0.23" diff --git a/polkadot/node/jaeger/src/spans.rs b/polkadot/node/jaeger/src/spans.rs index 04caae645352ed2c54c136492ff5013639fb922d..acbb8541f2916f0de6564fd72e9971f7e5c1e97d 100644 --- a/polkadot/node/jaeger/src/spans.rs +++ b/polkadot/node/jaeger/src/spans.rs @@ -84,7 +84,8 @@ //! 
``` use parity_scale_codec::Encode; -use polkadot_primitives::v1::{BlakeTwo256, CandidateHash, Hash, HashT, Id as ParaId, PoV, ValidatorIndex}; +use polkadot_primitives::v1::{BlakeTwo256, CandidateHash, Hash, HashT, Id as ParaId, ValidatorIndex}; +use polkadot_node_primitives::PoV; use sc_network::PeerId; use std::fmt; diff --git a/polkadot/node/metered-channel/src/bounded.rs b/polkadot/node/metered-channel/src/bounded.rs index 5ad1fae4205dfc9d2f7d012655602c3f23e54bde..38aa6f15c65fc19d47a8f8afaebf1c55cde927ca 100644 --- a/polkadot/node/metered-channel/src/bounded.rs +++ b/polkadot/node/metered-channel/src/bounded.rs @@ -91,9 +91,9 @@ impl<T> MeteredReceiver<T> { } impl<T> futures::stream::FusedStream for MeteredReceiver<T> { - fn is_terminated(&self) -> bool { - self.inner.is_terminated() - } + fn is_terminated(&self) -> bool { + self.inner.is_terminated() + } } @@ -155,32 +155,32 @@ impl<T> MeteredSender<T> { } impl<T> futures::sink::Sink<T> for MeteredSender<T> { - type Error = mpsc::SendError; - - fn start_send(mut self: Pin<&mut Self>, item: T) -> Result<(), Self::Error> { - Pin::new(&mut self.inner).start_send(item) - } - - fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { - Pin::new(&mut self.inner).poll_ready(cx) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { - match Pin::new(&mut self.inner).poll_close(cx) { - val @ Poll::Ready(_)=> { - val - } - other => other, - } - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { - match Pin::new(&mut self.inner).poll_flush(cx) { - val @ Poll::Ready(_)=> { - self.meter.note_sent(); - val - } - other => other, - } - } + type Error = mpsc::SendError; + + fn start_send(mut self: Pin<&mut Self>, item: T) -> Result<(), Self::Error> { + Pin::new(&mut self.inner).start_send(item) + } + + fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { + Pin::new(&mut self.inner).poll_ready(cx) + } + + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { + match Pin::new(&mut self.inner).poll_close(cx) { + val @ Poll::Ready(_)=> { + val + } + other => other, + } + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { + match Pin::new(&mut self.inner).poll_flush(cx) { + val @ Poll::Ready(_)=> { + self.meter.note_sent(); + val + } + other => other, + } + } } diff --git a/polkadot/node/metered-channel/src/unbounded.rs b/polkadot/node/metered-channel/src/unbounded.rs index 242b9198f4dcb59af89e6215ed90cc99d3eb4934..70a2115368b7fbe3e71624ae83f1c906be9bd8e7 100644 --- a/polkadot/node/metered-channel/src/unbounded.rs +++ b/polkadot/node/metered-channel/src/unbounded.rs @@ -26,74 +26,74 @@ use super::Meter; /// Create a wrapped `mpsc::channel` pair of `MeteredSender` and `MeteredReceiver`. 
pub fn unbounded<T>() -> (UnboundedMeteredSender<T>, UnboundedMeteredReceiver<T>) { - let (tx, rx) = mpsc::unbounded(); - let shared_meter = Meter::default(); - let tx = UnboundedMeteredSender { meter: shared_meter.clone(), inner: tx }; - let rx = UnboundedMeteredReceiver { meter: shared_meter, inner: rx }; - (tx, rx) + let (tx, rx) = mpsc::unbounded(); + let shared_meter = Meter::default(); + let tx = UnboundedMeteredSender { meter: shared_meter.clone(), inner: tx }; + let rx = UnboundedMeteredReceiver { meter: shared_meter, inner: rx }; + (tx, rx) } /// A receiver tracking the messages consumed by itself. #[derive(Debug)] pub struct UnboundedMeteredReceiver<T> { - // count currently contained messages - meter: Meter, - inner: mpsc::UnboundedReceiver<T>, + // count currently contained messages + meter: Meter, + inner: mpsc::UnboundedReceiver<T>, } impl<T> std::ops::Deref for UnboundedMeteredReceiver<T> { - type Target = mpsc::UnboundedReceiver<T>; - fn deref(&self) -> &Self::Target { - &self.inner - } + type Target = mpsc::UnboundedReceiver<T>; + fn deref(&self) -> &Self::Target { + &self.inner + } } impl<T> std::ops::DerefMut for UnboundedMeteredReceiver<T> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.inner - } + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } } impl<T> Stream for UnboundedMeteredReceiver<T> { - type Item = T; - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { - match mpsc::UnboundedReceiver::poll_next(Pin::new(&mut self.inner), cx) { - Poll::Ready(x) => { - self.meter.note_received(); - Poll::Ready(x) - } - other => other, - } - } - - /// Don't rely on the unreliable size hint. - fn size_hint(&self) -> (usize, Option<usize>) { - self.inner.size_hint() - } + type Item = T; + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { + match mpsc::UnboundedReceiver::poll_next(Pin::new(&mut self.inner), cx) { + Poll::Ready(x) => { + self.meter.note_received(); + Poll::Ready(x) + } + other => other, + } + } + + /// Don't rely on the unreliable size hint. + fn size_hint(&self) -> (usize, Option<usize>) { + self.inner.size_hint() + } } impl<T> UnboundedMeteredReceiver<T> { - /// Get an updated accessor object for all metrics collected. - pub fn meter(&self) -> &Meter { - &self.meter - } - - /// Attempt to receive the next item. - pub fn try_next(&mut self) -> Result<Option<T>, mpsc::TryRecvError> { - match self.inner.try_next()? { - Some(x) => { - self.meter.note_received(); - Ok(Some(x)) - } - None => Ok(None), - } - } + /// Get an updated accessor object for all metrics collected. + pub fn meter(&self) -> &Meter { + &self.meter + } + + /// Attempt to receive the next item. + pub fn try_next(&mut self) -> Result<Option<T>, mpsc::TryRecvError> { + match self.inner.try_next()? { + Some(x) => { + self.meter.note_received(); + Ok(Some(x)) + } + None => Ok(None), + } + } } impl<T> futures::stream::FusedStream for UnboundedMeteredReceiver<T> { - fn is_terminated(&self) -> bool { - self.inner.is_terminated() - } + fn is_terminated(&self) -> bool { + self.inner.is_terminated() + } } @@ -101,8 +101,8 @@ impl<T> futures::stream::FusedStream for UnboundedMeteredReceiver<T> { /// sent across it. 
#[derive(Debug)] pub struct UnboundedMeteredSender<T> { - meter: Meter, - inner: mpsc::UnboundedSender<T>, + meter: Meter, + inner: mpsc::UnboundedSender<T>, } impl<T> Clone for UnboundedMeteredSender<T> { @@ -112,76 +112,76 @@ impl<T> Clone for UnboundedMeteredSender<T> { } impl<T> std::ops::Deref for UnboundedMeteredSender<T> { - type Target = mpsc::UnboundedSender<T>; - fn deref(&self) -> &Self::Target { - &self.inner - } + type Target = mpsc::UnboundedSender<T>; + fn deref(&self) -> &Self::Target { + &self.inner + } } impl<T> std::ops::DerefMut for UnboundedMeteredSender<T> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.inner - } + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } } impl<T> UnboundedMeteredSender<T> { - /// Get an updated accessor object for all metrics collected. - pub fn meter(&self) -> &Meter { - &self.meter - } - - /// Send message, wait until capacity is available. - pub async fn send(&mut self, item: T) -> result::Result<(), mpsc::SendError> - where - Self: Unpin, - { - self.meter.note_sent(); - let fut = self.inner.send(item); - futures::pin_mut!(fut); - fut.await.map_err(|e| { + /// Get an updated accessor object for all metrics collected. + pub fn meter(&self) -> &Meter { + &self.meter + } + + /// Send message, wait until capacity is available. + pub async fn send(&mut self, item: T) -> result::Result<(), mpsc::SendError> + where + Self: Unpin, + { + self.meter.note_sent(); + let fut = self.inner.send(item); + futures::pin_mut!(fut); + fut.await.map_err(|e| { self.meter.retract_sent(); e }) - } + } - /// Attempt to send message or fail immediately. - pub fn unbounded_send(&self, msg: T) -> result::Result<(), mpsc::TrySendError<T>> { - self.meter.note_sent(); - self.inner.unbounded_send(msg).map_err(|e| { + /// Attempt to send message or fail immediately. 
+ pub fn unbounded_send(&self, msg: T) -> result::Result<(), mpsc::TrySendError<T>> { + self.meter.note_sent(); + self.inner.unbounded_send(msg).map_err(|e| { self.meter.retract_sent(); e }) - } + } } impl<T> futures::sink::Sink<T> for UnboundedMeteredSender<T> { - type Error = <futures::channel::mpsc::UnboundedSender<T> as futures::sink::Sink<T>>::Error; - - fn start_send(mut self: Pin<&mut Self>, item: T) -> Result<(), Self::Error> { - Pin::new(&mut self.inner).start_send(item) - } - - fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { - Pin::new(&mut self.inner).poll_ready(cx) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { - match Pin::new(&mut self.inner).poll_ready(cx) { - val @ Poll::Ready(_)=> { - val - } - other => other, - } - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { - match Pin::new(&mut self.inner).poll_ready(cx) { - val @ Poll::Ready(_)=> { - self.meter.note_sent(); - val - } - other => other, - } - } + type Error = <futures::channel::mpsc::UnboundedSender<T> as futures::sink::Sink<T>>::Error; + + fn start_send(mut self: Pin<&mut Self>, item: T) -> Result<(), Self::Error> { + Pin::new(&mut self.inner).start_send(item) + } + + fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { + Pin::new(&mut self.inner).poll_ready(cx) + } + + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { + match Pin::new(&mut self.inner).poll_ready(cx) { + val @ Poll::Ready(_)=> { + val + } + other => other, + } + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { + match Pin::new(&mut self.inner).poll_ready(cx) { + val @ Poll::Ready(_)=> { + self.meter.note_sent(); + val + } + other => other, + } + } } diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml index 85fa6c549f7ca2b3efd0be8cc859c85c4c5bf005..9242ea10e42e7bdb741aa68f41caf018298c899b 100644 --- a/polkadot/node/network/availability-distribution/Cargo.toml +++ b/polkadot/node/network/availability-distribution/Cargo.toml @@ -13,6 +13,7 @@ polkadot-erasure-coding = { path = "../../../erasure-coding" } polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } polkadot-node-network-protocol = { path = "../../network/protocol" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } +polkadot-node-primitives = { path = "../../primitives" } polkadot-node-core-runtime-api = { path = "../../core/runtime-api" } sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["std"] } diff --git a/polkadot/node/network/availability-distribution/src/error.rs b/polkadot/node/network/availability-distribution/src/error.rs index c38294542007ccb941516792fcde89b33f6119f2..a4491aac6c1db8e9a6abfd69b014d39433297010 100644 --- a/polkadot/node/network/availability-distribution/src/error.rs +++ b/polkadot/node/network/availability-distribution/src/error.rs @@ -23,7 +23,8 @@ use thiserror::Error; use futures::channel::oneshot; use polkadot_node_subsystem_util::Error as UtilError; -use polkadot_primitives::v1::{CompressedPoVError, SessionIndex}; +use polkadot_primitives::v1::SessionIndex; +use 
polkadot_node_primitives::CompressedPoVError; use polkadot_subsystem::{errors::RuntimeApiError, SubsystemError}; use crate::LOG_TARGET; diff --git a/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs b/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs index 2a3ed95e3b51c0e2ab6f6617ee1147cec1f9f49d..39525c72fce15b6466089cdeb1e47cf0a3923658 100644 --- a/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs +++ b/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs @@ -26,8 +26,9 @@ use polkadot_node_network_protocol::{ v1::{PoVFetchingRequest, PoVFetchingResponse}} }; use polkadot_primitives::v1::{ - AuthorityDiscoveryId, CandidateHash, Hash, PoV, SessionIndex, ValidatorIndex + AuthorityDiscoveryId, CandidateHash, Hash, SessionIndex, ValidatorIndex }; +use polkadot_node_primitives::PoV; use polkadot_subsystem::{ ActiveLeavesUpdate, SubsystemContext, ActivatedLeaf, messages::{AllMessages, NetworkBridgeMessage, IfDisconnected} @@ -241,7 +242,8 @@ mod tests { use parity_scale_codec::Encode; use sp_core::testing::TaskExecutor; - use polkadot_primitives::v1::{BlockData, CandidateHash, CompressedPoV, Hash, ValidatorIndex}; + use polkadot_primitives::v1::{CandidateHash, Hash, ValidatorIndex}; + use polkadot_node_primitives::{BlockData, CompressedPoV}; use polkadot_subsystem_testhelpers as test_helpers; use polkadot_subsystem::messages::{AvailabilityDistributionMessage, RuntimeApiMessage, RuntimeApiRequest}; diff --git a/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs b/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs index 7b9c39e52b2848a8cad17dc69e20a01d98f4ad59..c0a09842fd613123334b99a55f67ca961653f852 100644 --- a/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs +++ b/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs @@ -27,9 +27,10 @@ use polkadot_node_network_protocol::request_response::{ v1::{ChunkFetchingRequest, ChunkFetchingResponse}, }; use polkadot_primitives::v1::{ - AuthorityDiscoveryId, BlakeTwo256, ErasureChunk, GroupIndex, Hash, HashT, OccupiedCore, + AuthorityDiscoveryId, BlakeTwo256, GroupIndex, Hash, HashT, OccupiedCore, SessionIndex, }; +use polkadot_node_primitives::ErasureChunk; use polkadot_subsystem::messages::{ AllMessages, AvailabilityStoreMessage, NetworkBridgeMessage, IfDisconnected, }; diff --git a/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs b/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs index 76c247fb051d64eb2865803c96e3cf9cb5a4dd07..d84d2c646b10aec5cfa060c02043665eef3d29c7 100644 --- a/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs +++ b/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs @@ -26,7 +26,8 @@ use futures::task::{Poll, Context, noop_waker}; use sc_network as network; use sp_keyring::Sr25519Keyring; -use polkadot_primitives::v1::{BlockData, CandidateHash, PoV, ValidatorIndex}; +use polkadot_primitives::v1::{CandidateHash, ValidatorIndex}; +use polkadot_node_primitives::{BlockData, PoV}; use polkadot_node_network_protocol::request_response::v1; use polkadot_node_network_protocol::request_response::Recipient; use polkadot_subsystem::messages::AllMessages; diff --git a/polkadot/node/network/availability-distribution/src/responder.rs b/polkadot/node/network/availability-distribution/src/responder.rs index 
3fcdbf2686bf5edc72c616b4cd656cc1e39318dc..5f91cbceef37a5f5d0e604c576cecd1429aaf505 100644 --- a/polkadot/node/network/availability-distribution/src/responder.rs +++ b/polkadot/node/network/availability-distribution/src/responder.rs @@ -19,7 +19,8 @@ use futures::channel::oneshot; use polkadot_node_network_protocol::request_response::{request::IncomingRequest, v1}; -use polkadot_primitives::v1::{AvailableData, CandidateHash, CompressedPoV, ErasureChunk, ValidatorIndex}; +use polkadot_primitives::v1::{CandidateHash, ValidatorIndex}; +use polkadot_node_primitives::{AvailableData, CompressedPoV, ErasureChunk}; use polkadot_subsystem::{ messages::{AllMessages, AvailabilityStoreMessage}, SubsystemContext, jaeger, diff --git a/polkadot/node/network/availability-distribution/src/tests/mock.rs b/polkadot/node/network/availability-distribution/src/tests/mock.rs index 91eff4b4075d085828795e9b169ae052f941b97b..7c1b57b8085e455aab84a52c91efdc66cda25f94 100644 --- a/polkadot/node/network/availability-distribution/src/tests/mock.rs +++ b/polkadot/node/network/availability-distribution/src/tests/mock.rs @@ -25,10 +25,11 @@ use sp_application_crypto::AppKey; use polkadot_erasure_coding::{branches, obtain_chunks_v1 as obtain_chunks}; use polkadot_primitives::v1::{ - AvailableData, BlockData, CandidateCommitments, CandidateDescriptor, CandidateHash, - CommittedCandidateReceipt, ErasureChunk, GroupIndex, Hash, HeadData, Id as ParaId, - OccupiedCore, PersistedValidationData, PoV, SessionInfo, ValidatorId, ValidatorIndex + CandidateCommitments, CandidateDescriptor, CandidateHash, + CommittedCandidateReceipt, GroupIndex, Hash, HeadData, Id as ParaId, + OccupiedCore, PersistedValidationData, SessionInfo, ValidatorId, ValidatorIndex }; +use polkadot_node_primitives::{PoV, ErasureChunk, AvailableData, BlockData}; use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; /// Get mock keystore with `Ferdie` key. 
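With the imports above settled, the erasure-coding helpers from earlier in the diff (`obtain_chunks_v1`, `reconstruct_v1`) keep their signatures but now operate on the relocated `polkadot_node_primitives::AvailableData`. A minimal round-trip sketch under that API follows; the validator count, payload and the `Default` validation data are illustrative assumptions, not part of the patch:

```rust
// Illustrative only. Assumes `PersistedValidationData` provides a `Default` impl.
use std::sync::Arc;

use polkadot_erasure_coding::{obtain_chunks_v1, reconstruct_v1};
use polkadot_node_primitives::{AvailableData, BlockData, PoV};
use polkadot_primitives::v1::PersistedValidationData;

fn erasure_round_trip() -> Result<(), polkadot_erasure_coding::Error> {
	let available_data = AvailableData {
		pov: Arc::new(PoV { block_data: BlockData(vec![42u8; 1024]) }),
		validation_data: PersistedValidationData::default(),
	};

	// One erasure chunk per validator.
	let n_validators = 10;
	let chunks = obtain_chunks_v1(n_validators, &available_data)?;
	assert_eq!(chunks.len(), n_validators);

	// Any f + 1 chunks (4 of 10 here) reconstruct the full data; the iterator
	// yields `(chunk_bytes, validator_index)` pairs.
	let reconstructed = reconstruct_v1(
		n_validators,
		chunks.iter().take(4).enumerate().map(|(i, chunk)| (&chunk[..], i)),
	)?;
	assert_eq!(reconstructed, available_data);
	Ok(())
}
```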
diff --git a/polkadot/node/network/availability-distribution/src/tests/state.rs b/polkadot/node/network/availability-distribution/src/tests/state.rs index 99b0b2c830db931de97c624e74f32e28af37a9b7..dc3ce8d697e54885b814c4d1e97b1b1ef372f7c8 100644 --- a/polkadot/node/network/availability-distribution/src/tests/state.rs +++ b/polkadot/node/network/availability-distribution/src/tests/state.rs @@ -35,11 +35,13 @@ use polkadot_subsystem::{ActiveLeavesUpdate, FromOverseer, OverseerSignal, Activ RuntimeApiMessage, RuntimeApiRequest, } }; -use polkadot_primitives::v1::{CandidateHash, CoreState, ErasureChunk, GroupIndex, Hash, Id +use polkadot_primitives::v1::{CandidateHash, CoreState, GroupIndex, Hash, Id as ParaId, ScheduledCore, SessionInfo, ValidatorIndex }; -use polkadot_node_network_protocol::{jaeger, +use polkadot_node_primitives::ErasureChunk; +use polkadot_node_network_protocol::{ + jaeger, request_response::{IncomingRequest, OutgoingRequest, Requests, v1} }; use polkadot_subsystem_testhelpers as test_helpers; diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml index 21ae777bad12ccaea8d21573b98e916a545b96c1..8587d1b018dc898d5a8ad29f758fa3cc9faf7116 100644 --- a/polkadot/node/network/availability-recovery/Cargo.toml +++ b/polkadot/node/network/availability-recovery/Cargo.toml @@ -13,6 +13,7 @@ tracing = "0.1.25" polkadot-erasure-coding = { path = "../../../erasure-coding" } polkadot-primitives = { path = "../../../primitives" } +polkadot-node-primitives = { path = "../../primitives" } polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } polkadot-node-network-protocol = { path = "../../network/protocol" } diff --git a/polkadot/node/network/availability-recovery/src/lib.rs b/polkadot/node/network/availability-recovery/src/lib.rs index d8d282e1152642692c5c13116b3445dcfae685bd..2033521bdca71f9663b24d3168256d0c97d768d5 100644 --- a/polkadot/node/network/availability-recovery/src/lib.rs +++ b/polkadot/node/network/availability-recovery/src/lib.rs @@ -26,10 +26,11 @@ use lru::LruCache; use rand::seq::SliceRandom; use polkadot_primitives::v1::{ - AuthorityDiscoveryId, AvailableData, CandidateReceipt, CandidateHash, - Hash, ErasureChunk, ValidatorId, ValidatorIndex, + AuthorityDiscoveryId, CandidateReceipt, CandidateHash, + Hash, ValidatorId, ValidatorIndex, SessionInfo, SessionIndex, BlakeTwo256, HashT, GroupIndex, BlockNumber, }; +use polkadot_node_primitives::{ErasureChunk, AvailableData}; use polkadot_subsystem::{ SubsystemContext, SubsystemResult, SubsystemError, Subsystem, SpawnedSubsystem, FromOverseer, OverseerSignal, ActiveLeavesUpdate, diff --git a/polkadot/node/network/availability-recovery/src/tests.rs b/polkadot/node/network/availability-recovery/src/tests.rs index f7e2a2f30b68ac3a2ecd6a1e01b2b6578fda0e7f..70a9240874cd2fa019f9938cf78bc52c9a8d5904 100644 --- a/polkadot/node/network/availability-recovery/src/tests.rs +++ b/polkadot/node/network/availability-recovery/src/tests.rs @@ -27,8 +27,9 @@ use parity_scale_codec::Encode; use super::*; use polkadot_primitives::v1::{ - AuthorityDiscoveryId, PersistedValidationData, PoV, BlockData, HeadData, + AuthorityDiscoveryId, PersistedValidationData, HeadData, }; +use polkadot_node_primitives::{PoV, BlockData}; use polkadot_erasure_coding::{branches, obtain_chunks_v1 as obtain_chunks}; use polkadot_node_subsystem_util::TimeoutExt; use polkadot_subsystem_testhelpers as 
test_helpers; diff --git a/polkadot/node/network/collator-protocol/src/collator_side.rs b/polkadot/node/network/collator-protocol/src/collator_side.rs index 32d0cef7bf76879d0cae3afacbc002b9f45fc901..5a492e875555766aa85cd10885ffee9269ce748b 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side.rs @@ -22,8 +22,8 @@ use futures::{select, FutureExt, channel::oneshot}; use sp_core::Pair; use polkadot_primitives::v1::{ - CandidateHash, CandidateReceipt, CollatorPair, CompressedPoV, CoreIndex, CoreState, Hash, - Id as ParaId, PoV, ValidatorId + CandidateHash, CandidateReceipt, CollatorPair, CoreIndex, CoreState, Hash, + Id as ParaId, ValidatorId }; use polkadot_subsystem::{ jaeger, PerLeafSpan, @@ -42,7 +42,7 @@ use polkadot_node_subsystem_util::{ request_availability_cores_ctx, metrics::{self, prometheus}, }; -use polkadot_node_primitives::{SignedFullStatement, Statement}; +use polkadot_node_primitives::{SignedFullStatement, Statement, PoV, CompressedPoV}; #[derive(Clone, Default)] pub struct Metrics(Option<MetricsInner>); @@ -949,9 +949,10 @@ mod tests { }; use polkadot_node_subsystem_util::TimeoutExt; use polkadot_primitives::v1::{ - AuthorityDiscoveryId, BlockData, CandidateDescriptor, CollatorPair, GroupRotationInfo, + AuthorityDiscoveryId, CandidateDescriptor, CollatorPair, GroupRotationInfo, ScheduledCore, SessionIndex, SessionInfo, ValidatorIndex, }; + use polkadot_node_primitives::BlockData; use polkadot_subsystem::{ jaeger, messages::{RuntimeApiMessage, RuntimeApiRequest}, diff --git a/polkadot/node/network/collator-protocol/src/validator_side.rs b/polkadot/node/network/collator-protocol/src/validator_side.rs index 5d66dd8256020edb3ffdd864d9985f3e2a7b11d0..c489643a1929bf54859d19f7bfe286ce15670293 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side.rs @@ -32,9 +32,9 @@ use polkadot_node_network_protocol::{ }, OurView, PeerId, UnifiedReputationChange as Rep, View, }; -use polkadot_node_primitives::{SignedFullStatement, Statement}; +use polkadot_node_primitives::{SignedFullStatement, Statement, PoV}; use polkadot_node_subsystem_util::metrics::{self, prometheus}; -use polkadot_primitives::v1::{CandidateReceipt, CollatorId, Hash, Id as ParaId, PoV}; +use polkadot_primitives::v1::{CandidateReceipt, CollatorId, Hash, Id as ParaId}; use polkadot_subsystem::{ jaeger, messages::{ @@ -896,7 +896,8 @@ mod tests { use sp_core::{crypto::Pair, Encode}; use assert_matches::assert_matches; - use polkadot_primitives::v1::{BlockData, CollatorPair, CompressedPoV}; + use polkadot_primitives::v1::CollatorPair; + use polkadot_node_primitives::{BlockData, CompressedPoV}; use polkadot_subsystem_testhelpers as test_helpers; use polkadot_node_network_protocol::{our_view, ObservedRole, request_response::Requests diff --git a/polkadot/node/network/protocol/src/peer_set.rs b/polkadot/node/network/protocol/src/peer_set.rs index fce55a500082b79feeac3be71fc581ee9bec6e54..222869937bcdc493ad5edaa281bb1e53c8e1ee22 100644 --- a/polkadot/node/network/protocol/src/peer_set.rs +++ b/polkadot/node/network/protocol/src/peer_set.rs @@ -122,12 +122,12 @@ impl<T> Index<PeerSet> for PerPeerSet<T> { } impl<T> IndexMut<PeerSet> for PerPeerSet<T> { - fn index_mut(&mut self, index: PeerSet) -> &mut T { - match index { + fn index_mut(&mut self, index: PeerSet) -> &mut T { + match index { PeerSet::Validation => &mut self.validation, PeerSet::Collation => &mut 
self.collation, } - } + } } /// Get `NonDefaultSetConfig`s for all available peer sets. diff --git a/polkadot/node/network/protocol/src/request_response/mod.rs b/polkadot/node/network/protocol/src/request_response/mod.rs index 85eab9fc7a80151332571cea0999b272940006ac..b2bf5abf81b8b2bd13191e92e5a7a0a8ebac2df7 100644 --- a/polkadot/node/network/protocol/src/request_response/mod.rs +++ b/polkadot/node/network/protocol/src/request_response/mod.rs @@ -36,7 +36,7 @@ use std::borrow::Cow; use std::time::Duration; use futures::channel::mpsc; -use polkadot_primitives::v1::MAX_COMPRESSED_POV_SIZE; +use polkadot_node_primitives::MAX_COMPRESSED_POV_SIZE; use strum::EnumIter; pub use sc_network::config as network; diff --git a/polkadot/node/network/protocol/src/request_response/v1.rs b/polkadot/node/network/protocol/src/request_response/v1.rs index c6dde110b62c59c5f7050d69ab444f668e8085a6..97934075b629cb2e5c339ee3878aca5575f10306 100644 --- a/polkadot/node/network/protocol/src/request_response/v1.rs +++ b/polkadot/node/network/protocol/src/request_response/v1.rs @@ -19,10 +19,11 @@ use parity_scale_codec::{Decode, Encode}; use polkadot_primitives::v1::{ - AvailableData, CandidateHash, CandidateReceipt, ErasureChunk, ValidatorIndex, - CompressedPoV, Hash, + CandidateHash, CandidateReceipt, ValidatorIndex, + Hash, }; use polkadot_primitives::v1::Id as ParaId; +use polkadot_node_primitives::{AvailableData, CompressedPoV, ErasureChunk}; use super::request::IsRequest; use super::Protocol; diff --git a/polkadot/node/overseer/examples/minimal-example.rs b/polkadot/node/overseer/examples/minimal-example.rs index e481d38adcc6bdfbe1f78ea170348de3320cebb3..2ebf2a07daa7c09bd8e0f5d13696b082bce4416b 100644 --- a/polkadot/node/overseer/examples/minimal-example.rs +++ b/polkadot/node/overseer/examples/minimal-example.rs @@ -26,7 +26,7 @@ use futures::{ }; use futures_timer::Delay; -use polkadot_primitives::v1::{BlockData, PoV}; +use polkadot_node_primitives::{PoV, BlockData}; use polkadot_overseer::{Overseer, AllSubsystems}; use polkadot_subsystem::{Subsystem, SubsystemContext, SpawnedSubsystem, FromOverseer}; diff --git a/polkadot/node/overseer/src/lib.rs b/polkadot/node/overseer/src/lib.rs index 214e085f1d24d78de7ef507ef59854f232cd9ef7..fa56dd5fc2b7c54ffcb235c4c271d23ca227e309 100644 --- a/polkadot/node/overseer/src/lib.rs +++ b/polkadot/node/overseer/src/lib.rs @@ -66,7 +66,7 @@ use std::task::Poll; use std::time::Duration; use std::collections::{hash_map, HashMap}; -use futures::channel::{oneshot}; +use futures::channel::oneshot; use futures::{ poll, select, future::BoxFuture, @@ -2680,9 +2680,9 @@ mod tests { use std::collections::HashMap; use futures::{executor, pin_mut, select, FutureExt, pending}; - use polkadot_primitives::v1::{BlockData, CollatorPair, PoV, CandidateHash}; + use polkadot_primitives::v1::{CollatorPair, CandidateHash}; use polkadot_subsystem::{messages::RuntimeApiRequest, messages::NetworkBridgeEvent, jaeger}; - use polkadot_node_primitives::{CollationResult, CollationGenerationConfig}; + use polkadot_node_primitives::{CollationResult, CollationGenerationConfig, PoV, BlockData}; use polkadot_node_network_protocol::{PeerId, UnifiedReputationChange}; use polkadot_node_subsystem_util::metered; diff --git a/polkadot/node/primitives/Cargo.toml b/polkadot/node/primitives/Cargo.toml index 6259d114269cc1bca25472525d012537cbd14f39..528cd1ff80492c280688da61cfaa708d032b7214 100644 --- a/polkadot/node/primitives/Cargo.toml +++ b/polkadot/node/primitives/Cargo.toml @@ -15,5 +15,10 @@ sp-core = { git = 
"https://github.com/paritytech/substrate", branch = "master" } sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-consensus-vrf = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master" } +polkadot-parachain = { path = "../../parachain", default-features = false } schnorrkel = "0.9.1" thiserror = "1.0.22" +serde = { version = "1.0.123", features = ["derive"] } + +[target.'cfg(not(target_os = "unknown"))'.dependencies] +zstd = "0.5.0" diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index b4e016ecadbab09cf0d3e3cc7a84689d5a4c8692..bbbddf5ab6a4d8e98b0bda4f416b277646994fee 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -22,20 +22,21 @@ #![deny(missing_docs)] +use std::pin::Pin; + +use serde::{Serialize, Deserialize}; use futures::Future; use parity_scale_codec::{Decode, Encode}; -use polkadot_primitives::v1::{ - CandidateCommitments, CandidateHash, CollatorPair, CommittedCandidateReceipt, CompactStatement, - EncodeAs, Hash, HeadData, Id as ParaId, OutboundHrmpMessage, PersistedValidationData, PoV, - Signed, UpwardMessage, ValidationCode, -}; -use std::pin::Pin; pub use sp_core::traits::SpawnNamed; pub use sp_consensus_babe::{ Epoch as BabeEpoch, BabeEpochConfiguration, AllowedSlots as BabeAllowedSlots, }; +use polkadot_primitives::v1::{CandidateCommitments, CandidateHash, CollatorPair, CommittedCandidateReceipt, CompactStatement, EncodeAs, Hash, HeadData, Id as ParaId, OutboundHrmpMessage, PersistedValidationData, Signed, UpwardMessage, ValidationCode, BlakeTwo256, HashT, ValidatorIndex}; +pub use polkadot_parachain::primitives::BlockData; + + pub mod approval; /// A statement, where the candidate receipt is included in the `Seconded` variant. @@ -140,6 +141,102 @@ pub enum ValidationResult { Invalid(InvalidCandidate), } +/// Maximum PoV size we support right now. +pub const MAX_POV_SIZE: u32 = 50 * 1024 * 1024; + +/// Very conservative (compression ratio of 1). +/// +/// Experiments showed that we have a typical compression ratio of 3.4. +/// https://github.com/ordian/bench-compression-algorithms/ +/// +/// So this could be reduced if deemed necessary. +pub const MAX_COMPRESSED_POV_SIZE: u32 = MAX_POV_SIZE; + +/// A Proof-of-Validity +#[derive(PartialEq, Eq, Clone, Encode, Decode, Debug)] +pub struct PoV { + /// The block witness data. + pub block_data: BlockData, +} + +impl PoV { + /// Get the blake2-256 hash of the PoV. + pub fn hash(&self) -> Hash { + BlakeTwo256::hash_of(self) + } +} + +/// SCALE and Zstd encoded [`PoV`]. +#[derive(Clone, Encode, Decode, PartialEq, Eq)] +pub struct CompressedPoV(Vec<u8>); + +#[derive(Debug, Clone, Copy, PartialEq, Eq, thiserror::Error)] +#[allow(missing_docs)] +pub enum CompressedPoVError { + #[error("Failed to compress a PoV")] + Compress, + #[error("Failed to decompress a PoV")] + Decompress, + #[error("Failed to decode the uncompressed PoV")] + Decode, + #[error("Architecture is not supported")] + NotSupported, +} + +impl CompressedPoV { + /// Compress the given [`PoV`] and returns a [`CompressedPoV`]. + #[cfg(not(target_os = "unknown"))] + pub fn compress(pov: &PoV) -> Result<Self, CompressedPoVError> { + zstd::encode_all(pov.encode().as_slice(), 3).map_err(|_| CompressedPoVError::Compress).map(Self) + } + + /// Compress the given [`PoV`] and returns a [`CompressedPoV`]. 
+ #[cfg(target_os = "unknown")] + pub fn compress(_: &PoV) -> Result<Self, CompressedPoVError> { + Err(CompressedPoVError::NotSupported) + } + + /// Decompress `self` and returns the [`PoV`] on success. + #[cfg(not(target_os = "unknown"))] + pub fn decompress(&self) -> Result<PoV, CompressedPoVError> { + use std::io::Read; + + struct InputDecoder<'a, T: std::io::BufRead>(&'a mut zstd::Decoder<T>, usize); + impl<'a, T: std::io::BufRead> parity_scale_codec::Input for InputDecoder<'a, T> { + fn read(&mut self, into: &mut [u8]) -> Result<(), parity_scale_codec::Error> { + self.1 = self.1.saturating_add(into.len()); + if self.1 > MAX_POV_SIZE as usize { + return Err("pov block too big".into()) + } + self.0.read_exact(into).map_err(Into::into) + } + fn remaining_len(&mut self) -> Result<Option<usize>, parity_scale_codec::Error> { + Ok(None) + } + } + + let mut decoder = zstd::Decoder::new(self.0.as_slice()).map_err(|_| CompressedPoVError::Decompress)?; + PoV::decode(&mut InputDecoder(&mut decoder, 0)).map_err(|_| CompressedPoVError::Decode) + } + + /// Decompress `self` and returns the [`PoV`] on success. + #[cfg(target_os = "unknown")] + pub fn decompress(&self) -> Result<PoV, CompressedPoVError> { + Err(CompressedPoVError::NotSupported) + } + + /// Get compressed data size. + pub fn len(&self) -> usize { + self.0.len() + } +} + +impl std::fmt::Debug for CompressedPoV { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "CompressedPoV({} bytes)", self.0.len()) + } +} + /// The output of a collator. /// /// This differs from `CandidateCommitments` in two ways: @@ -210,3 +307,36 @@ impl std::fmt::Debug for CollationGenerationConfig { write!(f, "CollationGenerationConfig {{ ... }}") } } + +/// This is the data we keep available for each candidate included in the relay chain. +#[derive(Clone, Encode, Decode, PartialEq, Eq, Debug)] +pub struct AvailableData { + /// The Proof-of-Validation of the candidate. + pub pov: std::sync::Arc<PoV>, + /// The persisted validation data needed for secondary checks. + pub validation_data: PersistedValidationData, +} + +/// A chunk of erasure-encoded block data. +#[derive(PartialEq, Eq, Clone, Encode, Decode, Serialize, Deserialize, Debug, Hash)] +pub struct ErasureChunk { + /// The erasure-encoded chunk of data belonging to the candidate block. + pub chunk: Vec<u8>, + /// The index of this erasure-encoded chunk of data. + pub index: ValidatorIndex, + /// Proof for this chunk's branch in the Merkle tree. 
+ pub proof: Vec<Vec<u8>>, +} + +#[cfg(test)] +mod test { + use super::{CompressedPoV, CompressedPoVError, PoV}; + + #[test] + fn decompress_huge_pov_block_fails() { + let pov = PoV { block_data: vec![0; 63 * 1024 * 1024].into() }; + + let compressed = CompressedPoV::compress(&pov).unwrap(); + assert_eq!(CompressedPoVError::Decode, compressed.decompress().unwrap_err()); + } +} diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index fab0327c2cb47c1d6699499596b2a713808c7929..dcc71d283d67826a13b226e52caa32cf4dbc8b0b 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -66,6 +66,7 @@ polkadot-node-core-proposer = { path = "../core/proposer" } polkadot-overseer = { path = "../overseer" } polkadot-parachain = { path = "../../parachain" } polkadot-primitives = { path = "../../primitives" } +polkadot-node-primitives = { path = "../primitives" } polkadot-rpc = { path = "../../rpc" } polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../subsystem" } polkadot-node-subsystem-util = { path = "../subsystem-util" } diff --git a/polkadot/node/service/src/chain_spec.rs b/polkadot/node/service/src/chain_spec.rs index d1425b92e930003a464fc0442872dd9f0b4286eb..21eaa8442f78cfc2f77bec1d658024547273c13a 100644 --- a/polkadot/node/service/src/chain_spec.rs +++ b/polkadot/node/service/src/chain_spec.rs @@ -25,7 +25,8 @@ use kusama_runtime as kusama; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use pallet_staking::Forcing; use polkadot::constants::currency::DOTS; -use polkadot_primitives::v1::{AccountId, AccountPublic, AssignmentId, MAX_POV_SIZE, ValidatorId}; +use polkadot_node_primitives::MAX_POV_SIZE; +use polkadot_primitives::v1::{AccountId, AccountPublic, AssignmentId, ValidatorId}; use polkadot_runtime as polkadot; use rococo_runtime as rococo; use rococo_runtime::constants::currency::DOTS as ROC; diff --git a/polkadot/node/subsystem/src/messages.rs b/polkadot/node/subsystem/src/messages.rs index a8fcf9c3e85a8f26fafc6eb550850b3f6bb9e207..06d41def8083881d709df1b05f1c3c677eb1fb9f 100644 --- a/polkadot/node/subsystem/src/messages.rs +++ b/polkadot/node/subsystem/src/messages.rs @@ -34,14 +34,14 @@ use polkadot_node_network_protocol::{ use polkadot_node_primitives::{ CollationGenerationConfig, SignedFullStatement, ValidationResult, approval::{BlockApprovalMeta, IndirectAssignmentCert, IndirectSignedApprovalVote}, - BabeEpoch, + BabeEpoch, AvailableData, PoV, ErasureChunk }; use polkadot_primitives::v1::{ - AuthorityDiscoveryId, AvailableData, BackedCandidate, BlockNumber, SessionInfo, + AuthorityDiscoveryId, BackedCandidate, BlockNumber, SessionInfo, Header as BlockHeader, CandidateDescriptor, CandidateEvent, CandidateReceipt, - CollatorId, CommittedCandidateReceipt, CoreState, ErasureChunk, + CollatorId, CommittedCandidateReceipt, CoreState, GroupRotationInfo, Hash, Id as ParaId, OccupiedCoreAssumption, - PersistedValidationData, PoV, SessionIndex, SignedAvailabilityBitfield, + PersistedValidationData, SessionIndex, SignedAvailabilityBitfield, ValidationCode, ValidatorId, CandidateHash, ValidatorIndex, ValidatorSignature, InboundDownwardMessage, InboundHrmpMessage, CandidateIndex, GroupIndex, diff --git a/polkadot/parachain/src/primitives.rs b/polkadot/parachain/src/primitives.rs index 64ce93315e67316848c03191861a2a0230c0441f..3f21f763724dffe8907074a71c4fdfc9bba85110 100644 --- a/polkadot/parachain/src/primitives.rs +++ b/polkadot/parachain/src/primitives.rs @@ -193,7 +193,7 @@ pub trait 
AccountIdConversion<AccountId>: Sized { /// Convert into an account ID. This is infallible. fn into_account(&self) -> AccountId; - /// Try to convert an account ID into this type. Might not succeed. + /// Try to convert an account ID into this type. Might not succeed. fn try_from_account(a: &AccountId) -> Option<Self>; } @@ -225,7 +225,7 @@ impl<T: Encode + Decode + Default> AccountIdConversion<T> for Id { ).unwrap_or_default() } - fn try_from_account(x: &T) -> Option<Self> { + fn try_from_account(x: &T) -> Option<Self> { x.using_encoded(|d| { if &d[0..4] != b"para" { return None } let mut cursor = &d[4..]; diff --git a/polkadot/parachain/test-parachains/adder/collator/src/lib.rs b/polkadot/parachain/test-parachains/adder/collator/src/lib.rs index bac6ced05650822c2c26e6e19dfe8367be298779..5573d2d0639f8425814a5a22be7dc193033bd5a2 100644 --- a/polkadot/parachain/test-parachains/adder/collator/src/lib.rs +++ b/polkadot/parachain/test-parachains/adder/collator/src/lib.rs @@ -18,7 +18,8 @@ use futures_timer::Delay; use polkadot_node_primitives::{Collation, CollatorFn, CollationResult, Statement, SignedFullStatement}; -use polkadot_primitives::v1::{CollatorId, CollatorPair, PoV}; +use polkadot_primitives::v1::{CollatorId, CollatorPair}; +use polkadot_node_primitives::PoV; use parity_scale_codec::{Encode, Decode}; use sp_core::{Pair, traits::SpawnNamed}; use std::{ diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index 93390a4023b520d97dbc12d4ab5a59878cda348e..68810539475d8d32d3a3fd23c13f6d6384c7b7a9 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -28,9 +28,6 @@ hex-literal = "0.3.1" parity-util-mem = { version = "0.9.0", default-features = false, optional = true } thiserror = "1.0.23" -[target.'cfg(not(target_os = "unknown"))'.dependencies] -zstd = "0.5.0" - [dev-dependencies] sp-serializer = { git = "https://github.com/paritytech/substrate", branch = "master" } pretty_assertions = "0.6.1" diff --git a/polkadot/primitives/src/v0.rs b/polkadot/primitives/src/v0.rs index bdd5b1426ead6e69ab25ff80558f36e07b4b53b6..9a7112bccd0d0b84bb2dd8bad1c2a0127bd6d2c8 100644 --- a/polkadot/primitives/src/v0.rs +++ b/polkadot/primitives/src/v0.rs @@ -665,18 +665,6 @@ pub struct AvailableData { // In the future, outgoing messages as well. } -/// A chunk of erasure-encoded block data. -#[derive(PartialEq, Eq, Clone, Encode, Decode)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug, Hash))] -pub struct ErasureChunk { - /// The erasure-encoded chunk of data belonging to the candidate block. - pub chunk: Vec<u8>, - /// The index of this erasure-encoded chunk of data. - pub index: ValidatorIndex, - /// Proof for this chunk's branch in the Merkle tree. - pub proof: Vec<Vec<u8>>, -} - const BACKING_STATEMENT_MAGIC: [u8; 4] = *b"BKNG"; /// Statements that can be made about parachain candidates. These are the diff --git a/polkadot/primitives/src/v1.rs b/polkadot/primitives/src/v1.rs index 94c1bff5169c4ce55d8a0cde28180be93bc622dd..4cb8a09b812cde306aff2aed7642aa1c6e2202e1 100644 --- a/polkadot/primitives/src/v1.rs +++ b/polkadot/primitives/src/v1.rs @@ -38,14 +38,14 @@ pub use polkadot_core_primitives::v1::{ // Export some polkadot-parachain primitives pub use polkadot_parachain::primitives::{ - Id, LOWEST_USER_ID, HrmpChannelId, UpwardMessage, HeadData, BlockData, ValidationCode, + Id, LOWEST_USER_ID, HrmpChannelId, UpwardMessage, HeadData, ValidationCode, }; // Export some basic parachain primitives from v0. 
pub use crate::v0::{ CollatorId, CollatorSignature, PARACHAIN_KEY_TYPE_ID, ValidatorId, ValidatorIndex, ValidatorSignature, SigningContext, Signed, ValidityAttestation, - CompactStatement, SignedStatement, ErasureChunk, EncodeAs, + CompactStatement, SignedStatement, EncodeAs, }; #[cfg(feature = "std")] @@ -438,106 +438,6 @@ impl CandidateCommitments { } } -/// A Proof-of-Validity -#[derive(PartialEq, Eq, Clone, Encode, Decode)] -#[cfg_attr(feature = "std", derive(Debug))] -pub struct PoV { - /// The block witness data. - pub block_data: BlockData, -} - -impl PoV { - /// Get the blake2-256 hash of the PoV. - #[cfg(feature = "std")] - pub fn hash(&self) -> Hash { - BlakeTwo256::hash_of(self) - } -} - -/// SCALE and Zstd encoded [`PoV`]. -#[derive(Clone, Encode, Decode, PartialEq, Eq)] -pub struct CompressedPoV(Vec<u8>); - -/// Maximum PoV size we support right now. -pub const MAX_POV_SIZE: u32 = 50 * 1024 * 1024; - -/// Very conservative (compression ratio of 1). -/// -/// Experiments showed that we have a typical compression ratio of 3.4. -/// https://github.com/ordian/bench-compression-algorithms/ -/// -/// So this could be reduced if deemed necessary. -pub const MAX_COMPRESSED_POV_SIZE: u32 = MAX_POV_SIZE; - -#[derive(Debug, Clone, Copy, PartialEq, Eq, thiserror::Error)] -#[cfg(feature = "std")] -#[allow(missing_docs)] -pub enum CompressedPoVError { - #[error("Failed to compress a PoV")] - Compress, - #[error("Failed to decompress a PoV")] - Decompress, - #[error("Failed to decode the uncompressed PoV")] - Decode, - #[error("Architecture is not supported")] - NotSupported, -} - -#[cfg(feature = "std")] -impl CompressedPoV { - /// Compress the given [`PoV`] and returns a [`CompressedPoV`]. - #[cfg(not(target_os = "unknown"))] - pub fn compress(pov: &PoV) -> Result<Self, CompressedPoVError> { - zstd::encode_all(pov.encode().as_slice(), 3).map_err(|_| CompressedPoVError::Compress).map(Self) - } - - /// Compress the given [`PoV`] and returns a [`CompressedPoV`]. - #[cfg(target_os = "unknown")] - pub fn compress(_: &PoV) -> Result<Self, CompressedPoVError> { - Err(CompressedPoVError::NotSupported) - } - - /// Decompress `self` and returns the [`PoV`] on success. - #[cfg(not(target_os = "unknown"))] - pub fn decompress(&self) -> Result<PoV, CompressedPoVError> { - use std::io::Read; - - struct InputDecoder<'a, T: std::io::BufRead>(&'a mut zstd::Decoder<T>, usize); - impl<'a, T: std::io::BufRead> parity_scale_codec::Input for InputDecoder<'a, T> { - fn read(&mut self, into: &mut [u8]) -> Result<(), parity_scale_codec::Error> { - self.1 = self.1.saturating_add(into.len()); - if self.1 > MAX_POV_SIZE as usize { - return Err("pov block too big".into()) - } - self.0.read_exact(into).map_err(Into::into) - } - fn remaining_len(&mut self) -> Result<Option<usize>, parity_scale_codec::Error> { - Ok(None) - } - } - - let mut decoder = zstd::Decoder::new(self.0.as_slice()).map_err(|_| CompressedPoVError::Decompress)?; - PoV::decode(&mut InputDecoder(&mut decoder, 0)).map_err(|_| CompressedPoVError::Decode) - } - - /// Decompress `self` and returns the [`PoV`] on success. - #[cfg(target_os = "unknown")] - pub fn decompress(&self) -> Result<PoV, CompressedPoVError> { - Err(CompressedPoVError::NotSupported) - } - - /// Get compressed data size. 
- pub fn len(&self) -> usize { - self.0.len() - } -} - -#[cfg(feature = "std")] -impl std::fmt::Debug for CompressedPoV { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "CompressedPoV({} bytes)", self.0.len()) - } -} /// A bitfield concerning availability of backed candidates. #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] @@ -681,16 +581,6 @@ pub enum CoreOccupied { Parachain, } -/// This is the data we keep available for each candidate included in the relay chain. -#[cfg(feature = "std")] -#[derive(Clone, Encode, Decode, PartialEq, Eq, Debug)] -pub struct AvailableData { - /// The Proof-of-Validation of the candidate. - pub pov: std::sync::Arc<PoV>, - /// The persisted validation data needed for secondary checks. - pub validation_data: PersistedValidationData, -} - /// A helper data-type for tracking validator-group rotations. #[derive(Clone, Encode, Decode)] #[cfg_attr(feature = "std", derive(PartialEq, Debug, MallocSizeOf))] @@ -1104,7 +994,6 @@ impl<H> From<ConsensusLog> for runtime_primitives::DigestItem<H> { #[cfg(test)] mod tests { use super::*; - use super::{CompressedPoV, CompressedPoVError, PoV}; #[test] fn group_rotation_info_calculations() { @@ -1131,14 +1020,4 @@ mod tests { &Hash::repeat_byte(3), ); } - - - #[cfg(not(target_os = "unknown"))] - #[test] - fn decompress_huge_pov_block_fails() { - let pov = PoV { block_data: vec![0; 63 * 1024 * 1024].into() }; - - let compressed = CompressedPoV::compress(&pov).unwrap(); - assert_eq!(CompressedPoVError::Decode, compressed.decompress().unwrap_err()); - } } diff --git a/polkadot/runtime/parachains/src/inclusion.rs b/polkadot/runtime/parachains/src/inclusion.rs index 998ed8f25c9ac66454dd207b5627230baf24a505..495ded3720b6fca010a38fb9fb7f91983cc87969 100644 --- a/polkadot/runtime/parachains/src/inclusion.rs +++ b/polkadot/runtime/parachains/src/inclusion.rs @@ -2081,7 +2081,7 @@ mod tests { let thread_a = ParaId::from(3); // The block number of the relay-parent for testing. - const RELAY_PARENT_NUM: BlockNumber = 4; + const RELAY_PARENT_NUM: BlockNumber = 4; let paras = vec![(chain_a, true), (chain_b, true), (thread_a, false)]; let validators = vec![