Unverified commit 76720173, authored by Bastian Köcher and committed via GitHub
Browse files

Make `CandidateHash` a real type (#1916)



* Make `CandidateHash` a real type

This PR adds a new type, `CandidateHash`, that is used instead of the
opaque `Hash` type. This helps to ensure, at the type-system level, that
we are passing the correct types.

This PR also fixes incorrect usage of `relay_parent` as `candidate_hash`
when communicating with the availability storage subsystem.

* Update core-primitives/src/lib.rs

Co-authored-by: Peter Goodspeed-Niklaus <coriolinus@users.noreply.github.com>

* Wrap the lines

Co-authored-by: Peter Goodspeed-Niklaus <coriolinus@users.noreply.github.com>
parent 9758a180
...@@ -50,6 +50,14 @@ pub type ChainId = u32; ...@@ -50,6 +50,14 @@ pub type ChainId = u32;
/// A hash of some data used by the relay chain. /// A hash of some data used by the relay chain.
pub type Hash = sp_core::H256; pub type Hash = sp_core::H256;
/// Unit type wrapper around [`Hash`] that represents a candidate hash.
///
/// This type is produced by [`CandidateReceipt::hash`].
///
/// This type makes it easy to enforce that a hash is a candidate hash on the type level.
#[derive(Clone, Copy, codec::Encode, codec::Decode, Hash, Eq, PartialEq, Debug, Default)]
pub struct CandidateHash(pub Hash);
/// Index of a transaction in the relay chain. 32-bit should be plenty. /// Index of a transaction in the relay chain. 32-bit should be plenty.
pub type Nonce = u32; pub type Nonce = u32;
......
...@@ -33,7 +33,7 @@ use kvdb_rocksdb::{Database, DatabaseConfig}; ...@@ -33,7 +33,7 @@ use kvdb_rocksdb::{Database, DatabaseConfig};
use kvdb::{KeyValueDB, DBTransaction}; use kvdb::{KeyValueDB, DBTransaction};
use polkadot_primitives::v1::{ use polkadot_primitives::v1::{
Hash, AvailableData, BlockNumber, CandidateEvent, ErasureChunk, ValidatorIndex, Hash, AvailableData, BlockNumber, CandidateEvent, ErasureChunk, ValidatorIndex, CandidateHash,
}; };
use polkadot_subsystem::{ use polkadot_subsystem::{
FromOverseer, OverseerSignal, SubsystemError, Subsystem, SubsystemContext, SpawnedSubsystem, FromOverseer, OverseerSignal, SubsystemError, Subsystem, SubsystemContext, SpawnedSubsystem,
...@@ -242,7 +242,7 @@ enum CandidateState { ...@@ -242,7 +242,7 @@ enum CandidateState {
#[derive(Debug, Decode, Encode, Eq)] #[derive(Debug, Decode, Encode, Eq)]
struct PoVPruningRecord { struct PoVPruningRecord {
candidate_hash: Hash, candidate_hash: CandidateHash,
block_number: BlockNumber, block_number: BlockNumber,
candidate_state: CandidateState, candidate_state: CandidateState,
prune_at: PruningDelay, prune_at: PruningDelay,
...@@ -272,7 +272,7 @@ impl PartialOrd for PoVPruningRecord { ...@@ -272,7 +272,7 @@ impl PartialOrd for PoVPruningRecord {
#[derive(Debug, Decode, Encode, Eq)] #[derive(Debug, Decode, Encode, Eq)]
struct ChunkPruningRecord { struct ChunkPruningRecord {
candidate_hash: Hash, candidate_hash: CandidateHash,
block_number: BlockNumber, block_number: BlockNumber,
candidate_state: CandidateState, candidate_state: CandidateState,
chunk_index: u32, chunk_index: u32,
...@@ -387,11 +387,11 @@ impl AvailabilityStoreSubsystem { ...@@ -387,11 +387,11 @@ impl AvailabilityStoreSubsystem {
} }
} }
fn available_data_key(candidate_hash: &Hash) -> Vec<u8> { fn available_data_key(candidate_hash: &CandidateHash) -> Vec<u8> {
(candidate_hash, 0i8).encode() (candidate_hash, 0i8).encode()
} }
fn erasure_chunk_key(candidate_hash: &Hash, index: u32) -> Vec<u8> { fn erasure_chunk_key(candidate_hash: &CandidateHash, index: u32) -> Vec<u8> {
(candidate_hash, index, 0i8).encode() (candidate_hash, index, 0i8).encode()
} }
...@@ -564,7 +564,7 @@ where ...@@ -564,7 +564,7 @@ where
log::trace!( log::trace!(
target: LOG_TARGET, target: LOG_TARGET,
"Updating pruning record for finalized block {}", "Updating pruning record for finalized block {}",
record.candidate_hash, record.block_number,
); );
record.prune_at = PruningDelay::into_the_future( record.prune_at = PruningDelay::into_the_future(
...@@ -583,7 +583,7 @@ where ...@@ -583,7 +583,7 @@ where
log::trace!( log::trace!(
target: LOG_TARGET, target: LOG_TARGET,
"Updating chunk pruning record for finalized block {}", "Updating chunk pruning record for finalized block {}",
record.candidate_hash, record.block_number,
); );
record.prune_at = PruningDelay::into_the_future( record.prune_at = PruningDelay::into_the_future(
...@@ -620,7 +620,7 @@ where ...@@ -620,7 +620,7 @@ where
for event in events.into_iter() { for event in events.into_iter() {
if let CandidateEvent::CandidateIncluded(receipt, _) = event { if let CandidateEvent::CandidateIncluded(receipt, _) = event {
log::trace!(target: LOG_TARGET, "Candidate {} was included", receipt.hash()); log::trace!(target: LOG_TARGET, "Candidate {:?} was included", receipt.hash());
included.insert(receipt.hash()); included.insert(receipt.hash());
} }
} }
...@@ -729,7 +729,10 @@ where ...@@ -729,7 +729,10 @@ where
Ok(()) Ok(())
} }
fn available_data(db: &Arc<dyn KeyValueDB>, candidate_hash: &Hash) -> Option<StoredAvailableData> { fn available_data(
db: &Arc<dyn KeyValueDB>,
candidate_hash: &CandidateHash,
) -> Option<StoredAvailableData> {
query_inner(db, columns::DATA, &available_data_key(candidate_hash)) query_inner(db, columns::DATA, &available_data_key(candidate_hash))
} }
...@@ -835,7 +838,7 @@ where ...@@ -835,7 +838,7 @@ where
fn store_available_data( fn store_available_data(
subsystem: &mut AvailabilityStoreSubsystem, subsystem: &mut AvailabilityStoreSubsystem,
candidate_hash: &Hash, candidate_hash: &CandidateHash,
id: Option<ValidatorIndex>, id: Option<ValidatorIndex>,
n_validators: u32, n_validators: u32,
available_data: AvailableData, available_data: AvailableData,
...@@ -872,7 +875,7 @@ fn store_available_data( ...@@ -872,7 +875,7 @@ fn store_available_data(
} }
let pruning_record = PoVPruningRecord { let pruning_record = PoVPruningRecord {
candidate_hash: candidate_hash.clone(), candidate_hash: *candidate_hash,
block_number, block_number,
candidate_state: CandidateState::Stored, candidate_state: CandidateState::Stored,
prune_at, prune_at,
...@@ -901,7 +904,7 @@ fn store_available_data( ...@@ -901,7 +904,7 @@ fn store_available_data(
fn store_chunk( fn store_chunk(
subsystem: &mut AvailabilityStoreSubsystem, subsystem: &mut AvailabilityStoreSubsystem,
candidate_hash: &Hash, candidate_hash: &CandidateHash,
_n_validators: u32, _n_validators: u32,
chunk: ErasureChunk, chunk: ErasureChunk,
block_number: BlockNumber, block_number: BlockNumber,
...@@ -952,7 +955,7 @@ fn store_chunk( ...@@ -952,7 +955,7 @@ fn store_chunk(
fn get_chunk( fn get_chunk(
subsystem: &mut AvailabilityStoreSubsystem, subsystem: &mut AvailabilityStoreSubsystem,
candidate_hash: &Hash, candidate_hash: &CandidateHash,
index: u32, index: u32,
) -> Result<Option<ErasureChunk>, Error> { ) -> Result<Option<ErasureChunk>, Error> {
if let Some(chunk) = query_inner( if let Some(chunk) = query_inner(
...@@ -981,7 +984,11 @@ fn get_chunk( ...@@ -981,7 +984,11 @@ fn get_chunk(
Ok(None) Ok(None)
} }
fn query_inner<D: Decode>(db: &Arc<dyn KeyValueDB>, column: u32, key: &[u8]) -> Option<D> { fn query_inner<D: Decode>(
db: &Arc<dyn KeyValueDB>,
column: u32,
key: &[u8],
) -> Option<D> {
match db.get(column, key) { match db.get(column, key) {
Ok(Some(raw)) => { Ok(Some(raw)) => {
let res = D::decode(&mut &raw[..]).expect("all stored data serialized correctly; qed"); let res = D::decode(&mut &raw[..]).expect("all stored data serialized correctly; qed");
......
...@@ -27,7 +27,7 @@ use smallvec::smallvec; ...@@ -27,7 +27,7 @@ use smallvec::smallvec;
use polkadot_primitives::v1::{ use polkadot_primitives::v1::{
AvailableData, BlockData, CandidateDescriptor, CandidateReceipt, HeadData, AvailableData, BlockData, CandidateDescriptor, CandidateReceipt, HeadData,
PersistedValidationData, PoV, Id as ParaId, PersistedValidationData, PoV, Id as ParaId, CandidateHash,
}; };
use polkadot_node_subsystem_util::TimeoutExt; use polkadot_node_subsystem_util::TimeoutExt;
use polkadot_subsystem::{ use polkadot_subsystem::{
...@@ -199,7 +199,7 @@ fn runtime_api_error_does_not_stop_the_subsystem() { ...@@ -199,7 +199,7 @@ fn runtime_api_error_does_not_stop_the_subsystem() {
// but that's fine, we're still alive // but that's fine, we're still alive
let (tx, rx) = oneshot::channel(); let (tx, rx) = oneshot::channel();
let candidate_hash = Hash::repeat_byte(33); let candidate_hash = CandidateHash(Hash::repeat_byte(33));
let validator_index = 5; let validator_index = 5;
let query_chunk = AvailabilityStoreMessage::QueryChunk( let query_chunk = AvailabilityStoreMessage::QueryChunk(
candidate_hash, candidate_hash,
...@@ -220,7 +220,7 @@ fn store_chunk_works() { ...@@ -220,7 +220,7 @@ fn store_chunk_works() {
test_harness(PruningConfig::default(), store.clone(), |test_harness| async move { test_harness(PruningConfig::default(), store.clone(), |test_harness| async move {
let TestHarness { mut virtual_overseer } = test_harness; let TestHarness { mut virtual_overseer } = test_harness;
let relay_parent = Hash::repeat_byte(32); let relay_parent = Hash::repeat_byte(32);
let candidate_hash = Hash::repeat_byte(33); let candidate_hash = CandidateHash(Hash::repeat_byte(33));
let validator_index = 5; let validator_index = 5;
let chunk = ErasureChunk { let chunk = ErasureChunk {
...@@ -273,7 +273,7 @@ fn store_block_works() { ...@@ -273,7 +273,7 @@ fn store_block_works() {
let test_state = TestState::default(); let test_state = TestState::default();
test_harness(test_state.pruning_config.clone(), store.clone(), |test_harness| async move { test_harness(test_state.pruning_config.clone(), store.clone(), |test_harness| async move {
let TestHarness { mut virtual_overseer } = test_harness; let TestHarness { mut virtual_overseer } = test_harness;
let candidate_hash = Hash::from([1; 32]); let candidate_hash = CandidateHash(Hash::from([1; 32]));
let validator_index = 5; let validator_index = 5;
let n_validators = 10; let n_validators = 10;
...@@ -327,7 +327,7 @@ fn store_pov_and_query_chunk_works() { ...@@ -327,7 +327,7 @@ fn store_pov_and_query_chunk_works() {
test_harness(test_state.pruning_config.clone(), store.clone(), |test_harness| async move { test_harness(test_state.pruning_config.clone(), store.clone(), |test_harness| async move {
let TestHarness { mut virtual_overseer } = test_harness; let TestHarness { mut virtual_overseer } = test_harness;
let candidate_hash = Hash::from([1; 32]); let candidate_hash = CandidateHash(Hash::from([1; 32]));
let n_validators = 10; let n_validators = 10;
let pov = PoV { let pov = PoV {
...@@ -370,7 +370,7 @@ fn stored_but_not_included_chunk_is_pruned() { ...@@ -370,7 +370,7 @@ fn stored_but_not_included_chunk_is_pruned() {
test_harness(test_state.pruning_config.clone(), store.clone(), |test_harness| async move { test_harness(test_state.pruning_config.clone(), store.clone(), |test_harness| async move {
let TestHarness { mut virtual_overseer } = test_harness; let TestHarness { mut virtual_overseer } = test_harness;
let candidate_hash = Hash::repeat_byte(1); let candidate_hash = CandidateHash(Hash::repeat_byte(1));
let relay_parent = Hash::repeat_byte(2); let relay_parent = Hash::repeat_byte(2);
let validator_index = 5; let validator_index = 5;
...@@ -425,7 +425,7 @@ fn stored_but_not_included_data_is_pruned() { ...@@ -425,7 +425,7 @@ fn stored_but_not_included_data_is_pruned() {
test_harness(test_state.pruning_config.clone(), store.clone(), |test_harness| async move { test_harness(test_state.pruning_config.clone(), store.clone(), |test_harness| async move {
let TestHarness { mut virtual_overseer } = test_harness; let TestHarness { mut virtual_overseer } = test_harness;
let candidate_hash = Hash::repeat_byte(1); let candidate_hash = CandidateHash(Hash::repeat_byte(1));
let n_validators = 10; let n_validators = 10;
let pov = PoV { let pov = PoV {
...@@ -852,7 +852,7 @@ fn forkfullness_works() { ...@@ -852,7 +852,7 @@ fn forkfullness_works() {
async fn query_available_data( async fn query_available_data(
virtual_overseer: &mut test_helpers::TestSubsystemContextHandle<AvailabilityStoreMessage>, virtual_overseer: &mut test_helpers::TestSubsystemContextHandle<AvailabilityStoreMessage>,
candidate_hash: Hash, candidate_hash: CandidateHash,
) -> Option<AvailableData> { ) -> Option<AvailableData> {
let (tx, rx) = oneshot::channel(); let (tx, rx) = oneshot::channel();
...@@ -864,7 +864,7 @@ async fn query_available_data( ...@@ -864,7 +864,7 @@ async fn query_available_data(
async fn query_chunk( async fn query_chunk(
virtual_overseer: &mut test_helpers::TestSubsystemContextHandle<AvailabilityStoreMessage>, virtual_overseer: &mut test_helpers::TestSubsystemContextHandle<AvailabilityStoreMessage>,
candidate_hash: Hash, candidate_hash: CandidateHash,
index: u32, index: u32,
) -> Option<ErasureChunk> { ) -> Option<ErasureChunk> {
let (tx, rx) = oneshot::channel(); let (tx, rx) = oneshot::channel();
......
...@@ -32,7 +32,7 @@ use futures::{ ...@@ -32,7 +32,7 @@ use futures::{
use sp_keystore::SyncCryptoStorePtr; use sp_keystore::SyncCryptoStorePtr;
use polkadot_primitives::v1::{ use polkadot_primitives::v1::{
CommittedCandidateReceipt, BackedCandidate, Id as ParaId, ValidatorId, CommittedCandidateReceipt, BackedCandidate, Id as ParaId, ValidatorId,
ValidatorIndex, SigningContext, PoV, ValidatorIndex, SigningContext, PoV, CandidateHash,
CandidateDescriptor, AvailableData, ValidatorSignature, Hash, CandidateReceipt, CandidateDescriptor, AvailableData, ValidatorSignature, Hash, CandidateReceipt,
CandidateCommitments, CoreState, CoreIndex, CollatorId, ValidationOutputs, CandidateCommitments, CoreState, CoreIndex, CollatorId, ValidationOutputs,
}; };
...@@ -103,12 +103,12 @@ struct CandidateBackingJob { ...@@ -103,12 +103,12 @@ struct CandidateBackingJob {
/// The collator required to author the candidate, if any. /// The collator required to author the candidate, if any.
required_collator: Option<CollatorId>, required_collator: Option<CollatorId>,
/// We issued `Valid` or `Invalid` statements on about these candidates. /// We issued `Valid` or `Invalid` statements on about these candidates.
issued_statements: HashSet<Hash>, issued_statements: HashSet<CandidateHash>,
/// `Some(h)` if this job has already issues `Seconded` statemt for some candidate with `h` hash. /// `Some(h)` if this job has already issues `Seconded` statemt for some candidate with `h` hash.
seconded: Option<Hash>, seconded: Option<CandidateHash>,
/// The candidates that are includable, by hash. Each entry here indicates /// The candidates that are includable, by hash. Each entry here indicates
/// that we've sent the provisioner the backed candidate. /// that we've sent the provisioner the backed candidate.
backed: HashSet<Hash>, backed: HashSet<CandidateHash>,
/// We have already reported misbehaviors for these validators. /// We have already reported misbehaviors for these validators.
reported_misbehavior_for: HashSet<ValidatorIndex>, reported_misbehavior_for: HashSet<ValidatorIndex>,
keystore: SyncCryptoStorePtr, keystore: SyncCryptoStorePtr,
...@@ -131,12 +131,12 @@ struct TableContext { ...@@ -131,12 +131,12 @@ struct TableContext {
impl TableContextTrait for TableContext { impl TableContextTrait for TableContext {
type AuthorityId = ValidatorIndex; type AuthorityId = ValidatorIndex;
type Digest = Hash; type Digest = CandidateHash;
type GroupId = ParaId; type GroupId = ParaId;
type Signature = ValidatorSignature; type Signature = ValidatorSignature;
type Candidate = CommittedCandidateReceipt; type Candidate = CommittedCandidateReceipt;
fn candidate_digest(candidate: &CommittedCandidateReceipt) -> Hash { fn candidate_digest(candidate: &CommittedCandidateReceipt) -> CandidateHash {
candidate.hash() candidate.hash()
} }
...@@ -341,6 +341,7 @@ impl CandidateBackingJob { ...@@ -341,6 +341,7 @@ impl CandidateBackingJob {
// the collator, do not make available and report the collator. // the collator, do not make available and report the collator.
let commitments_check = self.make_pov_available( let commitments_check = self.make_pov_available(
pov, pov,
candidate_hash,
validation_data, validation_data,
outputs, outputs,
|commitments| if commitments.hash() == candidate.commitments_hash { |commitments| if commitments.hash() == candidate.commitments_hash {
...@@ -525,7 +526,7 @@ impl CandidateBackingJob { ...@@ -525,7 +526,7 @@ impl CandidateBackingJob {
&mut self, &mut self,
summary: TableSummary, summary: TableSummary,
) -> Result<(), Error> { ) -> Result<(), Error> {
let candidate_hash = summary.candidate.clone(); let candidate_hash = summary.candidate;
if self.issued_statements.contains(&candidate_hash) { if self.issued_statements.contains(&candidate_hash) {
return Ok(()) return Ok(())
...@@ -559,6 +560,7 @@ impl CandidateBackingJob { ...@@ -559,6 +560,7 @@ impl CandidateBackingJob {
// If validation produces a new set of commitments, we vote the candidate as invalid. // If validation produces a new set of commitments, we vote the candidate as invalid.
let commitments_check = self.make_pov_available( let commitments_check = self.make_pov_available(
(&*pov).clone(), (&*pov).clone(),
candidate_hash,
validation_data, validation_data,
outputs, outputs,
|commitments| if commitments == expected_commitments { |commitments| if commitments == expected_commitments {
...@@ -667,12 +669,13 @@ impl CandidateBackingJob { ...@@ -667,12 +669,13 @@ impl CandidateBackingJob {
&mut self, &mut self,
id: Option<ValidatorIndex>, id: Option<ValidatorIndex>,
n_validators: u32, n_validators: u32,
candidate_hash: CandidateHash,
available_data: AvailableData, available_data: AvailableData,
) -> Result<(), Error> { ) -> Result<(), Error> {
let (tx, rx) = oneshot::channel(); let (tx, rx) = oneshot::channel();
self.tx_from.send(FromJob::AvailabilityStore( self.tx_from.send(FromJob::AvailabilityStore(
AvailabilityStoreMessage::StoreAvailableData( AvailabilityStoreMessage::StoreAvailableData(
self.parent, candidate_hash,
id, id,
n_validators, n_validators,
available_data, available_data,
...@@ -694,6 +697,7 @@ impl CandidateBackingJob { ...@@ -694,6 +697,7 @@ impl CandidateBackingJob {
async fn make_pov_available<T, E>( async fn make_pov_available<T, E>(
&mut self, &mut self,
pov: PoV, pov: PoV,
candidate_hash: CandidateHash,
validation_data: polkadot_primitives::v1::PersistedValidationData, validation_data: polkadot_primitives::v1::PersistedValidationData,
outputs: ValidationOutputs, outputs: ValidationOutputs,
with_commitments: impl FnOnce(CandidateCommitments) -> Result<T, E>, with_commitments: impl FnOnce(CandidateCommitments) -> Result<T, E>,
...@@ -727,6 +731,7 @@ impl CandidateBackingJob { ...@@ -727,6 +731,7 @@ impl CandidateBackingJob {
self.store_available_data( self.store_available_data(
self.table_context.validator.as_ref().map(|v| v.index()), self.table_context.validator.as_ref().map(|v| v.index()),
self.table_context.validators.len() as u32, self.table_context.validators.len() as u32,
candidate_hash,
available_data, available_data,
).await?; ).await?;
...@@ -1206,8 +1211,8 @@ mod tests { ...@@ -1206,8 +1211,8 @@ mod tests {
assert_matches!( assert_matches!(
virtual_overseer.recv().await, virtual_overseer.recv().await,
AllMessages::AvailabilityStore( AllMessages::AvailabilityStore(
AvailabilityStoreMessage::StoreAvailableData(parent_hash, _, _, _, tx) AvailabilityStoreMessage::StoreAvailableData(candidate_hash, _, _, _, tx)
) if parent_hash == test_state.relay_parent => { ) if candidate_hash == candidate.hash() => {
tx.send(Ok(())).unwrap(); tx.send(Ok(())).unwrap();
} }
); );
...@@ -1333,8 +1338,8 @@ mod tests { ...@@ -1333,8 +1338,8 @@ mod tests {
assert_matches!( assert_matches!(
virtual_overseer.recv().await, virtual_overseer.recv().await,
AllMessages::AvailabilityStore( AllMessages::AvailabilityStore(
AvailabilityStoreMessage::StoreAvailableData(parent_hash, _, _, _, tx) AvailabilityStoreMessage::StoreAvailableData(candidate_hash, _, _, _, tx)
) if parent_hash == test_state.relay_parent => { ) if candidate_hash == candidate_a.hash() => {
tx.send(Ok(())).unwrap(); tx.send(Ok(())).unwrap();
} }
); );
...@@ -1482,8 +1487,8 @@ mod tests { ...@@ -1482,8 +1487,8 @@ mod tests {
assert_matches!( assert_matches!(
virtual_overseer.recv().await, virtual_overseer.recv().await,
AllMessages::AvailabilityStore( AllMessages::AvailabilityStore(
AvailabilityStoreMessage::StoreAvailableData(parent_hash, _, _, _, tx) AvailabilityStoreMessage::StoreAvailableData(candidate_hash, _, _, _, tx)
) if parent_hash == test_state.relay_parent => { ) if candidate_hash == candidate_a.hash() => {
tx.send(Ok(())).unwrap(); tx.send(Ok(())).unwrap();
} }
); );
...@@ -1665,8 +1670,8 @@ mod tests { ...@@ -1665,8 +1670,8 @@ mod tests {
assert_matches!( assert_matches!(
virtual_overseer.recv().await, virtual_overseer.recv().await,
AllMessages::AvailabilityStore( AllMessages::AvailabilityStore(
AvailabilityStoreMessage::StoreAvailableData(parent_hash, _, _, _, tx) AvailabilityStoreMessage::StoreAvailableData(candidate_hash, _, _, _, tx)
) if parent_hash == test_state.relay_parent => { ) if candidate_hash == candidate_b.hash() => {
tx.send(Ok(())).unwrap(); tx.send(Ok(())).unwrap();
} }
); );
......
...@@ -174,7 +174,7 @@ async fn get_core_availability( ...@@ -174,7 +174,7 @@ async fn get_core_availability(
.await .await
.send( .send(
AvailabilityStoreMessage::QueryChunkAvailability( AvailabilityStoreMessage::QueryChunkAvailability(
committed_candidate_receipt.descriptor.pov_hash, committed_candidate_receipt.hash(),
validator_idx, validator_idx,
tx, tx,
).into(), ).into(),
......
...@@ -38,7 +38,7 @@ use polkadot_node_network_protocol::{ ...@@ -38,7 +38,7 @@ use polkadot_node_network_protocol::{
use polkadot_node_subsystem_util::metrics::{self, prometheus}; use polkadot_node_subsystem_util::metrics::{self, prometheus};
use polkadot_primitives::v1::{ use polkadot_primitives::v1::{
BlakeTwo256, CommittedCandidateReceipt, CoreState, ErasureChunk, Hash, HashT, Id as ParaId, BlakeTwo256, CommittedCandidateReceipt, CoreState, ErasureChunk, Hash, HashT, Id as ParaId,
SessionIndex, ValidatorId, ValidatorIndex, PARACHAIN_KEY_TYPE_ID, SessionIndex, ValidatorId, ValidatorIndex, PARACHAIN_KEY_TYPE_ID, CandidateHash,
}; };
use polkadot_subsystem::messages::{ use polkadot_subsystem::messages::{
AllMessages, AvailabilityDistributionMessage, AvailabilityStoreMessage, ChainApiMessage, AllMessages, AvailabilityDistributionMessage, AvailabilityStoreMessage, ChainApiMessage,
...@@ -130,7 +130,7 @@ const BENEFIT_VALID_MESSAGE: Rep = Rep::new(10, "Valid message"); ...@@ -130,7 +130,7 @@ const BENEFIT_VALID_MESSAGE: Rep = Rep::new(10, "Valid message");
#[derive(Encode, Decode, Debug, Clone, PartialEq, Eq, Hash)] #[derive(Encode, Decode, Debug, Clone, PartialEq, Eq, Hash)]
pub struct AvailabilityGossipMessage { pub struct AvailabilityGossipMessage {
/// Anchor hash of the candidate the `ErasureChunk` is associated to. /// Anchor hash of the candidate the `ErasureChunk` is associated to.
pub candidate_hash: Hash, pub candidate_hash: CandidateHash,
/// The erasure chunk, a encoded information part of `AvailabilityData`. /// The erasure chunk, a encoded information part of `AvailabilityData`.
pub erasure_chunk: ErasureChunk, pub erasure_chunk: ErasureChunk,
} }
...@@ -149,13 +149,13 @@ struct ProtocolState { ...@@ -149,13 +149,13 @@ struct ProtocolState {
/// Caches a mapping of relay parents or ancestor to live candidate receipts. /// Caches a mapping of relay parents or ancestor to live candidate receipts.
/// Allows fast intersection of live candidates with views and consecutive unioning. /// Allows fast intersection of live candidates with views and consecutive unioning.
/// Maps relay parent / ancestor -> live candidate receipts + its hash. /// Maps relay parent / ancestor -> live candidate receipts + its hash.
receipts: HashMap<Hash, HashSet<(Hash, CommittedCandidateReceipt)>>, receipts: HashMap<Hash, HashSet<(CandidateHash, CommittedCandidateReceipt)>>,
/// Allow reverse caching of view checks. /// Allow reverse caching of view checks.
/// Maps candidate hash -> relay parent for extracting meta information from `PerRelayParent`. /// Maps candidate hash -> relay parent for extracting meta information from `PerRelayParent`.
/// Note that the presence of this is not sufficient to determine if deletion is OK, i.e. /// Note that the presence of this is not sufficient to determine if deletion is OK, i.e.
/// two histories could cover this. /// two histories could cover this.
reverse: HashMap<Hash, Hash>, reverse: HashMap<CandidateHash, Hash>,
/// Keeps track of which candidate receipts are required due to ancestors of which relay parents /// Keeps track of which candidate receipts are required due to ancestors of which relay parents
/// of our view. /// of our view.
...@@ -166,7 +166,7 @@ struct ProtocolState { ...@@ -166,7 +166,7 @@ struct ProtocolState {
per_relay_parent: HashMap<Hash, PerRelayParent>, per_relay_parent: HashMap<Hash, PerRelayParent>,
/// Track data that is specific to a candidate. /// Track data that is specific to a candidate.
per_candidate: