Unverified Commit 76720173 authored by Bastian Köcher, committed by GitHub

Make `CandidateHash` a real type (#1916)



* Make `CandidateHash` a real type

This PR adds a new type, `CandidateHash`, that is used instead of the
opaque `Hash` type. This helps ensure at the type-system level that
we are passing the correct types.

This PR also fixes an incorrect use of `relay_parent` as `candidate_hash`
when communicating with the availability store.
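
To illustrate what the wrapper buys us (a minimal sketch, not code from this PR; `query_chunk` here is a hypothetical consumer), mixing up a relay-parent hash and a candidate hash now fails to compile instead of silently writing or reading under the wrong key:

```rust
use sp_core::H256;

/// Alias matching the relay-chain `Hash` type.
type Hash = H256;

/// Unit wrapper mirroring the `CandidateHash` added by this PR.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct CandidateHash(pub Hash);

// Hypothetical consumer that must be given a candidate hash.
fn query_chunk(_candidate_hash: CandidateHash, _validator_index: u32) {}

fn main() {
    let relay_parent: Hash = Hash::repeat_byte(32);
    let candidate_hash = CandidateHash(Hash::repeat_byte(33));

    query_chunk(candidate_hash, 5); // fine
    // query_chunk(relay_parent, 5); // rejected: expected `CandidateHash`, found `H256`
    let _ = relay_parent;
}
```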

* Update core-primitives/src/lib.rs

Co-authored-by: Peter Goodspeed-Niklaus <coriolinus@users.noreply.github.com>

* Wrap the lines

Co-authored-by: Peter Goodspeed-Niklaus <coriolinus@users.noreply.github.com>
parent 9758a180
......@@ -50,6 +50,14 @@ pub type ChainId = u32;
/// A hash of some data used by the relay chain.
pub type Hash = sp_core::H256;
/// Unit type wrapper around [`Hash`] that represents a candidate hash.
///
/// This type is produced by [`CandidateReceipt::hash`].
///
/// This type makes it easy to enforce that a hash is a candidate hash on the type level.
#[derive(Clone, Copy, codec::Encode, codec::Decode, Hash, Eq, PartialEq, Debug, Default)]
pub struct CandidateHash(pub Hash);
/// Index of a transaction in the relay chain. 32-bit should be plenty.
pub type Nonce = u32;
......
......@@ -33,7 +33,7 @@ use kvdb_rocksdb::{Database, DatabaseConfig};
use kvdb::{KeyValueDB, DBTransaction};
use polkadot_primitives::v1::{
Hash, AvailableData, BlockNumber, CandidateEvent, ErasureChunk, ValidatorIndex,
Hash, AvailableData, BlockNumber, CandidateEvent, ErasureChunk, ValidatorIndex, CandidateHash,
};
use polkadot_subsystem::{
FromOverseer, OverseerSignal, SubsystemError, Subsystem, SubsystemContext, SpawnedSubsystem,
......@@ -242,7 +242,7 @@ enum CandidateState {
#[derive(Debug, Decode, Encode, Eq)]
struct PoVPruningRecord {
candidate_hash: Hash,
candidate_hash: CandidateHash,
block_number: BlockNumber,
candidate_state: CandidateState,
prune_at: PruningDelay,
......@@ -272,7 +272,7 @@ impl PartialOrd for PoVPruningRecord {
#[derive(Debug, Decode, Encode, Eq)]
struct ChunkPruningRecord {
candidate_hash: Hash,
candidate_hash: CandidateHash,
block_number: BlockNumber,
candidate_state: CandidateState,
chunk_index: u32,
......@@ -387,11 +387,11 @@ impl AvailabilityStoreSubsystem {
}
}
fn available_data_key(candidate_hash: &Hash) -> Vec<u8> {
fn available_data_key(candidate_hash: &CandidateHash) -> Vec<u8> {
(candidate_hash, 0i8).encode()
}
fn erasure_chunk_key(candidate_hash: &Hash, index: u32) -> Vec<u8> {
fn erasure_chunk_key(candidate_hash: &CandidateHash, index: u32) -> Vec<u8> {
(candidate_hash, index, 0i8).encode()
}
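
Worth noting for these key helpers (a small sketch under the assumption that `CandidateHash` keeps the derived SCALE `codec::Encode` from core-primitives): a single-field wrapper encodes byte-for-byte like the inner `Hash`, so switching the parameter type should not change the on-disk key format.

```rust
use parity_scale_codec::Encode;
use sp_core::H256;

// Stand-in wrapper; the derive mirrors the `codec::Encode` derive in core-primitives.
#[derive(Clone, Copy, Encode)]
struct CandidateHash(pub H256);

fn main() {
    let hash = H256::repeat_byte(33);

    // The `(hash, 0i8)` key used by `available_data_key` is byte-for-byte identical
    // whether or not the hash is wrapped in the new type.
    assert_eq!((hash, 0i8).encode(), (CandidateHash(hash), 0i8).encode());
}
```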
......@@ -564,7 +564,7 @@ where
log::trace!(
target: LOG_TARGET,
"Updating pruning record for finalized block {}",
record.candidate_hash,
record.block_number,
);
record.prune_at = PruningDelay::into_the_future(
......@@ -583,7 +583,7 @@ where
log::trace!(
target: LOG_TARGET,
"Updating chunk pruning record for finalized block {}",
record.candidate_hash,
record.block_number,
);
record.prune_at = PruningDelay::into_the_future(
......@@ -620,7 +620,7 @@ where
for event in events.into_iter() {
if let CandidateEvent::CandidateIncluded(receipt, _) = event {
log::trace!(target: LOG_TARGET, "Candidate {} was included", receipt.hash());
log::trace!(target: LOG_TARGET, "Candidate {:?} was included", receipt.hash());
included.insert(receipt.hash());
}
}
......@@ -729,7 +729,10 @@ where
Ok(())
}
fn available_data(db: &Arc<dyn KeyValueDB>, candidate_hash: &Hash) -> Option<StoredAvailableData> {
fn available_data(
db: &Arc<dyn KeyValueDB>,
candidate_hash: &CandidateHash,
) -> Option<StoredAvailableData> {
query_inner(db, columns::DATA, &available_data_key(candidate_hash))
}
......@@ -835,7 +838,7 @@ where
fn store_available_data(
subsystem: &mut AvailabilityStoreSubsystem,
candidate_hash: &Hash,
candidate_hash: &CandidateHash,
id: Option<ValidatorIndex>,
n_validators: u32,
available_data: AvailableData,
......@@ -872,7 +875,7 @@ fn store_available_data(
}
let pruning_record = PoVPruningRecord {
candidate_hash: candidate_hash.clone(),
candidate_hash: *candidate_hash,
block_number,
candidate_state: CandidateState::Stored,
prune_at,
......@@ -901,7 +904,7 @@ fn store_available_data(
fn store_chunk(
subsystem: &mut AvailabilityStoreSubsystem,
candidate_hash: &Hash,
candidate_hash: &CandidateHash,
_n_validators: u32,
chunk: ErasureChunk,
block_number: BlockNumber,
......@@ -952,7 +955,7 @@ fn store_chunk(
fn get_chunk(
subsystem: &mut AvailabilityStoreSubsystem,
candidate_hash: &Hash,
candidate_hash: &CandidateHash,
index: u32,
) -> Result<Option<ErasureChunk>, Error> {
if let Some(chunk) = query_inner(
......@@ -981,7 +984,11 @@ fn get_chunk(
Ok(None)
}
fn query_inner<D: Decode>(db: &Arc<dyn KeyValueDB>, column: u32, key: &[u8]) -> Option<D> {
fn query_inner<D: Decode>(
db: &Arc<dyn KeyValueDB>,
column: u32,
key: &[u8],
) -> Option<D> {
match db.get(column, key) {
Ok(Some(raw)) => {
let res = D::decode(&mut &raw[..]).expect("all stored data serialized correctly; qed");
......
......@@ -27,7 +27,7 @@ use smallvec::smallvec;
use polkadot_primitives::v1::{
AvailableData, BlockData, CandidateDescriptor, CandidateReceipt, HeadData,
PersistedValidationData, PoV, Id as ParaId,
PersistedValidationData, PoV, Id as ParaId, CandidateHash,
};
use polkadot_node_subsystem_util::TimeoutExt;
use polkadot_subsystem::{
......@@ -199,7 +199,7 @@ fn runtime_api_error_does_not_stop_the_subsystem() {
// but that's fine, we're still alive
let (tx, rx) = oneshot::channel();
let candidate_hash = Hash::repeat_byte(33);
let candidate_hash = CandidateHash(Hash::repeat_byte(33));
let validator_index = 5;
let query_chunk = AvailabilityStoreMessage::QueryChunk(
candidate_hash,
......@@ -220,7 +220,7 @@ fn store_chunk_works() {
test_harness(PruningConfig::default(), store.clone(), |test_harness| async move {
let TestHarness { mut virtual_overseer } = test_harness;
let relay_parent = Hash::repeat_byte(32);
let candidate_hash = Hash::repeat_byte(33);
let candidate_hash = CandidateHash(Hash::repeat_byte(33));
let validator_index = 5;
let chunk = ErasureChunk {
......@@ -273,7 +273,7 @@ fn store_block_works() {
let test_state = TestState::default();
test_harness(test_state.pruning_config.clone(), store.clone(), |test_harness| async move {
let TestHarness { mut virtual_overseer } = test_harness;
let candidate_hash = Hash::from([1; 32]);
let candidate_hash = CandidateHash(Hash::from([1; 32]));
let validator_index = 5;
let n_validators = 10;
......@@ -327,7 +327,7 @@ fn store_pov_and_query_chunk_works() {
test_harness(test_state.pruning_config.clone(), store.clone(), |test_harness| async move {
let TestHarness { mut virtual_overseer } = test_harness;
let candidate_hash = Hash::from([1; 32]);
let candidate_hash = CandidateHash(Hash::from([1; 32]));
let n_validators = 10;
let pov = PoV {
......@@ -370,7 +370,7 @@ fn stored_but_not_included_chunk_is_pruned() {
test_harness(test_state.pruning_config.clone(), store.clone(), |test_harness| async move {
let TestHarness { mut virtual_overseer } = test_harness;
let candidate_hash = Hash::repeat_byte(1);
let candidate_hash = CandidateHash(Hash::repeat_byte(1));
let relay_parent = Hash::repeat_byte(2);
let validator_index = 5;
......@@ -425,7 +425,7 @@ fn stored_but_not_included_data_is_pruned() {
test_harness(test_state.pruning_config.clone(), store.clone(), |test_harness| async move {
let TestHarness { mut virtual_overseer } = test_harness;
let candidate_hash = Hash::repeat_byte(1);
let candidate_hash = CandidateHash(Hash::repeat_byte(1));
let n_validators = 10;
let pov = PoV {
......@@ -852,7 +852,7 @@ fn forkfullness_works() {
async fn query_available_data(
virtual_overseer: &mut test_helpers::TestSubsystemContextHandle<AvailabilityStoreMessage>,
candidate_hash: Hash,
candidate_hash: CandidateHash,
) -> Option<AvailableData> {
let (tx, rx) = oneshot::channel();
......@@ -864,7 +864,7 @@ async fn query_available_data(
async fn query_chunk(
virtual_overseer: &mut test_helpers::TestSubsystemContextHandle<AvailabilityStoreMessage>,
candidate_hash: Hash,
candidate_hash: CandidateHash,
index: u32,
) -> Option<ErasureChunk> {
let (tx, rx) = oneshot::channel();
......
......@@ -32,7 +32,7 @@ use futures::{
use sp_keystore::SyncCryptoStorePtr;
use polkadot_primitives::v1::{
CommittedCandidateReceipt, BackedCandidate, Id as ParaId, ValidatorId,
ValidatorIndex, SigningContext, PoV,
ValidatorIndex, SigningContext, PoV, CandidateHash,
CandidateDescriptor, AvailableData, ValidatorSignature, Hash, CandidateReceipt,
CandidateCommitments, CoreState, CoreIndex, CollatorId, ValidationOutputs,
};
......@@ -103,12 +103,12 @@ struct CandidateBackingJob {
/// The collator required to author the candidate, if any.
required_collator: Option<CollatorId>,
/// We issued `Valid` or `Invalid` statements about these candidates.
issued_statements: HashSet<Hash>,
issued_statements: HashSet<CandidateHash>,
/// `Some(h)` if this job has already issued a `Seconded` statement for some candidate with hash `h`.
seconded: Option<Hash>,
seconded: Option<CandidateHash>,
/// The candidates that are includable, by hash. Each entry here indicates
/// that we've sent the provisioner the backed candidate.
backed: HashSet<Hash>,
backed: HashSet<CandidateHash>,
/// We have already reported misbehaviors for these validators.
reported_misbehavior_for: HashSet<ValidatorIndex>,
keystore: SyncCryptoStorePtr,
......@@ -131,12 +131,12 @@ struct TableContext {
impl TableContextTrait for TableContext {
type AuthorityId = ValidatorIndex;
type Digest = Hash;
type Digest = CandidateHash;
type GroupId = ParaId;
type Signature = ValidatorSignature;
type Candidate = CommittedCandidateReceipt;
fn candidate_digest(candidate: &CommittedCandidateReceipt) -> Hash {
fn candidate_digest(candidate: &CommittedCandidateReceipt) -> CandidateHash {
candidate.hash()
}
......@@ -341,6 +341,7 @@ impl CandidateBackingJob {
// the collator, do not make available and report the collator.
let commitments_check = self.make_pov_available(
pov,
candidate_hash,
validation_data,
outputs,
|commitments| if commitments.hash() == candidate.commitments_hash {
......@@ -525,7 +526,7 @@ impl CandidateBackingJob {
&mut self,
summary: TableSummary,
) -> Result<(), Error> {
let candidate_hash = summary.candidate.clone();
let candidate_hash = summary.candidate;
if self.issued_statements.contains(&candidate_hash) {
return Ok(())
......@@ -559,6 +560,7 @@ impl CandidateBackingJob {
// If validation produces a new set of commitments, we vote the candidate as invalid.
let commitments_check = self.make_pov_available(
(&*pov).clone(),
candidate_hash,
validation_data,
outputs,
|commitments| if commitments == expected_commitments {
......@@ -667,12 +669,13 @@ impl CandidateBackingJob {
&mut self,
id: Option<ValidatorIndex>,
n_validators: u32,
candidate_hash: CandidateHash,
available_data: AvailableData,
) -> Result<(), Error> {
let (tx, rx) = oneshot::channel();
self.tx_from.send(FromJob::AvailabilityStore(
AvailabilityStoreMessage::StoreAvailableData(
self.parent,
candidate_hash,
id,
n_validators,
available_data,
......@@ -694,6 +697,7 @@ impl CandidateBackingJob {
async fn make_pov_available<T, E>(
&mut self,
pov: PoV,
candidate_hash: CandidateHash,
validation_data: polkadot_primitives::v1::PersistedValidationData,
outputs: ValidationOutputs,
with_commitments: impl FnOnce(CandidateCommitments) -> Result<T, E>,
......@@ -727,6 +731,7 @@ impl CandidateBackingJob {
self.store_available_data(
self.table_context.validator.as_ref().map(|v| v.index()),
self.table_context.validators.len() as u32,
candidate_hash,
available_data,
).await?;
......@@ -1206,8 +1211,8 @@ mod tests {
assert_matches!(
virtual_overseer.recv().await,
AllMessages::AvailabilityStore(
AvailabilityStoreMessage::StoreAvailableData(parent_hash, _, _, _, tx)
) if parent_hash == test_state.relay_parent => {
AvailabilityStoreMessage::StoreAvailableData(candidate_hash, _, _, _, tx)
) if candidate_hash == candidate.hash() => {
tx.send(Ok(())).unwrap();
}
);
......@@ -1333,8 +1338,8 @@ mod tests {
assert_matches!(
virtual_overseer.recv().await,
AllMessages::AvailabilityStore(
AvailabilityStoreMessage::StoreAvailableData(parent_hash, _, _, _, tx)
) if parent_hash == test_state.relay_parent => {
AvailabilityStoreMessage::StoreAvailableData(candidate_hash, _, _, _, tx)
) if candidate_hash == candidate_a.hash() => {
tx.send(Ok(())).unwrap();
}
);
......@@ -1482,8 +1487,8 @@ mod tests {
assert_matches!(
virtual_overseer.recv().await,
AllMessages::AvailabilityStore(
AvailabilityStoreMessage::StoreAvailableData(parent_hash, _, _, _, tx)
) if parent_hash == test_state.relay_parent => {
AvailabilityStoreMessage::StoreAvailableData(candidate_hash, _, _, _, tx)
) if candidate_hash == candidate_a.hash() => {
tx.send(Ok(())).unwrap();
}
);
......@@ -1665,8 +1670,8 @@ mod tests {
assert_matches!(
virtual_overseer.recv().await,
AllMessages::AvailabilityStore(
AvailabilityStoreMessage::StoreAvailableData(parent_hash, _, _, _, tx)
) if parent_hash == test_state.relay_parent => {
AvailabilityStoreMessage::StoreAvailableData(candidate_hash, _, _, _, tx)
) if candidate_hash == candidate_b.hash() => {
tx.send(Ok(())).unwrap();
}
);
......
......@@ -174,7 +174,7 @@ async fn get_core_availability(
.await
.send(
AvailabilityStoreMessage::QueryChunkAvailability(
committed_candidate_receipt.descriptor.pov_hash,
committed_candidate_receipt.hash(),
validator_idx,
tx,
).into(),
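
Alongside the `relay_parent` fix mentioned in the commit message, this hunk corrects a query that passed the descriptor's `pov_hash` where the availability store expects a candidate hash; since chunks are stored under keys derived from the candidate hash (see `erasure_chunk_key` above), the old query could not match anything. A tiny illustrative check (hypothetical values, a bare `H256` standing in for the hash types):

```rust
use parity_scale_codec::Encode;
use sp_core::H256;

// Same shape as the store's `erasure_chunk_key`, with a bare `H256` for the hash.
fn erasure_chunk_key(hash: &H256, index: u32) -> Vec<u8> {
    (hash, index, 0i8).encode()
}

fn main() {
    let candidate_hash = H256::repeat_byte(1); // key the store wrote under
    let pov_hash = H256::repeat_byte(2);       // key the old query asked for

    // Different hashes yield different keys, so the old lookup always missed.
    assert_ne!(
        erasure_chunk_key(&candidate_hash, 0),
        erasure_chunk_key(&pov_hash, 0),
    );
}
```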
......
......@@ -38,7 +38,7 @@ use polkadot_node_network_protocol::{
use polkadot_node_subsystem_util::metrics::{self, prometheus};
use polkadot_primitives::v1::{
BlakeTwo256, CommittedCandidateReceipt, CoreState, ErasureChunk, Hash, HashT, Id as ParaId,
SessionIndex, ValidatorId, ValidatorIndex, PARACHAIN_KEY_TYPE_ID,
SessionIndex, ValidatorId, ValidatorIndex, PARACHAIN_KEY_TYPE_ID, CandidateHash,
};
use polkadot_subsystem::messages::{
AllMessages, AvailabilityDistributionMessage, AvailabilityStoreMessage, ChainApiMessage,
......@@ -130,7 +130,7 @@ const BENEFIT_VALID_MESSAGE: Rep = Rep::new(10, "Valid message");
#[derive(Encode, Decode, Debug, Clone, PartialEq, Eq, Hash)]
pub struct AvailabilityGossipMessage {
/// Anchor hash of the candidate the `ErasureChunk` is associated to.
pub candidate_hash: Hash,
pub candidate_hash: CandidateHash,
/// The erasure chunk, an encoded part of the `AvailabilityData`.
pub erasure_chunk: ErasureChunk,
}
......@@ -149,13 +149,13 @@ struct ProtocolState {
/// Caches a mapping of relay parents or ancestor to live candidate receipts.
/// Allows fast intersection of live candidates with views and consecutive unioning.
/// Maps relay parent / ancestor -> live candidate receipts + its hash.
receipts: HashMap<Hash, HashSet<(Hash, CommittedCandidateReceipt)>>,
receipts: HashMap<Hash, HashSet<(CandidateHash, CommittedCandidateReceipt)>>,
/// Allow reverse caching of view checks.
/// Maps candidate hash -> relay parent for extracting meta information from `PerRelayParent`.
/// Note that the presence of this is not sufficient to determine if deletion is OK, i.e.
/// two histories could cover this.
reverse: HashMap<Hash, Hash>,
reverse: HashMap<CandidateHash, Hash>,
/// Keeps track of which candidate receipts are required due to ancestors of which relay parents
/// of our view.
......@@ -166,7 +166,7 @@ struct ProtocolState {
per_relay_parent: HashMap<Hash, PerRelayParent>,
/// Track data that is specific to a candidate.
per_candidate: HashMap<Hash, PerCandidate>,
per_candidate: HashMap<CandidateHash, PerCandidate>,
}
#[derive(Debug, Clone, Default)]
......@@ -176,11 +176,11 @@ struct PerCandidate {
/// candidate hash + erasure chunk index -> gossip message
message_vault: HashMap<u32, AvailabilityGossipMessage>,
/// Track received candidate hashes and chunk indices from peers.
received_messages: HashMap<PeerId, HashSet<(Hash, ValidatorIndex)>>,
/// Track received candidate hashes and validator indices from peers.
received_messages: HashMap<PeerId, HashSet<(CandidateHash, ValidatorIndex)>>,
/// Track already sent candidate hashes and the erasure chunk index to the peers.
sent_messages: HashMap<PeerId, HashSet<(Hash, ValidatorIndex)>>,
sent_messages: HashMap<PeerId, HashSet<(CandidateHash, ValidatorIndex)>>,
/// The set of validators.
validators: Vec<ValidatorId>,
......@@ -221,7 +221,7 @@ impl ProtocolState {
fn cached_live_candidates_unioned<'a>(
&'a self,
relay_parents: impl IntoIterator<Item = &'a Hash> + 'a,
) -> HashMap<Hash, CommittedCandidateReceipt> {
) -> HashMap<CandidateHash, CommittedCandidateReceipt> {
let relay_parents_and_ancestors = self.extend_with_ancestors(relay_parents);
relay_parents_and_ancestors
.into_iter()
......@@ -229,7 +229,7 @@ impl ProtocolState {
.map(|receipt_set| receipt_set.into_iter())
.flatten()
.map(|(receipt_hash, receipt)| (receipt_hash.clone(), receipt.clone()))
.collect::<HashMap<Hash, CommittedCandidateReceipt>>()
.collect()
}
async fn add_relay_parent<Context>(
......@@ -296,8 +296,9 @@ impl ProtocolState {
// remove from the ancestry index
self.ancestry.remove(relay_parent);
// and also remove the actual receipt
self.receipts.remove(relay_parent);
self.per_candidate.remove(relay_parent);
if let Some(candidates) = self.receipts.remove(relay_parent) {
candidates.into_iter().for_each(|c| { self.per_candidate.remove(&c.0); });
}
}
}
if let Some(per_relay_parent) = self.per_relay_parent.remove(relay_parent) {
......@@ -313,8 +314,9 @@ impl ProtocolState {
// remove from the ancestry index
self.ancestry.remove(&ancestor);
// and also remove the actual receipt
self.receipts.remove(&ancestor);
self.per_candidate.remove(&ancestor);
if let Some(candidates) = self.receipts.remove(&ancestor) {
candidates.into_iter().for_each(|c| { self.per_candidate.remove(&c.0); });
}
}
}
}
......@@ -645,8 +647,7 @@ where
let live_candidates = state.cached_live_candidates_unioned(state.view.0.iter());
// check if the candidate is of interest
let live_candidate = if let Some(live_candidate) = live_candidates.get(&message.candidate_hash)
{
let live_candidate = if let Some(live_candidate) = live_candidates.get(&message.candidate_hash) {
live_candidate
} else {
return modify_reputation(ctx, origin, COST_NOT_A_LIVE_CANDIDATE).await;
......@@ -862,7 +863,7 @@ async fn query_live_candidates<Context>(
ctx: &mut Context,
state: &mut ProtocolState,
relay_parents: impl IntoIterator<Item = Hash>,
) -> Result<HashMap<Hash, (Hash, CommittedCandidateReceipt)>>
) -> Result<HashMap<Hash, (CandidateHash, CommittedCandidateReceipt)>>
where
Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
{
......@@ -871,7 +872,7 @@ where
let capacity = hint.1.unwrap_or(hint.0) * (1 + AvailabilityDistributionSubsystem::K);
let mut live_candidates =
HashMap::<Hash, (Hash, CommittedCandidateReceipt)>::with_capacity(capacity);
HashMap::<Hash, (CandidateHash, CommittedCandidateReceipt)>::with_capacity(capacity);
for relay_parent in iter {
// register one of relay parents (not the ancestors)
......@@ -969,7 +970,7 @@ where
}
/// Query the proof of validity for a particular candidate hash.
async fn query_data_availability<Context>(ctx: &mut Context, candidate_hash: Hash) -> Result<bool>
async fn query_data_availability<Context>(ctx: &mut Context, candidate_hash: CandidateHash) -> Result<bool>
where
Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
{
......@@ -985,7 +986,7 @@ where
async fn query_chunk<Context>(
ctx: &mut Context,
candidate_hash: Hash,
candidate_hash: CandidateHash,
validator_index: ValidatorIndex,
) -> Result<Option<ErasureChunk>>
where
......@@ -1002,7 +1003,7 @@ where
async fn store_chunk<Context>(
ctx: &mut Context,
candidate_hash: Hash,
candidate_hash: CandidateHash,
relay_parent: Hash,
validator_index: ValidatorIndex,
erasure_chunk: ErasureChunk,
......
......@@ -254,7 +254,7 @@ fn make_erasure_root(test: &TestState, pov: PoV) -> Hash {
fn make_valid_availability_gossip(
test: &TestState,
candidate_hash: Hash,
candidate_hash: CandidateHash,
erasure_chunk_index: u32,
pov: PoV,
) -> AvailabilityGossipMessage {
......@@ -320,7 +320,7 @@ fn helper_integrity() {
.build();
let message =
make_valid_availability_gossip(&test_state, dbg!(candidate.hash()), 2, pov_block.clone());
make_valid_availability_gossip(&test_state, candidate.hash(), 2, pov_block.clone());
let root = dbg!(&candidate.commitments.erasure_root);
......
......@@ -186,7 +186,7 @@ impl View {
pub mod v1 {
use polkadot_primitives::v1::{
Hash, CollatorId, Id as ParaId, ErasureChunk, CandidateReceipt,
SignedAvailabilityBitfield, PoV,
SignedAvailabilityBitfield, PoV, CandidateHash,
};
use polkadot_node_primitives::SignedFullStatement;
use parity_scale_codec::{Encode, Decode};
......@@ -198,7 +198,7 @@ pub mod v1 {
pub enum AvailabilityDistributionMessage {
/// An erasure chunk for a given candidate hash.
#[codec(index = "0")]
Chunk(Hash, ErasureChunk),
Chunk(CandidateHash, ErasureChunk),
}
/// Network messages used by the bitfield distribution subsystem.
......
......@@ -35,7 +35,7 @@ use polkadot_node_subsystem_util::{
};
use node_primitives::SignedFullStatement;
use polkadot_primitives::v1::{
Hash, CompactStatement, ValidatorIndex, ValidatorId, SigningContext, ValidatorSignature,
Hash, CompactStatement, ValidatorIndex, ValidatorId, SigningContext, ValidatorSignature, CandidateHash,
};
use polkadot_node_network_protocol::{
v1 as protocol_v1, View, PeerId, ReputationChange as Rep, NetworkBridgeEvent,
......@@ -102,32 +102,32 @@ impl StatementDistribution {
/// via other means.
#[derive(Default)]
struct VcPerPeerTracker {
local_observed: arrayvec::ArrayVec<[Hash; VC_THRESHOLD]>,
remote_observed: arrayvec::ArrayVec<[Hash; VC_THRESHOLD]>,
local_observed: arrayvec::ArrayVec<[CandidateHash; VC_THRESHOLD]>,
remote_observed: arrayvec::ArrayVec<[CandidateHash; VC_THRESHOLD]>,
}
impl VcPerPeerTracker {
// Note that the remote should now be aware that a validator has seconded a given candidate (by hash)
// based on a message that we have sent it from our local pool.
fn note_local(&mut self, h: Hash) {
/// Note that the remote should now be aware that a validator has seconded a given candidate (by hash)
/// based on a message that we have sent it from our local pool.
fn note_local(&mut self, h: CandidateHash) {
if !note_hash(&mut self.local_observed, h) {
log::warn!("Statement distribution is erroneously attempting to distribute more \
than {} candidate(s) per validator index. Ignoring", VC_THRESHOLD);
}
}
// Note that the remote should now be aware that a validator has seconded a given candidate (by hash)
// based on a message that it has sent us.
//
// Returns `true` if the peer was allowed to send us such a message, `false` otherwise.
fn note_remote(&mut self, h: Hash) -> bool {
/// Note that the remote should now be aware that a validator has seconded a given candidate (by hash)
/// based on a message that it has sent us.
///
/// Returns `true` if the peer was allowed to send us such a message, `false` otherwise.
fn note_remote(&mut self, h: CandidateHash) -> bool {
note_hash(&mut self.remote_observed, h)
}
}
fn note_hash(
observed: &mut arrayvec::ArrayVec<[Hash; VC_THRESHOLD]>,
h: Hash,
observed: &mut arrayvec::ArrayVec<[CandidateHash; VC_THRESHOLD]>,
h: CandidateHash,
) -> bool {
if observed.contains(&h) { return true; }
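
The body of `note_hash` is cut off by the hunk boundary here; as a rough, self-contained sketch of the bounded per-peer tracking the comments above describe (older `arrayvec` API matching the code above, `VC_THRESHOLD` value assumed, not the PR's actual implementation):

```rust
use arrayvec::ArrayVec;

const VC_THRESHOLD: usize = 2; // assumed value, for illustration only
type CandidateHash = [u8; 32]; // stand-in for the real wrapper type

// Accept a hash if it is already known or there is still room; refuse once the
// per-peer threshold of distinct candidates has been reached.
fn note_hash(
    observed: &mut ArrayVec<[CandidateHash; VC_THRESHOLD]>,
    h: CandidateHash,
) -> bool {
    if observed.contains(&h) {
        return true;
    }
    observed.try_push(h).is_ok()
}

fn main() {
    let mut observed = ArrayVec::new();
    assert!(note_hash(&mut observed, [1; 32]));
    assert!(note_hash(&mut observed, [2; 32]));
    assert!(note_hash(&mut observed, [1; 32]));  // repeats are fine
    assert!(!note_hash(&mut observed, [3; 32])); // a third distinct candidate is refused
}
```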
......@@ -139,7 +139,7 @@ fn note_hash(
struct PeerRelayParentKnowledge {
/// candidates that the peer is aware of. This indicates that we can
/// send other statements pertaining to that candidate.
known_candidates: HashSet<Hash>,
known_candidates: HashSet<CandidateHash>,
/// fingerprints of all statements a peer should be aware of: those that