Unverified Commit 35501275 authored by asynchronous rob, committed by GitHub

Improve approval tracing (#2697)

* improve tracing for approval voting

* assignment criteria tracing

* new syntax
parent 7012cdbb
Pipeline #130371 failed with stages in 21 minutes and 49 seconds
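The "new syntax" bullet refers to the structured-field form of the `tracing` macros used throughout the diff below: `?field` captures a value with its `Debug` impl, `name = expr` records a named field, bare identifiers capture local variables by name, and the event message becomes the trailing string literal. A minimal standalone sketch of that style; the `LOG_TARGET` value, the `CandidateHash` stand-in, and the `tracing_subscriber` setup are illustrative assumptions, not the crate's actual definitions:

```rust
// Illustrative only: LOG_TARGET and CandidateHash are stand-ins, and
// `tracing` / `tracing-subscriber` are assumed to be available as dependencies.
const LOG_TARGET: &str = "example::approval_voting";

#[derive(Debug)]
struct CandidateHash([u8; 32]);

fn main() {
    // Install a subscriber that prints TRACE-level events.
    tracing_subscriber::fmt()
        .with_max_level(tracing::Level::TRACE)
        .init();

    let candidate_hash = CandidateHash([0u8; 32]);
    let validator_index = 7u32;
    let core = 3u32;

    // `?candidate_hash` records the field via Debug, bare `core` and
    // `validator_index` capture the local variables by name, `tranche = 0`
    // is an explicitly named field, and the literal at the end is the message.
    tracing::trace!(
        target: LOG_TARGET,
        ?candidate_hash,
        core,
        validator_index,
        tranche = 0,
        "RelayVRFModulo Assignment."
    );
}
```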
@@ -20,7 +20,7 @@ use polkadot_node_primitives::approval::{
self as approval_types, AssignmentCert, AssignmentCertKind, DelayTranche, RelayVRFStory,
};
use polkadot_primitives::v1::{
CoreIndex, ValidatorIndex, SessionInfo, AssignmentPair, AssignmentId, GroupIndex,
CoreIndex, ValidatorIndex, SessionInfo, AssignmentPair, AssignmentId, GroupIndex, CandidateHash,
};
use sc_keystore::LocalKeystore;
use parity_scale_codec::{Encode, Decode};
@@ -178,7 +178,7 @@ pub(crate) trait AssignmentCriteria {
keystore: &LocalKeystore,
relay_vrf_story: RelayVRFStory,
config: &Config,
leaving_cores: Vec<(CoreIndex, GroupIndex)>,
leaving_cores: Vec<(CandidateHash, CoreIndex, GroupIndex)>,
) -> HashMap<CoreIndex, OurAssignment>;
fn check_assignment_cert(
@@ -200,7 +200,7 @@ impl AssignmentCriteria for RealAssignmentCriteria {
keystore: &LocalKeystore,
relay_vrf_story: RelayVRFStory,
config: &Config,
leaving_cores: Vec<(CoreIndex, GroupIndex)>,
leaving_cores: Vec<(CandidateHash, CoreIndex, GroupIndex)>,
) -> HashMap<CoreIndex, OurAssignment> {
compute_assignments(
keystore,
@@ -244,7 +244,7 @@ pub(crate) fn compute_assignments(
keystore: &LocalKeystore,
relay_vrf_story: RelayVRFStory,
config: &Config,
leaving_cores: impl IntoIterator<Item = (CoreIndex, GroupIndex)> + Clone,
leaving_cores: impl IntoIterator<Item = (CandidateHash, CoreIndex, GroupIndex)> + Clone,
) -> HashMap<CoreIndex, OurAssignment> {
if config.n_cores == 0 || config.assignment_keys.is_empty() || config.validator_groups.is_empty() {
return HashMap::new()
@@ -271,8 +271,8 @@ pub(crate) fn compute_assignments(
// Ignore any cores where the assigned group is our own.
let leaving_cores = leaving_cores.into_iter()
.filter(|&(_, ref g)| !is_in_backing_group(&config.validator_groups, index, *g))
.map(|(c, _)| c)
.filter(|&(_, _, ref g)| !is_in_backing_group(&config.validator_groups, index, *g))
.map(|(c_hash, core, _)| (c_hash, core))
.collect::<Vec<_>>();
let assignments_key: &sp_application_crypto::sr25519::Pair = assignments_key.as_ref();
@@ -308,7 +308,7 @@ fn compute_relay_vrf_modulo_assignments(
validator_index: ValidatorIndex,
config: &Config,
relay_vrf_story: RelayVRFStory,
leaving_cores: impl IntoIterator<Item = CoreIndex> + Clone,
leaving_cores: impl IntoIterator<Item = (CandidateHash, CoreIndex)> + Clone,
assignments: &mut HashMap<CoreIndex, OurAssignment>,
) {
for rvm_sample in 0..config.relay_vrf_modulo_samples {
@@ -322,7 +322,18 @@
relay_vrf_modulo_transcript(relay_vrf_story.clone(), rvm_sample),
|vrf_in_out| {
*core = relay_vrf_modulo_core(&vrf_in_out, config.n_cores);
if leaving_cores.clone().into_iter().any(|c| c == *core) {
if let Some((candidate_hash, _))
= leaving_cores.clone().into_iter().find(|(_, c)| c == core)
{
tracing::trace!(
target: LOG_TARGET,
?candidate_hash,
?core,
?validator_index,
tranche = 0,
"RelayVRFModulo Assignment."
);
Some(assigned_core_transcript(*core))
} else {
None
@@ -355,10 +366,10 @@ fn compute_relay_vrf_delay_assignments(
validator_index: ValidatorIndex,
config: &Config,
relay_vrf_story: RelayVRFStory,
leaving_cores: impl IntoIterator<Item = CoreIndex>,
leaving_cores: impl IntoIterator<Item = (CandidateHash, CoreIndex)>,
assignments: &mut HashMap<CoreIndex, OurAssignment>,
) {
for core in leaving_cores {
for (candidate_hash, core) in leaving_cores {
let (vrf_in_out, vrf_proof, _) = assignments_key.vrf_sign(
relay_vrf_delay_transcript(relay_vrf_story.clone(), core),
);
@@ -381,11 +392,25 @@
triggered: false,
};
match assignments.entry(core) {
Entry::Vacant(e) => { let _ = e.insert(our_assignment); }
let used = match assignments.entry(core) {
Entry::Vacant(e) => { let _ = e.insert(our_assignment); true }
Entry::Occupied(mut e) => if e.get().tranche > our_assignment.tranche {
e.insert(our_assignment);
true
} else {
false
},
};
if used {
tracing::trace!(
target: LOG_TARGET,
?candidate_hash,
?core,
?validator_index,
tranche,
"RelayVRFDelay Assignment",
);
}
}
}
@@ -500,7 +525,7 @@ mod tests {
use sp_keyring::sr25519::Keyring as Sr25519Keyring;
use sp_application_crypto::sr25519;
use sp_core::crypto::Pair as PairT;
use polkadot_primitives::v1::ASSIGNMENT_KEY_TYPE_ID;
use polkadot_primitives::v1::{ASSIGNMENT_KEY_TYPE_ID, Hash};
use polkadot_node_primitives::approval::{VRFOutput, VRFProof};
// sets up a keystore with the given keyring accounts.
@@ -560,6 +585,9 @@
make_keystore(&[Sr25519Keyring::Alice])
);
let c_a = CandidateHash(Hash::repeat_byte(0));
let c_b = CandidateHash(Hash::repeat_byte(1));
let relay_vrf_story = RelayVRFStory([42u8; 32]);
let assignments = compute_assignments(
&keystore,
@@ -576,7 +604,7 @@
relay_vrf_modulo_samples: 3,
n_delay_tranches: 40,
},
vec![(CoreIndex(0), GroupIndex(1)), (CoreIndex(1), GroupIndex(0))],
vec![(c_a, CoreIndex(0), GroupIndex(1)), (c_b, CoreIndex(1), GroupIndex(0))],
);
// Note that alice is in group 0, which was the backing group for core 1.
@@ -591,6 +619,9 @@
make_keystore(&[Sr25519Keyring::Alice])
);
let c_a = CandidateHash(Hash::repeat_byte(0));
let c_b = CandidateHash(Hash::repeat_byte(1));
let relay_vrf_story = RelayVRFStory([42u8; 32]);
let assignments = compute_assignments(
&keystore,
@@ -607,7 +638,7 @@
relay_vrf_modulo_samples: 3,
n_delay_tranches: 40,
},
vec![(CoreIndex(0), GroupIndex(0)), (CoreIndex(1), GroupIndex(1))],
vec![(c_a, CoreIndex(0), GroupIndex(0)), (c_b, CoreIndex(1), GroupIndex(1))],
);
assert_eq!(assignments.len(), 1);
@@ -680,6 +711,7 @@ mod tests {
&config,
(0..n_cores)
.map(|i| (
CandidateHash(Hash::repeat_byte(i as u8)),
CoreIndex(i as u32),
group_for_core(i),
))
......
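Outside the diff notation, the shape of the criteria change above is small: each leaving core now carries its candidate hash through the backing-group filter, so the later tranche-0 and delay assignments can name the candidate they cover. A condensed, self-contained sketch with stand-in types (the real `CandidateHash`, `CoreIndex`, `GroupIndex`, and `is_in_backing_group` live in polkadot-primitives and the approval-voting crate; the definitions here are assumptions for illustration):

```rust
// Stand-in types for illustration only.
#[derive(Debug, Clone, Copy, PartialEq)]
struct CandidateHash([u8; 32]);
#[derive(Debug, Clone, Copy, PartialEq)]
struct CoreIndex(u32);
#[derive(Debug, Clone, Copy, PartialEq)]
struct GroupIndex(u32);

// Hypothetical helper mirroring `is_in_backing_group`: true if `validator`
// belongs to `group` within `groups`.
fn is_in_backing_group(groups: &[Vec<u32>], validator: u32, group: GroupIndex) -> bool {
    groups.get(group.0 as usize).map_or(false, |g| g.contains(&validator))
}

fn main() {
    let validator_groups = vec![vec![0, 1], vec![2, 3]];
    let our_index = 0u32;

    let leaving_cores = vec![
        (CandidateHash([0; 32]), CoreIndex(0), GroupIndex(1)),
        (CandidateHash([1; 32]), CoreIndex(1), GroupIndex(0)), // backed by our group: dropped
    ];

    // As in the diff: drop cores backed by our own group, but keep the
    // candidate hash paired with the core index for later tracing.
    let leaving_cores: Vec<(CandidateHash, CoreIndex)> = leaving_cores
        .into_iter()
        .filter(|&(_, _, g)| !is_in_backing_group(&validator_groups, our_index, g))
        .map(|(c_hash, core, _)| (c_hash, core))
        .collect();

    assert_eq!(leaving_cores, vec![(CandidateHash([0; 32]), CoreIndex(0))]);
}
```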
@@ -473,7 +473,7 @@ async fn imported_block_info(
relay_vrf.clone(),
&crate::criteria::Config::from(session_info),
included_candidates.iter()
.map(|(_, _, core, group)| (*core, *group))
.map(|(c_hash, _, core, group)| (*c_hash, *core, *group))
.collect(),
);
@@ -895,7 +895,7 @@ mod tests {
_keystore: &LocalKeystore,
_relay_vrf_story: polkadot_node_primitives::approval::RelayVRFStory,
_config: &criteria::Config,
_leaving_cores: Vec<(polkadot_primitives::v1::CoreIndex, polkadot_primitives::v1::GroupIndex)>,
_leaving_cores: Vec<(CandidateHash, polkadot_primitives::v1::CoreIndex, polkadot_primitives::v1::GroupIndex)>,
) -> HashMap<polkadot_primitives::v1::CoreIndex, criteria::OurAssignment> {
HashMap::new()
}
......
@@ -605,9 +605,9 @@ async fn handle_from_overseer(
for block_batch in block_imported_candidates {
tracing::debug!(
target: LOG_TARGET,
"Imported new block {} with {} included candidates",
block_batch.block_hash,
block_batch.imported_candidates.len(),
block_hash = ?block_batch.block_hash,
num_candidates = block_batch.imported_candidates.len(),
"Imported new block.",
);
for (c_hash, c_entry) in block_batch.imported_candidates {
@@ -621,11 +621,11 @@ async fn handle_from_overseer(
let tick = our_tranche as Tick + block_batch.block_tick;
tracing::trace!(
target: LOG_TARGET,
"Scheduling first wakeup at tranche {} for candidate {} in block ({}, tick={})",
our_tranche,
c_hash,
block_batch.block_hash,
block_batch.block_tick,
tranche = our_tranche,
candidate_hash = ?c_hash,
block_hash = ?block_batch.block_hash,
block_tick = block_batch.block_tick,
"Scheduling first wakeup.",
);
// Our first wakeup will just be the tranche of our assignment,
@@ -987,18 +987,18 @@ fn schedule_wakeup_action(
match maybe_action {
Some(Action::ScheduleWakeup { ref tick, .. }) => tracing::trace!(
target: LOG_TARGET,
"Scheduling next wakeup at {} for candidate {} under block ({}, tick={})",
tick,
candidate_hash,
block_hash,
?candidate_hash,
?block_hash,
block_tick,
"Scheduling next wakeup.",
),
None => tracing::trace!(
target: LOG_TARGET,
"No wakeup needed for candidate {} under block ({}, tick={})",
candidate_hash,
block_hash,
?candidate_hash,
?block_hash,
block_tick,
"No wakeup needed.",
),
Some(_) => {} // unreachable
}
@@ -1093,9 +1093,10 @@ fn check_and_import_assignment(
} else {
tracing::trace!(
target: LOG_TARGET,
"Imported assignment from validator {} on candidate {:?}",
assignment.validator.0,
(assigned_candidate_hash, candidate_entry.candidate_receipt().descriptor.para_id),
validator = assignment.validator.0,
candidate_hash = ?assigned_candidate_hash,
para_id = ?candidate_entry.candidate_receipt().descriptor.para_id,
"Imported assignment.",
);
AssignmentCheckResult::Accepted
@@ -1196,9 +1197,11 @@ fn check_and_import_approval<T>(
tracing::trace!(
target: LOG_TARGET,
"Importing approval vote from validator {:?} on candidate {:?}",
(approval.validator, &pubkey),
(approved_candidate_hash, candidate_entry.candidate_receipt().descriptor.para_id),
validator_index = approval.validator.0,
validator = ?pubkey,
candidate_hash = ?approved_candidate_hash,
para_id = ?candidate_entry.candidate_receipt().descriptor.para_id,
"Importing approval vote",
);
let actions = import_checked_approval(
@@ -1316,9 +1319,9 @@ fn check_and_apply_full_approval(
if let approval_checking::Check::Approved(no_shows) = check {
tracing::trace!(
target: LOG_TARGET,
"Candidate approved {} under block {}",
candidate_hash,
block_hash,
?candidate_hash,
?block_hash,
"Candidate approved under block.",
);
let was_approved = block_entry.is_fully_approved();
@@ -1441,10 +1444,10 @@ fn process_wakeup(
tracing::trace!(
target: LOG_TARGET,
"Processing wakeup at tranche {} for candidate {} under block {}",
tranche_now,
candidate_hash,
relay_block,
tranche = tranche_now,
?candidate_hash,
block_hash = ?relay_block,
"Processing wakeup",
);
let (should_trigger, backing_group) = {
@@ -1500,9 +1503,10 @@ fn process_wakeup(
if let Some(i) = index_in_candidate {
tracing::trace!(
target: LOG_TARGET,
"Launching approval work for candidate {:?} in block {}",
(&candidate_hash, candidate_entry.candidate_receipt().descriptor.para_id),
relay_block,
?candidate_hash,
para_id = ?candidate_entry.candidate_receipt().descriptor.para_id,
block_hash = ?relay_block,
"Launching approval work.",
);
// sanity: should always be present.
@@ -1559,8 +1563,9 @@ async fn launch_approval(
tracing::trace!(
target: LOG_TARGET,
"Recovering data for candidate {:?}",
(candidate_hash, candidate.descriptor.para_id),
?candidate_hash,
para_id = ?candidate.descriptor.para_id,
"Recovering data.",
);
ctx.send_message(AvailabilityRecoveryMessage::RecoverAvailableData(
@@ -1663,8 +1668,9 @@ async fn launch_approval(
tracing::trace!(
target: LOG_TARGET,
"Candidate Valid {:?}",
(candidate_hash, para_id),
?candidate_hash,
?para_id,
"Candidate Valid",
);
let _ = background_tx.send(BackgroundRequest::ApprovalVote(ApprovalVoteRequest {
@@ -1782,8 +1788,8 @@ async fn issue_approval(
tracing::debug!(
target: LOG_TARGET,
"Issuing approval vote for candidate {:?}",
candidate_hash,
?candidate_hash,
"Issuing approval vote",
);
let actions = import_checked_approval(
......
@@ -132,7 +132,7 @@ where
_keystore: &LocalKeystore,
_relay_vrf_story: polkadot_node_primitives::approval::RelayVRFStory,
_config: &criteria::Config,
_leaving_cores: Vec<(polkadot_primitives::v1::CoreIndex, polkadot_primitives::v1::GroupIndex)>,
_leaving_cores: Vec<(CandidateHash, polkadot_primitives::v1::CoreIndex, polkadot_primitives::v1::GroupIndex)>,
) -> HashMap<polkadot_primitives::v1::CoreIndex, criteria::OurAssignment> {
self.0()
}
......