Unverified commit ec3a61ed, authored by Sebastian Kunert, committed by GitHub

Remove pov-recovery race condition/Improve zombienet test (#2526)

The test was a bit flaky on CI. 

There was a race condition in the pov-recovery system. With unlucky timing, a block could end up waiting for a parent that was already queued for import. The check for waiting children only runs when a block is inserted into the import queue, so we need an additional check once we receive the import notification for the parent block.
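
To make that concrete, here is a minimal, self-contained sketch of the double check. The names (`Recovery`, `Block`, `on_import_notification`) and the `u64` hash are simplified stand-ins, not the actual cumulus-pov-recovery API; the real change is in the diff below.

```rust
use std::collections::{HashMap, VecDeque};

// Simplified stand-ins for the real types; the names below are illustrative
// only and do not match the actual cumulus-pov-recovery API.
type Hash = u64;

struct Block {
    hash: Hash,
    parent: Hash,
}

#[derive(Default)]
struct Recovery {
    // Children that arrived while their parent was not yet imported.
    waiting_for_parent: HashMap<Hash, Vec<Block>>,
    imported: Vec<Hash>,
}

impl Recovery {
    /// Import `block` and drain any children that were already waiting for it.
    fn import_block(&mut self, block: Block) {
        let mut queue = VecDeque::from([block]);
        while let Some(block) = queue.pop_front() {
            self.imported.push(block.hash);
            if let Some(children) = self.waiting_for_parent.remove(&block.hash) {
                queue.extend(children);
            }
        }
    }

    /// Called when the import queue reports that `hash` has been imported.
    ///
    /// Without this second check, a child that started waiting *after* its
    /// parent was handed to the import queue would never be drained.
    fn on_import_notification(&mut self, hash: Hash) {
        if let Some(children) = self.waiting_for_parent.remove(&hash) {
            for child in children {
                self.import_block(child);
            }
        }
    }
}

fn main() {
    let mut recovery = Recovery::default();
    let child = Block { hash: 2, parent: 1 };
    // The parent (hash 1) is already sitting in the import queue when the
    // child arrives, so the child is parked under its parent hash.
    recovery.waiting_for_parent.entry(child.parent).or_default().push(child);
    // The parent's import notification is the only remaining chance to pick
    // the child up again; that is the extra check this commit adds.
    recovery.on_import_notification(1);
    assert_eq!(recovery.imported, vec![2]);
}
```

Draining `waiting_for_parent` from both places means it no longer matters whether the parent was still being recovered or already sitting in the import queue when the child showed up.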

The second issue is that `alice` was missing `--in-peers 0` and `--out-peers 0`, so `alice` was sometimes still fetching blocks via sync and the log assertions in the zombienet test would fail (see the zombienet config change below).

There is another potential issue that I saw once locally: the test uses a failing pov-recovery handle that deliberately fails recovery from time to time to check that the retry mechanism does what it should. We now make sure that the same candidate is never failed twice, so the test becomes more predictable.
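
The dedup boils down to a `HashSet` of already-failed candidates: `HashSet::insert` returns `false` on a repeat, so the short-circuited condition never fails the same candidate twice. The sketch below is illustrative only; the `should_fail` helper and the `CandidateHash` alias are made up here, and the real change lives in `FailingRecoveryHandle` in the diff further down.

```rust
use std::collections::HashSet;

// Illustrative stand-in for the real candidate hash type.
type CandidateHash = [u8; 32];

/// Returns whether recovery of `candidate` should be failed artificially.
///
/// `HashSet::insert` returns `false` when the hash is already present, so a
/// candidate whose recovery has been failed once is never failed artificially again.
fn should_fail(counter: u32, candidate: CandidateHash, failed: &mut HashSet<CandidateHash>) -> bool {
    counter % 3 == 0 && failed.insert(candidate)
}

fn main() {
    let mut failed = HashSet::new();
    let candidate = [1u8; 32];
    // First time this candidate is seen on a matching counter: fail it once.
    assert!(should_fail(3, candidate, &mut failed));
    // The retry of the same candidate must make progress, so it is not failed again.
    assert!(!should_fail(6, candidate, &mut failed));
}
```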
@@ -410,6 +410,7 @@ where
 				?block_hash,
 				parent_hash = ?parent,
 				parent_scheduled_for_recovery,
+				waiting_blocks = self.waiting_for_parent.len(),
 				"Waiting for recovery of parent.",
 			);

@@ -442,13 +443,13 @@ where
 			_ => (),
 		}
-		self.import_block(block).await;
+		self.import_block(block);
 	}

 	/// Import the given `block`.
 	///
 	/// This will also recursivley drain `waiting_for_parent` and import them as well.
-	async fn import_block(&mut self, block: Block) {
+	fn import_block(&mut self, block: Block) {
 		let mut blocks = VecDeque::new();

 		tracing::debug!(target: LOG_TARGET, block_hash = ?block.hash(), "Importing block retrieved using pov_recovery");

@@ -551,7 +552,6 @@ where
 		};

 		futures::pin_mut!(pending_candidates);
 		loop {
 			select! {
 				pending_candidate = pending_candidates.next() => {

@@ -573,6 +573,17 @@
 				imported = imported_blocks.next() => {
 					if let Some(imported) = imported {
 						self.clear_waiting_recovery(&imported.hash);
+
+						// We need to double check that no blocks are waiting for this block.
+						// Can happen when a waiting child block is queued to wait for parent while the parent block is still
+						// in the import queue.
+						if let Some(waiting_blocks) = self.waiting_for_parent.remove(&imported.hash) {
+							for block in waiting_blocks {
+								tracing::debug!(target: LOG_TARGET, block_hash = ?block.hash(), resolved_parent = ?imported.hash, "Found new waiting child block during import, queuing.");
+								self.import_block(block);
+							}
+						};
 					} else {
 						tracing::debug!(target: LOG_TARGET, "Imported blocks stream ended");
 						return;

@@ -27,6 +27,7 @@ mod genesis;
 use runtime::AccountId;
 use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY};
 use std::{
+	collections::HashSet,
 	future::Future,
 	net::{IpAddr, Ipv4Addr, SocketAddr},
 	time::Duration,

@@ -57,7 +58,7 @@ use cumulus_test_runtime::{Hash, Header, NodeBlock as Block, RuntimeApi};
 use frame_system_rpc_runtime_api::AccountNonceApi;
 use polkadot_node_subsystem::{errors::RecoveryError, messages::AvailabilityRecoveryMessage};
 use polkadot_overseer::Handle as OverseerHandle;
-use polkadot_primitives::{CollatorPair, Hash as PHash, PersistedValidationData};
+use polkadot_primitives::{CandidateHash, CollatorPair, Hash as PHash, PersistedValidationData};
 use polkadot_service::ProvideRuntimeApi;
 use sc_consensus::ImportQueue;
 use sc_network::{

@@ -144,12 +145,13 @@ pub type TransactionPool = Arc<sc_transaction_pool::FullPool<Block, Client>>;
 pub struct FailingRecoveryHandle {
 	overseer_handle: OverseerHandle,
 	counter: u32,
+	failed_hashes: HashSet<CandidateHash>,
 }

 impl FailingRecoveryHandle {
 	/// Create a new FailingRecoveryHandle
 	pub fn new(overseer_handle: OverseerHandle) -> Self {
-		Self { overseer_handle, counter: 0 }
+		Self { overseer_handle, counter: 0, failed_hashes: Default::default() }
 	}
 }

@@ -160,11 +162,15 @@ impl RecoveryHandle for FailingRecoveryHandle {
 		message: AvailabilityRecoveryMessage,
 		origin: &'static str,
 	) {
-		// For every 5th block we immediately signal unavailability to trigger
-		// a retry.
-		if self.counter % 5 == 0 {
+		let AvailabilityRecoveryMessage::RecoverAvailableData(ref receipt, _, _, _) = message;
+		let candidate_hash = receipt.hash();
+
+		// For every 3rd block we immediately signal unavailability to trigger
+		// a retry. The same candidate is never failed multiple times to ensure progress.
+		if self.counter % 3 == 0 && self.failed_hashes.insert(candidate_hash) {
+			tracing::info!(target: LOG_TARGET, ?candidate_hash, "Failing pov recovery.");
+
 			let AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, back_sender) = message;
-			tracing::info!(target: LOG_TARGET, "Failing pov recovery.");
 			back_sender
 				.send(Err(RecoveryError::Unavailable))
 				.expect("Return channel should work here.");

@@ -34,13 +34,12 @@ add_to_genesis = false
 args = ["--disable-block-announcements"]

 # run 'alice' as a parachain collator who does not produce blocks
 # 'alice' is a bootnode for 'bob' and 'charlie'
 [[parachains.collators]]
 name = "alice"
 validator = true # collator
 image = "{{COL_IMAGE}}"
 command = "test-parachain"
-args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--use-null-consensus", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"]
+args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--use-null-consensus", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--in-peers 0", "--out-peers 0", "--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"]

 # run 'charlie' as a parachain full node
 [[parachains.collators]]

@@ -48,7 +47,7 @@ add_to_genesis = false
 validator = false # full node
 image = "{{COL_IMAGE}}"
 command = "test-parachain"
-args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--in-peers 0", "--out-peers 0","--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"]
+args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--in-peers 0", "--out-peers 0", "--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"]

 # we fail recovery for 'eve' from time to time to test retries
 [[parachains.collators]]