Unverified Commit c429e15c authored by Andronik Ordian's avatar Andronik Ordian Committed by GitHub
Browse files

refactor View to include finalized_number (#2128)

* refactor View to include finalized_number

* guide: update the NetworkBridge on BlockFinalized

* av-store: fix the tests

* actually fix tests

* grumbles

* ignore macro doctest

* use Hash::repeat_byte more consistently

* broadcast empty leaves updates as well

* fix issuing view updates on empty leaves updates
parent d3a0c571
Pipeline #117500 passed with stages
in 26 minutes
......@@ -145,7 +145,7 @@ impl CollationGenerationSubsystem {
}
false
}
Ok(Signal(BlockFinalized(_))) => false,
Ok(Signal(BlockFinalized(..))) => false,
Err(err) => {
tracing::error!(
target: LOG_TARGET,
......
......@@ -538,8 +538,8 @@ where
process_block_activated(ctx, &subsystem.inner, activated, &subsystem.metrics).await?;
}
}
FromOverseer::Signal(OverseerSignal::BlockFinalized(hash)) => {
process_block_finalized(subsystem, ctx, &subsystem.inner, hash).await?;
FromOverseer::Signal(OverseerSignal::BlockFinalized(_hash, number)) => {
process_block_finalized(subsystem, &subsystem.inner, number).await?;
}
FromOverseer::Communication { msg } => {
process_message(subsystem, ctx, msg).await?;
......@@ -564,20 +564,14 @@ where
/// The state of data has to be changed from
/// `CandidateState::Included` to `CandidateState::Finalized` and their pruning times have
/// to be updated to `now` + keep_finalized_{block, chunk}_for`.
#[tracing::instrument(level = "trace", skip(subsystem, ctx, db), fields(subsystem = LOG_TARGET))]
async fn process_block_finalized<Context>(
#[tracing::instrument(level = "trace", skip(subsystem, db), fields(subsystem = LOG_TARGET))]
async fn process_block_finalized(
subsystem: &AvailabilityStoreSubsystem,
ctx: &mut Context,
db: &Arc<dyn KeyValueDB>,
hash: Hash,
) -> Result<(), Error>
where
Context: SubsystemContext<Message=AvailabilityStoreMessage>
{
block_number: BlockNumber,
) -> Result<(), Error> {
let _timer = subsystem.metrics.time_process_block_finalized();
let block_number = get_block_number(ctx, hash).await?;
if let Some(mut pov_pruning) = pov_pruning(db) {
// Since the records are sorted by time in which they need to be pruned and not by block
// numbers we have to iterate through the whole collection here.
......
......@@ -274,7 +274,7 @@ fn store_block_works() {
let test_state = TestState::default();
test_harness(test_state.pruning_config.clone(), store.clone(), |test_harness| async move {
let TestHarness { mut virtual_overseer } = test_harness;
let candidate_hash = CandidateHash(Hash::from([1; 32]));
let candidate_hash = CandidateHash(Hash::repeat_byte(1));
let validator_index = 5;
let n_validators = 10;
......@@ -328,7 +328,7 @@ fn store_pov_and_query_chunk_works() {
test_harness(test_state.pruning_config.clone(), store.clone(), |test_harness| async move {
let TestHarness { mut virtual_overseer } = test_harness;
let candidate_hash = CandidateHash(Hash::from([1; 32]));
let candidate_hash = CandidateHash(Hash::repeat_byte(1));
let n_validators = 10;
let pov = PoV {
......@@ -543,20 +543,9 @@ fn stored_data_kept_until_finalized() {
overseer_signal(
&mut virtual_overseer,
OverseerSignal::BlockFinalized(new_leaf)
OverseerSignal::BlockFinalized(new_leaf, 10)
).await;
assert_matches!(
overseer_recv(&mut virtual_overseer).await,
AllMessages::ChainApi(ChainApiMessage::BlockNumber(
hash,
tx,
)) => {
assert_eq!(hash, new_leaf);
tx.send(Ok(Some(10))).unwrap();
}
);
// Wait for a half of the time finalized data should be available for
Delay::new(test_state.pruning_config.keep_finalized_block_for / 2).await;
......@@ -658,20 +647,9 @@ fn stored_chunk_kept_until_finalized() {
overseer_signal(
&mut virtual_overseer,
OverseerSignal::BlockFinalized(new_leaf)
OverseerSignal::BlockFinalized(new_leaf, 10)
).await;
assert_matches!(
overseer_recv(&mut virtual_overseer).await,
AllMessages::ChainApi(ChainApiMessage::BlockNumber(
hash,
tx,
)) => {
assert_eq!(hash, new_leaf);
tx.send(Ok(Some(10))).unwrap();
}
);
// Wait for a half of the time finalized data should be available for
Delay::new(test_state.pruning_config.keep_finalized_block_for / 2).await;
......@@ -812,21 +790,9 @@ fn forkfullness_works() {
overseer_signal(
&mut virtual_overseer,
OverseerSignal::BlockFinalized(new_leaf_1)
OverseerSignal::BlockFinalized(new_leaf_1, 5)
).await;
assert_matches!(
overseer_recv(&mut virtual_overseer).await,
AllMessages::ChainApi(ChainApiMessage::BlockNumber(
hash,
tx,
)) => {
assert_eq!(hash, new_leaf_1);
tx.send(Ok(Some(5))).unwrap();
}
);
// Data of both candidates should be still present in the DB.
assert_eq!(
query_available_data(&mut virtual_overseer, candidate_1_hash).await.unwrap(),
......
......@@ -1157,7 +1157,7 @@ mod tests {
let mut head_data = HashMap::new();
head_data.insert(chain_a, HeadData(vec![4, 5, 6]));
let relay_parent = Hash::from([5; 32]);
let relay_parent = Hash::repeat_byte(5);
let signing_context = SigningContext {
session_index: 1,
......
......@@ -95,7 +95,7 @@ async fn run(
loop {
match ctx.recv().await? {
FromOverseer::Signal(OverseerSignal::ActiveLeaves(_)) => {}
FromOverseer::Signal(OverseerSignal::BlockFinalized(_)) => {}
FromOverseer::Signal(OverseerSignal::BlockFinalized(..)) => {}
FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(()),
FromOverseer::Communication { msg } => match msg {
CandidateValidationMessage::ValidateFromChainState(
......
......@@ -89,7 +89,7 @@ where
match ctx.recv().await? {
FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(()),
FromOverseer::Signal(OverseerSignal::ActiveLeaves(_)) => {},
FromOverseer::Signal(OverseerSignal::BlockFinalized(_)) => {},
FromOverseer::Signal(OverseerSignal::BlockFinalized(..)) => {},
FromOverseer::Communication { msg } => match msg {
ChainApiMessage::BlockNumber(hash, response_channel) => {
let _timer = subsystem.metrics.time_block_number();
......
......@@ -152,7 +152,7 @@ async fn run<Client>(
req = ctx.recv().fuse() => match req? {
FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(()),
FromOverseer::Signal(OverseerSignal::ActiveLeaves(_)) => {},
FromOverseer::Signal(OverseerSignal::BlockFinalized(_)) => {},
FromOverseer::Signal(OverseerSignal::BlockFinalized(..)) => {},
FromOverseer::Communication { msg } => match msg {
RuntimeApiMessage::Request(relay_parent, request) => {
subsystem.spawn_request(relay_parent, request);
......
......@@ -416,7 +416,7 @@ where
.filter(|(_peer, view)| {
// collect all direct interests of a peer w/o ancestors
state
.cached_live_candidates_unioned(view.0.iter())
.cached_live_candidates_unioned(view.heads.iter())
.contains_key(&candidate_hash)
})
.map(|(peer, _view)| peer.clone())
......@@ -619,7 +619,7 @@ where
let _timer = metrics.time_process_incoming_peer_message();
// obtain the set of candidates we are interested in based on our current view
let live_candidates = state.cached_live_candidates_unioned(state.view.0.iter());
let live_candidates = state.cached_live_candidates_unioned(state.view.heads.iter());
// check if the candidate is of interest
let live_candidate = if let Some(live_candidate) = live_candidates.get(&message.candidate_hash) {
......@@ -707,7 +707,7 @@ where
.filter(|(_peer, view)| {
// peers view must contain the candidate hash too
state
.cached_live_candidates_unioned(view.0.iter())
.cached_live_candidates_unioned(view.heads.iter())
.contains_key(&message_id.0)
})
.map(|(peer, _)| -> PeerId { peer.clone() })
......@@ -781,7 +781,7 @@ impl AvailabilityDistributionSubsystem {
})) => {
// handled at view change
}
FromOverseer::Signal(OverseerSignal::BlockFinalized(_)) => {}
FromOverseer::Signal(OverseerSignal::BlockFinalized(..)) => {}
FromOverseer::Signal(OverseerSignal::Conclude) => {
return Ok(());
}
......
......@@ -17,7 +17,7 @@
use super::*;
use assert_matches::assert_matches;
use polkadot_erasure_coding::{branches, obtain_chunks_v1 as obtain_chunks};
use polkadot_node_network_protocol::ObservedRole;
use polkadot_node_network_protocol::{view, ObservedRole};
use polkadot_node_subsystem_util::TimeoutExt;
use polkadot_primitives::v1::{
AvailableData, BlockData, CandidateCommitments, CandidateDescriptor, GroupIndex,
......@@ -33,11 +33,6 @@ use sp_application_crypto::AppKey;
use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr};
use std::{sync::Arc, time::Duration};
macro_rules! view {
( $( $hash:expr ),* $(,)? ) => [
View(vec![ $( $hash.clone() ),* ])
];
}
macro_rules! delay {
($delay:expr) => {
......
......@@ -212,8 +212,8 @@ impl BitfieldDistribution {
// defer the cleanup to the view change
}
}
FromOverseer::Signal(OverseerSignal::BlockFinalized(hash)) => {
tracing::trace!(target: LOG_TARGET, hash = %hash, "block finalized");
FromOverseer::Signal(OverseerSignal::BlockFinalized(hash, number)) => {
tracing::trace!(target: LOG_TARGET, hash = %hash, number = %number, "block finalized");
}
FromOverseer::Signal(OverseerSignal::Conclude) => {
tracing::trace!(target: LOG_TARGET, "Conclude");
......@@ -770,13 +770,7 @@ mod test {
use std::sync::Arc;
use std::time::Duration;
use assert_matches::assert_matches;
use polkadot_node_network_protocol::ObservedRole;
macro_rules! view {
( $( $hash:expr ),* $(,)? ) => [
View(vec![ $( $hash.clone() ),* ])
];
}
use polkadot_node_network_protocol::{view, ObservedRole};
macro_rules! launch {
($fut:expr) => {
......@@ -833,7 +827,7 @@ mod test {
let validator = SyncCryptoStore::sr25519_generate_new(&*keystore, ValidatorId::ID, None)
.expect("generating sr25519 key not to fail");
state.per_relay_parent = view.0.iter().map(|relay_parent| {(
state.per_relay_parent = view.heads.iter().map(|relay_parent| {(
relay_parent.clone(),
PerRelayParentData {
signing_context: signing_context.clone(),
......
......@@ -37,7 +37,7 @@ use polkadot_subsystem::messages::{
BitfieldDistributionMessage, PoVDistributionMessage, StatementDistributionMessage,
CollatorProtocolMessage,
};
use polkadot_primitives::v1::{AuthorityDiscoveryId, Block, Hash};
use polkadot_primitives::v1::{AuthorityDiscoveryId, Block, Hash, BlockNumber};
use polkadot_node_network_protocol::{
ObservedRole, ReputationChange, PeerId, PeerSet, View, NetworkBridgeEvent, v1 as protocol_v1
};
......@@ -254,6 +254,7 @@ enum Action {
ReportPeer(PeerId, ReputationChange),
ActiveLeaves(ActiveLeavesUpdate),
BlockFinalized(BlockNumber),
PeerConnected(PeerSet, PeerId, ObservedRole),
PeerDisconnected(PeerSet, PeerId),
......@@ -274,6 +275,8 @@ fn action_from_overseer_message(
match res {
Ok(FromOverseer::Signal(OverseerSignal::ActiveLeaves(active_leaves)))
=> Action::ActiveLeaves(active_leaves),
Ok(FromOverseer::Signal(OverseerSignal::BlockFinalized(_hash, number)))
=> Action::BlockFinalized(number),
Ok(FromOverseer::Signal(OverseerSignal::Conclude)) => Action::Abort,
Ok(FromOverseer::Communication { msg }) => match msg {
NetworkBridgeMessage::ReportPeer(peer, rep) => Action::ReportPeer(peer, rep),
......@@ -284,8 +287,6 @@ fn action_from_overseer_message(
NetworkBridgeMessage::ConnectToValidators { validator_ids, connected }
=> Action::ConnectToValidators { validator_ids, connected },
},
Ok(FromOverseer::Signal(OverseerSignal::BlockFinalized(_)))
=> Action::Nop,
Err(e) => {
tracing::warn!(target: LOG_TARGET, err = ?e, "Shutting down Network Bridge due to error");
Action::Abort
......@@ -348,20 +349,24 @@ fn action_from_network_message(event: Option<NetworkEvent>) -> Action {
}
}
fn construct_view(live_heads: &[Hash]) -> View {
View(live_heads.iter().rev().take(MAX_VIEW_HEADS).cloned().collect())
/// Build our local `View` from the list of live heads (most recent at the
/// back), keeping at most `MAX_VIEW_HEADS` of the newest ones, together with
/// the given finalized block number.
fn construct_view(live_heads: &[Hash], finalized_number: BlockNumber) -> View {
	// Newest heads are at the back of `live_heads`, so walk it in reverse
	// and cap the number of heads we advertise.
	let heads = live_heads
		.iter()
		.rev()
		.take(MAX_VIEW_HEADS)
		.cloned()
		.collect();

	View { heads, finalized_number }
}
#[tracing::instrument(level = "trace", skip(net, ctx, validation_peers, collation_peers), fields(subsystem = LOG_TARGET))]
async fn update_view(
async fn update_our_view(
net: &mut impl Network,
ctx: &mut impl SubsystemContext<Message = NetworkBridgeMessage>,
live_heads: &[Hash],
local_view: &mut View,
finalized_number: BlockNumber,
validation_peers: &HashMap<PeerId, PeerData>,
collation_peers: &HashMap<PeerId, PeerData>,
) -> SubsystemResult<()> {
let new_view = construct_view(live_heads);
let new_view = construct_view(live_heads, finalized_number);
if *local_view == new_view { return Ok(()) }
*local_view = new_view.clone();
......@@ -413,7 +418,7 @@ async fn handle_peer_messages<M>(
for message in messages {
outgoing_messages.push(match message {
WireMessage::ViewUpdate(new_view) => {
if new_view.0.len() > MAX_VIEW_HEADS {
if new_view.heads.len() > MAX_VIEW_HEADS {
net.report_peer(
peer.clone(),
MALFORMED_VIEW_COST,
......@@ -580,7 +585,8 @@ where
// Most recent heads are at the back.
let mut live_heads: Vec<Hash> = Vec::with_capacity(MAX_VIEW_HEADS);
let mut local_view = View(Vec::new());
let mut local_view = View::default();
let mut finalized_number = 0;
let mut validation_peers: HashMap<PeerId, PeerData> = HashMap::new();
let mut collation_peers: HashMap<PeerId, PeerData> = HashMap::new();
......@@ -638,16 +644,27 @@ where
live_heads.extend(activated);
live_heads.retain(|h| !deactivated.contains(h));
update_view(
update_our_view(
&mut network_service,
&mut ctx,
&live_heads,
&mut local_view,
finalized_number,
&validation_peers,
&collation_peers,
).await?;
}
Action::BlockFinalized(number) => {
debug_assert!(finalized_number < number);
// we don't send the view updates here, but delay them until the next `Action::ActiveLeaves`
// otherwise it might break assumptions of some of the subsystems
// that we never send the same `ActiveLeavesUpdate`
// this is fine, we will get `Action::ActiveLeaves` on block finalization anyway
finalized_number = number;
},
Action::PeerConnected(peer_set, peer, role) => {
let peer_map = match peer_set {
PeerSet::Validation => &mut validation_peers,
......@@ -660,7 +677,7 @@ where
hash_map::Entry::Occupied(_) => continue,
hash_map::Entry::Vacant(vacant) => {
let _ = vacant.insert(PeerData {
view: View(Vec::new()),
view: View::default(),
});
match peer_set {
......@@ -669,7 +686,7 @@ where
NetworkBridgeEvent::PeerConnected(peer.clone(), role),
NetworkBridgeEvent::PeerViewChange(
peer,
View(Default::default()),
View::default(),
),
],
&mut ctx,
......@@ -679,7 +696,7 @@ where
NetworkBridgeEvent::PeerConnected(peer.clone(), role),
NetworkBridgeEvent::PeerViewChange(
peer,
View(Default::default()),
View::default(),
),
],
&mut ctx,
......@@ -753,6 +770,7 @@ mod tests {
use polkadot_node_subsystem_test_helpers::{
SingleItemSink, SingleItemStream, TestSubsystemContextHandle,
};
use polkadot_node_network_protocol::view;
use sc_network::Multiaddr;
use sp_keyring::Sr25519Keyring;
......@@ -978,7 +996,7 @@ mod tests {
ObservedRole::Full,
).await;
let hash_a = Hash::from([1; 32]);
let hash_a = Hash::repeat_byte(1);
virtual_overseer.send(
FromOverseer::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(hash_a)))
......@@ -986,7 +1004,7 @@ mod tests {
let actions = network_handle.next_network_actions(2).await;
let wire_message = WireMessage::<protocol_v1::ValidationProtocol>::ViewUpdate(
View(vec![hash_a])
view![hash_a]
).encode();
assert!(network_actions_contains(
......@@ -1021,7 +1039,7 @@ mod tests {
network_handle.connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full).await;
let view = View(vec![Hash::from([1u8; 32])]);
let view = view![Hash::repeat_byte(1)];
// bridge will inform about all connected peers.
{
......@@ -1031,7 +1049,7 @@ mod tests {
).await;
assert_sends_validation_event_to_all(
NetworkBridgeEvent::PeerViewChange(peer.clone(), View(Default::default())),
NetworkBridgeEvent::PeerViewChange(peer.clone(), View::default()),
&mut virtual_overseer,
).await;
}
......@@ -1075,7 +1093,7 @@ mod tests {
).await;
assert_sends_validation_event_to_all(
NetworkBridgeEvent::PeerViewChange(peer.clone(), View(Default::default())),
NetworkBridgeEvent::PeerViewChange(peer.clone(), View::default()),
&mut virtual_overseer,
).await;
}
......@@ -1140,7 +1158,7 @@ mod tests {
).await;
assert_sends_validation_event_to_all(
NetworkBridgeEvent::PeerViewChange(peer.clone(), View(Default::default())),
NetworkBridgeEvent::PeerViewChange(peer.clone(), View::default()),
&mut virtual_overseer,
).await;
}
......@@ -1152,7 +1170,7 @@ mod tests {
).await;
assert_sends_collation_event_to_all(
NetworkBridgeEvent::PeerViewChange(peer.clone(), View(Default::default())),
NetworkBridgeEvent::PeerViewChange(peer.clone(), View::default()),
&mut virtual_overseer,
).await;
}
......@@ -1166,7 +1184,7 @@ mod tests {
// to show that we're still connected on the collation protocol, send a view update.
let hash_a = Hash::from([1; 32]);
let hash_a = Hash::repeat_byte(1);
virtual_overseer.send(
FromOverseer::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(hash_a)))
......@@ -1174,7 +1192,7 @@ mod tests {
let actions = network_handle.next_network_actions(1).await;
let wire_message = WireMessage::<protocol_v1::ValidationProtocol>::ViewUpdate(
View(vec![hash_a])
view![hash_a]
).encode();
assert!(network_actions_contains(
......@@ -1210,7 +1228,7 @@ mod tests {
).await;
assert_sends_validation_event_to_all(
NetworkBridgeEvent::PeerViewChange(peer_a.clone(), View(Default::default())),
NetworkBridgeEvent::PeerViewChange(peer_a.clone(), View::default()),
&mut virtual_overseer,
).await;
}
......@@ -1222,7 +1240,7 @@ mod tests {
).await;
assert_sends_collation_event_to_all(
NetworkBridgeEvent::PeerViewChange(peer_b.clone(), View(Default::default())),
NetworkBridgeEvent::PeerViewChange(peer_b.clone(), View::default()),
&mut virtual_overseer,
).await;
}
......@@ -1295,7 +1313,7 @@ mod tests {
).await;
assert_sends_validation_event_to_all(
NetworkBridgeEvent::PeerViewChange(peer.clone(), View(Default::default())),
NetworkBridgeEvent::PeerViewChange(peer.clone(), View::default()),
&mut virtual_overseer,
).await;
}
......@@ -1307,13 +1325,13 @@ mod tests {
).await;
assert_sends_collation_event_to_all(
NetworkBridgeEvent::PeerViewChange(peer.clone(), View(Default::default())),
NetworkBridgeEvent::PeerViewChange(peer.clone(), View::default()),
&mut virtual_overseer,
).await;
}
let view_a = View(vec![[1; 32].into()]);
let view_b = View(vec![[2; 32].into()]);
let view_a = view![Hash::repeat_byte(1)];
let view_b = view![Hash::repeat_byte(2)];
network_handle.peer_message(
peer.clone(),
......@@ -1339,6 +1357,74 @@ mod tests {
});
}
// Regression test: the `View` sent to peers must carry the latest finalized
// block number, and a view update must be issued even when the triggering
// `ActiveLeavesUpdate` is empty (i.e. only the finalized number changed).
#[test]
fn sent_views_include_finalized_number_update() {
test_harness(|test_harness| async move {
let TestHarness { mut network_handle, mut virtual_overseer } = test_harness;
// Connect a single validation-set peer which should receive our view updates.
let peer_a = PeerId::random();
network_handle.connect_peer(
peer_a.clone(),
PeerSet::Validation,
ObservedRole::Full,
).await;
let hash_a = Hash::repeat_byte(1);
let hash_b = Hash::repeat_byte(2);
let hash_c = Hash::repeat_byte(3);
// Finalize block number 1 first; the bridge records the number but defers
// sending the view until the next `ActiveLeaves` signal.
virtual_overseer.send(
FromOverseer::Signal(OverseerSignal::BlockFinalized(hash_a, 1))
).await;
virtual_overseer.send(
FromOverseer::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(hash_b)))
).await;
// Expect a wire `ViewUpdate` carrying the new head and `finalized_number: 1`.
let actions = network_handle.next_network_actions(1).await;
let wire_message = WireMessage::<protocol_v1::ValidationProtocol>::ViewUpdate(
View {
heads: vec![hash_b],
finalized_number: 1,
}
).encode();
assert!(network_actions_contains(
&actions,
&NetworkAction::WriteNotification(
peer_a.clone(),
PeerSet::Validation,
wire_message.clone(),
),
));
// view updates are issued even when `ActiveLeavesUpdate` is empty
virtual_overseer.send(
FromOverseer::Signal(OverseerSignal::BlockFinalized(hash_c, 3))
).await;
virtual_overseer.send(
FromOverseer::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::default()))
).await;
// Heads are unchanged (still only `hash_b`), but `finalized_number` is now 3,
// and that alone must trigger a fresh `ViewUpdate` on the wire.
let actions = network_handle.next_network_actions(1).await;
let wire_message = WireMessage::<protocol_v1::ValidationProtocol>::ViewUpdate(
View {
heads: vec![hash_b],
finalized_number: 3,
}
).encode();
assert!(network_actions_contains(
&actions,
&NetworkAction::WriteNotification(
peer_a,
PeerSet::Validation,
wire_message.clone(),
),
));
});
}
#[test]
fn send_messages_to_peers() {
test_harness(|test_harness| async move {
......@@ -1360,7 +1446,7 @@ mod tests {
).await;
assert_sends_validation_event_to_all(
NetworkBridgeEvent::PeerViewChange(peer.clone(), View(Default::default())),