// Copyright 2020 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.

// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.

//! The Statement Distribution Subsystem.
//!
//! This is responsible for distributing signed statements about candidate
//! validity amongst validators.

#![deny(unused_crate_dependencies)]
#![warn(missing_docs)]

use polkadot_subsystem::{
	Subsystem, SubsystemResult, SubsystemContext, SpawnedSubsystem,
	ActiveLeavesUpdate, FromOverseer, OverseerSignal, PerLeafSpan,
	jaeger,
	messages::{
		AllMessages, NetworkBridgeMessage, StatementDistributionMessage, CandidateBackingMessage,
		RuntimeApiMessage, RuntimeApiRequest, NetworkBridgeEvent,
	},
};
use polkadot_node_subsystem_util::metrics::{self, prometheus};
use polkadot_node_primitives::{SignedFullStatement};
use polkadot_primitives::v1::{
	Hash, CompactStatement, ValidatorIndex, ValidatorId, SigningContext, ValidatorSignature, CandidateHash,
};
use polkadot_node_network_protocol::{
	v1 as protocol_v1, View, PeerId, OurView, UnifiedReputationChange as Rep,
};

use futures::prelude::*;
use futures::channel::oneshot;

use indexmap::IndexSet;

use std::collections::{HashMap, HashSet};

const COST_UNEXPECTED_STATEMENT: Rep = Rep::CostMinor("Unexpected Statement");
const COST_INVALID_SIGNATURE: Rep = Rep::CostMajor("Invalid Statement Signature");
const COST_DUPLICATE_STATEMENT: Rep = Rep::CostMajorRepeated("Statement sent more than once by peer");
const COST_APPARENT_FLOOD: Rep = Rep::Malicious("Peer appears to be flooding us with statements");

const BENEFIT_VALID_STATEMENT: Rep = Rep::BenefitMajor("Peer provided a valid statement");
const BENEFIT_VALID_STATEMENT_FIRST: Rep = Rep::BenefitMajorFirst(
	"Peer was the first to provide a valid statement",
);

/// The maximum number of candidates each validator is allowed to second at any relay-parent.
/// Short for "Validator Candidate Threshold".
///
/// This is the number of candidates we keep per validator at any relay-parent.
/// Typically we will only keep 1, but when a validator equivocates we will need to track 2.
const VC_THRESHOLD: usize = 2;

const LOG_TARGET: &str = "parachain::statement-distribution";

/// The statement distribution subsystem.
pub struct StatementDistribution {
	// Prometheus metrics
	metrics: Metrics,
}

impl<C> Subsystem<C> for StatementDistribution
	where C: SubsystemContext<Message=StatementDistributionMessage>
{
	fn start(self, ctx: C) -> SpawnedSubsystem {
		// Swallow error because failure is fatal to the node and we log with more precision
		// within `run`.
		SpawnedSubsystem {
			name: "statement-distribution-subsystem",
			future: self.run(ctx).boxed(),
		}
	}
}

impl StatementDistribution {
	/// Create a new Statement Distribution Subsystem
	pub fn new(metrics: Metrics) -> StatementDistribution {
		StatementDistribution {
			metrics,
		}
	}
}

/// Tracks our impression of a single peer's view of the candidates a validator has seconded
/// for a given relay-parent.
///
/// It is expected to receive at most `VC_THRESHOLD` candidates from us and be aware of
/// at most `VC_THRESHOLD` candidates via other means.
#[derive(Default)]
struct VcPerPeerTracker {
	local_observed: arrayvec::ArrayVec<[CandidateHash; VC_THRESHOLD]>,
	remote_observed: arrayvec::ArrayVec<[CandidateHash; VC_THRESHOLD]>,
}

impl VcPerPeerTracker {
	/// Note that the remote should now be aware that a validator has seconded a given candidate (by hash)
	/// based on a message that we have sent it from our local pool.
	fn note_local(&mut self, h: CandidateHash) {
		if !note_hash(&mut self.local_observed, h) {
			tracing::warn!("Statement distribution is erroneously attempting to distribute more \
				than {} candidate(s) per validator index. Ignoring", VC_THRESHOLD);
		}
	}

	/// Note that the remote should now be aware that a validator has seconded a given candidate (by hash)
	/// based on a message that it has sent us.
	///
	/// Returns `true` if the peer was allowed to send us such a message, `false` otherwise.
	fn note_remote(&mut self, h: CandidateHash) -> bool {
		note_hash(&mut self.remote_observed, h)
	}
}

fn note_hash(
	observed: &mut arrayvec::ArrayVec<[CandidateHash; VC_THRESHOLD]>,
	h: CandidateHash,
) -> bool {
	if observed.contains(&h) { return true; }

	observed.try_push(h).is_ok()
}
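
// Illustrative sketch (not part of the original module): how `VcPerPeerTracker` is
// intended to behave. Up to `VC_THRESHOLD` distinct candidate hashes are tracked per
// validator; anything beyond that is rejected. Assumes `CandidateHash` is the
// tuple-struct wrapper around `Hash` from `polkadot_primitives::v1`.
#[cfg(test)]
mod vc_per_peer_tracker_sketch {
	use super::*;

	#[test]
	fn tracks_at_most_vc_threshold_candidates_per_validator() {
		let mut tracker = VcPerPeerTracker::default();
		let candidate = |byte: u8| CandidateHash(Hash::repeat_byte(byte));

		// The first `VC_THRESHOLD` distinct candidates are accepted; repeats are always fine.
		assert!(tracker.note_remote(candidate(1)));
		assert!(tracker.note_remote(candidate(1)));
		assert!(tracker.note_remote(candidate(2)));

		// A third distinct candidate exceeds `VC_THRESHOLD` and is rejected.
		assert!(!tracker.note_remote(candidate(3)));
	}
}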

/// Knowledge that a peer has about goings-on in a relay parent.
#[derive(Default)]
struct PeerRelayParentKnowledge {
	/// Candidates that the peer is aware of. This indicates that we can
	/// send other statements pertaining to that candidate.
	known_candidates: HashSet<CandidateHash>,
	/// fingerprints of all statements a peer should be aware of: those that
	/// were sent to the peer by us.
	sent_statements: HashSet<(CompactStatement, ValidatorIndex)>,
	/// fingerprints of all statements a peer should be aware of: those that
	/// were sent to us by the peer.
	received_statements: HashSet<(CompactStatement, ValidatorIndex)>,
	/// How many candidates this peer is aware of for each given validator index.
	seconded_counts: HashMap<ValidatorIndex, VcPerPeerTracker>,
	/// How many statements we've received for each candidate that we're aware of.
	received_message_count: HashMap<CandidateHash, usize>,
}

impl PeerRelayParentKnowledge {
	/// Attempt to update our view of the peer's knowledge with this statement's fingerprint based
	/// on something that we would like to send to the peer.
	///
	/// This returns `None` if the peer cannot accept this statement, without altering internal
	/// state.
	///
	/// If the peer can accept the statement, this returns `Some` and updates the internal state.
	/// Once the knowledge has incorporated a statement, it cannot be incorporated again.
	///
	/// This returns `Some(true)` if this is the first time the peer has become aware of a
	/// candidate with the given hash.
	#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
	fn send(&mut self, fingerprint: &(CompactStatement, ValidatorIndex)) -> Option<bool> {
		let already_known = self.sent_statements.contains(fingerprint)
			|| self.received_statements.contains(fingerprint);

		if already_known {
			return None;
		}

		let new_known = match fingerprint.0 {
			CompactStatement::Seconded(ref h) => {
				self.seconded_counts.entry(fingerprint.1)
					.or_default()
					.note_local(h.clone());

				self.known_candidates.insert(h.clone())
			},
			CompactStatement::Valid(ref h) => {
				// The peer can only accept Valid and Invalid statements for which it is aware
				// of the corresponding candidate.
				if !self.known_candidates.contains(h) {
					return None;
				}

				false
			}
		};

		self.sent_statements.insert(fingerprint.clone());

		Some(new_known)
	}

	/// Attempt to update our view of the peer's knowledge with this statement's fingerprint based on
	/// a message we are receiving from the peer.
	///
	/// Provide the maximum message count that we can receive per candidate. In practice we should
	/// not receive more statements for any one candidate than there are members in the group assigned
	/// to that para, but this maximum needs to be lenient to account for equivocations that may be
	/// cross-group. As such, a maximum of 2 * n_validators is recommended.
	///
	/// This returns an error if the peer should not have sent us this message according to protocol
	/// rules for flood protection.
	///
	/// If this returns `Ok`, the internal state has been altered. After `receive`ing a new
	/// candidate, we are then cleared to send the peer further statements about that candidate.
	///
	/// This returns `Ok(true)` if this is the first time the peer has become aware of a
	/// candidate with given hash.
	#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
	fn receive(
		&mut self,
		fingerprint: &(CompactStatement, ValidatorIndex),
		max_message_count: usize,
	) -> Result<bool, Rep> {
		// We don't check `sent_statements` because a statement could be in-flight from both
		// sides at the same time.
		if self.received_statements.contains(fingerprint) {
			return Err(COST_DUPLICATE_STATEMENT);
		}

		let candidate_hash = match fingerprint.0 {
			CompactStatement::Seconded(ref h) => {
				let allowed_remote = self.seconded_counts.entry(fingerprint.1)
					.or_insert_with(Default::default)
					.note_remote(h.clone());

				if !allowed_remote {
					return Err(COST_UNEXPECTED_STATEMENT);
				}

				h
			}
			CompactStatement::Valid(ref h) => {
				if !self.known_candidates.contains(&h) {
					return Err(COST_UNEXPECTED_STATEMENT);
				}

				h
			}
		};

		{
			let received_per_candidate = self.received_message_count
				.entry(*candidate_hash)
				.or_insert(0);

			if *received_per_candidate >= max_message_count {
				return Err(COST_APPARENT_FLOOD);
			}

			*received_per_candidate += 1;
		}

		self.received_statements.insert(fingerprint.clone());
		Ok(self.known_candidates.insert(candidate_hash.clone()))
	}
}
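
// Illustrative sketch (not part of the original module) of the `send`/`receive`
// fingerprint bookkeeping above: receiving a `Seconded` statement makes the candidate
// known, and receiving the exact same fingerprint again is punished as a duplicate.
// Assumes `Rep` (`UnifiedReputationChange`) implements `PartialEq`/`Debug` and that
// `CandidateHash` wraps `Hash`; the validator index and message limit are arbitrary.
#[cfg(test)]
mod peer_knowledge_sketch {
	use super::*;

	#[test]
	fn duplicate_fingerprint_from_peer_is_rejected() {
		let mut knowledge = PeerRelayParentKnowledge::default();
		let candidate = CandidateHash(Hash::repeat_byte(7));
		let fingerprint = (CompactStatement::Seconded(candidate), ValidatorIndex(0));

		// First receipt introduces the candidate to this peer's knowledge.
		assert_eq!(knowledge.receive(&fingerprint, 4), Ok(true));

		// The very same fingerprint again trips the duplicate-statement cost.
		assert_eq!(knowledge.receive(&fingerprint, 4), Err(COST_DUPLICATE_STATEMENT));
	}
}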

struct PeerData {
	view: View,
	view_knowledge: HashMap<Hash, PeerRelayParentKnowledge>,
}

impl PeerData {
	/// Attempt to update our view of the peer's knowledge with this statement's fingerprint based
	/// on something that we would like to send to the peer.
	///
	/// This returns `None` if the peer cannot accept this statement, without altering internal
	/// state.
	///
	/// If the peer can accept the statement, this returns `Some` and updates the internal state.
	/// Once the knowledge has incorporated a statement, it cannot be incorporated again.
	///
	/// This returns `Some(true)` if this is the first time the peer has become aware of a
	/// candidate with the given hash.
	#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
	fn send(
		&mut self,
		relay_parent: &Hash,
		fingerprint: &(CompactStatement, ValidatorIndex),
	) -> Option<bool> {
		self.view_knowledge.get_mut(relay_parent).map_or(None, |k| k.send(fingerprint))
	}

	/// Attempt to update our view of the peer's knowledge with this statement's fingerprint based on
	/// a message we are receiving from the peer.
	///
	/// Provide the maximum message count that we can receive per candidate. In practice we should
	/// not receive more statements for any one candidate than there are members in the group assigned
	/// to that para, but this maximum needs to be lenient to account for equivocations that may be
	/// cross-group. As such, a maximum of 2 * n_validators is recommended.
	///
	/// This returns an error if the peer should not have sent us this message according to protocol
	/// rules for flood protection.
	///
	/// If this returns `Ok`, the internal state has been altered. After `receive`ing a new
	/// candidate, we are then cleared to send the peer further statements about that candidate.
	///
	/// This returns `Ok(true)` if this is the first time the peer has become aware of a
	/// candidate with given hash.
	#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
	fn receive(
		&mut self,
		relay_parent: &Hash,
		fingerprint: &(CompactStatement, ValidatorIndex),
		max_message_count: usize,
	) -> Result<bool, Rep> {
		self.view_knowledge
			.get_mut(relay_parent)
			.ok_or(COST_UNEXPECTED_STATEMENT)?
			.receive(fingerprint, max_message_count)
	}
}
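
// Illustrative sketch (not part of the original module): `PeerData::receive` treats a
// statement for a relay-parent outside the peer's view as unexpected, since no
// `view_knowledge` entry exists for it. The values below are arbitrary, and `Rep` is
// assumed to implement `PartialEq`/`Debug` for the assertion.
#[cfg(test)]
mod peer_data_sketch {
	use super::*;

	#[test]
	fn receive_for_unknown_relay_parent_is_unexpected() {
		let mut peer_data = PeerData {
			view: Default::default(),
			view_knowledge: Default::default(),
		};

		let relay_parent = Hash::repeat_byte(1);
		let fingerprint = (
			CompactStatement::Seconded(CandidateHash(Hash::repeat_byte(2))),
			ValidatorIndex(0),
		);

		// No knowledge entry exists for this relay-parent, so the statement is rejected.
		assert_eq!(
			peer_data.receive(&relay_parent, &fingerprint, 4),
			Err(COST_UNEXPECTED_STATEMENT),
		);
	}
}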

// A statement stored while a relay chain head is active.
#[derive(Debug)]
struct StoredStatement {
	comparator: StoredStatementComparator,
	statement: SignedFullStatement,
}

// A value used for comparison of stored statements to each other.
//
// The compact version of the statement, the validator index, and the signature of the validator
// is enough to differentiate between all types of equivocations, as long as the signature is
// actually checked to be valid. The same statement with 2 signatures and 2 statements with
// different (or same) signatures will all be correctly judged to be unequal with this comparator.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
struct StoredStatementComparator {
	compact: CompactStatement,
	validator_index: ValidatorIndex,
	signature: ValidatorSignature,
}

impl StoredStatement {
	fn compact(&self) -> &CompactStatement {
		&self.comparator.compact
	}

	fn fingerprint(&self) -> (CompactStatement, ValidatorIndex) {
		(self.comparator.compact.clone(), self.statement.validator_index())
	}
}

impl std::borrow::Borrow<StoredStatementComparator> for StoredStatement {
	fn borrow(&self) -> &StoredStatementComparator {
		&self.comparator
	}
}

impl std::hash::Hash for StoredStatement {
	fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
		self.comparator.hash(state)
	}
}

impl std::cmp::PartialEq for StoredStatement {
	fn eq(&self, other: &Self) -> bool {
		&self.comparator == &other.comparator
	}
}

impl std::cmp::Eq for StoredStatement {}

#[derive(Debug)]
enum NotedStatement<'a> {
	NotUseful,
	Fresh(&'a StoredStatement),
	UsefulButKnown
}

struct ActiveHeadData {
	/// All candidates we are aware of for this head, keyed by hash.
	candidates: HashSet<CandidateHash>,
	/// Stored statements for circulation to peers.
	///
	/// These are iterable in insertion order, and `Seconded` statements are always
	/// accepted before dependent statements.
	statements: IndexSet<StoredStatement>,
	/// The validators at this head.
	validators: Vec<ValidatorId>,
	/// The session index this head is at.
	session_index: sp_staking::SessionIndex,
	/// How many `Seconded` statements we've seen per validator.
	seconded_counts: HashMap<ValidatorIndex, usize>,
	/// A Jaeger span for this head, so we can attach data to it.
	span: PerLeafSpan,
}

impl ActiveHeadData {
	fn new(
		validators: Vec<ValidatorId>,
		session_index: sp_staking::SessionIndex,
		span: PerLeafSpan,
	) -> Self {
		ActiveHeadData {
			candidates: Default::default(),
			statements: Default::default(),
			validators,
			session_index,
			seconded_counts: Default::default(),
			span,
		}
	}

	/// Note the given statement.
	///
	/// If it was not already known and can be accepted, returns `NotedStatement::Fresh`,
	/// with a handle to the statement.
	///
	/// If it can be accepted, but we already know it, returns `NotedStatement::UsefulButKnown`.
	///
	/// We accept up to `VC_THRESHOLD` (2 at time of writing) `Seconded` statements
	/// per validator. These will be the first ones we see. The statement is assumed
	/// to have been checked, including that the validator index is not out-of-bounds and
	/// the signature is valid.
	///
	/// Any other statements or those that reference a candidate we are not aware of cannot be accepted
	/// and will return `NotedStatement::NotUseful`.
	#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
	fn note_statement(&mut self, statement: SignedFullStatement) -> NotedStatement {
		let validator_index = statement.validator_index();
		let comparator = StoredStatementComparator {
			compact: statement.payload().to_compact(),
			validator_index,
			signature: statement.signature().clone(),
		};

		let stored = StoredStatement {
			comparator: comparator.clone(),
			statement,
		};

		match comparator.compact {
			CompactStatement::Seconded(h) => {
				let seconded_so_far = self.seconded_counts.entry(validator_index).or_insert(0);
				if *seconded_so_far >= VC_THRESHOLD {
					return NotedStatement::NotUseful;
				}

				self.candidates.insert(h);
				if self.statements.insert(stored) {
					*seconded_so_far += 1;

					// This will always return `Some` because it was just inserted.
					NotedStatement::Fresh(self.statements.get(&comparator)
						.expect("Statement was just inserted; qed"))
				} else {
					NotedStatement::UsefulButKnown
				}
			}
			CompactStatement::Valid(h) => {
				if !self.candidates.contains(&h) {
					return NotedStatement::NotUseful;
				}

				if self.statements.insert(stored) {
					// This will always return `Some` because it was just inserted.
					NotedStatement::Fresh(self.statements.get(&comparator)
						.expect("Statement was just inserted; qed"))
				} else {
					NotedStatement::UsefulButKnown
				}
			}
		}
	}

	/// Get an iterator over all statements for the active head. Seconded statements come first.
	fn statements(&self) -> impl Iterator<Item = &'_ StoredStatement> + '_ {
		self.statements.iter()
	}

	/// Get an iterator over all statements for the active head that are for a particular candidate.
	fn statements_about(&self, candidate_hash: CandidateHash)
		-> impl Iterator<Item = &'_ StoredStatement> + '_
	{
		self.statements().filter(move |s| s.compact().candidate_hash() == &candidate_hash)
	}
}

/// Check a statement signature under this parent hash.
fn check_statement_signature(
	head: &ActiveHeadData,
	relay_parent: Hash,
	statement: &SignedFullStatement,
) -> Result<(), ()> {
	let signing_context = SigningContext {
		session_index: head.session_index,
		parent_hash: relay_parent,
	};

	head.validators.get(statement.validator_index().0 as usize)
		.ok_or(())
		.and_then(|v| statement.check_signature(&signing_context, v))
}

/// Places the statement in storage if it is new, and then
/// circulates the statement to all peers who have not seen it yet, and
/// sends all statements dependent on that statement to peers who could previously not receive
/// them but now can.
#[tracing::instrument(level = "trace", skip(peers, ctx, active_heads, metrics), fields(subsystem = LOG_TARGET))]
async fn circulate_statement_and_dependents(
	peers: &mut HashMap<PeerId, PeerData>,
	active_heads: &mut HashMap<Hash, ActiveHeadData>,
	ctx: &mut impl SubsystemContext<Message = StatementDistributionMessage>,
	relay_parent: Hash,
	statement: SignedFullStatement,
	metrics: &Metrics,
) {
	let active_head = match active_heads.get_mut(&relay_parent) {
		Some(res) => res,
		None => return,
	};

	let _span = active_head.span.child_builder("circulate-statement")
		.with_candidate(&statement.payload().candidate_hash())
		.with_stage(jaeger::Stage::StatementDistribution)
		.build();

	// First circulate the statement directly to all peers needing it.
	// The borrow of `active_head` needs to encompass only this (Rust) statement.
	let outputs: Option<(CandidateHash, Vec<PeerId>)> = {
		match active_head.note_statement(statement) {
			NotedStatement::Fresh(stored) => Some((
				*stored.compact().candidate_hash(),
				circulate_statement(peers, ctx, relay_parent, stored).await,
			)),
			_ => None,
		}
	};

	let _span = _span.child("send-to-peers");
	// Now send dependent statements to all peers needing them, if any.
	if let Some((candidate_hash, peers_needing_dependents)) = outputs {
		for peer in peers_needing_dependents {
			if let Some(peer_data) = peers.get_mut(&peer) {
				let _span_loop = _span.child_builder("to-peer")
					.with_peer_id(&peer)
					.build();
				// defensive: the peer data should always be some because the iterator
				// of peers is derived from the set of peers.
				send_statements_about(
					peer,
					peer_data,
					ctx,
					relay_parent,
					candidate_hash,
					&*active_head,
					metrics,
				).await;
			}
		}
	}
}

fn statement_message(relay_parent: Hash, statement: SignedFullStatement)
	-> protocol_v1::ValidationProtocol
{
	protocol_v1::ValidationProtocol::StatementDistribution(
		protocol_v1::StatementDistributionMessage::Statement(relay_parent, statement)
	)
}

/// Circulates a statement to all peers who have not seen it yet, and returns
/// an iterator over peers who need to have dependent statements sent.
#[tracing::instrument(level = "trace", skip(peers, ctx), fields(subsystem = LOG_TARGET))]
async fn circulate_statement(
	peers: &mut HashMap<PeerId, PeerData>,
	ctx: &mut impl SubsystemContext<Message = StatementDistributionMessage>,
	relay_parent: Hash,
	stored: &StoredStatement,
) -> Vec<PeerId> {
	let fingerprint = stored.fingerprint();

	let mut peers_to_send = HashMap::new();

	for (peer, data) in peers.iter_mut() {
		if let Some(new_known) = data.send(&relay_parent, &fingerprint) {
			peers_to_send.insert(peer.clone(), new_known);
		}
	}

	// Send all these peers the initial statement.
	if !peers_to_send.is_empty() {
		let payload = statement_message(relay_parent, stored.statement.clone());
		ctx.send_message(AllMessages::NetworkBridge(NetworkBridgeMessage::SendValidationMessage(
			peers_to_send.keys().cloned().collect(),
			payload,
		))).await;
	}

	peers_to_send.into_iter().filter_map(|(peer, needs_dependent)| if needs_dependent {
		Some(peer)
	} else {
		None
	}).collect()
}

/// Send all statements about a given candidate hash to a peer.
#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_head, metrics), fields(subsystem = LOG_TARGET))]
async fn send_statements_about(
	peer: PeerId,
	peer_data: &mut PeerData,
	ctx: &mut impl SubsystemContext<Message = StatementDistributionMessage>,
	relay_parent: Hash,
	candidate_hash: CandidateHash,
	active_head: &ActiveHeadData,
	metrics: &Metrics,
) {
	for statement in active_head.statements_about(candidate_hash) {
		if peer_data.send(&relay_parent, &statement.fingerprint()).is_some() {
			let payload = statement_message(
				relay_parent,
				statement.statement.clone(),
			);

			ctx.send_message(AllMessages::NetworkBridge(
				NetworkBridgeMessage::SendValidationMessage(vec![peer.clone()], payload)
			)).await;

			metrics.on_statement_distributed();
		}
	}
}

/// Send all statements at a given relay-parent to a peer.
#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_head, metrics), fields(subsystem = LOG_TARGET))]
async fn send_statements(
	peer: PeerId,
	peer_data: &mut PeerData,
	ctx: &mut impl SubsystemContext<Message = StatementDistributionMessage>,
	relay_parent: Hash,
	active_head: &ActiveHeadData,
	metrics: &Metrics,
) {
	for statement in active_head.statements() {
		if peer_data.send(&relay_parent, &statement.fingerprint()).is_some() {
			let payload = statement_message(
				relay_parent,
				statement.statement.clone(),
			);

			ctx.send_message(AllMessages::NetworkBridge(
				NetworkBridgeMessage::SendValidationMessage(vec![peer.clone()], payload)
			)).await;

			metrics.on_statement_distributed();
		}
	}
}

async fn report_peer(
	ctx: &mut impl SubsystemContext,
	peer: PeerId,
	rep: Rep,
) {
	ctx.send_message(AllMessages::NetworkBridge(
		NetworkBridgeMessage::ReportPeer(peer, rep)
	)).await
}

// Handle an incoming wire message. Returns a reference to a newly-stored statement
// if we were not already aware of it, along with the corresponding relay-parent.
//
// This function checks the signature and ensures the statement is compatible with our
// view. It also notifies candidate backing if the statement was previously unknown.
#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_heads, metrics), fields(subsystem = LOG_TARGET))]
async fn handle_incoming_message<'a>(
	peer: PeerId,
	peer_data: &mut PeerData,
	our_view: &View,
	active_heads: &'a mut HashMap<Hash, ActiveHeadData>,
	ctx: &mut impl SubsystemContext<Message = StatementDistributionMessage>,
	message: protocol_v1::StatementDistributionMessage,
	metrics: &Metrics,
) -> Option<(Hash, &'a StoredStatement)> {
	let (relay_parent, statement) = match message {
		protocol_v1::StatementDistributionMessage::Statement(r, s) => (r, s),
	};

	if !our_view.contains(&relay_parent) {
		report_peer(ctx, peer, COST_UNEXPECTED_STATEMENT).await;
		return None;
	}

	let active_head = match active_heads.get_mut(&relay_parent) {
		Some(h) => h,
		None => {
			// This should never be out-of-sync with our view if the view updates
			// correspond to actual `StartWork` messages. So we just log and ignore.
			tracing::warn!(
				target: LOG_TARGET,
				requested_relay_parent = %relay_parent,
				"our view out-of-sync with active heads; head not found",
			);
			return None;
		}
	};

	let candidate_hash = statement.payload().candidate_hash();
	let handle_incoming_span = active_head.span.child_builder("handle-incoming")
		.with_candidate(&candidate_hash)
		.with_peer_id(&peer)
		.build();

	// check the signature on the statement.
	if let Err(()) = check_statement_signature(&active_head, relay_parent, &statement) {
		report_peer(ctx, peer, COST_INVALID_SIGNATURE).await;
		return None;
	}

	// Ensure the statement is stored in the peer data.
	//
	// Note that if the peer is sending us something that is not within their view,
	// it will not be kept within their log.
	let fingerprint = (statement.payload().to_compact(), statement.validator_index());
	let max_message_count = active_head.validators.len() * 2;
	match peer_data.receive(&relay_parent, &fingerprint, max_message_count) {
		Err(rep) => {
			report_peer(ctx, peer, rep).await;
			return None;
		}
		Ok(true) => {
			// Send the peer all statements concerning the candidate that we have,
			// since it appears to have just learned about the candidate.
			send_statements_about(
				peer.clone(),
				peer_data,
				ctx,
				relay_parent,
				candidate_hash,
				&*active_head,
				metrics,
			).await;
		}
		Ok(false) => {}
	}

	// Note: `peer_data.receive` already ensures that the statement is not an unbounded equivocation
	// or unpinned to a seconded candidate. So it is safe to place it into the storage.
	match active_head.note_statement(statement) {
		NotedStatement::NotUseful => None,
		NotedStatement::UsefulButKnown => {
			report_peer(ctx, peer, BENEFIT_VALID_STATEMENT).await;
			None
		}
		NotedStatement::Fresh(statement) => {
			report_peer(ctx, peer, BENEFIT_VALID_STATEMENT_FIRST).await;

			let mut _span = handle_incoming_span.child("notify-backing");

			// When we receive a new message from a peer, we forward it to the
			// candidate backing subsystem.
			let message = AllMessages::CandidateBacking(
				CandidateBackingMessage::Statement(relay_parent, statement.statement.clone())
			);
			ctx.send_message(message).await;

			Some((relay_parent, statement))
		}
	}
}

/// Update a peer's view. Sends all newly unlocked statements based on the previous view.
#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_heads, metrics), fields(subsystem = LOG_TARGET))]
async fn update_peer_view_and_send_unlocked(
	peer: PeerId,
	peer_data: &mut PeerData,
	ctx: &mut impl SubsystemContext<Message = StatementDistributionMessage>,
	active_heads: &HashMap<Hash, ActiveHeadData>,
	new_view: View,
	metrics: &Metrics,
) {
	let old_view = std::mem::replace(&mut peer_data.view, new_view);

	// Remove entries for all relay-parents in the old view but not the new.
	for removed in old_view.difference(&peer_data.view) {
		let _ = peer_data.view_knowledge.remove(removed);
	}

	// Add entries for all relay-parents in the new view but not the old.
	// Furthermore, send all statements we have for those relay parents.
	let new_view = peer_data.view.difference(&old_view).copied().collect::<Vec<_>>();
	for new in new_view.iter().copied() {
		peer_data.view_knowledge.insert(new, Default::default());

		if let Some(active_head) = active_heads.get(&new) {
			send_statements(
				peer.clone(),
				peer_data,
				ctx,
				new,
				active_head,
				metrics,
			).await;
		}
	}
}

#[tracing::instrument(level = "trace", skip(peers, active_heads, ctx, metrics), fields(subsystem = LOG_TARGET))]
async fn handle_network_update(
	peers: &mut HashMap<PeerId, PeerData>,
	active_heads: &mut HashMap<Hash, ActiveHeadData>,
	ctx: &mut impl SubsystemContext<Message = StatementDistributionMessage>,
	our_view: &mut OurView,
	update: NetworkBridgeEvent<protocol_v1::StatementDistributionMessage>,
	metrics: &Metrics,
) {
	match update {
		NetworkBridgeEvent::PeerConnected(peer, _role) => {
			peers.insert(peer, PeerData {
				view: Default::default(),
				view_knowledge: Default::default(),
			});
		}
		NetworkBridgeEvent::PeerDisconnected(peer) => {
			peers.remove(&peer);
		}
		NetworkBridgeEvent::PeerMessage(peer, message) => {
			let handled_incoming = match peers.get_mut(&peer) {
				Some(data) => {
					handle_incoming_message(
						peer,
						data,
						&*our_view,
						active_heads,
						ctx,
						message,
						metrics,
					).await
				}
				None => None,
			};

			// if we got a fresh message, we need to circulate it to all peers.
			if let Some((relay_parent, statement)) = handled_incoming {
				// we can ignore the set of peers who this function returns as now expecting
				// dependent statements.
				//
				// we have the invariant in this subsystem that we never store a `Valid` or `Invalid`
				// statement before a `Seconded` statement. `Seconded` statements are the only ones
				// that require dependents. Thus, if this is a `Seconded` statement for a candidate we
				// were not aware of before, we cannot have any dependent statements from the candidate.
				let _ = circulate_statement(
					peers,
					ctx,
					relay_parent,
					statement,
				).await;
			}
		}
		NetworkBridgeEvent::PeerViewChange(peer, view) => {
			match peers.get_mut(&peer) {
				Some(data) => {
					update_peer_view_and_send_unlocked(
						peer,
						data,
						ctx,
						&*active_heads,
						view,
						metrics,
					).await
				}
				None => (),
			}
		}
		NetworkBridgeEvent::OurViewChange(view) => {
			let old_view = std::mem::replace(our_view, view);
			active_heads.retain(|head, _| our_view.contains(head));

			for new in our_view.difference(&old_view) {
				if !active_heads.contains_key(&new) {
					tracing::warn!(
						target: LOG_TARGET,
						unknown_hash = %new,
						"Our network bridge view update \
						inconsistent with `StartWork` messages we have received from overseer. \
						Contains unknown hash.",
					);
				}
			}
		}
	}

}

impl StatementDistribution {
	#[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))]
	async fn run(
		self,
		mut ctx: impl SubsystemContext<Message = StatementDistributionMessage>,
	) -> SubsystemResult<()> {
		let mut peers: HashMap<PeerId, PeerData> = HashMap::new();
		let mut our_view = OurView::default();
		let mut active_heads: HashMap<Hash, ActiveHeadData> = HashMap::new();
		let metrics = self.metrics;

		loop {
			let message = ctx.recv().await?;
			match message {
				FromOverseer::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { activated, .. })) => {
					let _timer = metrics.time_active_leaves_update();

					for (relay_parent, span) in activated {
						let span = PerLeafSpan::new(span, "statement-distribution");

						let (validators, session_index) = {
							let (val_tx, val_rx) = oneshot::channel();
							let (session_tx, session_rx) = oneshot::channel();

							let val_message = AllMessages::RuntimeApi(
								RuntimeApiMessage::Request(
									relay_parent,
									RuntimeApiRequest::Validators(val_tx),
								),
							);
							let session_message = AllMessages::RuntimeApi(
								RuntimeApiMessage::Request(
									relay_parent,
									RuntimeApiRequest::SessionIndexForChild(session_tx),
								),
							);

							ctx.send_messages(
								std::iter::once(val_message).chain(std::iter::once(session_message))
							).await;

							match (val_rx.await?, session_rx.await?) {
								(Ok(v), Ok(s)) => (v, s),
								(Err(e), _) | (_, Err(e)) => {
									tracing::warn!(
										target: LOG_TARGET,
										err = ?e,
										"Failed to fetch runtime API data for active leaf",
									);

									// Lacking this bookkeeping might make us behave funny, although
									// not in any slashable way. But we shouldn't take down the node
									// on what are likely spurious runtime API errors.
									continue;
								}
							}
						};

						active_heads.entry(relay_parent)
							.or_insert(ActiveHeadData::new(validators, session_index, span));
					}
				}
				FromOverseer::Signal(OverseerSignal::BlockFinalized(..)) => {
					// do nothing
				}
				FromOverseer::Signal(OverseerSignal::Conclude) => break,
				FromOverseer::Communication { msg } => match msg {
					StatementDistributionMessage::Share(relay_parent, statement) => {
						let _timer = metrics.time_share();

						circulate_statement_and_dependents(
							&mut peers,
							&mut active_heads,
							&mut ctx,
							relay_parent,
							statement,
							&metrics,
						).await;
					}
					StatementDistributionMessage::NetworkBridgeUpdateV1(event) => {
						let _timer = metrics.time_network_bridge_update_v1();

						handle_network_update(
							&mut peers,
							&mut active_heads,
							&mut ctx,
							&mut our_view,
							event,
							&metrics,
						).await;
					}
				}
			}
		}
		Ok(())
	}
}

#[derive(Clone)]
struct MetricsInner {
	statements_distributed: prometheus::Counter<prometheus::U64>,
	active_leaves_update: prometheus::Histogram,
	share: prometheus::Histogram,
	network_bridge_update_v1: prometheus::Histogram,
}

/// Statement Distribution metrics.
#[derive(Default, Clone)]
pub struct Metrics(Option<MetricsInner>);
