// Copyright 2020 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.

// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.

//! The Network Bridge Subsystem - protocol multiplexer for Polkadot.

19
#![deny(unused_crate_dependencies)]
20
21
22
#![warn(missing_docs)]


23
use parity_scale_codec::{Encode, Decode};
24
use parking_lot::Mutex;
25
use futures::prelude::*;
26
use futures::stream::BoxStream;
27
use sc_network::Event as NetworkEvent;
28
use sp_consensus::SyncOracle;
29
30

use polkadot_subsystem::{
31
32
33
	ActivatedLeaf, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem,
	Subsystem, SubsystemContext, SubsystemError, SubsystemResult, SubsystemSender,
	messages::StatementDistributionMessage
34
};
35
use polkadot_subsystem::messages::{
36
37
	NetworkBridgeMessage, AllMessages,
	CollatorProtocolMessage, NetworkBridgeEvent,
38
};
39
use polkadot_primitives::v1::{Hash, BlockNumber};
40
use polkadot_node_network_protocol::{
41
	PeerId, peer_set::PeerSet, View, v1 as protocol_v1, OurView, UnifiedReputationChange as Rep,
42
	ObservedRole,
43
};
44
use polkadot_node_subsystem_util::metrics::{self, prometheus};
45

46
47
48
/// Peer set infos for network initialization.
///
/// To be added to [`NetworkConfiguration::extra_sets`].
49
pub use polkadot_node_network_protocol::peer_set::{peer_sets_info, IsAuthority};
50

51
use std::collections::{HashMap, hash_map, HashSet};
52
use std::sync::Arc;
53

54
55
/// Resolution of validator authority ids to peers; handles `ConnectToValidators`
/// requests (see `validator_discovery::Service::on_request` usage below).
mod validator_discovery;

/// Actual interfacing to the network based on the `Network` trait.
///
/// Defines the `Network` trait with an implementation for an `Arc<NetworkService>`.
mod network;
use network::{Network, send_message, get_peer_id_by_authority_id};

/// Request multiplexer for combining the multiple request sources into a single `Stream` of `AllMessages`.
mod multiplexer;
pub use multiplexer::RequestMultiplexer;

#[cfg(test)]
mod tests;
68

69
70
71
72
73
74
/// The maximum amount of heads a peer is allowed to have in their view at any time.
///
/// We use the same limit to compute the view sent to peers locally.
const MAX_VIEW_HEADS: usize = 5;

/// Reputation cost for a message we could not decode.
const MALFORMED_MESSAGE_COST: Rep = Rep::CostMajor("Malformed Network-bridge message");
/// Reputation cost for a message arriving on a peer-set the peer is not connected on.
const UNCONNECTED_PEERSET_COST: Rep = Rep::CostMinor("Message sent to un-connected peer-set");
/// Reputation cost for a structurally invalid view.
const MALFORMED_VIEW_COST: Rep = Rep::CostMajor("Malformed view");
/// Reputation cost for a view containing no heads.
const EMPTY_VIEW_COST: Rep = Rep::CostMajor("Peer sent us an empty view");

// network bridge log target
const LOG_TARGET: &'static str = "parachain::network-bridge";
/// Metrics for the network bridge.
///
/// The inner state is `None` when no Prometheus registry was supplied,
/// in which case every method is a no-op.
#[derive(Clone, Default)]
pub struct Metrics(Option<MetricsInner>);

impl Metrics {
	/// Count a newly connected peer on the given peer-set.
	fn on_peer_connected(&self, peer_set: PeerSet) {
		// `if let` (rather than `Option::map` for side effects) keeps all
		// methods in this impl consistent and satisfies clippy's
		// `option_map_unit_fn` lint.
		if let Some(metrics) = self.0.as_ref() {
			metrics.connected_events
				.with_label_values(&[peer_set.get_protocol_name_static()])
				.inc();
		}
	}

	/// Count a disconnected peer on the given peer-set.
	fn on_peer_disconnected(&self, peer_set: PeerSet) {
		if let Some(metrics) = self.0.as_ref() {
			metrics.disconnected_events
				.with_label_values(&[peer_set.get_protocol_name_static()])
				.inc();
		}
	}

	/// Record the current number of connected peers on the given peer-set.
	fn note_peer_count(&self, peer_set: PeerSet, count: usize) {
		if let Some(metrics) = self.0.as_ref() {
			metrics.peer_count
				.with_label_values(&[peer_set.get_protocol_name_static()])
				.set(count as u64);
		}
	}

	/// Record one received notification of `size` bytes on the given peer-set.
	fn on_notification_received(&self, peer_set: PeerSet, size: usize) {
		if let Some(metrics) = self.0.as_ref() {
			metrics.notifications_received
				.with_label_values(&[peer_set.get_protocol_name_static()])
				.inc();

			metrics.bytes_received
				.with_label_values(&[peer_set.get_protocol_name_static()])
				.inc_by(size as u64);
		}
	}

	/// Record a notification of `size` bytes sent to `to_peers` peers.
	///
	/// The byte counter advances by `size * to_peers` since the payload
	/// goes out once per peer.
	fn on_notification_sent(&self, peer_set: PeerSet, size: usize, to_peers: usize) {
		if let Some(metrics) = self.0.as_ref() {
			metrics.notifications_sent
				.with_label_values(&[peer_set.get_protocol_name_static()])
				.inc_by(to_peers as u64);

			metrics.bytes_sent
				.with_label_values(&[peer_set.get_protocol_name_static()])
				.inc_by((size * to_peers) as u64);
		}
	}

	/// Record how many peers we are expected to connect to on the given peer-set.
	fn note_desired_peer_count(&self, peer_set: PeerSet, size: usize) {
		if let Some(metrics) = self.0.as_ref() {
			metrics.desired_peer_count
				.with_label_values(&[peer_set.get_protocol_name_static()])
				.set(size as u64);
		}
	}
}

/// The actual Prometheus metric instances behind [`Metrics`].
///
/// Every vector is labelled by the peer-set protocol name.
#[derive(Clone)]
struct MetricsInner {
	// Current number of connected peers.
	peer_count: prometheus::GaugeVec<prometheus::U64>,
	// Total peer-connect events.
	connected_events: prometheus::CounterVec<prometheus::U64>,
	// Total peer-disconnect events.
	disconnected_events: prometheus::CounterVec<prometheus::U64>,
	// Number of peers the local node is expected to connect to.
	desired_peer_count: prometheus::GaugeVec<prometheus::U64>,

	// Total notifications received / sent.
	notifications_received: prometheus::CounterVec<prometheus::U64>,
	notifications_sent: prometheus::CounterVec<prometheus::U64>,

	// Total notification bytes received / sent.
	bytes_received: prometheus::CounterVec<prometheus::U64>,
	bytes_sent: prometheus::CounterVec<prometheus::U64>,
}

impl metrics::Metrics for Metrics {
	/// Register all network-bridge metrics with the given Prometheus registry.
	///
	/// Returns a `Metrics` handle whose inner state is `Some` on success.
	/// Fails if any metric name is already registered.
	fn try_register(registry: &prometheus::Registry)
		-> std::result::Result<Self, prometheus::PrometheusError>
	{
		let metrics = MetricsInner {
			peer_count: prometheus::register(
				prometheus::GaugeVec::new(
					prometheus::Opts::new(
						"parachain_peer_count",
						"The number of peers on a parachain-related peer-set",
					),
					&["protocol"]
				)?,
				registry,
			)?,
			connected_events: prometheus::register(
				prometheus::CounterVec::new(
					prometheus::Opts::new(
						"parachain_peer_connect_events_total",
						"The number of peer connect events on a parachain notifications protocol",
					),
					&["protocol"]
				)?,
				registry,
			)?,
			disconnected_events: prometheus::register(
				prometheus::CounterVec::new(
					prometheus::Opts::new(
						"parachain_peer_disconnect_events_total",
						"The number of peer disconnect events on a parachain notifications protocol",
					),
					&["protocol"]
				)?,
				registry,
			)?,
			desired_peer_count: prometheus::register(
				prometheus::GaugeVec::new(
					prometheus::Opts::new(
						"parachain_desired_peer_count",
						"The number of peers that the local node is expected to connect to on a parachain-related peer-set",
					),
					&["protocol"]
				)?,
				registry,
			)?,
			notifications_received: prometheus::register(
				prometheus::CounterVec::new(
					prometheus::Opts::new(
						"parachain_notifications_received_total",
						"The number of notifications received on a parachain protocol",
					),
					&["protocol"]
				)?,
				registry,
			)?,
			notifications_sent: prometheus::register(
				prometheus::CounterVec::new(
					prometheus::Opts::new(
						"parachain_notifications_sent_total",
						"The number of notifications sent on a parachain protocol",
					),
					&["protocol"]
				)?,
				registry,
			)?,
			bytes_received: prometheus::register(
				prometheus::CounterVec::new(
					prometheus::Opts::new(
						"parachain_notification_bytes_received_total",
						"The number of bytes received on a parachain notification protocol",
					),
					&["protocol"]
				)?,
				registry,
			)?,
			bytes_sent: prometheus::register(
				prometheus::CounterVec::new(
					prometheus::Opts::new(
						"parachain_notification_bytes_sent_total",
						"The number of bytes sent on a parachain notification protocol",
					),
					&["protocol"]
				)?,
				registry,
			)?,
		};

		Ok(Metrics(Some(metrics)))
	}
}

250
251
252
/// Messages from and to the network.
///
/// As transmitted to and received from subsystems.
// NOTE: the explicit codec indices are part of the on-the-wire encoding —
// they must not be changed or reordered.
#[derive(Debug, Encode, Decode, Clone)]
pub enum WireMessage<M> {
	/// A message from a peer on a specific protocol.
	#[codec(index = 1)]
	ProtocolMessage(M),
	/// A view update from a peer.
	#[codec(index = 2)]
	ViewUpdate(View),
}

/// The network bridge subsystem.
pub struct NetworkBridge<N, AD> {
	/// `Network` trait implementing type.
	network_service: N,
	/// Authority discovery service; used to map `PeerId`s to authority ids and back.
	authority_discovery_service: AD,
	/// Combines the incoming request protocols into a single stream; handed to
	/// the network worker at startup.
	request_multiplexer: RequestMultiplexer,
	/// Tells us whether the node is still major-syncing the chain.
	sync_oracle: Box<dyn SyncOracle + Send>,
	/// Prometheus metrics handle.
	metrics: Metrics,
}
272

273
274
impl<N, AD> NetworkBridge<N, AD> {
	/// Create a new network bridge subsystem with underlying network service and authority discovery service.
	///
	/// This assumes that the network service has had the notifications protocol for the network
	/// bridge already registered. See [`peer_sets_info`](peer_sets_info).
	pub fn new(
		network_service: N,
		authority_discovery_service: AD,
		request_multiplexer: RequestMultiplexer,
		sync_oracle: Box<dyn SyncOracle + Send>,
		metrics: Metrics,
	) -> Self {
		NetworkBridge {
			network_service,
			authority_discovery_service,
			request_multiplexer,
			sync_oracle,
			metrics,
		}
	}
}

295
impl<Net, AD, Context> Subsystem<Context> for NetworkBridge<Net, AD>
	where
		Net: Network + Sync,
		AD: validator_discovery::AuthorityDiscovery,
		Context: SubsystemContext<Message=NetworkBridgeMessage>,
{
	// Spawns the main `run_network` driver as this subsystem's future.
	fn start(mut self, ctx: Context) -> SpawnedSubsystem {
		// The stream of networking events has to be created at initialization, otherwise the
		// networking might open connections before the stream of events has been grabbed.
		let network_stream = self.network_service.event_stream();

		// Swallow error because failure is fatal to the node and we log with more precision
		// within `run_network`.
		let future = run_network(self, ctx, network_stream)
			.map_err(|e| {
				// Attach this subsystem's name so the overseer can attribute the failure.
				SubsystemError::with_origin("network-bridge", e)
			})
			.boxed();
		SpawnedSubsystem {
			name: "network-bridge-subsystem",
			future,
		}
	}
}

/// Per-peer bookkeeping, kept in [`SharedInner`] for each connected peer.
struct PeerData {
	/// The latest view sent by the peer.
	view: View,
}

/// Why one of the two main loops stopped unexpectedly.
///
/// Any of these terminates the subsystem; `run_network` maps each variant
/// to a log line and a `SubsystemError`.
#[derive(Debug)]
enum UnexpectedAbort {
	/// Received error from overseer.
	SubsystemError(polkadot_subsystem::SubsystemError),
	/// The stream of incoming events concluded.
	EventStreamConcluded,
	/// The stream of incoming requests concluded.
	RequestStreamConcluded,
}

// Lets the loops propagate overseer errors with `?` / `e.into()`.
impl From<SubsystemError> for UnexpectedAbort {
	fn from(e: SubsystemError) -> Self {
		UnexpectedAbort::SubsystemError(e)
	}
}

/// State shared between the subsystem and network event loops.
///
/// NOTE: the mutex must never be held across an `.await` point — see the
/// `compile_fail` doc-test on `run_network`.
#[derive(Default, Clone)]
struct Shared(Arc<Mutex<SharedInner>>);

#[derive(Default)]
struct SharedInner {
	// Our latest computed local view, if any.
	local_view: Option<View>,
	// Peers currently connected on the validation peer-set.
	validation_peers: HashMap<PeerId, PeerData>,
	// Peers currently connected on the collation peer-set.
	collation_peers: HashMap<PeerId, PeerData>,
}

351
352
353
354
355
/// Whether the node is still major-syncing the chain.
///
/// While syncing, outgoing view updates are suppressed (see the
/// `ActiveLeaves` handling in `handle_subsystem_messages`).
enum Mode {
	/// Still syncing; holds the oracle polled on each `ActiveLeaves` signal.
	Syncing(Box<dyn SyncOracle + Send>),
	/// Done syncing; view updates are sent out.
	Active,
}

356
357
358
359
360
/// Handle signals and messages arriving from the overseer.
///
/// Drives the "subsystems -> network" direction: applies `ActiveLeaves` /
/// `BlockFinalized` signals to the local view and translates
/// `NetworkBridgeMessage`s into network actions.
///
/// Returns `Ok(())` on `Conclude`, or an `UnexpectedAbort` if receiving
/// from the overseer fails.
async fn handle_subsystem_messages<Context, N, AD>(
	mut ctx: Context,
	mut network_service: N,
	mut authority_discovery_service: AD,
	shared: Shared,
	sync_oracle: Box<dyn SyncOracle + Send>,
	metrics: Metrics,
) -> Result<(), UnexpectedAbort>
where
	Context: SubsystemContext<Message = NetworkBridgeMessage>,
	N: Network,
	AD: validator_discovery::AuthorityDiscovery,
{
	// This is kept sorted, descending, by block number.
	let mut live_heads: Vec<ActivatedLeaf> = Vec::with_capacity(MAX_VIEW_HEADS);
	let mut finalized_number = 0;
	let mut validator_discovery = validator_discovery::Service::<N, AD>::new();

	// Start in syncing mode; flips to `Active` once the oracle reports sync done.
	let mut mode = Mode::Syncing(sync_oracle);

	loop {
		futures::select! {
			msg = ctx.recv().fuse() => match msg {
				Ok(FromOverseer::Signal(OverseerSignal::ActiveLeaves(active_leaves))) => {
					let ActiveLeavesUpdate { activated, deactivated } = active_leaves;
					tracing::trace!(
						target: LOG_TARGET,
						action = "ActiveLeaves",
						num_activated = %activated.len(),
						num_deactivated = %deactivated.len(),
					);

					// Insert each activated leaf at its sorted (descending) position.
					for activated in activated {
						let pos = live_heads
							.binary_search_by(|probe| probe.number.cmp(&activated.number).reverse())
							.unwrap_or_else(|i| i);

						live_heads.insert(pos, activated);
					}
					live_heads.retain(|h| !deactivated.contains(&h.hash));

					// if we're done syncing, set the mode to `Mode::Active`.
					// Otherwise, we don't need to send view updates.
					{
						let is_done_syncing = match mode {
							Mode::Active => true,
							Mode::Syncing(ref mut sync_oracle) => !sync_oracle.is_major_syncing(),
						};

						if is_done_syncing {
							mode = Mode::Active;

							update_our_view(
								&mut network_service,
								&mut ctx,
								&live_heads,
								&shared,
								finalized_number,
								&metrics,
							);
						}
					}
				}
				Ok(FromOverseer::Signal(OverseerSignal::BlockFinalized(_hash, number))) => {
					tracing::trace!(
						target: LOG_TARGET,
						action = "BlockFinalized"
					);

					debug_assert!(finalized_number < number);

					// we don't send the view updates here, but delay them until the next `ActiveLeaves`
					// otherwise it might break assumptions of some of the subsystems
					// that we never send the same `ActiveLeavesUpdate`
					finalized_number = number;
				}
				Ok(FromOverseer::Signal(OverseerSignal::Conclude)) => {
					return Ok(());
				}
				Ok(FromOverseer::Communication { msg }) => match msg {
					NetworkBridgeMessage::ReportPeer(peer, rep) => {
						// Only log punitive reputation changes; benefits are routine.
						if !rep.is_benefit() {
							tracing::debug!(
								target: LOG_TARGET,
								?peer,
								?rep,
								action = "ReportPeer"
							);
						}
						network_service.report_peer(peer, rep);
					}
					NetworkBridgeMessage::DisconnectPeer(peer, peer_set) => {
						tracing::trace!(
							target: LOG_TARGET,
							action = "DisconnectPeer",
							?peer,
							peer_set = ?peer_set,
						);
						network_service.disconnect_peer(peer, peer_set);
					}
					NetworkBridgeMessage::SendValidationMessage(peers, msg) => {
						tracing::trace!(
							target: LOG_TARGET,
							action = "SendValidationMessages",
							num_messages = 1,
						);

						send_message(
							&mut network_service,
							peers,
							PeerSet::Validation,
							WireMessage::ProtocolMessage(msg),
							&metrics,
						);
					}
					NetworkBridgeMessage::SendValidationMessages(msgs) => {
						tracing::trace!(
							target: LOG_TARGET,
							action = "SendValidationMessages",
							num_messages = %msgs.len(),
						);

						for (peers, msg) in msgs {
							send_message(
								&mut network_service,
								peers,
								PeerSet::Validation,
								WireMessage::ProtocolMessage(msg),
								&metrics,
							);
						}
					}
					NetworkBridgeMessage::SendCollationMessage(peers, msg) => {
						tracing::trace!(
							target: LOG_TARGET,
							action = "SendCollationMessages",
							num_messages = 1,
						);

						send_message(
							&mut network_service,
							peers,
							PeerSet::Collation,
							WireMessage::ProtocolMessage(msg),
							&metrics,
						);
					}
					NetworkBridgeMessage::SendCollationMessages(msgs) => {
						tracing::trace!(
							target: LOG_TARGET,
							action = "SendCollationMessages",
							num_messages = %msgs.len(),
						);

						for (peers, msg) in msgs {
							send_message(
								&mut network_service,
								peers,
								PeerSet::Collation,
								WireMessage::ProtocolMessage(msg),
								&metrics,
							);
						}
					}
					NetworkBridgeMessage::SendRequests(reqs, if_disconnected) => {
						tracing::trace!(
							target: LOG_TARGET,
							action = "SendRequests",
							num_requests = %reqs.len(),
						);

						for req in reqs {
							network_service
								.start_request(&mut authority_discovery_service, req, if_disconnected)
								.await;
						}
					}
					NetworkBridgeMessage::ConnectToValidators {
						validator_ids,
						peer_set,
						failed,
					} => {
						tracing::trace!(
							target: LOG_TARGET,
							action = "ConnectToValidators",
							peer_set = ?peer_set,
							ids = ?validator_ids,
							"Received a validator connection request",
						);

						metrics.note_desired_peer_count(peer_set, validator_ids.len());

						// `on_request` takes ownership of the services and hands them back.
						let (ns, ads) = validator_discovery.on_request(
							validator_ids,
							peer_set,
							failed,
							network_service,
							authority_discovery_service,
						).await;

						network_service = ns;
						authority_discovery_service = ads;
					}
					NetworkBridgeMessage::NewGossipTopology {
						our_neighbors,
					} => {
						tracing::debug!(
							target: LOG_TARGET,
							action = "NewGossipTopology",
							neighbors = our_neighbors.len(),
							"Gossip topology has changed",
						);

						// Resolve each neighbor authority to a peer id, skipping unknowns.
						let ads = &mut authority_discovery_service;
						let mut gossip_peers = HashSet::with_capacity(our_neighbors.len());
						for authority in our_neighbors {
							let addr = get_peer_id_by_authority_id(
								ads,
								authority.clone(),
							).await;

							if let Some(peer_id) = addr {
								gossip_peers.insert(peer_id);
							}
						}

						dispatch_validation_event_to_all_unbounded(
							NetworkBridgeEvent::NewGossipTopology(gossip_peers),
							ctx.sender(),
						);
					}
				}
				Err(e) => return Err(e.into()),
			},
		}
	}
}
593

594
/// Handle events coming in from the network and the request multiplexer.
///
/// Drives the "network -> subsystems" direction: tracks peer connects and
/// disconnects in `shared`, decodes peer notifications and dispatches the
/// resulting `NetworkBridgeEvent`s to the interested subsystems.
///
/// Runs until either stream concludes, which is reported as an
/// `UnexpectedAbort`.
async fn handle_network_messages<AD: validator_discovery::AuthorityDiscovery>(
	mut sender: impl SubsystemSender,
	mut network_service: impl Network,
	network_stream: BoxStream<'static, NetworkEvent>,
	mut authority_discovery_service: AD,
	mut request_multiplexer: RequestMultiplexer,
	metrics: Metrics,
	shared: Shared,
) -> Result<(), UnexpectedAbort> {
	let mut network_stream = network_stream.fuse();
	loop {
		futures::select! {
			network_event = network_stream.next() => match network_event {
				None => return Err(UnexpectedAbort::EventStreamConcluded),
				// Events we are not interested in.
				Some(NetworkEvent::Dht(_))
				| Some(NetworkEvent::SyncConnected { .. })
				| Some(NetworkEvent::SyncDisconnected { .. }) => {}
				Some(NetworkEvent::NotificationStreamOpened { remote: peer, protocol, role, .. }) => {
					let role = ObservedRole::from(role);
					// Ignore streams on protocols that are not one of our peer-sets.
					let peer_set = match PeerSet::try_from_protocol_name(&protocol) {
						None => continue,
						Some(peer_set) => peer_set,
					};

					tracing::debug!(
						target: LOG_TARGET,
						action = "PeerConnected",
						peer_set = ?peer_set,
						peer = ?peer,
						role = ?role
					);

					// Register the peer and grab our current view — all under one
					// short-lived lock (never held across `.await`).
					let local_view = {
						let mut shared = shared.0.lock();
						let peer_map = match peer_set {
							PeerSet::Validation => &mut shared.validation_peers,
							PeerSet::Collation => &mut shared.collation_peers,
						};

						match peer_map.entry(peer.clone()) {
							// Already known on this peer-set: nothing to do.
							hash_map::Entry::Occupied(_) => continue,
							hash_map::Entry::Vacant(vacant) => {
								vacant.insert(PeerData { view: View::default() });
							}
						}

						metrics.on_peer_connected(peer_set);
						metrics.note_peer_count(peer_set, peer_map.len());

						shared.local_view.clone().unwrap_or(View::default())
					};

					let maybe_authority =
						authority_discovery_service
							.get_authority_id_by_peer_id(peer).await;

					// Tell the interested subsystems about the new peer (with an
					// initial empty view) and send the peer our current view.
					match peer_set {
						PeerSet::Validation => {
							dispatch_validation_events_to_all(
								vec![
									NetworkBridgeEvent::PeerConnected(peer.clone(), role, maybe_authority),
									NetworkBridgeEvent::PeerViewChange(
										peer.clone(),
										View::default(),
									),
								],
								&mut sender,
							).await;

							send_message(
								&mut network_service,
								vec![peer],
								PeerSet::Validation,
								WireMessage::<protocol_v1::ValidationProtocol>::ViewUpdate(
									local_view,
								),
								&metrics,
							);
						}
						PeerSet::Collation => {
							dispatch_collation_events_to_all(
								vec![
									NetworkBridgeEvent::PeerConnected(peer.clone(), role, maybe_authority),
									NetworkBridgeEvent::PeerViewChange(
										peer.clone(),
										View::default(),
									),
								],
								&mut sender,
							).await;

							send_message(
								&mut network_service,
								vec![peer],
								PeerSet::Collation,
								WireMessage::<protocol_v1::CollationProtocol>::ViewUpdate(
									local_view,
								),
								&metrics,
							);
						}
					}
				}
				Some(NetworkEvent::NotificationStreamClosed { remote: peer, protocol }) => {
					let peer_set = match PeerSet::try_from_protocol_name(&protocol) {
						None => continue,
						Some(peer_set) => peer_set,
					};

					tracing::debug!(
						target: LOG_TARGET,
						action = "PeerDisconnected",
						peer_set = ?peer_set,
						peer = ?peer
					);

					// Drop the peer from shared state; remember whether it was known
					// so we only dispatch a disconnect event for tracked peers.
					let was_connected = {
						let mut shared = shared.0.lock();
						let peer_map = match peer_set {
							PeerSet::Validation => &mut shared.validation_peers,
							PeerSet::Collation => &mut shared.collation_peers,
						};

						let w = peer_map.remove(&peer).is_some();

						metrics.on_peer_disconnected(peer_set);
						metrics.note_peer_count(peer_set, peer_map.len());

						w
					};

					if was_connected {
						match peer_set {
							PeerSet::Validation => dispatch_validation_event_to_all(
								NetworkBridgeEvent::PeerDisconnected(peer),
								&mut sender,
							).await,
							PeerSet::Collation => dispatch_collation_event_to_all(
								NetworkBridgeEvent::PeerDisconnected(peer),
								&mut sender,
							).await,
						}
					}
				}
				Some(NetworkEvent::NotificationsReceived { remote, messages }) => {
					// Decode validation-protocol payloads; any decode failure
					// poisons the whole batch (collected into `Err`).
					let v_messages: Result<Vec<_>, _> = messages
						.iter()
						.filter(|(protocol, _)| {
							protocol == &PeerSet::Validation.into_protocol_name()
						})
						.map(|(_, msg_bytes)| {
							WireMessage::decode(&mut msg_bytes.as_ref())
								.map(|m| (m, msg_bytes.len()))
						})
						.collect();

					let v_messages = match v_messages {
						Err(_) => {
							tracing::debug!(
								target: LOG_TARGET,
								action = "ReportPeer"
							);

							network_service.report_peer(remote, MALFORMED_MESSAGE_COST);
							continue;
						}
						Ok(v) => v,
					};

					// Same for the collation protocol.
					let c_messages: Result<Vec<_>, _> = messages
						.iter()
						.filter(|(protocol, _)| {
							protocol == &PeerSet::Collation.into_protocol_name()
						})
						.map(|(_, msg_bytes)| {
							WireMessage::decode(&mut msg_bytes.as_ref())
								.map(|m| (m, msg_bytes.len()))
						})
						.collect();

					match c_messages {
						Err(_) => {
							tracing::debug!(
								target: LOG_TARGET,
								action = "ReportPeer"
							);

							network_service.report_peer(remote, MALFORMED_MESSAGE_COST);
							continue;
						}
						Ok(c_messages) => {
							if v_messages.is_empty() && c_messages.is_empty() {
								continue;
							} else {
								tracing::trace!(
									target: LOG_TARGET,
									action = "PeerMessages",
									peer = ?remote,
									num_validation_messages = %v_messages.len(),
									num_collation_messages = %c_messages.len()
								);

								if !v_messages.is_empty() {
									let (events, reports) = handle_peer_messages(
										remote.clone(),
										PeerSet::Validation,
										&mut shared.0.lock().validation_peers,
										v_messages,
										&metrics,
									);

									for report in reports {
										network_service.report_peer(remote.clone(), report);
									}

									dispatch_validation_events_to_all(events, &mut sender).await;
								}

								if !c_messages.is_empty() {
									let (events, reports) = handle_peer_messages(
										remote.clone(),
										PeerSet::Collation,
										&mut shared.0.lock().collation_peers,
										c_messages,
										&metrics,
									);

									for report in reports {
										network_service.report_peer(remote.clone(), report);
									}

									dispatch_collation_events_to_all(events, &mut sender).await;
								}
							}
						}
					}
				}
			},
			req_res_event = request_multiplexer.next().fuse() => match req_res_event {
				None => return Err(UnexpectedAbort::RequestStreamConcluded),
				Some(Err(err)) => {
					network_service.report_peer(err.peer, MALFORMED_MESSAGE_COST);
				}
				Some(Ok(msg)) => {
					// Forward the demultiplexed request to the owning subsystem.
					sender.send_message(msg).await;
				}
			},
		}
	}
}

/// Main driver, processing network events and messages from other subsystems.
///
/// THIS IS A HACK. We need to ensure we never hold the mutex across a `.await` boundary
/// and `parking_lot` currently does not provide `Send`, which helps us enforce that.
/// If this breaks, we need to find another way to protect ourselves.
///
/// ```compile_fail
/// #use parking_lot::MutexGuard;
/// #fn is_send<T: Send>();
/// #is_send::<parking_lot::MutexGuard<'static, ()>();
/// ```
async fn run_network<N, AD>(
	bridge: NetworkBridge<N, AD>,
	mut ctx: impl SubsystemContext<Message=NetworkBridgeMessage>,
860
	network_stream: BoxStream<'static, NetworkEvent>,
861
862
) -> SubsystemResult<()>
where
863
	N: Network,
864
865
866
867
868
869
	AD: validator_discovery::AuthorityDiscovery,
{
	let shared = Shared::default();

	let NetworkBridge {
		network_service,
870
		mut request_multiplexer,
871
		authority_discovery_service,
872
		metrics,
873
		sync_oracle,
874
	} = bridge;
875

876
877
878
879
	let statement_receiver = request_multiplexer
		.get_statement_fetching()
		.expect("Gets initialized, must be `Some` on startup. qed.");

880
881
882
	let (remote, network_event_handler) = handle_network_messages(
		ctx.sender().clone(),
		network_service.clone(),
883
		network_stream,
884
		authority_discovery_service.clone(),
885
		request_multiplexer,
886
		metrics.clone(),
887
888
889
890
891
		shared.clone(),
	).remote_handle();

	ctx.spawn("network-bridge-network-worker", Box::pin(remote)).await?;

892
893
894
895
	ctx.send_message(AllMessages::StatementDistribution(
		StatementDistributionMessage::StatementFetchingReceiver(statement_receiver)
	)).await;

896
897
898
899
900
	let subsystem_event_handler = handle_subsystem_messages(
		ctx,
		network_service,
		authority_discovery_service,
		shared,
901
		sync_oracle,
902
		metrics,
903
	);
904

905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
	futures::pin_mut!(subsystem_event_handler);

	match futures::future::select(subsystem_event_handler, network_event_handler)
		.await
		.factor_first()
		.0
	{
		Ok(()) => Ok(()),
		Err(UnexpectedAbort::SubsystemError(err)) => {
			tracing::warn!(
				target: LOG_TARGET,
				err = ?err,
				"Shutting down Network Bridge due to error"
			);

			Err(SubsystemError::Context(format!(
				"Received SubsystemError from overseer: {:?}",
				err
			)))
		}
		Err(UnexpectedAbort::EventStreamConcluded) => {
			tracing::info!(
				target: LOG_TARGET,
				"Shutting down Network Bridge: underlying request stream concluded"
			);
			Err(SubsystemError::Context(
				"Incoming network event stream concluded.".to_string(),
			))
		}
		Err(UnexpectedAbort::RequestStreamConcluded) => {
			tracing::info!(
				target: LOG_TARGET,
				"Shutting down Network Bridge: underlying request stream concluded"
			);
			Err(SubsystemError::Context(
				"Incoming network request stream concluded".to_string(),
			))
		}
943
944
945
	}
}

946
/// Build a `View` from at most `MAX_VIEW_HEADS` of the given live heads
/// plus the latest finalized block number.
fn construct_view(live_heads: impl DoubleEndedIterator<Item = Hash>, finalized_number: BlockNumber) -> View {
	View::new(
		live_heads.take(MAX_VIEW_HEADS),
		finalized_number,
	)
}

953
fn update_our_view(
954
	net: &mut impl Network,
955
	ctx: &mut impl SubsystemContext<Message = NetworkBridgeMessage>,
956
	live_heads: &[ActivatedLeaf],
957
	shared: &Shared,
958
	finalized_number: BlockNumber,
959
	metrics: &Metrics,
960
) {
961
	let new_view = construct_view(live_heads.iter().map(|v| v.hash), finalized_number);
962

963
964
	let (validation_peers, collation_peers) = {
		let mut shared = shared.0.lock();
965

966
967
		// We only want to send a view update when the heads changed.
		// A change in finalized block number only is _not_ sufficient.
968
969
970
971
972
		//
		// If this is the first view update since becoming active, but our view is empty,
		// there is no need to send anything.
		match shared.local_view {
			Some(ref v) if v.check_heads_eq(&new_view) => {
973
				return;
974
975
976
			}
			None if live_heads.is_empty() => {
				shared.local_view = Some(new_view);
977
				return;
978
979
980
981
			}
			_ => {
				shared.local_view = Some(new_view.clone());
			}
982

983
		}
984
985
986
987
988
989

		(
			shared.validation_peers.keys().cloned().collect::<Vec<_>>(),
			shared.collation_peers.keys().cloned().collect::<Vec<_>>(),
		)
	};
990

991
992
	send_validation_message(
		net,
993
		validation_peers,
994
		WireMessage::ViewUpdate(new_view.clone()),
995
		metrics,
996
	);
997
998
999

	send_collation_message(
		net,
1000
		collation_peers,
For faster browsing, not all history is shown. View entire blame