diff --git a/substrate/client/network/src/protocol.rs b/substrate/client/network/src/protocol.rs
index ac74af0f5ca9478a01cfbafb622d25f4efe88397..9403e471b0f277a13d6b5f9ebffe678f2529a2c2 100644
--- a/substrate/client/network/src/protocol.rs
+++ b/substrate/client/network/src/protocol.rs
@@ -63,7 +63,7 @@ pub mod message;
 pub mod event;
 pub mod sync;
 
-pub use generic_proto::{NotificationsSink, Ready, NotifsHandlerError, LegacyConnectionKillError};
+pub use generic_proto::{NotificationsSink, Ready, NotifsHandlerError};
 
 const REQUEST_TIMEOUT_SEC: u64 = 40;
 /// Interval at which we perform time based maintenance
@@ -1668,7 +1668,7 @@ impl<B: BlockT, H: ExHashT> NetworkBehaviour for Protocol<B, H> {
 					notifications_sink,
 				}
 			},
-			GenericProtoOut::CustomProtocolClosed { peer_id, .. } => {
+			GenericProtoOut::CustomProtocolClosed { peer_id } => {
 				self.on_peer_disconnected(peer_id)
 			},
 			GenericProtoOut::LegacyMessage { peer_id, message } =>
diff --git a/substrate/client/network/src/protocol/generic_proto.rs b/substrate/client/network/src/protocol/generic_proto.rs
index 3133471b0d2493cb0e21b2f6d5d1264f23565f59..4d6e607a146e7877e5ab1781f0a4e5b8d2b5d56a 100644
--- a/substrate/client/network/src/protocol/generic_proto.rs
+++ b/substrate/client/network/src/protocol/generic_proto.rs
@@ -21,7 +21,7 @@
 //! network, then performs the Substrate protocol handling on top.
 
 pub use self::behaviour::{GenericProto, GenericProtoOut};
-pub use self::handler::{NotifsHandlerError, NotificationsSink, Ready, LegacyConnectionKillError};
+pub use self::handler::{NotifsHandlerError, NotificationsSink, Ready};
 
 mod behaviour;
 mod handler;
diff --git a/substrate/client/network/src/protocol/generic_proto/behaviour.rs b/substrate/client/network/src/protocol/generic_proto/behaviour.rs
index 7b62b154016c3cf829b766f76078392212f6033b..f84aead47283a376afe1333eb06578d863f8796b 100644
--- a/substrate/client/network/src/protocol/generic_proto/behaviour.rs
+++ b/substrate/client/network/src/protocol/generic_proto/behaviour.rs
@@ -42,45 +42,35 @@ use wasm_timer::Instant;
 
 /// Network behaviour that handles opening substreams for custom protocols with other peers.
 ///
-/// ## Legacy vs new protocol
-///
-/// The `GenericProto` behaves as following:
-///
-/// - Whenever a connection is established, we open a single substream (called "legacy protocol" in
-/// the source code) on that connection. This substream name depends on the `protocol_id` and
-/// `versions` passed at initialization. If the remote refuses this substream, we close the
-/// connection.
-///
-/// - For each registered protocol, we also open an additional substream for this protocol. If the
-/// remote refuses this substream, then it's fine.
-///
-/// - Whenever we want to send a message, we can call either `send_packet` to force the legacy
-/// substream, or `write_notification` to indicate a registered protocol. If the registered
-/// protocol was refused or isn't supported by the remote, we always use the legacy instead.
-///
-/// ## How it works
+/// # How it works
 ///
 /// The role of the `GenericProto` is to synchronize the following components:
 ///
 /// - The libp2p swarm that opens new connections and reports disconnects.
-/// - The connection handler (see `handler.rs`) that handles individual connections.
+/// - The connection handler (see `group.rs`) that handles individual connections.
 /// - The peerset manager (PSM) that requests links to peers to be established or broken.
 /// - The external API, that requires knowledge of the links that have been established.
 ///
-/// Each connection handler can be in four different states: Enabled+Open, Enabled+Closed,
-/// Disabled+Open, or Disabled+Closed. The Enabled/Disabled component must be in sync with the
-/// peerset manager. For example, if the peerset manager requires a disconnection, we disable the
-/// connection handlers of that peer. The Open/Closed component must be in sync with the external
-/// API.
+/// In the state machine below, each `PeerId` is attributed one of these states:
+///
+/// - [`PeerState::Requested`]: No open connection, but requested by the peerset. Currently dialing.
+/// - [`PeerState::Disabled`]: Has open TCP connection(s) unbeknownst to the peerset. No substream
+///   is open.
+/// - [`PeerState::Enabled`]: Has open TCP connection(s), acknowledged by the peerset.
+///   - Notifications substreams are open on at least one connection, and external
+///     API has been notified.
+///   - Notifications substreams aren't open.
+/// - [`PeerState::Incoming`]: Has open TCP connection(s) and remote would like to open substreams.
+///   Peerset has been asked to attribute an inbound slot.
 ///
-/// However, a connection handler for a peer only exists if we are actually connected to that peer.
-/// What this means is that there are six possible states for each peer: Disconnected, Dialing
-/// (trying to connect), Enabled+Open, Enabled+Closed, Disabled+Open, Disabled+Closed.
-/// Most notably, the Dialing state must correspond to a "link established" state in the peerset
-/// manager. In other words, the peerset manager doesn't differentiate whether we are dialing a
-/// peer or connected to it.
+/// In addition to these states, there also exists a "banning" system. If we fail to dial a peer,
+/// we back-off for a few seconds. If the PSM requests connecting to a peer that is currently
+/// backed-off, the next dialing attempt is delayed until after the ban expires. However, the PSM
+/// will still consider the peer to be connected. This "ban" is thus not a ban in a strict sense:
+/// if a backed-off peer tries to connect, the connection is accepted. A ban only delays dialing
+/// attempts.
 ///
-/// There may be multiple connections to a peer. However, the status of a peer on
+/// There may be multiple connections to a peer. The status of a peer on
 /// the API of this behaviour and towards the peerset manager is aggregated in
 /// the following way:
 ///
@@ -94,9 +84,9 @@ use wasm_timer::Instant;
 ///      in terms of potential reordering and dropped messages. Messages can
 ///      be received on any connection.
 ///   3. The behaviour reports `GenericProtoOut::CustomProtocolOpen` when the
-///      first connection reports `NotifsHandlerOut::Open`.
+///      first connection reports `NotifsHandlerOut::OpenResultOk`.
 ///   4. The behaviour reports `GenericProtoOut::CustomProtocolClosed` when the
-///      last connection reports `NotifsHandlerOut::Closed`.
+///      last connection reports `NotifsHandlerOut::CloseResult`.
 ///
 /// In this way, the number of actual established connections to the peer is
 /// an implementation detail of this behaviour. Note that, in practice and at
@@ -104,12 +94,6 @@ use wasm_timer::Instant;
 /// and only as a result of simultaneous dialing. However, the implementation
 /// accommodates for any number of connections.
 ///
-/// Additionally, there also exists a "banning" system. If we fail to dial a peer, we "ban" it for
-/// a few seconds. If the PSM requests connecting to a peer that is currently "banned", the next
-/// dialing attempt is delayed until after the ban expires. However, the PSM will still consider
-/// the peer to be connected. This "ban" is thus not a ban in a strict sense: If a "banned" peer
-/// tries to connect, the connection is accepted. A ban only delays dialing attempts.
-///
 pub struct GenericProto {
 	/// `PeerId` of the local node.
 	local_peer_id: PeerId,
@@ -157,6 +141,8 @@ pub struct GenericProto {
 struct DelayId(u64);
 
 /// State of a peer we're connected to.
+///
+/// The variants correspond to the state of the peer w.r.t. the peerset.
 #[derive(Debug)]
 enum PeerState {
 	/// State is poisoned. This is a temporary state for a peer and we should always switch back
@@ -166,9 +152,11 @@ enum PeerState {
 
 	/// The peer misbehaved. If the PSM wants us to connect to this peer, we will add an artificial
 	/// delay to the connection.
-	Banned {
-		/// Until when the peer is banned.
-		until: Instant,
+	Backoff {
+		/// Timer that fires when the ban expires. For clean-up purposes. References an entry in `delays`.
+		timer: DelayId,
+		/// Until when the peer is backed-off.
+		timer_deadline: Instant,
 	},
 
 	/// The peerset requested that we connect to this peer. We are currently not connected.
@@ -182,40 +170,54 @@ enum PeerState {
 	/// The peerset requested that we connect to this peer. We are currently dialing this peer.
 	Requested,
 
-	/// We are connected to this peer but the peerset refused it.
+	/// We are connected to this peer but the peerset hasn't requested it or has denied it.
 	///
-	/// We may still have ongoing traffic with that peer, but it should cease shortly.
+	/// The handler is either in the closed state, or a `Close` message has been sent to it and
+	/// hasn't been answered yet.
 	Disabled {
-		/// The connections that are currently open for custom protocol traffic.
-		open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>,
-		/// If `Some`, any dial attempts to this peer are delayed until the given `Instant`.
-		banned_until: Option<Instant>,
+		/// If `Some`, any connection request from the peerset to this peer is delayed until the
+		/// given `Instant`.
+		backoff_until: Option<Instant>,
+
+		/// List of connections with this peer, and their state.
+		connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>,
 	},
 
-	/// We are connected to this peer but we are not opening any Substrate substream. The handler
-	/// will be enabled when `timer` fires. This peer can still perform Kademlia queries and such,
-	/// but should get disconnected in a few seconds.
+	/// We are connected to this peer. The peerset has requested a connection to this peer, but
+	/// it is currently in a "backed-off" phase. The state will switch to `Enabled` once the timer
+	/// expires.
+	///
+	/// The handler is either in the closed state, or a `Close` message has been sent to it and
+	/// hasn't been answered yet.
+	///
+	/// The handler will be opened when `timer` fires.
 	DisabledPendingEnable {
-		/// The connections that are currently open for custom protocol traffic.
-		open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>,
 		/// When to enable this remote. References an entry in `delays`.
 		timer: DelayId,
 		/// When the `timer` will trigger.
 		timer_deadline: Instant,
+
+		/// List of connections with this peer, and their state.
+		connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>,
 	},
 
-	/// We are connected to this peer and the peerset has accepted it. The handler is in the
-	/// enabled state.
+	/// We are connected to this peer and the peerset has accepted it.
 	Enabled {
-		/// The connections that are currently open for custom protocol traffic.
-		open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>,
+		/// List of connections with this peer, and their state.
+		connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>,
 	},
 
-	/// We received an incoming connection from this peer and forwarded that
-	/// connection request to the peerset. The connection handlers are waiting
-	/// for initialisation, i.e. to be enabled or disabled based on whether
-	/// the peerset accepts or rejects the peer.
-	Incoming,
+	/// We are connected to this peer. We have received an `OpenDesiredByRemote` from one of the
+	/// handlers and forwarded that request to the peerset. The connection handlers are waiting for
+	/// a response, i.e. to be opened or closed based on whether the peerset accepts or rejects
+	/// the peer.
+	Incoming {
+		/// If `Some`, any dial attempts to this peer are delayed until the given `Instant`.
+		backoff_until: Option<Instant>,
+
+		/// List of connections with this peer, and their state.
+		connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>,
+	},
 }
 
 impl PeerState {
@@ -229,18 +231,19 @@ impl PeerState {
 	/// that is open for custom protocol traffic.
 	fn get_open(&self) -> Option<&NotificationsSink> {
 		match self {
-			PeerState::Disabled { open, .. } |
-			PeerState::DisabledPendingEnable { open, .. } |
-			PeerState::Enabled { open, .. } =>
-				if !open.is_empty() {
-					Some(&open[0].1)
-				} else {
-					None
-				}
+			PeerState::Enabled { connections, .. } => connections
+				.iter()
+				.filter_map(|(_, s)| match s {
+					ConnectionState::Open(s) => Some(s),
+					_ => None,
+				})
+				.next(),
 			PeerState::Poisoned => None,
-			PeerState::Banned { .. } => None,
+			PeerState::Backoff { .. } => None,
 			PeerState::PendingRequest { .. } => None,
 			PeerState::Requested => None,
+			PeerState::Disabled { .. } => None,
+			PeerState::DisabledPendingEnable { .. } => None,
 			PeerState::Incoming { .. } => None,
 		}
 	}
@@ -249,7 +252,7 @@ impl PeerState {
 	fn is_requested(&self) -> bool {
 		match self {
 			PeerState::Poisoned => false,
-			PeerState::Banned { .. } => false,
+			PeerState::Backoff { .. } => false,
 			PeerState::PendingRequest { .. } => true,
 			PeerState::Requested => true,
 			PeerState::Disabled { .. } => false,
@@ -260,6 +263,37 @@ impl PeerState {
 	}
 }
 
+/// State of the handler of a single connection visible from this state machine.
+#[derive(Debug)]
+enum ConnectionState {
+	/// Connection is in the `Closed` state, meaning that the remote hasn't requested anything.
+	Closed,
+
+	/// Connection is either in the `Open` or the `Closed` state, but a
+	/// [`NotifsHandlerIn::Close`] message has been sent. Waiting for this message to be
+	/// acknowledged through a [`NotifsHandlerOut::CloseResult`].
+	Closing,
+
+	/// Connection is in the `Closed` state but a [`NotifsHandlerIn::Open`] message has been sent.
+	/// An `OpenResultOk`/`OpenResultErr` message is expected.
+	Opening,
+
+	/// Connection is in the `Closed` state but a [`NotifsHandlerIn::Open`] message then a
+	/// [`NotifsHandlerIn::Close`] message has been sent. An `OpenResultOk`/`OpenResultErr` message
+	/// followed with a `CloseResult` message are expected.
+	OpeningThenClosing,
+
+	/// Connection is in the `Closed` state, but a [`NotifsHandlerOut::OpenDesiredByRemote`]
+	/// message has been received, meaning that the remote wants to open a substream.
+	OpenDesiredByRemote,
+
+	/// Connection is in the `Open` state.
+	///
+	/// The external API is notified of a channel with this peer if any of its connection is in
+	/// this state.
+	Open(NotificationsSink),
+}
+
 /// State of an "incoming" message sent to the peer set manager.
 #[derive(Debug)]
 struct IncomingPeer {
@@ -303,8 +337,6 @@ pub enum GenericProtoOut {
 	CustomProtocolClosed {
 		/// Id of the peer we were connected to.
 		peer_id: PeerId,
-		/// Reason why the substream closed, for debugging purposes.
-		reason: Cow<'static, str>,
 	},
 
 	/// Receives a message on the legacy substream.
@@ -438,46 +470,79 @@ impl GenericProto {
 			st @ PeerState::Disabled { .. } => *entry.into_mut() = st,
 			st @ PeerState::Requested => *entry.into_mut() = st,
 			st @ PeerState::PendingRequest { .. } => *entry.into_mut() = st,
-			st @ PeerState::Banned { .. } => *entry.into_mut() = st,
+			st @ PeerState::Backoff { .. } => *entry.into_mut() = st,
 
 			// DisabledPendingEnable => Disabled.
 			PeerState::DisabledPendingEnable {
-				open,
+				connections,
 				timer_deadline,
 				timer: _
 			} => {
 				debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id);
 				self.peerset.dropped(peer_id.clone());
-				let banned_until = Some(if let Some(ban) = ban {
+				let backoff_until = Some(if let Some(ban) = ban {
 					cmp::max(timer_deadline, Instant::now() + ban)
 				} else {
 					timer_deadline
 				});
 				*entry.into_mut() = PeerState::Disabled {
-					open,
-					banned_until
+					connections,
+					backoff_until
 				}
 			},
 
 			// Enabled => Disabled.
-			PeerState::Enabled { open } => {
+			// All open or opening connections are sent a `Close` message.
+			// If relevant, the external API is instantly notified.
+			PeerState::Enabled { mut connections } => {
 				debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id);
 				self.peerset.dropped(peer_id.clone());
-				debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id);
-				self.events.push_back(NetworkBehaviourAction::NotifyHandler {
-					peer_id: peer_id.clone(),
-					handler: NotifyHandler::All,
-					event: NotifsHandlerIn::Disable,
-				});
-				let banned_until = ban.map(|dur| Instant::now() + dur);
+
+				if connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))) {
+					debug!(target: "sub-libp2p", "External API <= Closed({})", peer_id);
+					let event = GenericProtoOut::CustomProtocolClosed {
+						peer_id: peer_id.clone(),
+					};
+					self.events.push_back(NetworkBehaviourAction::GenerateEvent(event));
+				}
+
+				for (connec_id, connec_state) in connections.iter_mut()
+					.filter(|(_, s)| matches!(s, ConnectionState::Open(_)))
+				{
+					debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", peer_id, *connec_id);
+					self.events.push_back(NetworkBehaviourAction::NotifyHandler {
+						peer_id: peer_id.clone(),
+						handler: NotifyHandler::One(*connec_id),
+						event: NotifsHandlerIn::Close,
+					});
+					*connec_state = ConnectionState::Closing;
+				}
+
+				for (connec_id, connec_state) in connections.iter_mut()
+					.filter(|(_, s)| matches!(s, ConnectionState::Opening))
+				{
+					debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", peer_id, *connec_id);
+					self.events.push_back(NetworkBehaviourAction::NotifyHandler {
+						peer_id: peer_id.clone(),
+						handler: NotifyHandler::One(*connec_id),
+						event: NotifsHandlerIn::Close,
+					});
+					*connec_state = ConnectionState::OpeningThenClosing;
+				}
+
+				debug_assert!(!connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))));
+				debug_assert!(!connections.iter().any(|(_, s)| matches!(s, ConnectionState::Opening)));
+
+				let backoff_until = ban.map(|dur| Instant::now() + dur);
 				*entry.into_mut() = PeerState::Disabled {
-					open,
-					banned_until
+					connections,
+					backoff_until
 				}
 			},
 
 			// Incoming => Disabled.
-			PeerState::Incoming => {
+			// Ongoing opening requests from the remote are rejected.
+			PeerState::Incoming { mut connections, backoff_until } => {
 				let inc = if let Some(inc) = self.incoming.iter_mut()
 					.find(|i| i.peer_id == *entry.key() && i.alive) {
 					inc
@@ -488,16 +553,30 @@ impl GenericProto {
 				};
 
 				inc.alive = false;
-				debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id);
-				self.events.push_back(NetworkBehaviourAction::NotifyHandler {
-					peer_id: peer_id.clone(),
-					handler: NotifyHandler::All,
-					event: NotifsHandlerIn::Disable,
-				});
-				let banned_until = ban.map(|dur| Instant::now() + dur);
+
+				for (connec_id, connec_state) in connections.iter_mut()
+					.filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))
+				{
+					debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", peer_id, *connec_id);
+					self.events.push_back(NetworkBehaviourAction::NotifyHandler {
+						peer_id: peer_id.clone(),
+						handler: NotifyHandler::One(*connec_id),
+						event: NotifsHandlerIn::Close,
+					});
+					*connec_state = ConnectionState::Closing;
+				}
+
+				let backoff_until = match (backoff_until, ban) {
+					(Some(a), Some(b)) => Some(cmp::max(a, Instant::now() + b)),
+					(Some(a), None) => Some(a),
+					(None, Some(b)) => Some(Instant::now() + b),
+					(None, None) => None,
+				};
+
+				debug_assert!(!connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)));
 				*entry.into_mut() = PeerState::Disabled {
-					open: SmallVec::new(),
-					banned_until
+					connections,
+					backoff_until
 				}
 			},
 
@@ -521,7 +600,7 @@ impl GenericProto {
 			Some(PeerState::Incoming { .. }) => false,
 			Some(PeerState::Requested) => false,
 			Some(PeerState::PendingRequest { .. }) => false,
-			Some(PeerState::Banned { .. }) => false,
+			Some(PeerState::Backoff { .. }) => false,
 			Some(PeerState::Poisoned) => false,
 		}
 	}
@@ -591,7 +670,8 @@ impl GenericProto {
 
 	/// Function that is called when the peerset wants us to connect to a peer.
 	fn peerset_report_connect(&mut self, peer_id: PeerId) {
-		let mut occ_entry = match self.peers.entry(peer_id) {
+		// If `PeerId` is unknown to us, insert an entry, start dialing, and return early.
+		let mut occ_entry = match self.peers.entry(peer_id.clone()) {
 			Entry::Occupied(entry) => entry,
 			Entry::Vacant(entry) => {
 				// If there's no entry in `self.peers`, start dialing.
@@ -609,26 +689,19 @@ impl GenericProto {
 		let now = Instant::now();
 
 		match mem::replace(occ_entry.get_mut(), PeerState::Poisoned) {
-			PeerState::Banned { ref until } if *until > now => {
+			// Backoff (not expired) => PendingRequest
+			PeerState::Backoff { ref timer, ref timer_deadline } if *timer_deadline > now => {
 				let peer_id = occ_entry.key().clone();
 				debug!(target: "sub-libp2p", "PSM => Connect({:?}): Will start to connect at \
-					until {:?}", peer_id, until);
-
-				let delay_id = self.next_delay_id;
-				self.next_delay_id.0 += 1;
-				let delay = futures_timer::Delay::new(*until - now);
-				self.delays.push(async move {
-					delay.await;
-					(delay_id, peer_id)
-				}.boxed());
-
+					until {:?}", peer_id, timer_deadline);
 				*occ_entry.into_mut() = PeerState::PendingRequest {
-					timer: delay_id,
-					timer_deadline: *until,
+					timer: *timer,
+					timer_deadline: *timer_deadline,
 				};
 			},
 
-			PeerState::Banned { .. } => {
+			// Backoff (expired) => Requested
+			PeerState::Backoff { .. } => {
 				debug!(target: "sub-libp2p", "PSM => Connect({:?}): Starting to connect", occ_entry.key());
 				debug!(target: "sub-libp2p", "Libp2p <= Dial {:?}", occ_entry.key());
 				self.events.push_back(NetworkBehaviourAction::DialPeer {
@@ -638,42 +711,90 @@ impl GenericProto {
 				*occ_entry.into_mut() = PeerState::Requested;
 			},
 
+			// Disabled (with non-expired ban) => DisabledPendingEnable
 			PeerState::Disabled {
-				open,
-				banned_until: Some(ref banned)
-			} if *banned > now => {
+				connections,
+				backoff_until: Some(ref backoff)
+			} if *backoff > now => {
 				let peer_id = occ_entry.key().clone();
-				debug!(target: "sub-libp2p", "PSM => Connect({:?}): But peer is banned until {:?}",
-					peer_id, banned);
+				debug!(target: "sub-libp2p", "PSM => Connect({:?}): But peer is backed-off until {:?}",
+					peer_id, backoff);
 
 				let delay_id = self.next_delay_id;
 				self.next_delay_id.0 += 1;
-				let delay = futures_timer::Delay::new(*banned - now);
+				let delay = futures_timer::Delay::new(*backoff - now);
 				self.delays.push(async move {
 					delay.await;
 					(delay_id, peer_id)
 				}.boxed());
 
 				*occ_entry.into_mut() = PeerState::DisabledPendingEnable {
-					open,
+					connections,
 					timer: delay_id,
-					timer_deadline: *banned,
+					timer_deadline: *backoff,
 				};
 			},
 
-			PeerState::Disabled { open, banned_until: _ } => {
-				debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.",
-					occ_entry.key());
-				debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key());
-				self.events.push_back(NetworkBehaviourAction::NotifyHandler {
-					peer_id: occ_entry.key().clone(),
-					handler: NotifyHandler::All,
-					event: NotifsHandlerIn::Enable,
-				});
-				*occ_entry.into_mut() = PeerState::Enabled { open };
+			// Disabled => Enabled
+			PeerState::Disabled { mut connections, backoff_until } => {
+				debug_assert!(!connections.iter().any(|(_, s)| {
+					matches!(s, ConnectionState::Open(_))
+				}));
+
+				// The first element of `closed` is chosen to open the notifications substream.
+				if let Some((connec_id, connec_state)) = connections.iter_mut()
+					.find(|(_, s)| matches!(s, ConnectionState::Closed))
+				{
+					debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.",
+						occ_entry.key());
+					debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", peer_id, *connec_id);
+					self.events.push_back(NetworkBehaviourAction::NotifyHandler {
+						peer_id: peer_id.clone(),
+						handler: NotifyHandler::One(*connec_id),
+						event: NotifsHandlerIn::Open,
+					});
+					*connec_state = ConnectionState::Opening;
+					*occ_entry.into_mut() = PeerState::Enabled { connections };
+				} else {
+					// If no connection is available, switch to `DisabledPendingEnable` in order
+					// to try again later.
+					debug_assert!(connections.iter().any(|(_, s)| {
+						matches!(s, ConnectionState::OpeningThenClosing | ConnectionState::Closing)
+					}));
+					debug!(
+						target: "sub-libp2p",
+						"PSM => Connect({:?}): No connection in proper state. Delaying.",
+						occ_entry.key()
+					);
+
+					let timer_deadline = {
+						let base = now + Duration::from_secs(5);
+						if let Some(backoff_until) = backoff_until {
+							cmp::max(base, backoff_until)
+						} else {
+							base
+						}
+					};
+
+					let delay_id = self.next_delay_id;
+					self.next_delay_id.0 += 1;
+					debug_assert!(timer_deadline > now);
+					let delay = futures_timer::Delay::new(timer_deadline - now);
+					self.delays.push(async move {
+						delay.await;
+						(delay_id, peer_id)
+					}.boxed());
+
+					*occ_entry.into_mut() = PeerState::DisabledPendingEnable {
+						connections,
+						timer: delay_id,
+						timer_deadline,
+					};
+				}
 			},
 
-			PeerState::Incoming => {
+			// Incoming => Enabled
+			PeerState::Incoming { mut connections, .. } => {
 				debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.",
 					occ_entry.key());
 				if let Some(inc) = self.incoming.iter_mut()
@@ -683,36 +804,50 @@ impl GenericProto {
 					error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \
 						incoming for incoming peer")
 				}
-				debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key());
-				self.events.push_back(NetworkBehaviourAction::NotifyHandler {
-					peer_id: occ_entry.key().clone(),
-					handler: NotifyHandler::All,
-					event: NotifsHandlerIn::Enable,
-				});
-				*occ_entry.into_mut() = PeerState::Enabled { open: SmallVec::new() };
+
+				debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)));
+				for (connec_id, connec_state) in connections.iter_mut()
+					.filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))
+				{
+					debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", occ_entry.key(), *connec_id);
+					self.events.push_back(NetworkBehaviourAction::NotifyHandler {
+						peer_id: occ_entry.key().clone(),
+						handler: NotifyHandler::One(*connec_id),
+						event: NotifsHandlerIn::Open,
+					});
+					*connec_state = ConnectionState::Opening;
+				}
+
+				*occ_entry.into_mut() = PeerState::Enabled { connections };
 			},
 
+			// Other states are kept as-is.
 			st @ PeerState::Enabled { .. } => {
 				warn!(target: "sub-libp2p",
 					"PSM => Connect({:?}): Already connected.",
 					occ_entry.key());
 				*occ_entry.into_mut() = st;
+				debug_assert!(false);
 			},
 			st @ PeerState::DisabledPendingEnable { .. } => {
 				warn!(target: "sub-libp2p",
 					"PSM => Connect({:?}): Already pending enabling.",
 					occ_entry.key());
 				*occ_entry.into_mut() = st;
+				debug_assert!(false);
 			},
 			st @ PeerState::Requested { .. } | st @ PeerState::PendingRequest { .. } => {
 				warn!(target: "sub-libp2p",
 					"PSM => Connect({:?}): Duplicate request.",
 					occ_entry.key());
 				*occ_entry.into_mut() = st;
+				debug_assert!(false);
 			},
 
-			PeerState::Poisoned =>
-				error!(target: "sub-libp2p", "State of {:?} is poisoned", occ_entry.key()),
+			PeerState::Poisoned => {
+				error!(target: "sub-libp2p", "State of {:?} is poisoned", occ_entry.key());
+				debug_assert!(false);
+			},
 		}
 	}
 
@@ -727,43 +862,66 @@ impl GenericProto {
 		};
 
 		match mem::replace(entry.get_mut(), PeerState::Poisoned) {
-			st @ PeerState::Disabled { .. } | st @ PeerState::Banned { .. } => {
+			st @ PeerState::Disabled { .. } | st @ PeerState::Backoff { .. } => {
 				debug!(target: "sub-libp2p", "PSM => Drop({:?}): Already disabled.", entry.key());
 				*entry.into_mut() = st;
 			},
 
-			PeerState::DisabledPendingEnable {
-				open,
-				timer_deadline,
-				timer: _
-			} => {
+			// DisabledPendingEnable => Disabled
+			PeerState::DisabledPendingEnable { connections, timer_deadline, timer: _ } => {
+				debug_assert!(!connections.is_empty());
 				debug!(target: "sub-libp2p",
 					"PSM => Drop({:?}): Interrupting pending enabling.",
 					entry.key());
 				*entry.into_mut() = PeerState::Disabled {
-					open,
-					banned_until: Some(timer_deadline),
+					connections,
+					backoff_until: Some(timer_deadline),
 				};
 			},
 
-			PeerState::Enabled { open } => {
+			// Enabled => Disabled
+			PeerState::Enabled { mut connections } => {
 				debug!(target: "sub-libp2p", "PSM => Drop({:?}): Disabling connections.", entry.key());
-				debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", entry.key());
-				self.events.push_back(NetworkBehaviourAction::NotifyHandler {
-					peer_id: entry.key().clone(),
-					handler: NotifyHandler::All,
-					event: NotifsHandlerIn::Disable,
-				});
-				*entry.into_mut() = PeerState::Disabled {
-					open,
-					banned_until: None
+
+				debug_assert!(connections.iter().any(|(_, s)|
+					matches!(s, ConnectionState::Opening | ConnectionState::Open(_))));
+
+				if connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))) {
+					debug!(target: "sub-libp2p", "External API <= Closed({})", entry.key());
+					let event = GenericProtoOut::CustomProtocolClosed {
+						peer_id: entry.key().clone(),
+					};
+					self.events.push_back(NetworkBehaviourAction::GenerateEvent(event));
 				}
+
+				for (connec_id, connec_state) in connections.iter_mut()
+					.filter(|(_, s)| matches!(s, ConnectionState::Opening))
+				{
+					debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", entry.key(), *connec_id);
+					self.events.push_back(NetworkBehaviourAction::NotifyHandler {
+						peer_id: entry.key().clone(),
+						handler: NotifyHandler::One(*connec_id),
+						event: NotifsHandlerIn::Close,
+					});
+					*connec_state = ConnectionState::OpeningThenClosing;
+				}
+
+				for (connec_id, connec_state) in connections.iter_mut()
+					.filter(|(_, s)| matches!(s, ConnectionState::Open(_)))
+				{
+					debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", entry.key(), *connec_id);
+					self.events.push_back(NetworkBehaviourAction::NotifyHandler {
+						peer_id: entry.key().clone(),
+						handler: NotifyHandler::One(*connec_id),
+						event: NotifsHandlerIn::Close,
+					});
+					*connec_state = ConnectionState::Closing;
+				}
+
+				*entry.into_mut() = PeerState::Disabled { connections, backoff_until: None }
 			},
-			st @ PeerState::Incoming => {
-				error!(target: "sub-libp2p", "PSM => Drop({:?}): Not enabled (Incoming).",
-					entry.key());
-				*entry.into_mut() = st;
-			},
+
+			// Requested => Ø
 			PeerState::Requested => {
 				// We don't cancel dialing. Libp2p doesn't expose that on purpose, as other
 				// sub-systems (such as the discovery mechanism) may require dialing this peer as
@@ -771,13 +929,24 @@ impl GenericProto {
 				debug!(target: "sub-libp2p", "PSM => Drop({:?}): Not yet connected.", entry.key());
 				entry.remove();
 			},
-			PeerState::PendingRequest { timer_deadline, .. } => {
+
+			// PendingRequest => Backoff
+			PeerState::PendingRequest { timer, timer_deadline } => {
 				debug!(target: "sub-libp2p", "PSM => Drop({:?}): Not yet connected", entry.key());
-				*entry.into_mut() = PeerState::Banned { until: timer_deadline }
+				*entry.into_mut() = PeerState::Backoff { timer, timer_deadline }
 			},
 
-			PeerState::Poisoned =>
-				error!(target: "sub-libp2p", "State of {:?} is poisoned", entry.key()),
+			// Invalid state transitions.
+			st @ PeerState::Incoming { .. } => {
+				error!(target: "sub-libp2p", "PSM => Drop({:?}): Not enabled (Incoming).",
+					entry.key());
+				*entry.into_mut() = st;
+				debug_assert!(false);
+			},
+			PeerState::Poisoned => {
+				error!(target: "sub-libp2p", "State of {:?} is poisoned", entry.key());
+				debug_assert!(false);
+			},
 		}
 	}
 
@@ -792,28 +961,56 @@ impl GenericProto {
 		};
 
 		if !incoming.alive {
-			debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Obsolete incoming,
-				sending back dropped", index, incoming.peer_id);
-			debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", incoming.peer_id);
-			self.peerset.dropped(incoming.peer_id);
+			debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Obsolete incoming",
+				index, incoming.peer_id);
+			match self.peers.get_mut(&incoming.peer_id) {
+				Some(PeerState::DisabledPendingEnable { .. }) |
+				Some(PeerState::Enabled { .. }) => {}
+				_ => {
+					debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", incoming.peer_id);
+					self.peerset.dropped(incoming.peer_id);
+				},
+			}
 			return
 		}
 
-		match self.peers.get_mut(&incoming.peer_id) {
-			Some(state @ PeerState::Incoming) => {
+		let state = match self.peers.get_mut(&incoming.peer_id) {
+			Some(s) => s,
+			None => {
+				debug_assert!(false);
+				return;
+			}
+		};
+
+		match mem::replace(state, PeerState::Poisoned) {
+			// Incoming => Enabled
+			PeerState::Incoming { mut connections, .. } => {
 				debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Enabling connections.",
 					index, incoming.peer_id);
-				debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", incoming.peer_id);
-				self.events.push_back(NetworkBehaviourAction::NotifyHandler {
-					peer_id: incoming.peer_id,
-					handler: NotifyHandler::All,
-					event: NotifsHandlerIn::Enable,
-				});
-				*state = PeerState::Enabled { open: SmallVec::new() };
+
+				debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)));
+				for (connec_id, connec_state) in connections.iter_mut()
+					.filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))
+				{
+					debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", incoming.peer_id, *connec_id);
+					self.events.push_back(NetworkBehaviourAction::NotifyHandler {
+						peer_id: incoming.peer_id.clone(),
+						handler: NotifyHandler::One(*connec_id),
+						event: NotifsHandlerIn::Open,
+					});
+					*connec_state = ConnectionState::Opening;
+				}
+
+				*state = PeerState::Enabled { connections };
+			}
+
+			// Any state other than `Incoming` is invalid.
+			peer => {
+				error!(target: "sub-libp2p",
+					"State mismatch in libp2p: Expected alive incoming. Got {:?}.",
+					peer);
+				debug_assert!(false);
 			}
-			peer => error!(target: "sub-libp2p",
-				"State mismatch in libp2p: Expected alive incoming. Got {:?}.",
-				peer)
 		}
 	}
 
@@ -832,20 +1029,34 @@ impl GenericProto {
 			return
 		}
 
-		match self.peers.get_mut(&incoming.peer_id) {
-			Some(state @ PeerState::Incoming) => {
+		let state = match self.peers.get_mut(&incoming.peer_id) {
+			Some(s) => s,
+			None => {
+				debug_assert!(false);
+				return;
+			}
+		};
+
+		match mem::replace(state, PeerState::Poisoned) {
+			// Incoming => Disabled
+			PeerState::Incoming { mut connections, backoff_until } => {
 				debug!(target: "sub-libp2p", "PSM => Reject({:?}, {:?}): Rejecting connections.",
 					index, incoming.peer_id);
-				debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", incoming.peer_id);
-				self.events.push_back(NetworkBehaviourAction::NotifyHandler {
-					peer_id: incoming.peer_id,
-					handler: NotifyHandler::All,
-					event: NotifsHandlerIn::Disable,
-				});
-				*state = PeerState::Disabled {
-					open: SmallVec::new(),
-					banned_until: None
-				};
+
+				debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)));
+				for (connec_id, connec_state) in connections.iter_mut()
+					.filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))
+				{
+					debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close", incoming.peer_id, connec_id);
+					self.events.push_back(NetworkBehaviourAction::NotifyHandler {
+						peer_id: incoming.peer_id.clone(),
+						handler: NotifyHandler::One(*connec_id),
+						event: NotifsHandlerIn::Close,
+					});
+					*connec_state = ConnectionState::Closing;
+				}
+
+				*state = PeerState::Disabled { connections, backoff_until };
 			}
 			peer => error!(target: "sub-libp2p",
 				"State mismatch in libp2p: Expected alive incoming. Got {:?}.",
@@ -873,212 +1084,309 @@ impl NetworkBehaviour for GenericProto {
 	}
 
 	fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) {
-		debug!(target: "sub-libp2p", "Libp2p => Connection ({:?},{:?}) to {} established.",
-			conn, endpoint, peer_id);
-		match (self.peers.entry(peer_id.clone()).or_insert(PeerState::Poisoned), endpoint) {
-			(st @ &mut PeerState::Requested, endpoint) |
-			(st @ &mut PeerState::PendingRequest { .. }, endpoint) => {
+		match self.peers.entry(peer_id.clone()).or_insert(PeerState::Poisoned) {
+			// Requested | PendingRequest => Enabled
+			st @ &mut PeerState::Requested |
+			st @ &mut PeerState::PendingRequest { .. } => {
 				debug!(target: "sub-libp2p",
 					"Libp2p => Connected({}, {:?}): Connection was requested by PSM.",
 					peer_id, endpoint
 				);
-				*st = PeerState::Enabled { open: SmallVec::new() };
+				debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", peer_id, *conn);
 				self.events.push_back(NetworkBehaviourAction::NotifyHandler {
 					peer_id: peer_id.clone(),
 					handler: NotifyHandler::One(*conn),
-					event: NotifsHandlerIn::Enable
+					event: NotifsHandlerIn::Open
 				});
-			}
 
-			// Note: it may seem weird that "Banned" peers get treated as if they were absent.
-			// This is because the word "Banned" means "temporarily prevent outgoing connections to
-			// this peer", and not "banned" in the sense that we would refuse the peer altogether.
-			(st @ &mut PeerState::Poisoned, endpoint @ ConnectedPoint::Listener { .. }) |
-			(st @ &mut PeerState::Banned { .. }, endpoint @ ConnectedPoint::Listener { .. }) => {
-				let incoming_id = self.next_incoming_index;
-				self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) {
-					Some(v) => v,
-					None => {
-						error!(target: "sub-libp2p", "Overflow in next_incoming_index");
-						return
-					}
-				};
-				debug!(target: "sub-libp2p", "Libp2p => Connected({}, {:?}): Incoming connection",
-					peer_id, endpoint);
-				debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).",
-					peer_id, incoming_id);
-				self.peerset.incoming(peer_id.clone(), incoming_id);
-				self.incoming.push(IncomingPeer {
-					peer_id: peer_id.clone(),
-					alive: true,
-					incoming_id,
-				});
-				*st = PeerState::Incoming { };
+				let mut connections = SmallVec::new();
+				connections.push((*conn, ConnectionState::Opening));
+				*st = PeerState::Enabled { connections };
 			}
 
-			(st @ &mut PeerState::Poisoned, endpoint) |
-			(st @ &mut PeerState::Banned { .. }, endpoint) => {
-				let banned_until = if let PeerState::Banned { until } = st {
-					Some(*until)
+			// Poisoned gets inserted above if the entry was missing.
+			// Ø | Backoff => Disabled
+			st @ &mut PeerState::Poisoned |
+			st @ &mut PeerState::Backoff { .. } => {
+				let backoff_until = if let PeerState::Backoff { timer_deadline, .. } = st {
+					Some(*timer_deadline)
 				} else {
 					None
 				};
 				debug!(target: "sub-libp2p",
-					"Libp2p => Connected({},{:?}): Not requested by PSM, disabling.",
-					peer_id, endpoint);
-				*st = PeerState::Disabled { open: SmallVec::new(), banned_until };
-				self.events.push_back(NetworkBehaviourAction::NotifyHandler {
-					peer_id: peer_id.clone(),
-					handler: NotifyHandler::One(*conn),
-					event: NotifsHandlerIn::Disable
-				});
-			}
+					"Libp2p => Connected({}, {:?}, {:?}): Not requested by PSM, disabling.",
+					peer_id, endpoint, *conn);
 
-			(PeerState::Incoming { .. }, _) => {
-				debug!(target: "sub-libp2p",
-					"Secondary connection {:?} to {} waiting for PSM decision.",
-					conn, peer_id);
-			},
-
-			(PeerState::Enabled { .. }, _) => {
-				debug!(target: "sub-libp2p", "Handler({},{:?}) <= Enable secondary connection",
-					peer_id, conn);
-				self.events.push_back(NetworkBehaviourAction::NotifyHandler {
-					peer_id: peer_id.clone(),
-					handler: NotifyHandler::One(*conn),
-					event: NotifsHandlerIn::Enable
-				});
+				let mut connections = SmallVec::new();
+				connections.push((*conn, ConnectionState::Closed));
+				*st = PeerState::Disabled { connections, backoff_until };
 			}
 
-			(PeerState::Disabled { .. }, _) | (PeerState::DisabledPendingEnable { .. }, _) => {
-				debug!(target: "sub-libp2p", "Handler({},{:?}) <= Disable secondary connection",
-					peer_id, conn);
-				self.events.push_back(NetworkBehaviourAction::NotifyHandler {
-					peer_id: peer_id.clone(),
-					handler: NotifyHandler::One(*conn),
-					event: NotifsHandlerIn::Disable
-				});
+			// In all other states, add this new connection to the list of closed inactive
+			// connections.
+			PeerState::Incoming { connections, .. } |
+			PeerState::Disabled { connections, .. } |
+			PeerState::DisabledPendingEnable { connections, .. } |
+			PeerState::Enabled { connections, .. } => {
+				debug!(target: "sub-libp2p",
+					"Libp2p => Connected({}, {:?}, {:?}): Secondary connection. Leaving closed.",
+					peer_id, endpoint, *conn);
+				connections.push((*conn, ConnectionState::Closed));
 			}
 		}
 	}
 
-	fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) {
-		debug!(target: "sub-libp2p", "Libp2p => Connection ({:?},{:?}) to {} closed.",
-			conn, endpoint, peer_id);
-		match self.peers.get_mut(peer_id) {
-			Some(PeerState::Disabled { open, .. }) |
-			Some(PeerState::DisabledPendingEnable { open, .. }) |
-			Some(PeerState::Enabled { open, .. }) => {
-				// Check if the "link" to the peer is already considered closed,
-				// i.e. there is no connection that is open for custom protocols,
-				// in which case `CustomProtocolClosed` was already emitted.
-				let closed = open.is_empty();
-				let sink_closed = open.get(0).map_or(false, |(c, _)| c == conn);
-				open.retain(|(c, _)| c != conn);
-				if !closed {
-					if let Some((_, sink)) = open.get(0) {
-						if sink_closed {
-							let event = GenericProtoOut::CustomProtocolReplaced {
-								peer_id: peer_id.clone(),
-								notifications_sink: sink.clone(),
+	fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, _endpoint: &ConnectedPoint) {
+		let mut entry = if let Entry::Occupied(entry) = self.peers.entry(peer_id.clone()) {
+			entry
+		} else {
+			error!(target: "sub-libp2p", "inject_connection_closed: State mismatch in the custom protos handler");
+			debug_assert!(false);
+			return
+		};
+
+		match mem::replace(entry.get_mut(), PeerState::Poisoned) {
+			// Disabled => Disabled | Backoff | Ø
+			PeerState::Disabled { mut connections, backoff_until } => {
+				debug!(target: "sub-libp2p", "Libp2p => Disconnected({}, {:?}): Disabled.", peer_id, *conn);
+
+				if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) {
+					connections.remove(pos);
+				} else {
+					debug_assert!(false);
+					error!(target: "sub-libp2p",
+						"inject_connection_closed: State mismatch in the custom protos handler");
+				}
+
+				if connections.is_empty() {
+					if let Some(until) = backoff_until {
+						let now = Instant::now();
+						if until > now {
+							let delay_id = self.next_delay_id;
+							self.next_delay_id.0 += 1;
+							let delay = futures_timer::Delay::new(until - now);
+							let peer_id = peer_id.clone();
+							self.delays.push(async move {
+								delay.await;
+								(delay_id, peer_id)
+							}.boxed());
+
+							*entry.get_mut() = PeerState::Backoff {
+								timer: delay_id,
+								timer_deadline: until,
 							};
-							self.events.push_back(NetworkBehaviourAction::GenerateEvent(event));
+						} else {
+							entry.remove();
 						}
 					} else {
-						debug!(target: "sub-libp2p", "External API <= Closed({})", peer_id);
-						let event = GenericProtoOut::CustomProtocolClosed {
-							peer_id: peer_id.clone(),
-							reason: "Disconnected by libp2p".into(),
-						};
-
-						self.events.push_back(NetworkBehaviourAction::GenerateEvent(event));
+						entry.remove();
 					}
+				} else {
+					*entry.get_mut() = PeerState::Disabled { connections, backoff_until };
 				}
-			}
-			_ => {}
-		}
-	}
+			},
 
-	fn inject_disconnected(&mut self, peer_id: &PeerId) {
-		match self.peers.remove(peer_id) {
-			None | Some(PeerState::Requested) | Some(PeerState::PendingRequest { .. }) |
-			Some(PeerState::Banned { .. }) =>
-				// This is a serious bug either in this state machine or in libp2p.
-				error!(target: "sub-libp2p",
-					"`inject_disconnected` called for unknown peer {}",
-					peer_id),
+			// DisabledPendingEnable => DisabledPendingEnable | Backoff
+			PeerState::DisabledPendingEnable { mut connections, timer_deadline, timer } => {
+				debug!(
+					target: "sub-libp2p",
+					"Libp2p => Disconnected({}, {:?}): Disabled but pending enable.",
+					peer_id, *conn
+				);
 
-			Some(PeerState::Disabled { open, banned_until, .. }) => {
-				if !open.is_empty() {
+				if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) {
+					connections.remove(pos);
+				} else {
 					debug_assert!(false);
-					error!(
-						target: "sub-libp2p",
-						"State mismatch: disconnected from {} with non-empty list of connections",
-						peer_id
-					);
+					error!(target: "sub-libp2p",
+						"inject_connection_closed: State mismatch in the custom protos handler");
 				}
-				debug!(target: "sub-libp2p", "Libp2p => Disconnected({}): Was disabled.", peer_id);
-				if let Some(until) = banned_until {
-					self.peers.insert(peer_id.clone(), PeerState::Banned { until });
+
+				if connections.is_empty() {
+					debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id);
+					self.peerset.dropped(peer_id.clone());
+					*entry.get_mut() = PeerState::Backoff { timer, timer_deadline };
+
+				} else {
+					*entry.get_mut() = PeerState::DisabledPendingEnable {
+						connections, timer_deadline, timer
+					};
 				}
-			}
+			},
 
-			Some(PeerState::DisabledPendingEnable { open, timer_deadline, .. }) => {
-				if !open.is_empty() {
+			// Incoming => Incoming | Disabled | Backoff | Ø
+			PeerState::Incoming { mut connections, backoff_until } => {
+				debug!(
+					target: "sub-libp2p",
+					"Libp2p => Disconnected({}, {:?}): OpenDesiredByRemote.",
+					peer_id, *conn
+				);
+
+				debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)));
+
+				if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) {
+					connections.remove(pos);
+				} else {
 					debug_assert!(false);
-					error!(
-						target: "sub-libp2p",
-						"State mismatch: disconnected from {} with non-empty list of connections",
-						peer_id
-					);
+					error!(target: "sub-libp2p",
+						"inject_connection_closed: State mismatch in the custom protos handler");
+				}
+
+				let no_desired_left = !connections.iter().any(|(_, s)| {
+					matches!(s, ConnectionState::OpenDesiredByRemote)
+				});
+
+				// If no connection is `OpenDesiredByRemote` anymore, clean up the peerset incoming
+				// request.
+				if no_desired_left {
+					// In the incoming state, we don't report "Dropped". Instead we will just
+					// ignore the corresponding Accept/Reject.
+					if let Some(state) = self.incoming.iter_mut()
+						.find(|i| i.alive && i.peer_id == *peer_id)
+					{
+						state.alive = false;
+					} else {
+						error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \
+							incoming corresponding to an incoming state in peers");
+						debug_assert!(false);
+					}
+				}
+
+				if connections.is_empty() {
+					if let Some(until) = backoff_until {
+						let now = Instant::now();
+						if until > now {
+							let delay_id = self.next_delay_id;
+							self.next_delay_id.0 += 1;
+							let delay = futures_timer::Delay::new(until - now);
+							let peer_id = peer_id.clone();
+							self.delays.push(async move {
+								delay.await;
+								(delay_id, peer_id)
+							}.boxed());
+
+							*entry.get_mut() = PeerState::Backoff {
+								timer: delay_id,
+								timer_deadline: until,
+							};
+						} else {
+							entry.remove();
+						}
+					} else {
+						entry.remove();
+					}
+
+				} else if no_desired_left {
+					// If no connection is `OpenDesiredByRemote` anymore, switch to `Disabled`.
+					*entry.get_mut() = PeerState::Disabled { connections, backoff_until };
+				} else {
+					*entry.get_mut() = PeerState::Incoming { connections, backoff_until };
 				}
-				debug!(target: "sub-libp2p",
-					"Libp2p => Disconnected({}): Was disabled but pending enable.",
-					peer_id);
-				debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id);
-				self.peerset.dropped(peer_id.clone());
-				self.peers.insert(peer_id.clone(), PeerState::Banned { until: timer_deadline });
 			}
 
-			Some(PeerState::Enabled { open, .. }) => {
-				if !open.is_empty() {
+			// Enabled => Enabled | Backoff
+			// Peers are always backed-off when disconnecting while Enabled.
+			PeerState::Enabled { mut connections } => {
+				debug!(
+					target: "sub-libp2p",
+					"Libp2p => Disconnected({}, {:?}): Enabled.",
+					peer_id, *conn
+				);
+
+				debug_assert!(connections.iter().any(|(_, s)|
+					matches!(s, ConnectionState::Opening | ConnectionState::Open(_))));
+
+				if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) {
+					let (_, state) = connections.remove(pos);
+					if let ConnectionState::Open(_) = state {
+						if let Some((replacement_pos, replacement_sink)) = connections
+							.iter()
+							.enumerate()
+							.filter_map(|(num, (_, s))| {
+								match s {
+									ConnectionState::Open(s) => Some((num, s.clone())),
+									_ => None
+								}
+							})
+							.next()
+						{
+							if pos <= replacement_pos {
+								debug!(target: "sub-libp2p", "External API <= Sink replaced({})", peer_id);
+								let event = GenericProtoOut::CustomProtocolReplaced {
+									peer_id: peer_id.clone(),
+									notifications_sink: replacement_sink,
+								};
+								self.events.push_back(NetworkBehaviourAction::GenerateEvent(event));
+							}
+						} else {
+							debug!(target: "sub-libp2p", "External API <= Closed({})", peer_id);
+							let event = GenericProtoOut::CustomProtocolClosed {
+								peer_id: peer_id.clone(),
+							};
+							self.events.push_back(NetworkBehaviourAction::GenerateEvent(event));
+						}
+					}
+
+				} else {
+					error!(target: "sub-libp2p",
+						"inject_connection_closed: State mismatch in the custom protos handler");
 					debug_assert!(false);
-					error!(
-						target: "sub-libp2p",
-						"State mismatch: disconnected from {} with non-empty list of connections",
-						peer_id
-					);
 				}
-				debug!(target: "sub-libp2p", "Libp2p => Disconnected({}): Was enabled.", peer_id);
-				debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id);
-				self.peerset.dropped(peer_id.clone());
-				let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng());
-				self.peers.insert(peer_id.clone(), PeerState::Banned {
-					until: Instant::now() + Duration::from_secs(ban_dur)
-				});
-			}
 
-			// In the incoming state, we don't report "Dropped". Instead we will just ignore the
-			// corresponding Accept/Reject.
-			Some(PeerState::Incoming { }) => {
-				if let Some(state) = self.incoming.iter_mut()
-					.find(|i| i.alive && i.peer_id == *peer_id)
+				if connections.is_empty() {
+					debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id);
+					self.peerset.dropped(peer_id.clone());
+					let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng());
+
+					let delay_id = self.next_delay_id;
+					self.next_delay_id.0 += 1;
+					let delay = futures_timer::Delay::new(Duration::from_secs(ban_dur));
+					let peer_id = peer_id.clone();
+					self.delays.push(async move {
+						delay.await;
+						(delay_id, peer_id)
+					}.boxed());
+
+					*entry.get_mut() = PeerState::Backoff {
+						timer: delay_id,
+						timer_deadline: Instant::now() + Duration::from_secs(ban_dur),
+					};
+
+				} else if !connections.iter().any(|(_, s)|
+					matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))
 				{
-					debug!(target: "sub-libp2p",
-						"Libp2p => Disconnected({}): Was in incoming mode with id {:?}.",
-						peer_id, state.incoming_id);
-					state.alive = false;
+					debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id);
+					self.peerset.dropped(peer_id.clone());
+
+					*entry.get_mut() = PeerState::Disabled {
+						connections,
+						backoff_until: None
+					};
+
 				} else {
-					error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in incoming \
-						corresponding to an incoming state in peers")
+					*entry.get_mut() = PeerState::Enabled { connections };
 				}
 			}
 
-			Some(PeerState::Poisoned) =>
-				error!(target: "sub-libp2p", "State of peer {} is poisoned", peer_id),
+			PeerState::Requested |
+			PeerState::PendingRequest { .. } |
+			PeerState::Backoff { .. } => {
+				// This is a serious bug either in this state machine or in libp2p.
+				error!(target: "sub-libp2p",
+					"`inject_connection_closed` called for unknown peer {}",
+					peer_id);
+				debug_assert!(false);
+			},
+			PeerState::Poisoned => {
+				error!(target: "sub-libp2p", "State of peer {} is poisoned", peer_id);
+				debug_assert!(false);
+			},
 		}
 	}
 
+	fn inject_disconnected(&mut self, _peer_id: &PeerId) {
+	}
+
 	fn inject_addr_reach_failure(&mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, error: &dyn error::Error) {
 		trace!(target: "sub-libp2p", "Libp2p => Reach failure for {:?} through {:?}: {:?}", peer_id, addr, error);
 	}
@@ -1087,19 +1395,39 @@ impl NetworkBehaviour for GenericProto {
 		if let Entry::Occupied(mut entry) = self.peers.entry(peer_id.clone()) {
 			match mem::replace(entry.get_mut(), PeerState::Poisoned) {
 				// The peer is not in our list.
-				st @ PeerState::Banned { .. } => {
+				st @ PeerState::Backoff { .. } => {
 					trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id);
 					*entry.into_mut() = st;
 				},
 
 				// "Basic" situation: we failed to reach a peer that the peerset requested.
-				PeerState::Requested | PeerState::PendingRequest { .. } => {
+				st @ PeerState::Requested |
+				st @ PeerState::PendingRequest { .. } => {
 					debug!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id);
-					*entry.into_mut() = PeerState::Banned {
-						until: Instant::now() + Duration::from_secs(5)
-					};
+
 					debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id);
-					self.peerset.dropped(peer_id.clone())
+					self.peerset.dropped(peer_id.clone());
+
+					let now = Instant::now();
+					let ban_duration = match st {
+						PeerState::PendingRequest { timer_deadline, .. } if timer_deadline > now =>
+							cmp::max(timer_deadline - now, Duration::from_secs(5)),
+						_ => Duration::from_secs(5)
+					};
+
+					let delay_id = self.next_delay_id;
+					self.next_delay_id.0 += 1;
+					let delay = futures_timer::Delay::new(ban_duration);
+					let peer_id = peer_id.clone();
+					self.delays.push(async move {
+						delay.await;
+						(delay_id, peer_id)
+					}.boxed());
+
+					*entry.into_mut() = PeerState::Backoff {
+						timer: delay_id,
+						timer_deadline: now + ban_duration,
+					};
 				},
 
 				// We can still get dial failures even if we are already connected to the peer,
@@ -1110,8 +1438,10 @@ impl NetworkBehaviour for GenericProto {
 					*entry.into_mut() = st;
 				},
 
-				PeerState::Poisoned =>
-					error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id),
+				PeerState::Poisoned => {
+					error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id);
+					debug_assert!(false);
+				},
 			}
 
 		} else {
@@ -1127,123 +1457,271 @@ impl NetworkBehaviour for GenericProto {
 		event: NotifsHandlerOut,
 	) {
 		match event {
-			NotifsHandlerOut::Closed { endpoint, reason } => {
+			NotifsHandlerOut::OpenDesiredByRemote => {
 				debug!(target: "sub-libp2p",
-					"Handler({:?}) => Endpoint {:?} closed for custom protocols: {}",
-					source, endpoint, reason);
+					"Handler({:?}, {:?}) => OpenDesiredByRemote",
+					source, connection);
 
 				let mut entry = if let Entry::Occupied(entry) = self.peers.entry(source.clone()) {
 					entry
 				} else {
-					error!(target: "sub-libp2p", "Closed: State mismatch in the custom protos handler");
+					error!(target: "sub-libp2p", "OpenDesiredByRemote: State mismatch in the custom protos handler");
+					debug_assert!(false);
 					return
 				};
 
-				let (last, new_notifications_sink) = match mem::replace(entry.get_mut(), PeerState::Poisoned) {
-					PeerState::Enabled { mut open } => {
-						let pos = open.iter().position(|(c, _)| c == &connection);
-						let sink_closed = pos == Some(0);
-						if let Some(pos) = pos {
-							open.remove(pos);
+				match mem::replace(entry.get_mut(), PeerState::Poisoned) {
+					// Incoming => Incoming
+					PeerState::Incoming { mut connections, backoff_until } => {
+						debug_assert!(connections.iter().any(|(_, s)|
+							matches!(s, ConnectionState::OpenDesiredByRemote)));
+						if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) {
+							if let ConnectionState::Closed = *connec_state {
+								*connec_state = ConnectionState::OpenDesiredByRemote;
+							} else {
+								// Connections in `OpeningThenClosing` state are in a Closed phase,
+								// and as such can emit `OpenDesiredByRemote` messages.
+								// Since `Open` and `Close` messages have already been sent,
+								// there is nothing much that can be done about this anyway.
+								debug_assert!(matches!(
+									connec_state,
+									ConnectionState::OpeningThenClosing
+								));
+							}
 						} else {
-							debug_assert!(false);
 							error!(
 								target: "sub-libp2p",
-								"State mismatch with {}: unknown closed connection",
-								source
+								"OpenDesiredByRemote: State mismatch in the custom protos handler"
 							);
+							debug_assert!(false);
 						}
 
-						// TODO: We switch the entire peer state to "disabled" because of possible
-						// race conditions involving the legacy substream.
-						// Once https://github.com/paritytech/substrate/issues/5670 is done, this
-						// should be changed to stay in the `Enabled` state.
-						debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", source);
-						debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", source);
-						self.peerset.dropped(source.clone());
-						self.events.push_back(NetworkBehaviourAction::NotifyHandler {
-							peer_id: source.clone(),
-							handler: NotifyHandler::All,
-							event: NotifsHandlerIn::Disable,
-						});
+						*entry.into_mut() = PeerState::Incoming { connections, backoff_until };
+					},
 
-						let last = open.is_empty();
-						let new_notifications_sink = open.iter().next().and_then(|(_, sink)|
-							if sink_closed {
-								Some(sink.clone())
+					PeerState::Enabled { mut connections } => {
+						debug_assert!(connections.iter().any(|(_, s)|
+							matches!(s, ConnectionState::Opening | ConnectionState::Open(_))));
+
+						if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) {
+							if let ConnectionState::Closed = *connec_state {
+								debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", source, connection);
+								self.events.push_back(NetworkBehaviourAction::NotifyHandler {
+									peer_id: source,
+									handler: NotifyHandler::One(connection),
+									event: NotifsHandlerIn::Open,
+								});
+								*connec_state = ConnectionState::Opening;
 							} else {
-								None
-							});
-
-						*entry.into_mut() = PeerState::Disabled {
-							open,
-							banned_until: None
-						};
-
-						(last, new_notifications_sink)
-					},
-					PeerState::Disabled { mut open, banned_until } => {
-						let pos = open.iter().position(|(c, _)| c == &connection);
-						let sink_closed = pos == Some(0);
-						if let Some(pos) = pos {
-							open.remove(pos);
+								// Connections in `OpeningThenClosing` and `Opening` are in a Closed
+								// phase, and as such can emit `OpenDesiredByRemote` messages.
+								// Since an `Open` message has already been sent, there is nothing
+								// more to do.
+								debug_assert!(matches!(
+									connec_state,
+									ConnectionState::OpenDesiredByRemote | ConnectionState::Opening
+								));
+							}
 						} else {
-							debug_assert!(false);
 							error!(
 								target: "sub-libp2p",
-								"State mismatch with {}: unknown closed connection",
-								source
+								"OpenDesiredByRemote: State mismatch in the custom protos handler"
 							);
+							debug_assert!(false);
 						}
 
-						let last = open.is_empty();
-						let new_notifications_sink = open.iter().next().and_then(|(_, sink)|
-							if sink_closed {
-								Some(sink.clone())
-							} else {
-								None
-							});
+						*entry.into_mut() = PeerState::Enabled { connections };
+					},
 
-						*entry.into_mut() = PeerState::Disabled {
-							open,
-							banned_until
-						};
+					// Disabled => Disabled | Incoming
+					PeerState::Disabled { mut connections, backoff_until } => {
+						if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) {
+							if let ConnectionState::Closed = *connec_state {
+								*connec_state = ConnectionState::OpenDesiredByRemote;
 
-						(last, new_notifications_sink)
-					},
-					PeerState::DisabledPendingEnable {
-						mut open,
-						timer,
-						timer_deadline
-					} => {
-						let pos = open.iter().position(|(c, _)| c == &connection);
-						let sink_closed = pos == Some(0);
-						if let Some(pos) = pos {
-							open.remove(pos);
+								let incoming_id = self.next_incoming_index;
+								self.next_incoming_index.0 += 1;
+
+								debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).",
+									source, incoming_id);
+								self.peerset.incoming(source.clone(), incoming_id);
+								self.incoming.push(IncomingPeer {
+									peer_id: source.clone(),
+									alive: true,
+									incoming_id,
+								});
+
+								*entry.into_mut() = PeerState::Incoming { connections, backoff_until };
+
+							} else {
+								// Connections in `OpeningThenClosing` are in a Closed phase, and
+								// as such can emit `OpenDesiredByRemote` messages.
+								// We ignore them.
+								debug_assert!(matches!(
+									connec_state,
+									ConnectionState::OpeningThenClosing
+								));
+							}
 						} else {
-							debug_assert!(false);
 							error!(
 								target: "sub-libp2p",
-								"State mismatch with {}: unknown closed connection",
-								source
+								"OpenDesiredByRemote: State mismatch in the custom protos handler"
 							);
+							debug_assert!(false);
 						}
+					}
+
+					// DisabledPendingEnable => DisabledPendingEnable | Incoming
+					PeerState::DisabledPendingEnable { mut connections, timer, timer_deadline } => {
+						if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) {
+							if let ConnectionState::Closed = *connec_state {
+								*connec_state = ConnectionState::OpenDesiredByRemote;
+
+								let incoming_id = self.next_incoming_index;
+								self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) {
+									Some(v) => v,
+									None => {
+										error!(target: "sub-libp2p", "Overflow in next_incoming_index");
+										return
+									}
+								};
+
+								debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).",
+									source, incoming_id);
+								self.peerset.incoming(source.clone(), incoming_id);
+								self.incoming.push(IncomingPeer {
+									peer_id: source.clone(),
+									alive: true,
+									incoming_id,
+								});
+
+								*entry.into_mut() = PeerState::Incoming {
+									connections,
+									backoff_until: Some(timer_deadline),
+								};
 
-						let last = open.is_empty();
-						let new_notifications_sink = open.iter().next().and_then(|(_, sink)|
-							if sink_closed {
-								Some(sink.clone())
 							} else {
-								None
-							});
+								// Connections in `OpeningThenClosing` are in a Closed phase, and
+								// as such can emit `OpenDesiredByRemote` messages.
+								// We ignore them.
+								debug_assert!(matches!(
+									connec_state,
+									ConnectionState::OpeningThenClosing
+								));
+								*entry.into_mut() = PeerState::DisabledPendingEnable {
+									connections,
+									timer,
+									timer_deadline,
+								};
+							}
+						} else {
+							error!(
+								target: "sub-libp2p",
+								"OpenDesiredByRemote: State mismatch in the custom protos handler"
+							);
+							debug_assert!(false);
+						}
+					}
 
-						*entry.into_mut() = PeerState::DisabledPendingEnable {
-							open,
-							timer,
-							timer_deadline
+					state => {
+						error!(target: "sub-libp2p",
+							   "OpenDesiredByRemote: Unexpected state in the custom protos handler: {:?}",
+							   state);
+						debug_assert!(false);
+						return
+					}
+				};
+			}
+
+			NotifsHandlerOut::CloseDesired => {
+				debug!(target: "sub-libp2p",
+					"Handler({}, {:?}) => CloseDesired",
+					source, connection);
+
+				let mut entry = if let Entry::Occupied(entry) = self.peers.entry(source.clone()) {
+					entry
+				} else {
+					error!(target: "sub-libp2p", "CloseDesired: State mismatch in the custom protos handler");
+					debug_assert!(false);
+					return
+				};
+
+				match mem::replace(entry.get_mut(), PeerState::Poisoned) {
+					// Enabled => Enabled | Disabled
+					PeerState::Enabled { mut connections } => {
+						debug_assert!(connections.iter().any(|(_, s)|
+							matches!(s, ConnectionState::Opening | ConnectionState::Open(_))));
+
+						let pos = if let Some(pos) = connections.iter().position(|(c, _)| *c == connection) {
+							pos
+						} else {
+							error!(target: "sub-libp2p",
+								"CloseDesired: State mismatch in the custom protos handler");
+							debug_assert!(false);
+							return;
 						};
 
-						(last, new_notifications_sink)
+						if matches!(connections[pos].1, ConnectionState::Closing) {
+							return;
+						}
+
+						debug_assert!(matches!(connections[pos].1, ConnectionState::Open(_)));
+						connections[pos].1 = ConnectionState::Closing;
+
+						debug!(target: "sub-libp2p", "Handler({}, {:?}) <= Close", source, connection);
+						self.events.push_back(NetworkBehaviourAction::NotifyHandler {
+							peer_id: source.clone(),
+							handler: NotifyHandler::One(connection),
+							event: NotifsHandlerIn::Close,
+						});
+
+						if let Some((replacement_pos, replacement_sink)) = connections
+							.iter()
+							.enumerate()
+							.filter_map(|(num, (_, s))| {
+								match s {
+									ConnectionState::Open(s) => Some((num, s.clone())),
+									_ => None
+								}
+							})
+							.next()
+						{
+							if pos <= replacement_pos {
+								debug!(target: "sub-libp2p", "External API <= Sink replaced({:?})", source);
+								let event = GenericProtoOut::CustomProtocolReplaced {
+									peer_id: source,
+									notifications_sink: replacement_sink,
+								};
+								self.events.push_back(NetworkBehaviourAction::GenerateEvent(event));
+								*entry.into_mut() = PeerState::Enabled { connections, };
+							}
+
+						} else {
+							// List of open connections wasn't empty before but now it is.
+							if !connections.iter().any(|(_, s)| matches!(s, ConnectionState::Opening)) {
+								debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", source);
+								self.peerset.dropped(source.clone());
+								*entry.into_mut() = PeerState::Disabled {
+									connections, backoff_until: None
+								};
+							} else {
+								*entry.into_mut() = PeerState::Enabled { connections };
+							}
+
+							debug!(target: "sub-libp2p", "External API <= Closed({:?})", source);
+							let event = GenericProtoOut::CustomProtocolClosed {
+								peer_id: source,
+							};
+							self.events.push_back(NetworkBehaviourAction::GenerateEvent(event));
+						}
+					},
+
+					// All connections in `Disabled` and `DisabledPendingEnable` have been sent a
+					// `Close` message already, and as such ignore any `CloseDesired` message.
+					state @ PeerState::Disabled { .. } |
+					state @ PeerState::DisabledPendingEnable { .. } => {
+						*entry.into_mut() = state;
+						return;
 					},
 					state => {
 						error!(target: "sub-libp2p",
@@ -1251,103 +1729,227 @@ impl NetworkBehaviour for GenericProto {
 							state);
 						return
 					}
-				};
+				}
+			}
 
-				if last {
-					debug!(target: "sub-libp2p", "External API <= Closed({:?})", source);
-					let event = GenericProtoOut::CustomProtocolClosed {
-						reason,
-						peer_id: source,
-					};
-					self.events.push_back(NetworkBehaviourAction::GenerateEvent(event));
+			NotifsHandlerOut::CloseResult => {
+				debug!(target: "sub-libp2p",
+					"Handler({}, {:?}) => CloseResult",
+					source, connection);
+
+				match self.peers.get_mut(&source) {
+					// Move the connection from `Closing` to `Closed`.
+					Some(PeerState::DisabledPendingEnable { connections, .. }) |
+					Some(PeerState::Disabled { connections, .. }) |
+					Some(PeerState::Enabled { connections, .. }) => {
+						if let Some((_, connec_state)) = connections
+							.iter_mut()
+							.find(|(c, s)| *c == connection && matches!(s, ConnectionState::Closing))
+						{
+							*connec_state = ConnectionState::Closed;
+						} else {
+							error!(target: "sub-libp2p",
+								"CloseResult: State mismatch in the custom protos handler");
+							debug_assert!(false);
+						}
+					},
 
-				} else {
-					if let Some(new_notifications_sink) = new_notifications_sink {
-						let event = GenericProtoOut::CustomProtocolReplaced {
-							peer_id: source,
-							notifications_sink: new_notifications_sink,
-						};
-						self.events.push_back(NetworkBehaviourAction::GenerateEvent(event));
+					state => {
+						error!(target: "sub-libp2p",
+							   "CloseResult: Unexpected state in the custom protos handler: {:?}",
+							   state);
+						debug_assert!(false);
 					}
-					debug!(target: "sub-libp2p", "Secondary connection closed custom protocol.");
 				}
 			}
 
-			NotifsHandlerOut::Open { endpoint, received_handshake, notifications_sink } => {
+			NotifsHandlerOut::OpenResultOk { received_handshake, notifications_sink, .. } => {
 				debug!(target: "sub-libp2p",
-					"Handler({:?}) => Endpoint {:?} open for custom protocols.",
-					source, endpoint);
-
-				let first = match self.peers.get_mut(&source) {
-					Some(PeerState::Enabled { ref mut open, .. }) |
-					Some(PeerState::DisabledPendingEnable { ref mut open, .. }) |
-					Some(PeerState::Disabled { ref mut open, .. }) => {
-						let first = open.is_empty();
-						if !open.iter().any(|(c, _)| *c == connection) {
-							open.push((connection, notifications_sink.clone()));
+					"Handler({}, {:?}) => OpenResultOk",
+					source, connection);
+
+				match self.peers.get_mut(&source) {
+					Some(PeerState::Enabled { connections, .. }) => {
+						debug_assert!(connections.iter().any(|(_, s)|
+							matches!(s, ConnectionState::Opening | ConnectionState::Open(_))));
+						let any_open = connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_)));
+
+						if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)|
+							*c == connection && matches!(s, ConnectionState::Opening))
+						{
+							if !any_open {
+								debug!(target: "sub-libp2p", "External API <= Open({:?})", source);
+								let event = GenericProtoOut::CustomProtocolOpen {
+									peer_id: source,
+									received_handshake,
+									notifications_sink: notifications_sink.clone(),
+								};
+								self.events.push_back(NetworkBehaviourAction::GenerateEvent(event));
+							}
+							*connec_state = ConnectionState::Open(notifications_sink);
+						} else if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)|
+							*c == connection && matches!(s, ConnectionState::OpeningThenClosing))
+						{
+							*connec_state = ConnectionState::Closing;
 						} else {
-							error!(
-								target: "sub-libp2p",
-								"State mismatch: connection with {} opened a second time",
-								source
-							);
+							debug_assert!(false);
+							error!(target: "sub-libp2p",
+								"OpenResultOk State mismatch in the custom protos handler");
+						}
+					},
+
+					Some(PeerState::DisabledPendingEnable { connections, .. }) |
+					Some(PeerState::Disabled { connections, .. }) => {
+						if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)|
+							*c == connection && matches!(s, ConnectionState::OpeningThenClosing))
+						{
+							*connec_state = ConnectionState::Closing;
+						} else {
+							error!(target: "sub-libp2p",
+								"OpenResultOk State mismatch in the custom protos handler");
+							debug_assert!(false);
 						}
-						first
 					}
+
 					state => {
 						error!(target: "sub-libp2p",
-							   "Open: Unexpected state in the custom protos handler: {:?}",
+							   "OpenResultOk: Unexpected state in the custom protos handler: {:?}",
 							   state);
+						debug_assert!(false);
 						return
 					}
+				}
+			}
+
+			NotifsHandlerOut::OpenResultErr => {
+				debug!(target: "sub-libp2p",
+					"Handler({:?}, {:?}) => OpenResultErr",
+					source, connection);
+
+				let mut entry = if let Entry::Occupied(entry) = self.peers.entry(source.clone()) {
+					entry
+				} else {
+					error!(target: "sub-libp2p", "OpenResultErr: State mismatch in the custom protos handler");
+					debug_assert!(false);
+					return
+				};
+
+				match mem::replace(entry.get_mut(), PeerState::Poisoned) {
+					PeerState::Enabled { mut connections } => {
+						debug_assert!(connections.iter().any(|(_, s)|
+							matches!(s, ConnectionState::Opening | ConnectionState::Open(_))));
+
+						if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)|
+							*c == connection && matches!(s, ConnectionState::Opening))
+						{
+							*connec_state = ConnectionState::Closed;
+						} else if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)|
+							*c == connection && matches!(s, ConnectionState::OpeningThenClosing))
+						{
+							*connec_state = ConnectionState::Closing;
+						} else {
+							error!(target: "sub-libp2p",
+								"OpenResultErr: State mismatch in the custom protos handler");
+							debug_assert!(false);
+						}
+
+						if !connections.iter().any(|(_, s)|
+							matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))
+						{
+							debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", source);
+							self.peerset.dropped(source.clone());
+
+							*entry.into_mut() = PeerState::Disabled {
+								connections,
+								backoff_until: None
+							};
+						} else {
+							*entry.into_mut() = PeerState::Enabled { connections };
+						}
+					},
+					PeerState::Disabled { mut connections, backoff_until } => {
+						if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)|
+							*c == connection && matches!(s, ConnectionState::OpeningThenClosing))
+						{
+							*connec_state = ConnectionState::Closing;
+						} else {
+							error!(target: "sub-libp2p",
+								"OpenResultErr: State mismatch in the custom protos handler");
+							debug_assert!(false);
+						}
+
+						*entry.into_mut() = PeerState::Disabled { connections, backoff_until };
+					},
+					PeerState::DisabledPendingEnable { mut connections, timer, timer_deadline } => {
+						if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)|
+							*c == connection && matches!(s, ConnectionState::OpeningThenClosing))
+						{
+							*connec_state = ConnectionState::Closing;
+						} else {
+							error!(target: "sub-libp2p",
+								"OpenResultErr: State mismatch in the custom protos handler");
+							debug_assert!(false);
+						}
+
+						*entry.into_mut() = PeerState::DisabledPendingEnable {
+							connections,
+							timer,
+							timer_deadline,
+						};
+					},
+					state => {
+						error!(target: "sub-libp2p",
+							"Unexpected state in the custom protos handler: {:?}",
+							state);
+						debug_assert!(false);
+					}
 				};
+			}
 
-				if first {
-					debug!(target: "sub-libp2p", "External API <= Open({:?})", source);
-					let event = GenericProtoOut::CustomProtocolOpen {
+			NotifsHandlerOut::CustomMessage { message } => {
+				if self.is_open(&source) {
+					trace!(target: "sub-libp2p", "Handler({:?}) => Message", source);
+					trace!(target: "sub-libp2p", "External API <= Message({:?})", source);
+					let event = GenericProtoOut::LegacyMessage {
 						peer_id: source,
-						received_handshake,
-						notifications_sink
+						message,
 					};
-					self.events.push_back(NetworkBehaviourAction::GenerateEvent(event));
 
+					self.events.push_back(NetworkBehaviourAction::GenerateEvent(event));
 				} else {
-					debug!(
+					trace!(
 						target: "sub-libp2p",
-						"Handler({:?}) => Secondary connection opened custom protocol",
-						source
+						"Handler({:?}) => Post-close message. Dropping message.",
+						source,
 					);
 				}
 			}
 
-			NotifsHandlerOut::CustomMessage { message } => {
-				debug_assert!(self.is_open(&source));
-				trace!(target: "sub-libp2p", "Handler({:?}) => Message", source);
-				trace!(target: "sub-libp2p", "External API <= Message({:?})", source);
-				let event = GenericProtoOut::LegacyMessage {
-					peer_id: source,
-					message,
-				};
-
-				self.events.push_back(NetworkBehaviourAction::GenerateEvent(event));
-			}
-
 			NotifsHandlerOut::Notification { protocol_name, message } => {
-				debug_assert!(self.is_open(&source));
-				trace!(
-					target: "sub-libp2p",
-					"Handler({:?}) => Notification({:?})",
-					source,
-					protocol_name,
-				);
-				trace!(target: "sub-libp2p", "External API <= Message({:?}, {:?})", protocol_name, source);
-				let event = GenericProtoOut::Notification {
-					peer_id: source,
-					protocol_name,
-					message,
-				};
+				if self.is_open(&source) {
+					trace!(
+						target: "sub-libp2p",
+						"Handler({:?}) => Notification({:?})",
+						source,
+						protocol_name,
+					);
+					trace!(target: "sub-libp2p", "External API <= Message({:?}, {:?})", protocol_name, source);
+					let event = GenericProtoOut::Notification {
+						peer_id: source,
+						protocol_name,
+						message,
+					};
 
-				self.events.push_back(NetworkBehaviourAction::GenerateEvent(event));
+					self.events.push_back(NetworkBehaviourAction::GenerateEvent(event));
+				} else {
+					trace!(
+						target: "sub-libp2p",
+						"Handler({:?}) => Post-close notification({:?})",
+						source,
+						protocol_name,
+					);
+				}
 			}
 		}
 	}
@@ -1400,6 +2002,11 @@ impl NetworkBehaviour for GenericProto {
 			};
 
 			match peer_state {
+				PeerState::Backoff { timer, .. } if *timer == delay_id => {
+					debug!(target: "sub-libp2p", "Libp2p <= Clean up ban of {:?} from the state", peer_id);
+					self.peers.remove(&peer_id);
+				}
+
 				PeerState::PendingRequest { timer, .. } if *timer == delay_id => {
 					debug!(target: "sub-libp2p", "Libp2p <= Dial {:?} now that ban has expired", peer_id);
 					self.events.push_back(NetworkBehaviourAction::DialPeer {
@@ -1409,14 +2016,33 @@ impl NetworkBehaviour for GenericProto {
 					*peer_state = PeerState::Requested;
 				}
 
-				PeerState::DisabledPendingEnable { timer, open, .. } if *timer == delay_id => {
-					debug!(target: "sub-libp2p", "Handler({:?}) <= Enable (ban expired)", peer_id);
-					self.events.push_back(NetworkBehaviourAction::NotifyHandler {
-						peer_id,
-						handler: NotifyHandler::All,
-						event: NotifsHandlerIn::Enable,
-					});
-					*peer_state = PeerState::Enabled { open: mem::replace(open, Default::default()) };
+				PeerState::DisabledPendingEnable { connections, timer, timer_deadline }
+					if *timer == delay_id =>
+				{
+					// The first connection found in the `Closed` state is chosen to open the notifications substream.
+					if let Some((connec_id, connec_state)) = connections.iter_mut()
+						.find(|(_, s)| matches!(s, ConnectionState::Closed))
+					{
+						debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open (ban expired)",
+							peer_id, *connec_id);
+						self.events.push_back(NetworkBehaviourAction::NotifyHandler {
+							peer_id: peer_id.clone(),
+							handler: NotifyHandler::One(*connec_id),
+							event: NotifsHandlerIn::Open,
+						});
+						*connec_state = ConnectionState::Opening;
+						*peer_state = PeerState::Enabled {
+							connections: mem::replace(connections, Default::default()),
+						};
+					} else {
+						*timer_deadline = Instant::now() + Duration::from_secs(5);
+						let delay = futures_timer::Delay::new(Duration::from_secs(5));
+						let timer = *timer;
+						self.delays.push(async move {
+							delay.await;
+							(timer, peer_id)
+						}.boxed());
+					}
 				}
 
 				// We intentionally never remove elements from `delays`, and it may
diff --git a/substrate/client/network/src/protocol/generic_proto/handler.rs b/substrate/client/network/src/protocol/generic_proto/handler.rs
index 5845130a7db87d36a84a42db843f25455c7c5d21..0272261f67d57d5dc0fe64575df6883ad6c9deed 100644
--- a/substrate/client/network/src/protocol/generic_proto/handler.rs
+++ b/substrate/client/network/src/protocol/generic_proto/handler.rs
@@ -1,27 +1,1054 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
 // This file is part of Substrate.
 
-// Copyright (C) 2020 Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
-
-// This program is free software: you can redistribute it and/or modify
+// Substrate is free software: you can redistribute it and/or modify
 // it under the terms of the GNU General Public License as published by
 // the Free Software Foundation, either version 3 of the License, or
 // (at your option) any later version.
 
-// This program is distributed in the hope that it will be useful,
+// Substrate is distributed in the hope that it will be useful,
 // but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 // GNU General Public License for more details.
 
 // You should have received a copy of the GNU General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
+// along with Substrate.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for both incoming
+//! and outgoing substreams for all gossiping protocols together.
+//!
+//! This is the main implementation of `ProtocolsHandler` in this crate, that handles all the
+//! protocols that are Substrate-related and outside of the scope of libp2p.
+//!
+//! # Usage
+//!
+//! From an API perspective, the [`NotifsHandler`] is always in one of the following state (see [`State`]):
+//!
+//! - Closed substreams. This is the initial state.
+//! - Closed substreams, but remote desires them to be open.
+//! - Open substreams.
+//! - Open substreams, but remote desires them to be closed.
+//!
+//! The [`NotifsHandler`] can spontaneously switch between these states:
+//!
+//! - "Closed substreams" to "Closed substreams but open desired". When that happens, a
+//! [`NotifsHandlerOut::OpenDesiredByRemote`] is emitted.
+//! - "Closed substreams but open desired" to "Closed substreams" (i.e. the remote has cancelled
+//! their request). When that happens, a [`NotifsHandlerOut::CloseDesired`] is emitted.
+//! - "Open substreams" to "Open substreams but close desired". When that happens, a
+//! [`NotifsHandlerOut::CloseDesired`] is emitted.
+//!
+//! The user can instruct the `NotifsHandler` to switch from "closed" to "open" or vice-versa by
+//! sending either a [`NotifsHandlerIn::Open`] or a [`NotifsHandlerIn::Close`]. The `NotifsHandler`
+//! must answer with [`NotifsHandlerOut::OpenResultOk`] or [`NotifsHandlerOut::OpenResultErr`], or
+//! with [`NotifsHandlerOut::CloseResult`].
+//!
+//! When a [`NotifsHandlerOut::OpenResultOk`] is emitted, the `NotifsHandler` is now in the open
+//! state. When a [`NotifsHandlerOut::OpenResultErr`] or [`NotifsHandlerOut::CloseResult`] is
+//! emitted, the `NotifsHandler` is now (or remains) in the closed state.
+//!
+//! When a [`NotifsHandlerOut::OpenDesiredByRemote`] is emitted, the user should always send back either a
+//! [`NotifsHandlerIn::Open`] or a [`NotifsHandlerIn::Close`]. If this isn't done, the remote will
+//! be left in a pending state.
+//!
+//! It is illegal to send a [`NotifsHandlerIn::Open`] before a previously-emitted
+//! [`NotifsHandlerIn::Open`] has gotten an answer.
+
+use crate::protocol::generic_proto::{
+	upgrade::{
+		NotificationsIn, NotificationsOut, NotificationsInSubstream, NotificationsOutSubstream,
+		NotificationsHandshakeError, RegisteredProtocol, RegisteredProtocolSubstream,
+		RegisteredProtocolEvent, UpgradeCollec
+	},
+};
 
-pub use self::group::{
-	NotificationsSink, NotifsHandlerError, Ready, NotifsHandlerProto, NotifsHandler, NotifsHandlerIn, NotifsHandlerOut
+use bytes::BytesMut;
+use libp2p::core::{either::EitherOutput, ConnectedPoint, PeerId};
+use libp2p::core::upgrade::{SelectUpgrade, InboundUpgrade, OutboundUpgrade};
+use libp2p::swarm::{
+	ProtocolsHandler, ProtocolsHandlerEvent,
+	IntoProtocolsHandler,
+	KeepAlive,
+	ProtocolsHandlerUpgrErr,
+	SubstreamProtocol,
+	NegotiatedSubstream,
 };
-pub use self::legacy::ConnectionKillError as LegacyConnectionKillError;
+use futures::{
+	channel::mpsc,
+	lock::{Mutex as FuturesMutex, MutexGuard as FuturesMutexGuard},
+	prelude::*
+};
+use log::error;
+use parking_lot::{Mutex, RwLock};
+use smallvec::SmallVec;
+use std::{borrow::Cow, collections::VecDeque, mem, pin::Pin, str, sync::Arc, task::{Context, Poll}, time::Duration};
+use wasm_timer::Instant;
+
+/// Number of pending notifications in asynchronous contexts.
+/// See [`NotificationsSink::reserve_notification`] for context.
+const ASYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 8;
+
+/// Number of pending notifications in synchronous contexts.
+const SYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 2048;
+
+/// Maximum duration to open a substream and receive the handshake message. After that, we
+/// consider that we failed to open the substream.
+const OPEN_TIMEOUT: Duration = Duration::from_secs(10);
+
+/// After successfully establishing a connection with the remote, we keep the connection open for
+/// at least this amount of time in order to give the rest of the code the chance to notify us to
+/// open substreams.
+const INITIAL_KEEPALIVE_TIME: Duration = Duration::from_secs(5);
+
+/// Implements the `IntoProtocolsHandler` trait of libp2p.
+///
+/// Every time a connection with a remote starts, an instance of this struct is created and
+/// sent to a background task dedicated to this connection. Once the connection is established,
+/// it is turned into a [`NotifsHandler`].
+///
+/// See the documentation at the module level for more information.
+pub struct NotifsHandlerProto {
+	/// Prototypes for upgrades for inbound substreams, and the message we respond with in the
+	/// handshake.
+	in_protocols: Vec<(NotificationsIn, Arc<RwLock<Vec<u8>>>)>,
+
+	/// Name of protocols available for outbound substreams, and the initial handshake message we
+	/// send.
+	out_protocols: Vec<(Cow<'static, str>, Arc<RwLock<Vec<u8>>>)>,
+
+	/// Configuration for the legacy protocol upgrade.
+	legacy_protocol: RegisteredProtocol,
+}
+
+/// The actual handler once the connection has been established.
+///
+/// See the documentation at the module level for more information.
+pub struct NotifsHandler {
+	/// Prototypes for upgrades for inbound substreams, and the message we respond with in the
+	/// handshake.
+	in_protocols: Vec<(NotificationsIn, Arc<RwLock<Vec<u8>>>)>,
+
+	/// Name of protocols available for outbound substreams, and the initial handshake message we
+	/// send.
+	out_protocols: Vec<(Cow<'static, str>, Arc<RwLock<Vec<u8>>>)>,
+
+	/// When the connection with the remote has been successfully established.
+	when_connection_open: Instant,
+
+	/// Whether we are the connection dialer or listener.
+	endpoint: ConnectedPoint,
+
+	/// State of this handler.
+	state: State,
+
+	/// Configuration for the legacy protocol upgrade.
+	legacy_protocol: RegisteredProtocol,
+
+	/// The substreams where bidirectional communications happen.
+	legacy_substreams: SmallVec<[RegisteredProtocolSubstream<NegotiatedSubstream>; 4]>,
+
+	/// Contains substreams which are being shut down.
+	legacy_shutdown: SmallVec<[RegisteredProtocolSubstream<NegotiatedSubstream>; 4]>,
+
+	/// Events to return in priority from `poll`.
+	events_queue: VecDeque<
+		ProtocolsHandlerEvent<NotificationsOut, usize, NotifsHandlerOut, NotifsHandlerError>
+	>,
+}
+
+/// See the module-level documentation to learn about the meaning of these variants.
+enum State {
+	/// Handler is in the "Closed" state.
+	Closed {
+		/// Vec of the same length as [`NotifsHandler::out_protocols`]. For each protocol, contains
+		/// a boolean indicating whether an outgoing substream is still in the process of being
+		/// opened.
+		pending_opening: Vec<bool>,
+	},
+
+	/// Handler is in the "Closed" state. A [`NotifsHandlerOut::OpenDesiredByRemote`] has been emitted.
+	OpenDesiredByRemote {
+		/// Vec of the same length as [`NotifsHandler::in_protocols`]. For each protocol, contains
+		/// a substream opened by the remote and that hasn't been accepted/rejected yet.
+		///
+		/// Must always contain at least one `Some`.
+		in_substreams: Vec<Option<NotificationsInSubstream<NegotiatedSubstream>>>,
+
+		/// See [`State::Closed::pending_opening`].
+		pending_opening: Vec<bool>,
+	},
+
+	/// Handler is in the "Closed" state, but has received a [`NotifsHandlerIn::Open`] and is
+	/// consequently trying to open the various notifications substreams.
+	///
+	/// A [`NotifsHandlerOut::OpenResultOk`] or a [`NotifsHandlerOut::OpenResultErr`] event must
+	/// be emitted when transitioning to respectively [`State::Open`] or [`State::Closed`].
+	Opening {
+		/// In the situation where either the legacy substream has been opened or the
+		/// handshake-bearing notifications protocol is open, but we haven't sent out any
+		/// [`NotifsHandlerOut::Open`] event yet, this contains the received handshake waiting to
+		/// be reported through the external API.
+		pending_handshake: Option<Vec<u8>>,
+
+		/// Vec of the same length as [`NotifsHandler::in_protocols`]. For each protocol, contains
+		/// a substream opened by the remote and that has been accepted.
+		///
+		/// Contrary to [`State::OpenDesiredByRemote::in_substreams`], it is possible for this to
+		/// contain only `None`s.
+		in_substreams: Vec<Option<NotificationsInSubstream<NegotiatedSubstream>>>,
+
+		/// Vec of the same length as [`NotifsHandler::out_protocols`]. For each protocol, contains
+		/// an outbound substream that has been accepted by the remote.
+		///
+		/// Items that contain `None` mean that a substream is still being opened or has been
+		/// rejected by the remote. In other words, this `Vec` is kind of a mirror version of
+		/// [`State::Closed::pending_opening`].
+		///
+		/// Items that contain `Some(None)` have been rejected by the remote, most likely because
+		/// they don't support this protocol. At the time of writing, the external API doesn't
+		/// distinguish between the different protocols. From the external API's point of view,
+		/// either all protocols are open or none are open. In reality, light clients in particular
+		/// don't support for example the GrandPa protocol, and as such will refuse our outgoing
+		/// attempts. This is problematic in theory, but in practice this is handled properly at a
+		/// higher level. This flaw will be fixed once the outer layers know to differentiate the
+		/// multiple protocols.
+		out_substreams: Vec<Option<Option<NotificationsOutSubstream<NegotiatedSubstream>>>>,
+	},
+
+	/// Handler is in the "Open" state.
+	Open {
+		/// Contains the two `Receiver`s connected to the [`NotificationsSink`] that has been
+		/// sent out. The notifications to send out can be pulled from this receivers.
+		/// We use two different channels in order to have two different channel sizes, but from
+		/// the receiving point of view, the two channels are the same.
+		/// The receivers are fused in case the user drops the [`NotificationsSink`] entirely.
+		notifications_sink_rx: stream::Select<
+			stream::Fuse<mpsc::Receiver<NotificationsSinkMessage>>,
+			stream::Fuse<mpsc::Receiver<NotificationsSinkMessage>>
+		>,
+
+		/// Vec of the same length as [`NotifsHandler::out_protocols`]. For each protocol, contains
+		/// an outbound substream that has been accepted by the remote.
+		///
+		/// On transition to [`State::Open`], all the elements must be `Some`. Elements are
+		/// switched to `None` only if the remote closes substreams, in which case `want_closed`
+		/// must be true.
+		out_substreams: Vec<Option<NotificationsOutSubstream<NegotiatedSubstream>>>,
+
+		/// Vec of the same length as [`NotifsHandler::in_protocols`]. For each protocol, contains
+		/// a substream opened by the remote and that has been accepted.
+		///
+		/// Contrary to [`State::OpenDesiredByRemote::in_substreams`], it is possible for this to
+		/// contain only `None`s.
+		in_substreams: Vec<Option<NotificationsInSubstream<NegotiatedSubstream>>>,
+
+		/// If true, at least one substream in [`State::Open::out_substreams`] has been closed or
+		/// reset by the remote and a [`NotifsHandlerOut::CloseDesired`] message has been sent
+		/// out.
+		want_closed: bool,
+	},
+}
+
+impl IntoProtocolsHandler for NotifsHandlerProto {
+	type Handler = NotifsHandler;
+
+	fn inbound_protocol(&self) -> SelectUpgrade<UpgradeCollec<NotificationsIn>, RegisteredProtocol> {
+		// The inbound upgrade is the collection of all notifications protocols, with the legacy
+		// protocol offered alongside them as a fallback.
+		let in_protocols = self.in_protocols.iter()
+			.map(|(h, _)| h.clone())
+			.collect::<UpgradeCollec<_>>();
+
+		SelectUpgrade::new(in_protocols, self.legacy_protocol.clone())
+	}
+
+	fn into_handler(self, _: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler {
+		let num_out_proto = self.out_protocols.len();
+
+		// The handler always starts in the `Closed` state, with no outbound substream request
+		// in flight for any of the protocols.
+		NotifsHandler {
+			in_protocols: self.in_protocols,
+			out_protocols: self.out_protocols,
+			endpoint: connected_point.clone(),
+			when_connection_open: Instant::now(),
+			state: State::Closed {
+				pending_opening: (0..num_out_proto).map(|_| false).collect(),
+			},
+			legacy_protocol: self.legacy_protocol,
+			legacy_substreams: SmallVec::new(),
+			legacy_shutdown: SmallVec::new(),
+			events_queue: VecDeque::with_capacity(16),
+		}
+	}
+}
+
+/// Event that can be received by a `NotifsHandler`.
+#[derive(Debug, Clone)]
+pub enum NotifsHandlerIn {
+	/// Instruct the handler to open the notification substreams.
+	///
+	/// Must always be answered by a [`NotifsHandlerOut::OpenResultOk`] or a
+	/// [`NotifsHandlerOut::OpenResultErr`] event.
+	///
+	/// Importantly, it is forbidden to send a [`NotifsHandlerIn::Open`] while a previous one is
+	/// already in flight. It is however possible if a `Close` is still in flight.
+	Open,
+
+	/// Instruct the handler to close the notification substreams, or reject any pending incoming
+	/// substream request.
+	///
+	/// Must always be answered by a [`NotifsHandlerOut::CloseResult`] event.
+	Close,
+}
+
+/// Event that can be emitted by a `NotifsHandler`.
+#[derive(Debug)]
+pub enum NotifsHandlerOut {
+	/// Acknowledges a [`NotifsHandlerIn::Open`].
+	OpenResultOk {
+		/// The endpoint of the connection that is open for custom protocols.
+		endpoint: ConnectedPoint,
+		/// Handshake that was sent to us.
+		/// This is normally a "Status" message, but this out of the concern of this code.
+		received_handshake: Vec<u8>,
+		/// How notifications can be sent to this node.
+		notifications_sink: NotificationsSink,
+	},
+
+	/// Acknowledges a [`NotifsHandlerIn::Open`]. The remote has refused the attempt to open
+	/// notification substreams.
+	OpenResultErr,
+
+	/// Acknowledges a [`NotifsHandlerIn::Close`].
+	CloseResult,
+
+	/// The remote would like the substreams to be open. Send a [`NotifsHandlerIn::Open`] or a
+	/// [`NotifsHandlerIn::Close`] in order to either accept or deny this request. If a
+	/// [`NotifsHandlerIn::Open`] or [`NotifsHandlerIn::Close`] has been sent before and has not
+	/// yet been acknowledged by a matching [`NotifsHandlerOut`], then you don't need to send
+	/// another [`NotifsHandlerIn`].
+	OpenDesiredByRemote,
+
+	/// The remote would like the substreams to be closed. Send a [`NotifsHandlerIn::Close`] in
+	/// order to close them. If a [`NotifsHandlerIn::Close`] has been sent before and has not yet
+	/// been acknowledged by a [`NotifsHandlerOut::CloseResult`], then you don't need to send
+	/// another one.
+	CloseDesired,
+
+	/// Received a non-gossiping message on the legacy substream.
+	///
+	/// Can only happen when the handler is in the open state.
+	CustomMessage {
+		/// Message that has been received.
+		///
+		/// Keep in mind that this can be a `ConsensusMessage` message, which then contains a
+		/// notification.
+		message: BytesMut,
+	},
+
+	/// Received a message on a custom protocol substream.
+	///
+	/// Can only happen when the handler is in the open state.
+	Notification {
+		/// Name of the protocol of the message.
+		protocol_name: Cow<'static, str>,
+
+		/// Message that has been received.
+		message: BytesMut,
+	},
+}
+
+/// Sink connected directly to the node background task. Allows sending notifications to the peer.
+///
+/// Can be cloned in order to obtain multiple references to the same peer.
+#[derive(Debug, Clone)]
+pub struct NotificationsSink {
+	// Shared between all clones of the sink; cloning only bumps the reference count.
+	inner: Arc<NotificationsSinkInner>,
+}
+
+/// Shared state behind a [`NotificationsSink`]. Both channels deliver
+/// [`NotificationsSinkMessage`]s to the handler's `notifications_sink_rx`.
+#[derive(Debug)]
+struct NotificationsSinkInner {
+	/// Sender to use in asynchronous contexts. Uses an asynchronous mutex.
+	async_channel: FuturesMutex<mpsc::Sender<NotificationsSinkMessage>>,
+	/// Sender to use in synchronous contexts. Uses a synchronous mutex.
+	/// This channel has a large capacity and is meant to be used in contexts where
+	/// back-pressure cannot be properly exerted.
+	/// It will be removed in a future version.
+	sync_channel: Mutex<mpsc::Sender<NotificationsSinkMessage>>,
+}
+
+/// Message emitted through the [`NotificationsSink`] and processed by the background task
+/// dedicated to the peer.
+#[derive(Debug)]
+enum NotificationsSinkMessage {
+	/// Message emitted by [`NotificationsSink::reserve_notification`] and
+	/// [`NotificationsSink::write_notification_now`].
+	Notification {
+		// Protocol the notification belongs to; must match one of the registered protocols.
+		protocol_name: Cow<'static, str>,
+		// Raw payload of the notification.
+		message: Vec<u8>,
+	},
+
+	/// Must close the connection.
+	ForceClose,
+}
+
+impl NotificationsSink {
+	/// Sends a notification to the peer.
+	///
+	/// If too many messages are already buffered, the notification is silently discarded and the
+	/// connection to the peer will be closed shortly after.
+	///
+	/// The protocol name is expected to be checked ahead of calling this method. It is a logic
+	/// error to send a notification using an unknown protocol.
+	///
+	/// This method will be removed in a future version.
+	pub fn send_sync_notification<'a>(
+		&'a self,
+		protocol_name: Cow<'static, str>,
+		message: impl Into<Vec<u8>>
+	) {
+		let mut lock = self.inner.sync_channel.lock();
+		let result = lock.try_send(NotificationsSinkMessage::Notification {
+			protocol_name,
+			message: message.into()
+		});
+
+		if result.is_err() {
+			// Cloning the `mpsc::Sender` guarantees the allocation of an extra spot in the
+			// buffer, and therefore `try_send` will succeed.
+			let _result2 = lock.clone().try_send(NotificationsSinkMessage::ForceClose);
+			debug_assert!(_result2.map(|()| true).unwrap_or_else(|err| err.is_disconnected()));
+		}
+	}
+
+	/// Wait until the remote is ready to accept a notification.
+	///
+	/// Returns an error in the case where the connection is closed.
+	///
+	/// The protocol name is expected to be checked ahead of calling this method. It is a logic
+	/// error to send a notification using an unknown protocol.
+	///
+	/// The returned [`Ready`] keeps the asynchronous channel locked; other callers of
+	/// `reserve_notification` will wait until it is consumed or dropped.
+	pub async fn reserve_notification<'a>(&'a self, protocol_name: Cow<'static, str>) -> Result<Ready<'a>, ()> {
+		let mut lock = self.inner.async_channel.lock().await;
+
+		let poll_ready = future::poll_fn(|cx| lock.poll_ready(cx)).await;
+		if poll_ready.is_ok() {
+			Ok(Ready { protocol_name: protocol_name, lock })
+		} else {
+			Err(())
+		}
+	}
+}
+
+/// Notification slot is reserved and the notification can actually be sent.
+#[must_use]
+#[derive(Debug)]
+pub struct Ready<'a> {
+	/// Guarded channel. The channel inside is guaranteed to not be full.
+	lock: FuturesMutexGuard<'a, mpsc::Sender<NotificationsSinkMessage>>,
+	/// Name of the protocol. Should match one of the protocols passed at initialization.
+	protocol_name: Cow<'static, str>,
+}
+
+impl<'a> Ready<'a> {
+	/// Consumes this slots reservation and actually queues the notification.
+	///
+	/// Returns an error if the substream has been closed.
+	pub fn send(
+		mut self,
+		notification: impl Into<Vec<u8>>
+	) -> Result<(), ()> {
+		// `start_send` cannot fail with "full" here: the slot was reserved by `poll_ready`
+		// in `reserve_notification`, and the channel lock has been held since.
+		self.lock.start_send(NotificationsSinkMessage::Notification {
+			protocol_name: self.protocol_name,
+			message: notification.into(),
+		}).map_err(|_| ())
+	}
+}
+
+/// Error specific to the collection of protocols. Emitting this error closes the connection.
+#[derive(Debug, derive_more::Display, derive_more::Error)]
+pub enum NotifsHandlerError {
+	/// Channel of synchronous notifications is full.
+	SyncNotificationsClogged,
+}
+
+impl NotifsHandlerProto {
+	/// Builds a new handler.
+	///
+	/// `list` is a list of notification protocols names, and the message to send as part of the
+	/// handshake. At the moment, the message is always the same whether we open a substream
+	/// ourselves or respond to handshake from the remote.
+	///
+	/// The first protocol in `list` is special-cased as the protocol that contains the handshake
+	/// to report through the [`NotifsHandlerOut::OpenResultOk`] event.
+	///
+	/// # Panic
+	///
+	/// - Panics if `list` is empty.
+	///
+	pub fn new(
+		legacy_protocol: RegisteredProtocol,
+		list: impl Into<Vec<(Cow<'static, str>, Arc<RwLock<Vec<u8>>>)>>,
+	) -> Self {
+		let list = list.into();
+		assert!(!list.is_empty());
+
+		// `out_protocols` and `in_protocols` are parallel lists built from the same input;
+		// indices into one are valid indices into the other.
+		let out_protocols = list
+			.clone()
+			.into_iter()
+			.collect();
+
+		let in_protocols = list.clone()
+			.into_iter()
+			.map(|(proto_name, msg)| (NotificationsIn::new(proto_name), msg))
+			.collect();
+
+		NotifsHandlerProto {
+			in_protocols,
+			out_protocols,
+			legacy_protocol,
+		}
+	}
+}
+
+impl ProtocolsHandler for NotifsHandler {
+	type InEvent = NotifsHandlerIn;
+	type OutEvent = NotifsHandlerOut;
+	type Error = NotifsHandlerError;
+	type InboundProtocol = SelectUpgrade<UpgradeCollec<NotificationsIn>, RegisteredProtocol>;
+	type OutboundProtocol = NotificationsOut;
+	// Index within the `out_protocols`.
+	type OutboundOpenInfo = usize;
+	type InboundOpenInfo = ();
+
+	fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol, ()> {
+		// Same upgrade as `NotifsHandlerProto::inbound_protocol`: all notifications protocols,
+		// with the legacy protocol as a fallback.
+		let in_protocols = self.in_protocols.iter()
+			.map(|(h, _)| h.clone())
+			.collect::<UpgradeCollec<_>>();
+
+		let proto = SelectUpgrade::new(in_protocols, self.legacy_protocol.clone());
+		SubstreamProtocol::new(proto, ())
+	}
+
+	fn inject_fully_negotiated_inbound(
+		&mut self,
+		out: <Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output,
+		(): ()
+	) {
+		match out {
+			// Received notifications substream.
+			EitherOutput::First(((_remote_handshake, mut proto), num)) => {
+				match &mut self.state {
+					State::Closed { pending_opening } => {
+						// First inbound substream while closed: report the remote's desire to
+						// open and switch to `OpenDesiredByRemote`.
+						self.events_queue.push_back(ProtocolsHandlerEvent::Custom(
+							NotifsHandlerOut::OpenDesiredByRemote
+						));
+
+						let mut in_substreams = (0..self.in_protocols.len())
+							.map(|_| None)
+							.collect::<Vec<_>>();
+						in_substreams[num] = Some(proto);
+						self.state = State::OpenDesiredByRemote {
+							in_substreams,
+							pending_opening: mem::replace(pending_opening, Vec::new()),
+						};
+					},
+					State::OpenDesiredByRemote { in_substreams, .. } => {
+						if in_substreams[num].is_some() {
+							// If a substream already exists, silently drop the new one.
+							// Note that we drop the substream, which will send an equivalent to a
+							// TCP "RST" to the remote and force-close the substream. It might
+							// seem like an unclean way to get rid of a substream. However, keep
+							// in mind that it is invalid for the remote to open multiple such
+							// substreams, and therefore sending a "RST" is the most correct thing
+							// to do.
+							return;
+						}
+						in_substreams[num] = Some(proto);
+					},
+					State::Opening { in_substreams, .. } |
+					State::Open { in_substreams, .. } => {
+						if in_substreams[num].is_some() {
+							// Same remark as above.
+							return;
+						}
+
+						// We create `handshake_message` on a separate line to be sure
+						// that the lock is released as soon as possible.
+						let handshake_message = self.in_protocols[num].1.read().clone();
+						proto.send_handshake(handshake_message);
+						in_substreams[num] = Some(proto);
+					},
+				};
+			}
+
+			// Received legacy substream.
+			EitherOutput::Second((substream, _handshake)) => {
+				// Note: while we acknowledge legacy substreams and handle incoming messages,
+				// it doesn't trigger any `OpenDesiredByRemote` event as a way to simplify the
+				// logic of this code.
+				// Since mid-2019, legacy substreams are supposed to be used at the same time as
+				// notifications substreams, and not in isolation. Nodes that open legacy
+				// substreams in isolation are considered deprecated.
+				// Cap the number of stored legacy substreams to avoid unbounded memory usage;
+				// excess substreams are dropped (force-closed).
+				if self.legacy_substreams.len() <= 4 {
+					self.legacy_substreams.push(substream);
+				}
+			},
+		}
+	}
+
+	fn inject_fully_negotiated_outbound(
+		&mut self,
+		(handshake, substream): <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Output,
+		num: Self::OutboundOpenInfo
+	) {
+		match &mut self.state {
+			State::Closed { pending_opening } |
+			State::OpenDesiredByRemote { pending_opening, .. } => {
+				// We no longer want this substream open; only clear the pending flag and let
+				// `substream` be dropped at the end of the function.
+				debug_assert!(pending_opening[num]);
+				pending_opening[num] = false;
+			}
+			State::Open { .. } => {
+				error!(target: "sub-libp2p", "☎️ State mismatch in notifications handler");
+				debug_assert!(false);
+			}
+			State::Opening { pending_handshake, in_substreams, out_substreams } => {
+				debug_assert!(out_substreams[num].is_none());
+				out_substreams[num] = Some(Some(substream));
+
+				// Protocol 0 is the one whose handshake is reported through `OpenResultOk`.
+				if num == 0 {
+					debug_assert!(pending_handshake.is_none());
+					*pending_handshake = Some(handshake);
+				}
+
+				// Once every protocol has an outcome (successful or not), transition to `Open`.
+				if !out_substreams.iter().any(|s| s.is_none()) {
+					let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE);
+					let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE);
+					let notifications_sink = NotificationsSink {
+						inner: Arc::new(NotificationsSinkInner {
+							async_channel: FuturesMutex::new(async_tx),
+							sync_channel: Mutex::new(sync_tx),
+						}),
+					};
+
+					debug_assert!(pending_handshake.is_some());
+					let pending_handshake = pending_handshake.take().unwrap_or_default();
+
+					let out_substreams = out_substreams
+						.drain(..)
+						.map(|s| s.expect("checked by the if above; qed"))
+						.collect();
+
+					self.state = State::Open {
+						notifications_sink_rx: stream::select(async_rx.fuse(), sync_rx.fuse()),
+						out_substreams,
+						in_substreams: mem::replace(in_substreams, Vec::new()),
+						want_closed: false,
+					};
+
+					self.events_queue.push_back(ProtocolsHandlerEvent::Custom(
+						NotifsHandlerOut::OpenResultOk {
+							endpoint: self.endpoint.clone(),
+							received_handshake: pending_handshake,
+							notifications_sink
+						}
+					));
+				}
+			}
+		}
+	}
+
+	fn inject_event(&mut self, message: NotifsHandlerIn) {
+		match message {
+			NotifsHandlerIn::Open => {
+				match &mut self.state {
+					State::Closed { .. } | State::OpenDesiredByRemote { .. } => {
+						let (pending_opening, mut in_substreams) = match &mut self.state {
+							State::Closed { pending_opening } => (pending_opening, None),
+							State::OpenDesiredByRemote { pending_opening, in_substreams } =>
+								(pending_opening, Some(mem::replace(in_substreams, Vec::new()))),
+							_ => unreachable!()
+						};
+
+						// Request an outbound substream for every protocol that doesn't already
+						// have an opening attempt in flight.
+						for (n, is_pending) in pending_opening.iter().enumerate() {
+							if *is_pending {
+								continue;
+							}
+
+							let proto = NotificationsOut::new(
+								self.out_protocols[n].0.clone(),
+								self.out_protocols[n].1.read().clone()
+							);
+
+							self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest {
+								protocol: SubstreamProtocol::new(proto, n)
+									.with_timeout(OPEN_TIMEOUT),
+							});
+						}
+
+						// Accept the substreams the remote already opened by answering their
+						// handshakes.
+						if let Some(in_substreams) = in_substreams.as_mut() {
+							for (num, substream) in in_substreams.iter_mut().enumerate() {
+								let substream = match substream.as_mut() {
+									Some(s) => s,
+									None => continue,
+								};
+
+								let handshake_message = self.in_protocols[num].1.read().clone();
+								substream.send_handshake(handshake_message);
+							}
+						}
+
+						self.state = State::Opening {
+							pending_handshake: None,
+							in_substreams: if let Some(in_substreams) = in_substreams {
+								in_substreams
+							} else {
+								(0..self.in_protocols.len()).map(|_| None).collect()
+							},
+							out_substreams: (0..self.out_protocols.len()).map(|_| None).collect(),
+						};
+					},
+					State::Opening { .. } |
+					State::Open { .. } => {
+						// As documented, it is forbidden to send an `Open` while there is already
+						// one in the fly.
+						error!(target: "sub-libp2p", "opening already-opened handler");
+						debug_assert!(false);
+					},
+				}
+			},
+
+			NotifsHandlerIn::Close => {
+				// Initiate an orderly shutdown of every legacy substream; the actual shutdown
+				// is driven by `shutdown_list` in `poll`.
+				for mut substream in self.legacy_substreams.drain() {
+					substream.shutdown();
+					self.legacy_shutdown.push(substream);
+				}
+
+				match &mut self.state {
+					State::Open { .. } => {
+						self.state = State::Closed {
+							pending_opening: Vec::new(),
+						};
+					},
+					State::Opening { out_substreams, .. } => {
+						// Substreams still being negotiated (`None`) remain "pending" and are
+						// dropped when they finish negotiating.
+						let pending_opening = out_substreams.iter().map(|s| s.is_none()).collect();
+						self.state = State::Closed {
+							pending_opening,
+						};
+
+						self.events_queue.push_back(ProtocolsHandlerEvent::Custom(
+							NotifsHandlerOut::OpenResultErr
+						));
+					},
+					State::OpenDesiredByRemote { pending_opening, .. } => {
+						self.state = State::Closed {
+							pending_opening: mem::replace(pending_opening, Vec::new()),
+						};
+					}
+					State::Closed { .. } => {},
+				}
+
+				self.events_queue.push_back(
+					ProtocolsHandlerEvent::Custom(NotifsHandlerOut::CloseResult)
+				);
+			},
+		}
+	}
+
+	fn inject_dial_upgrade_error(
+		&mut self,
+		num: usize,
+		_: ProtocolsHandlerUpgrErr<NotificationsHandshakeError>
+	) {
+		match &mut self.state {
+			State::Closed { pending_opening } | State::OpenDesiredByRemote { pending_opening, .. } => {
+				debug_assert!(pending_opening[num]);
+				pending_opening[num] = false;
+			}
+
+			State::Opening { in_substreams, pending_handshake, out_substreams } => {
+				// Failing to open a substream isn't considered a failure. Instead, it is marked
+				// as `Some(None)` and the opening continues.
+
+				out_substreams[num] = Some(None);
+
+				// Some substreams are still being opened. Nothing more to do.
+				if out_substreams.iter().any(|s| s.is_none()) {
+					return;
+				}
+
+				// All substreams have finished being open.
+				// If the handshake has been received, proceed and report the opening.
+
+				if let Some(pending_handshake) = pending_handshake.take() {
+					// Open!
+					let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE);
+					let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE);
+					let notifications_sink = NotificationsSink {
+						inner: Arc::new(NotificationsSinkInner {
+							async_channel: FuturesMutex::new(async_tx),
+							sync_channel: Mutex::new(sync_tx),
+						}),
+					};
+
+					let out_substreams = out_substreams
+						.drain(..)
+						.map(|s| s.expect("checked by the if above; qed"))
+						.collect();
+
+					self.state = State::Open {
+						notifications_sink_rx: stream::select(async_rx.fuse(), sync_rx.fuse()),
+						out_substreams,
+						in_substreams: mem::replace(in_substreams, Vec::new()),
+						want_closed: false,
+					};
+
+					self.events_queue.push_back(ProtocolsHandlerEvent::Custom(
+						NotifsHandlerOut::OpenResultOk {
+							endpoint: self.endpoint.clone(),
+							received_handshake: pending_handshake,
+							notifications_sink
+						}
+					));
+
+				} else {
+					// Open failure!
+					self.state = State::Closed {
+						pending_opening: (0..self.out_protocols.len()).map(|_| false).collect(),
+					};
+
+					self.events_queue.push_back(ProtocolsHandlerEvent::Custom(
+						NotifsHandlerOut::OpenResultErr
+					));
+				}
+			}
+
+			// No substream is being open when already `Open`.
+			State::Open { .. } => debug_assert!(false),
+		}
+	}
+
+	fn connection_keep_alive(&self) -> KeepAlive {
+		if !self.legacy_substreams.is_empty() {
+			return KeepAlive::Yes;
+		}
+
+		match self.state {
+			State::Closed { .. } => KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME),
+			State::OpenDesiredByRemote { .. } | State::Opening { .. } | State::Open { .. } =>
+				KeepAlive::Yes,
+		}
+	}
+
+	fn poll(
+		&mut self,
+		cx: &mut Context,
+	) -> Poll<
+		ProtocolsHandlerEvent<Self::OutboundProtocol, Self::OutboundOpenInfo, Self::OutEvent, Self::Error>
+	> {
+		// Flush events queued by the `inject_*` methods first.
+		if let Some(ev) = self.events_queue.pop_front() {
+			return Poll::Ready(ev);
+		}
+
+		// Poll inbound substreams.
+		// Inbound substreams being closed is always tolerated, except for the
+		// `OpenDesiredByRemote` state which might need to be switched back to `Closed`.
+		match &mut self.state {
+			State::Closed { .. } => {}
+			State::Open { in_substreams, .. } => {
+				for (num, substream) in in_substreams.iter_mut().enumerate() {
+					match substream.as_mut().map(|s| Stream::poll_next(Pin::new(s), cx)) {
+						None | Some(Poll::Pending) => continue,
+						Some(Poll::Ready(Some(Ok(message)))) => {
+							let event = NotifsHandlerOut::Notification {
+								message,
+								protocol_name: self.in_protocols[num].0.protocol_name().clone(),
+							};
+							return Poll::Ready(ProtocolsHandlerEvent::Custom(event))
+						},
+						Some(Poll::Ready(None)) | Some(Poll::Ready(Some(Err(_)))) =>
+							*substream = None,
+					}
+				}
+			}
+
+			State::OpenDesiredByRemote { in_substreams, .. } |
+			State::Opening { in_substreams, .. } => {
+				for substream in in_substreams {
+					match substream.as_mut().map(|s| NotificationsInSubstream::poll_process(Pin::new(s), cx)) {
+						None | Some(Poll::Pending) => continue,
+						Some(Poll::Ready(Ok(void))) => match void {},
+						Some(Poll::Ready(Err(_))) => *substream = None,
+					}
+				}
+			}
+		}
+
+		// Since the previous block might have closed inbound substreams, make sure that we can
+		// stay in `OpenDesiredByRemote` state.
+		if let State::OpenDesiredByRemote { in_substreams, pending_opening } = &mut self.state {
+			if !in_substreams.iter().any(|s| s.is_some()) {
+				self.state = State::Closed {
+					pending_opening: mem::replace(pending_opening, Vec::new()),
+				};
+				return Poll::Ready(ProtocolsHandlerEvent::Custom(
+					NotifsHandlerOut::CloseDesired
+				))
+			}
+		}
+
+		// Poll outbound substreams.
+		match &mut self.state {
+			State::Open { out_substreams, want_closed, .. } => {
+				let mut any_closed = false;
+
+				for substream in out_substreams.iter_mut() {
+					match substream.as_mut().map(|s| Sink::poll_flush(Pin::new(s), cx)) {
+						None | Some(Poll::Pending) | Some(Poll::Ready(Ok(()))) => continue,
+						Some(Poll::Ready(Err(_))) => {}
+					};
+
+					// Reached if the substream has been closed.
+					*substream = None;
+					any_closed = true;
+				}
+
+				// `CloseDesired` is emitted at most once; `want_closed` tracks whether it has
+				// already been sent.
+				if any_closed {
+					if !*want_closed {
+						*want_closed = true;
+						return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsHandlerOut::CloseDesired));
+					}
+				}
+			}
+
+			State::Opening { out_substreams, pending_handshake, .. } => {
+				debug_assert!(out_substreams.iter().any(|s| s.is_none()));
+
+				for (num, substream) in out_substreams.iter_mut().enumerate() {
+					match substream {
+						None | Some(None) => continue,
+						Some(Some(substream)) => match Sink::poll_flush(Pin::new(substream), cx) {
+							Poll::Pending | Poll::Ready(Ok(())) => continue,
+							Poll::Ready(Err(_)) => {}
+						}
+					}
+
+					// Reached if the substream has been closed.
+					*substream = Some(None);
+					if num == 0 {
+						// Cancel the handshake.
+						*pending_handshake = None;
+					}
+				}
+			}
+
+			State::Closed { .. } |
+			State::OpenDesiredByRemote { .. } => {}
+		}
+
+		if let State::Open { notifications_sink_rx, out_substreams, .. } = &mut self.state {
+			'poll_notifs_sink: loop {
+				// Before we poll the notifications sink receiver, check that all the notification
+				// channels are ready to send a message.
+				// TODO: it is planned that in the future we switch to one `NotificationsSink` per
+				// protocol, in which case each sink should wait only for its corresponding handler
+				// to be ready, and not all handlers
+				// see https://github.com/paritytech/substrate/issues/5670
+				for substream in out_substreams.iter_mut() {
+					match substream.as_mut().map(|s| s.poll_ready_unpin(cx)) {
+						None | Some(Poll::Ready(_)) => {},
+						Some(Poll::Pending) => break 'poll_notifs_sink
+					}
+				}
+
+				// Now that all substreams are ready for a message, grab what to send.
+				let message = match notifications_sink_rx.poll_next_unpin(cx) {
+					Poll::Ready(Some(msg)) => msg,
+					Poll::Ready(None) | Poll::Pending => break,
+				};
+
+				match message {
+					NotificationsSinkMessage::Notification {
+						protocol_name,
+						message
+					} => {
+						if let Some(pos) = self.out_protocols.iter().position(|(n, _)| *n == protocol_name) {
+							if let Some(substream) = out_substreams[pos].as_mut() {
+								let _ = substream.start_send_unpin(message);
+								continue 'poll_notifs_sink;
+							}
+
+						} else {
+							log::warn!(
+								target: "sub-libp2p",
+								"Tried to send a notification on non-registered protocol: {:?}",
+								protocol_name
+							);
+						}
+					}
+					NotificationsSinkMessage::ForceClose => {
+						return Poll::Ready(
+							ProtocolsHandlerEvent::Close(NotifsHandlerError::SyncNotificationsClogged)
+						);
+					}
+				}
+			}
+		}
+
+		// The legacy substreams are polled only if the state is `Open`. Otherwise, it would be
+		// possible to receive notifications that would need to get silently discarded.
+		if matches!(self.state, State::Open { .. }) {
+			for n in (0..self.legacy_substreams.len()).rev() {
+				let mut substream = self.legacy_substreams.swap_remove(n);
+				let poll_outcome = Pin::new(&mut substream).poll_next(cx);
+				match poll_outcome {
+					Poll::Pending => self.legacy_substreams.push(substream),
+					Poll::Ready(Some(Ok(RegisteredProtocolEvent::Message(message)))) => {
+						self.legacy_substreams.push(substream);
+						return Poll::Ready(ProtocolsHandlerEvent::Custom(
+							NotifsHandlerOut::CustomMessage { message }
+						))
+					},
+					Poll::Ready(Some(Ok(RegisteredProtocolEvent::Clogged))) => {
+						return Poll::Ready(ProtocolsHandlerEvent::Close(
+							NotifsHandlerError::SyncNotificationsClogged
+						))
+					}
+					Poll::Ready(None) | Poll::Ready(Some(Err(_))) => {
+						// `Ready(None)` means an orderly end-of-stream: keep the substream
+						// around to drive its shutdown; an error just drops it.
+						if matches!(poll_outcome, Poll::Ready(None)) {
+							self.legacy_shutdown.push(substream);
+						}
+
+						if let State::Open { want_closed, .. } = &mut self.state {
+							if !*want_closed {
+								*want_closed = true;
+								return Poll::Ready(ProtocolsHandlerEvent::Custom(
+									NotifsHandlerOut::CloseDesired
+								))
+							}
+						}
+					}
+				}
+			}
+		}
+
+		shutdown_list(&mut self.legacy_shutdown, cx);
+
+		Poll::Pending
+	}
+}
 
-mod group;
-mod legacy;
-mod notif_in;
-mod notif_out;
+/// Given a list of substreams, tries to shut them down. The substreams that have been successfully
+/// shut down are removed from the list.
+fn shutdown_list
+	(list: &mut SmallVec<impl smallvec::Array<Item = RegisteredProtocolSubstream<NegotiatedSubstream>>>,
+	cx: &mut Context)
+{
+	// Iterate in reverse so that `swap_remove` never disturbs an index we have yet to visit.
+	'outer: for n in (0..list.len()).rev() {
+		let mut substream = list.swap_remove(n);
+		loop {
+			match substream.poll_next_unpin(cx) {
+				// Drain and discard any remaining incoming item until the substream settles.
+				Poll::Ready(Some(Ok(_))) => {}
+				Poll::Pending => break,
+				// Finished or errored: the substream is done and stays removed from the list.
+				Poll::Ready(Some(Err(_))) | Poll::Ready(None) => continue 'outer,
+			}
+		}
+		// Still pending: put it back so it is polled again on the next call.
+		list.push(substream);
+	}
+}
diff --git a/substrate/client/network/src/protocol/generic_proto/handler/group.rs b/substrate/client/network/src/protocol/generic_proto/handler/group.rs
deleted file mode 100644
index fbfdb1cb6ab0e7115ef95ee567e0b98fd039d969..0000000000000000000000000000000000000000
--- a/substrate/client/network/src/protocol/generic_proto/handler/group.rs
+++ /dev/null
@@ -1,737 +0,0 @@
-// Copyright 2019-2020 Parity Technologies (UK) Ltd.
-// This file is part of Substrate.
-
-// Substrate is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Substrate is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Substrate.  If not, see <http://www.gnu.org/licenses/>.
-
-//! Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for both incoming
-//! and outgoing substreams for all gossiping protocols together.
-//!
-//! This is the main implementation of `ProtocolsHandler` in this crate, that handles all the
-//! protocols that are Substrate-related and outside of the scope of libp2p.
-//!
-//! # Usage
-//!
-//! The handler can be in one of the following states: `Initial`, `Enabled`, `Disabled`.
-//!
-//! The `Initial` state is the state that the handler initially is in. It is a temporary state
-//! during which the user must either enable or disable the handler. After that, the handler stays
-//! either enabled or disabled.
-//!
-//! On the wire, we try to open the following substreams:
-//!
-//! - One substream for each notification protocol passed as parameter to the
-//!   `NotifsHandlerProto::new` function.
-//! - One "legacy" substream used for anything non-related to gossiping, and used as a fallback
-//!   in case the notification protocol can't be opened.
-//!
-//! When the handler is in the `Enabled` state, we immediately open and try to maintain all the
-//! aforementioned substreams. When the handler is in the `Disabled` state, we immediately close
-//! (or abort opening) all these substreams. It is intended that in the future we allow states in
-//! which some protocols are open and not others. Symmetrically, we allow incoming
-//! Substrate-related substreams if and only if we are in the `Enabled` state.
-//!
-//! The user has the choice between sending a message with `SendNotification`, to send a
-//! notification, and `SendLegacy`, to send any other kind of message.
-//!
-
-use crate::protocol::generic_proto::{
-	handler::legacy::{LegacyProtoHandler, LegacyProtoHandlerProto, LegacyProtoHandlerIn, LegacyProtoHandlerOut},
-	handler::notif_in::{NotifsInHandlerProto, NotifsInHandler, NotifsInHandlerIn, NotifsInHandlerOut},
-	handler::notif_out::{NotifsOutHandlerProto, NotifsOutHandler, NotifsOutHandlerIn, NotifsOutHandlerOut},
-	upgrade::{NotificationsIn, NotificationsOut, NotificationsHandshakeError, RegisteredProtocol, UpgradeCollec},
-};
-
-use bytes::BytesMut;
-use libp2p::core::{either::EitherOutput, ConnectedPoint, PeerId};
-use libp2p::core::upgrade::{UpgradeError, SelectUpgrade, InboundUpgrade, OutboundUpgrade};
-use libp2p::swarm::{
-	ProtocolsHandler, ProtocolsHandlerEvent,
-	IntoProtocolsHandler,
-	KeepAlive,
-	ProtocolsHandlerUpgrErr,
-	SubstreamProtocol,
-	NegotiatedSubstream,
-};
-use futures::{
-	channel::mpsc,
-	lock::{Mutex as FuturesMutex, MutexGuard as FuturesMutexGuard},
-	prelude::*
-};
-use log::{debug, error};
-use parking_lot::{Mutex, RwLock};
-use std::{borrow::Cow, str, sync::Arc, task::{Context, Poll}};
-
-/// Number of pending notifications in asynchronous contexts.
-/// See [`NotificationsSink::reserve_notification`] for context.
-const ASYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 8;
-/// Number of pending notifications in synchronous contexts.
-const SYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 2048;
-
-/// Implements the `IntoProtocolsHandler` trait of libp2p.
-///
-/// Every time a connection with a remote starts, an instance of this struct is created and
-/// sent to a background task dedicated to this connection. Once the connection is established,
-/// it is turned into a [`NotifsHandler`].
-///
-/// See the documentation at the module level for more information.
-pub struct NotifsHandlerProto {
-	/// Prototypes for handlers for inbound substreams, and the message we respond with in the
-	/// handshake.
-	in_handlers: Vec<(NotifsInHandlerProto, Arc<RwLock<Vec<u8>>>)>,
-
-	/// Prototypes for handlers for outbound substreams, and the initial handshake message we send.
-	out_handlers: Vec<(NotifsOutHandlerProto, Arc<RwLock<Vec<u8>>>)>,
-
-	/// Prototype for handler for backwards-compatibility.
-	legacy: LegacyProtoHandlerProto,
-}
-
-/// The actual handler once the connection has been established.
-///
-/// See the documentation at the module level for more information.
-pub struct NotifsHandler {
-	/// Handlers for inbound substreams, and the message we respond with in the handshake.
-	in_handlers: Vec<(NotifsInHandler, Arc<RwLock<Vec<u8>>>)>,
-
-	/// Handlers for outbound substreams, and the initial handshake message we send.
-	out_handlers: Vec<(NotifsOutHandler, Arc<RwLock<Vec<u8>>>)>,
-
-	/// Whether we are the connection dialer or listener.
-	endpoint: ConnectedPoint,
-
-	/// Handler for backwards-compatibility.
-	legacy: LegacyProtoHandler,
-
-	/// In the situation where either the legacy substream has been opened or the handshake-bearing
-	/// notifications protocol is open, but we haven't sent out any [`NotifsHandlerOut::Open`]
-	/// event yet, this contains the received handshake waiting to be reported through the
-	/// external API.
-	pending_handshake: Option<Vec<u8>>,
-
-	/// State of this handler.
-	enabled: EnabledState,
-
-	/// If we receive inbound substream requests while in initialization mode,
-	/// we push the corresponding index here and process them when the handler
-	/// gets enabled/disabled.
-	pending_in: Vec<usize>,
-
-	/// If `Some`, contains the two `Receiver`s connected to the [`NotificationsSink`] that has
-	/// been sent out. The notifications to send out can be pulled from this receivers.
-	/// We use two different channels in order to have two different channel sizes, but from the
-	/// receiving point of view, the two channels are the same.
-	/// The receivers are fused in case the user drops the [`NotificationsSink`] entirely.
-	///
-	/// Contains `Some` if and only if it has been reported to the user that the substreams are
-	/// open.
-	notifications_sink_rx: Option<
-		stream::Select<
-			stream::Fuse<mpsc::Receiver<NotificationsSinkMessage>>,
-			stream::Fuse<mpsc::Receiver<NotificationsSinkMessage>>
-		>
-	>,
-}
-
-#[derive(Debug, Clone, PartialEq, Eq)]
-enum EnabledState {
-	Initial,
-	Enabled,
-	Disabled,
-}
-
-impl IntoProtocolsHandler for NotifsHandlerProto {
-	type Handler = NotifsHandler;
-
-	fn inbound_protocol(&self) -> SelectUpgrade<UpgradeCollec<NotificationsIn>, RegisteredProtocol> {
-		let in_handlers = self.in_handlers.iter()
-			.map(|(h, _)| h.inbound_protocol())
-			.collect::<UpgradeCollec<_>>();
-
-		SelectUpgrade::new(in_handlers, self.legacy.inbound_protocol())
-	}
-
-	fn into_handler(self, remote_peer_id: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler {
-		NotifsHandler {
-			in_handlers: self.in_handlers
-				.into_iter()
-				.map(|(proto, msg)| (proto.into_handler(remote_peer_id, connected_point), msg))
-				.collect(),
-			out_handlers: self.out_handlers
-				.into_iter()
-				.map(|(proto, msg)| (proto.into_handler(remote_peer_id, connected_point), msg))
-				.collect(),
-			endpoint: connected_point.clone(),
-			legacy: self.legacy.into_handler(remote_peer_id, connected_point),
-			pending_handshake: None,
-			enabled: EnabledState::Initial,
-			pending_in: Vec::new(),
-			notifications_sink_rx: None,
-		}
-	}
-}
-
-/// Event that can be received by a `NotifsHandler`.
-#[derive(Debug, Clone)]
-pub enum NotifsHandlerIn {
-	/// The node should start using custom protocols.
-	Enable,
-
-	/// The node should stop using custom protocols.
-	Disable,
-}
-
-/// Event that can be emitted by a `NotifsHandler`.
-#[derive(Debug)]
-pub enum NotifsHandlerOut {
-	/// The connection is open for custom protocols.
-	Open {
-		/// The endpoint of the connection that is open for custom protocols.
-		endpoint: ConnectedPoint,
-		/// Handshake that was sent to us.
-		/// This is normally a "Status" message, but this out of the concern of this code.
-		received_handshake: Vec<u8>,
-		/// How notifications can be sent to this node.
-		notifications_sink: NotificationsSink,
-	},
-
-	/// The connection is closed for custom protocols.
-	Closed {
-		/// The reason for closing, for diagnostic purposes.
-		reason: Cow<'static, str>,
-		/// The endpoint of the connection that closed for custom protocols.
-		endpoint: ConnectedPoint,
-	},
-
-	/// Received a non-gossiping message on the legacy substream.
-	CustomMessage {
-		/// Message that has been received.
-		///
-		/// Keep in mind that this can be a `ConsensusMessage` message, which then contains a
-		/// notification.
-		message: BytesMut,
-	},
-
-	/// Received a message on a custom protocol substream.
-	Notification {
-		/// Name of the protocol of the message.
-		protocol_name: Cow<'static, str>,
-
-		/// Message that has been received.
-		message: BytesMut,
-	},
-}
-
-/// Sink connected directly to the node background task. Allows sending notifications to the peer.
-///
-/// Can be cloned in order to obtain multiple references to the same peer.
-#[derive(Debug, Clone)]
-pub struct NotificationsSink {
-	inner: Arc<NotificationsSinkInner>,
-}
-
-#[derive(Debug)]
-struct NotificationsSinkInner {
-	/// Sender to use in asynchronous contexts. Uses an asynchronous mutex.
-	async_channel: FuturesMutex<mpsc::Sender<NotificationsSinkMessage>>,
-	/// Sender to use in synchronous contexts. Uses a synchronous mutex.
-	/// This channel has a large capacity and is meant to be used in contexts where
-	/// back-pressure cannot be properly exerted.
-	/// It will be removed in a future version.
-	sync_channel: Mutex<mpsc::Sender<NotificationsSinkMessage>>,
-}
-
-/// Message emitted through the [`NotificationsSink`] and processed by the background task
-/// dedicated to the peer.
-#[derive(Debug)]
-enum NotificationsSinkMessage {
-	/// Message emitted by [`NotificationsSink::reserve_notification`] and
-	/// [`NotificationsSink::write_notification_now`].
-	Notification {
-		protocol_name: Cow<'static, str>,
-		message: Vec<u8>,
-	},
-
-	/// Must close the connection.
-	ForceClose,
-}
-
-impl NotificationsSink {
-	/// Sends a notification to the peer.
-	///
-	/// If too many messages are already buffered, the notification is silently discarded and the
-	/// connection to the peer will be closed shortly after.
-	///
-	/// The protocol name is expected to be checked ahead of calling this method. It is a logic
-	/// error to send a notification using an unknown protocol.
-	///
-	/// This method will be removed in a future version.
-	pub fn send_sync_notification<'a>(
-		&'a self,
-		protocol_name: Cow<'static, str>,
-		message: impl Into<Vec<u8>>
-	) {
-		let mut lock = self.inner.sync_channel.lock();
-		let result = lock.try_send(NotificationsSinkMessage::Notification {
-			protocol_name,
-			message: message.into()
-		});
-
-		if result.is_err() {
-			// Cloning the `mpsc::Sender` guarantees the allocation of an extra spot in the
-			// buffer, and therefore that `try_send` will succeed.
-			let _result2 = lock.clone().try_send(NotificationsSinkMessage::ForceClose);
-			debug_assert!(_result2.map(|()| true).unwrap_or_else(|err| err.is_disconnected()));
-		}
-	}
-
-	/// Wait until the remote is ready to accept a notification.
-	///
-	/// Returns an error in the case where the connection is closed.
-	///
-	/// The protocol name is expected to be checked ahead of calling this method. It is a logic
-	/// error to send a notification using an unknown protocol.
-	pub async fn reserve_notification<'a>(&'a self, protocol_name: Cow<'static, str>) -> Result<Ready<'a>, ()> {
-		let mut lock = self.inner.async_channel.lock().await;
-
-		let poll_ready = future::poll_fn(|cx| lock.poll_ready(cx)).await;
-		if poll_ready.is_ok() {
-			Ok(Ready { protocol_name: protocol_name, lock })
-		} else {
-			Err(())
-		}
-	}
-}
-
-/// Notification slot is reserved and the notification can actually be sent.
-#[must_use]
-#[derive(Debug)]
-pub struct Ready<'a> {
-	/// Guarded channel. The channel inside is guaranteed to not be full.
-	lock: FuturesMutexGuard<'a, mpsc::Sender<NotificationsSinkMessage>>,
-	/// Name of the protocol. Should match one of the protocols passed at initialization.
-	protocol_name: Cow<'static, str>,
-}
-
-impl<'a> Ready<'a> {
-	/// Consumes this slots reservation and actually queues the notification.
-	///
-	/// Returns an error if the substream has been closed.
-	pub fn send(
-		mut self,
-		notification: impl Into<Vec<u8>>
-	) -> Result<(), ()> {
-		self.lock.start_send(NotificationsSinkMessage::Notification {
-			protocol_name: self.protocol_name,
-			message: notification.into(),
-		}).map_err(|_| ())
-	}
-}
-
-/// Error specific to the collection of protocols.
-#[derive(Debug, derive_more::Display, derive_more::Error)]
-pub enum NotifsHandlerError {
-	/// Channel of synchronous notifications is full.
-	SyncNotificationsClogged,
-	/// Error in legacy protocol.
-	Legacy(<LegacyProtoHandler as ProtocolsHandler>::Error),
-}
-
-impl NotifsHandlerProto {
-	/// Builds a new handler.
-	///
-	/// `list` is a list of notification protocols names, and the message to send as part of the
-	/// handshake. At the moment, the message is always the same whether we open a substream
-	/// ourselves or respond to handshake from the remote.
-	///
-	/// The first protocol in `list` is special-cased as the protocol that contains the handshake
-	/// to report through the [`NotifsHandlerOut::Open`] event.
-	///
-	/// # Panic
-	///
-	/// - Panics if `list` is empty.
-	///
-	pub fn new(
-		legacy: RegisteredProtocol,
-		list: impl Into<Vec<(Cow<'static, str>, Arc<RwLock<Vec<u8>>>)>>,
-	) -> Self {
-		let list = list.into();
-		assert!(!list.is_empty());
-
-		let out_handlers = list
-			.clone()
-			.into_iter()
-			.map(|(proto_name, initial_message)| {
-				(NotifsOutHandlerProto::new(proto_name), initial_message)
-			}).collect();
-
-		let in_handlers = list.clone()
-			.into_iter()
-			.map(|(proto_name, msg)| (NotifsInHandlerProto::new(proto_name), msg))
-			.collect();
-
-		NotifsHandlerProto {
-			in_handlers,
-			out_handlers,
-			legacy: LegacyProtoHandlerProto::new(legacy),
-		}
-	}
-}
-
-impl ProtocolsHandler for NotifsHandler {
-	type InEvent = NotifsHandlerIn;
-	type OutEvent = NotifsHandlerOut;
-	type Error = NotifsHandlerError;
-	type InboundProtocol = SelectUpgrade<UpgradeCollec<NotificationsIn>, RegisteredProtocol>;
-	type OutboundProtocol = NotificationsOut;
-	// Index within the `out_handlers`
-	type OutboundOpenInfo = usize;
-	type InboundOpenInfo = ();
-
-	fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol, ()> {
-		let in_handlers = self.in_handlers.iter()
-			.map(|(h, _)| h.listen_protocol().into_upgrade().1)
-			.collect::<UpgradeCollec<_>>();
-
-		let proto = SelectUpgrade::new(in_handlers, self.legacy.listen_protocol().into_upgrade().1);
-		SubstreamProtocol::new(proto, ())
-	}
-
-	fn inject_fully_negotiated_inbound(
-		&mut self,
-		out: <Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output,
-		(): ()
-	) {
-		match out {
-			EitherOutput::First((out, num)) =>
-				self.in_handlers[num].0.inject_fully_negotiated_inbound(out, ()),
-			EitherOutput::Second(out) =>
-				self.legacy.inject_fully_negotiated_inbound(out, ()),
-		}
-	}
-
-	fn inject_fully_negotiated_outbound(
-		&mut self,
-		out: <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Output,
-		num: Self::OutboundOpenInfo
-	) {
-		self.out_handlers[num].0.inject_fully_negotiated_outbound(out, ())
-	}
-
-	fn inject_event(&mut self, message: NotifsHandlerIn) {
-		match message {
-			NotifsHandlerIn::Enable => {
-				if let EnabledState::Enabled = self.enabled {
-					debug!("enabling already-enabled handler");
-				}
-				self.enabled = EnabledState::Enabled;
-				self.legacy.inject_event(LegacyProtoHandlerIn::Enable);
-				for (handler, initial_message) in &mut self.out_handlers {
-					// We create `initial_message` on a separate line to be sure that the lock
-					// is released as soon as possible.
-					let initial_message = initial_message.read().clone();
-					handler.inject_event(NotifsOutHandlerIn::Enable {
-						initial_message,
-					});
-				}
-				for num in self.pending_in.drain(..) {
-					// We create `handshake_message` on a separate line to be sure
-					// that the lock is released as soon as possible.
-					let handshake_message = self.in_handlers[num].1.read().clone();
-					self.in_handlers[num].0
-						.inject_event(NotifsInHandlerIn::Accept(handshake_message));
-				}
-			},
-			NotifsHandlerIn::Disable => {
-				if let EnabledState::Disabled = self.enabled {
-					debug!("disabling already-disabled handler");
-				}
-				self.legacy.inject_event(LegacyProtoHandlerIn::Disable);
-				// The notifications protocols start in the disabled state. If we were in the
-				// "Initial" state, then we shouldn't disable the notifications protocols again.
-				if self.enabled != EnabledState::Initial {
-					for (handler, _) in &mut self.out_handlers {
-						handler.inject_event(NotifsOutHandlerIn::Disable);
-					}
-				}
-				self.enabled = EnabledState::Disabled;
-				for num in self.pending_in.drain(..) {
-					self.in_handlers[num].0.inject_event(NotifsInHandlerIn::Refuse);
-				}
-			},
-		}
-	}
-
-	fn inject_dial_upgrade_error(
-		&mut self,
-		num: usize,
-		err: ProtocolsHandlerUpgrErr<NotificationsHandshakeError>
-	) {
-		match err {
-			ProtocolsHandlerUpgrErr::Timeout =>
-				self.out_handlers[num].0.inject_dial_upgrade_error(
-					(),
-					ProtocolsHandlerUpgrErr::Timeout
-				),
-			ProtocolsHandlerUpgrErr::Timer =>
-				self.out_handlers[num].0.inject_dial_upgrade_error(
-					(),
-					ProtocolsHandlerUpgrErr::Timer
-				),
-			ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) =>
-				self.out_handlers[num].0.inject_dial_upgrade_error(
-					(),
-					ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err))
-				),
-			ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)) =>
-				self.out_handlers[num].0.inject_dial_upgrade_error(
-					(),
-					ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err))
-				),
-		}
-	}
-
-	fn connection_keep_alive(&self) -> KeepAlive {
-		// Iterate over each handler and return the maximum value.
-
-		let mut ret = self.legacy.connection_keep_alive();
-		if ret.is_yes() {
-			return KeepAlive::Yes;
-		}
-
-		for (handler, _) in &self.in_handlers {
-			let val = handler.connection_keep_alive();
-			if val.is_yes() {
-				return KeepAlive::Yes;
-			}
-			if ret < val { ret = val; }
-		}
-
-		for (handler, _) in &self.out_handlers {
-			let val = handler.connection_keep_alive();
-			if val.is_yes() {
-				return KeepAlive::Yes;
-			}
-			if ret < val { ret = val; }
-		}
-
-		ret
-	}
-
-	fn poll(
-		&mut self,
-		cx: &mut Context,
-	) -> Poll<
-		ProtocolsHandlerEvent<Self::OutboundProtocol, Self::OutboundOpenInfo, Self::OutEvent, Self::Error>
-	> {
-		if let Some(notifications_sink_rx) = &mut self.notifications_sink_rx {
-			'poll_notifs_sink: loop {
-				// Before we poll the notifications sink receiver, check that all the notification
-				// channels are ready to send a message.
-				// TODO: it is planned that in the future we switch to one `NotificationsSink` per
-				// protocol, in which case each sink should wait only for its corresponding handler
-				// to be ready, and not all handlers
-				// see https://github.com/paritytech/substrate/issues/5670
-				for (out_handler, _) in &mut self.out_handlers {
-					match out_handler.poll_ready(cx) {
-						Poll::Ready(_) => {},
-						Poll::Pending => break 'poll_notifs_sink,
-					}
-				}
-
-				let message = match notifications_sink_rx.poll_next_unpin(cx) {
-					Poll::Ready(Some(msg)) => msg,
-					Poll::Ready(None) | Poll::Pending => break,
-				};
-
-				match message {
-					NotificationsSinkMessage::Notification {
-						protocol_name,
-						message
-					} => {
-						let mut found_any_with_name = false;
-
-						for (handler, _) in &mut self.out_handlers {
-							if *handler.protocol_name() == protocol_name {
-								found_any_with_name = true;
-								if handler.is_open() {
-									handler.send_or_discard(message);
-									continue 'poll_notifs_sink;
-								}
-							}
-						}
-
-						// This code can be reached via the following scenarios:
-						//
-						// - User tried to send a notification on a non-existing protocol. This
-						// most likely relates to https://github.com/paritytech/substrate/issues/6827
-						// - User tried to send a notification to a peer we're not or no longer
-						// connected to. This happens in a normal scenario due to the racy nature
-						// of connections and disconnections, and is benign.
-						//
-						// We print a warning in the former condition.
-						if !found_any_with_name {
-							log::warn!(
-								target: "sub-libp2p",
-								"Tried to send a notification on non-registered protocol: {:?}",
-								protocol_name
-							);
-						}
-					}
-					NotificationsSinkMessage::ForceClose => {
-						return Poll::Ready(ProtocolsHandlerEvent::Close(NotifsHandlerError::SyncNotificationsClogged));
-					}
-				}
-			}
-		}
-
-		// If `self.pending_handshake` is `Some`, we are in a state where the handshake-bearing
-		// substream (either the legacy substream or the one special-cased as providing the
-		// handshake) is open but the user isn't aware yet of the substreams being open.
-		// When that is the case, neither the legacy substream nor the incoming notifications
-		// substreams should be polled, otherwise there is a risk of receiving messages from them.
-		if self.pending_handshake.is_none() {
-			while let Poll::Ready(ev) = self.legacy.poll(cx) {
-				match ev {
-					ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, .. } =>
-						match *protocol.info() {},
-					ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolOpen {
-						received_handshake,
-						..
-					}) => {
-						if self.notifications_sink_rx.is_none() {
-							debug_assert!(self.pending_handshake.is_none());
-							self.pending_handshake = Some(received_handshake);
-						}
-						cx.waker().wake_by_ref();
-						return Poll::Pending;
-					},
-					ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolClosed { reason, .. }) => {
-						// We consciously drop the receivers despite notifications being potentially
-						// still buffered up.
-						self.notifications_sink_rx = None;
-
-						return Poll::Ready(ProtocolsHandlerEvent::Custom(
-							NotifsHandlerOut::Closed { endpoint: self.endpoint.clone(), reason }
-						))
-					},
-					ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomMessage { message }) => {
-						return Poll::Ready(ProtocolsHandlerEvent::Custom(
-							NotifsHandlerOut::CustomMessage { message }
-						))
-					},
-					ProtocolsHandlerEvent::Close(err) =>
-						return Poll::Ready(ProtocolsHandlerEvent::Close(NotifsHandlerError::Legacy(err))),
-				}
-			}
-		}
-
-		for (handler_num, (handler, handshake_message)) in self.in_handlers.iter_mut().enumerate() {
-			loop {
-				let poll = if self.notifications_sink_rx.is_some() {
-					handler.poll(cx)
-				} else {
-					handler.poll_process(cx)
-				};
-
-				let ev = match poll {
-					Poll::Ready(e) => e,
-					Poll::Pending => break,
-				};
-
-				match ev {
-					ProtocolsHandlerEvent::OutboundSubstreamRequest { .. } =>
-						error!("Incoming substream handler tried to open a substream"),
-					ProtocolsHandlerEvent::Close(err) => void::unreachable(err),
-					ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::OpenRequest(_)) =>
-						match self.enabled {
-							EnabledState::Initial => self.pending_in.push(handler_num),
-							EnabledState::Enabled => {
-								// We create `handshake_message` on a separate line to be sure
-								// that the lock is released as soon as possible.
-								let handshake_message = handshake_message.read().clone();
-								handler.inject_event(NotifsInHandlerIn::Accept(handshake_message))
-							},
-							EnabledState::Disabled =>
-								handler.inject_event(NotifsInHandlerIn::Refuse),
-						},
-					ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed) => {},
-					ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(message)) => {
-						debug_assert!(self.pending_handshake.is_none());
-						if self.notifications_sink_rx.is_some() {
-							let msg = NotifsHandlerOut::Notification {
-								message,
-								protocol_name: handler.protocol_name().clone(),
-							};
-							return Poll::Ready(ProtocolsHandlerEvent::Custom(msg));
-						}
-					},
-				}
-			}
-		}
-
-		for (handler_num, (handler, _)) in self.out_handlers.iter_mut().enumerate() {
-			while let Poll::Ready(ev) = handler.poll(cx) {
-				match ev {
-					ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol } =>
-						return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest {
-							protocol: protocol
-								.map_info(|()| handler_num),
-						}),
-					ProtocolsHandlerEvent::Close(err) => void::unreachable(err),
-
-					// Opened substream on the handshake-bearing notification protocol.
-					ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { handshake })
-						if handler_num == 0 =>
-					{
-						if self.notifications_sink_rx.is_none() && self.pending_handshake.is_none() {
-							self.pending_handshake = Some(handshake);
-						}
-					},
-
-					// Nothing to do in response to other notification substreams being opened
-					// or closed.
-					ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { .. }) => {},
-					ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed) => {},
-					ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Refused) => {},
-				}
-			}
-		}
-
-		if self.out_handlers.iter().all(|(h, _)| h.is_open() || h.is_refused()) {
-			if let Some(handshake) = self.pending_handshake.take() {
-				let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE);
-				let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE);
-				let notifications_sink = NotificationsSink {
-					inner: Arc::new(NotificationsSinkInner {
-						async_channel: FuturesMutex::new(async_tx),
-						sync_channel: Mutex::new(sync_tx),
-					}),
-				};
-
-				debug_assert!(self.notifications_sink_rx.is_none());
-				self.notifications_sink_rx = Some(stream::select(async_rx.fuse(), sync_rx.fuse()));
-
-				return Poll::Ready(ProtocolsHandlerEvent::Custom(
-					NotifsHandlerOut::Open {
-						endpoint: self.endpoint.clone(),
-						received_handshake: handshake,
-						notifications_sink
-					}
-				))
-			}
-		}
-
-		Poll::Pending
-	}
-}
diff --git a/substrate/client/network/src/protocol/generic_proto/handler/legacy.rs b/substrate/client/network/src/protocol/generic_proto/handler/legacy.rs
deleted file mode 100644
index 404093553785c1369b71a88fe424c5cca4722232..0000000000000000000000000000000000000000
--- a/substrate/client/network/src/protocol/generic_proto/handler/legacy.rs
+++ /dev/null
@@ -1,559 +0,0 @@
-// Copyright 2019-2020 Parity Technologies (UK) Ltd.
-// This file is part of Substrate.
-
-// Substrate is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Substrate is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Substrate.  If not, see <http://www.gnu.org/licenses/>.
-
-use crate::protocol::generic_proto::upgrade::{RegisteredProtocol, RegisteredProtocolEvent, RegisteredProtocolSubstream};
-use bytes::BytesMut;
-use futures::prelude::*;
-use futures_timer::Delay;
-use libp2p::core::{ConnectedPoint, PeerId, Endpoint};
-use libp2p::core::upgrade::{InboundUpgrade, OutboundUpgrade};
-use libp2p::swarm::{
-	ProtocolsHandler, ProtocolsHandlerEvent,
-	IntoProtocolsHandler,
-	KeepAlive,
-	ProtocolsHandlerUpgrErr,
-	SubstreamProtocol,
-	NegotiatedSubstream,
-};
-use log::{debug, error};
-use smallvec::{smallvec, SmallVec};
-use std::{borrow::Cow, collections::VecDeque, convert::Infallible, error, fmt, io, mem};
-use std::{pin::Pin, task::{Context, Poll}, time::Duration};
-
-/// Implements the `IntoProtocolsHandler` trait of libp2p.
-///
-/// Every time a connection with a remote starts, an instance of this struct is created and
-/// sent to a background task dedicated to this connection. Once the connection is established,
-/// it is turned into a `LegacyProtoHandler`. It then handles all communications that are specific
-/// to Substrate on that single connection.
-///
-/// Note that there can be multiple instance of this struct simultaneously for same peer,
-/// if there are multiple established connections to the peer.
-///
-/// ## State of the handler
-///
-/// There are six possible states for the handler:
-///
-/// - Enabled and open, which is a normal operation.
-/// - Enabled and closed, in which case it will try to open substreams.
-/// - Disabled and open, in which case it will try to close substreams.
-/// - Disabled and closed, in which case the handler is idle. The connection will be
-///   garbage-collected after a few seconds if nothing more happens.
-/// - Initializing and open.
-/// - Initializing and closed, which is the state the handler starts in.
-///
-/// The Init/Enabled/Disabled state is entirely controlled by the user by sending `Enable` or
-/// `Disable` messages to the handler. The handler itself never transitions automatically between
-/// these states. For example, if the handler reports a network misbehaviour, it will close the
-/// substreams but it is the role of the user to send a `Disabled` event if it wants the connection
-/// to close. Otherwise, the handler will try to reopen substreams.
-///
-/// The handler starts in the "Initializing" state and must be transitionned to Enabled or Disabled
-/// as soon as possible.
-///
-/// The Open/Closed state is decided by the handler and is reported with the `CustomProtocolOpen`
-/// and `CustomProtocolClosed` events. The `CustomMessage` event can only be generated if the
-/// handler is open.
-///
-/// ## How it works
-///
-/// When the handler is created, it is initially in the `Init` state and waits for either a
-/// `Disable` or an `Enable` message from the outer layer. At any time, the outer layer is free to
-/// toggle the handler between the disabled and enabled states.
-///
-/// When the handler switches to "enabled", it opens a substream and negotiates the protocol named
-/// `/substrate/xxx`, where `xxx` is chosen by the user and depends on the chain.
-///
-/// For backwards compatibility reasons, when we switch to "enabled" for the first time (while we
-/// are still in "init" mode) and we are the connection listener, we don't open a substream.
-///
-/// In order the handle the situation where both the remote and us get enabled at the same time,
-/// we tolerate multiple substreams open at the same time. Messages are transmitted on an arbitrary
-/// substream. The endpoints don't try to agree on a single substream.
-///
-/// We consider that we are now "closed" if the remote closes all the existing substreams.
-/// Re-opening it can then be performed by closing all active substream and re-opening one.
-///
-pub struct LegacyProtoHandlerProto {
-	/// Configuration for the protocol upgrade to negotiate.
-	protocol: RegisteredProtocol,
-}
-
-impl LegacyProtoHandlerProto {
-	/// Builds a new `LegacyProtoHandlerProto`.
-	pub fn new(protocol: RegisteredProtocol) -> Self {
-		LegacyProtoHandlerProto {
-			protocol,
-		}
-	}
-}
-
-impl IntoProtocolsHandler for LegacyProtoHandlerProto {
-	type Handler = LegacyProtoHandler;
-
-	fn inbound_protocol(&self) -> RegisteredProtocol {
-		self.protocol.clone()
-	}
-
-	fn into_handler(self, remote_peer_id: &PeerId, _: &ConnectedPoint) -> Self::Handler {
-		LegacyProtoHandler {
-			protocol: self.protocol,
-			remote_peer_id: remote_peer_id.clone(),
-			state: ProtocolState::Init {
-				substreams: SmallVec::new(),
-				init_deadline: Delay::new(Duration::from_secs(20))
-			},
-			events_queue: VecDeque::new(),
-		}
-	}
-}
-
-/// The actual handler once the connection has been established.
-pub struct LegacyProtoHandler {
-	/// Configuration for the protocol upgrade to negotiate.
-	protocol: RegisteredProtocol,
-
-	/// State of the communications with the remote.
-	state: ProtocolState,
-
-	/// Identifier of the node we're talking to. Used only for logging purposes and shouldn't have
-	/// any influence on the behaviour.
-	remote_peer_id: PeerId,
-
-	/// Queue of events to send to the outside.
-	///
-	/// This queue must only ever be modified to insert elements at the back, or remove the first
-	/// element.
-	events_queue: VecDeque<
-		ProtocolsHandlerEvent<RegisteredProtocol, Infallible, LegacyProtoHandlerOut, ConnectionKillError>
-	>,
-}
-
-/// State of the handler.
-enum ProtocolState {
-	/// Waiting for the behaviour to tell the handler whether it is enabled or disabled.
-	Init {
-		/// List of substreams opened by the remote but that haven't been processed yet.
-		/// For each substream, also includes the handshake message that we have received.
-		substreams: SmallVec<[(RegisteredProtocolSubstream<NegotiatedSubstream>, Vec<u8>); 6]>,
-		/// Deadline after which the initialization is abnormally long.
-		init_deadline: Delay,
-	},
-
-	/// Handler is ready to accept incoming substreams.
-	/// If we are in this state, we haven't sent any `CustomProtocolOpen` yet.
-	Opening,
-
-	/// Normal operating mode. Contains the substreams that are open.
-	/// If we are in this state, we have sent a `CustomProtocolOpen` message to the outside.
-	Normal {
-		/// The substreams where bidirectional communications happen.
-		substreams: SmallVec<[RegisteredProtocolSubstream<NegotiatedSubstream>; 4]>,
-		/// Contains substreams which are being shut down.
-		shutdown: SmallVec<[RegisteredProtocolSubstream<NegotiatedSubstream>; 4]>,
-	},
-
-	/// We are disabled. Contains substreams that are being closed.
-	/// If we are in this state, either we have sent a `CustomProtocolClosed` message to the
-	/// outside or we have never sent any `CustomProtocolOpen` in the first place.
-	Disabled {
-		/// List of substreams to shut down.
-		shutdown: SmallVec<[RegisteredProtocolSubstream<NegotiatedSubstream>; 6]>,
-
-		/// If true, we should reactivate the handler after all the substreams in `shutdown` have
-		/// been closed.
-		///
-		/// Since we don't want to mix old and new substreams, we wait for all old substreams to
-		/// be closed before opening any new one.
-		reenable: bool,
-	},
-
-	/// In this state, we don't care about anything anymore and need to kill the connection as soon
-	/// as possible.
-	KillAsap,
-
-	/// We sometimes temporarily switch to this state during processing. If we are in this state
-	/// at the beginning of a method, that means something bad happened in the source code.
-	Poisoned,
-}
-
-/// Event that can be received by a `LegacyProtoHandler`.
-#[derive(Debug)]
-pub enum LegacyProtoHandlerIn {
-	/// The node should start using custom protocols.
-	Enable,
-
-	/// The node should stop using custom protocols.
-	Disable,
-}
-
-/// Event that can be emitted by a `LegacyProtoHandler`.
-#[derive(Debug)]
-pub enum LegacyProtoHandlerOut {
-	/// Opened a custom protocol with the remote.
-	CustomProtocolOpen {
-		/// Version of the protocol that has been opened.
-		version: u8,
-		/// Handshake message that has been sent to us.
-		/// This is normally a "Status" message, but this out of the concern of this code.
-		received_handshake: Vec<u8>,
-	},
-
-	/// Closed a custom protocol with the remote.
-	CustomProtocolClosed {
-		/// Reason why the substream closed, for diagnostic purposes.
-		reason: Cow<'static, str>,
-	},
-
-	/// Receives a message on a custom protocol substream.
-	CustomMessage {
-		/// Message that has been received.
-		message: BytesMut,
-	},
-}
-
-impl LegacyProtoHandler {
-	/// Enables the handler.
-	fn enable(&mut self) {
-		self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) {
-			ProtocolState::Poisoned => {
-				error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state",
-					self.remote_peer_id);
-				ProtocolState::Poisoned
-			}
-
-			ProtocolState::Init { substreams: mut incoming, .. } => {
-				if incoming.is_empty() {
-					ProtocolState::Opening
-				} else {
-					let event = LegacyProtoHandlerOut::CustomProtocolOpen {
-						version: incoming[0].0.protocol_version(),
-						received_handshake: mem::replace(&mut incoming[0].1, Vec::new()),
-					};
-					self.events_queue.push_back(ProtocolsHandlerEvent::Custom(event));
-					ProtocolState::Normal {
-						substreams: incoming.into_iter().map(|(s, _)| s).collect(),
-						shutdown: SmallVec::new()
-					}
-				}
-			}
-
-			st @ ProtocolState::KillAsap => st,
-			st @ ProtocolState::Opening { .. } => st,
-			st @ ProtocolState::Normal { .. } => st,
-			ProtocolState::Disabled { shutdown, .. } => {
-				ProtocolState::Disabled { shutdown, reenable: true }
-			}
-		}
-	}
-
-	/// Disables the handler.
-	fn disable(&mut self) {
-		self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) {
-			ProtocolState::Poisoned => {
-				error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state",
-					self.remote_peer_id);
-				ProtocolState::Poisoned
-			}
-
-			ProtocolState::Init { substreams: shutdown, .. } => {
-				let mut shutdown = shutdown.into_iter().map(|(s, _)| s).collect::<SmallVec<[_; 6]>>();
-				for s in &mut shutdown {
-					s.shutdown();
-				}
-				ProtocolState::Disabled { shutdown, reenable: false }
-			}
-
-			ProtocolState::Opening { .. } | ProtocolState::Normal { .. } =>
-				// At the moment, if we get disabled while things were working, we kill the entire
-				// connection in order to force a reset of the state.
-				// This is obviously an extremely shameful way to do things, but at the time of
-				// the writing of this comment, the networking works very poorly and a solution
-				// needs to be found.
-				ProtocolState::KillAsap,
-
-			ProtocolState::Disabled { shutdown, .. } =>
-				ProtocolState::Disabled { shutdown, reenable: false },
-
-			ProtocolState::KillAsap => ProtocolState::KillAsap,
-		};
-	}
-
-	/// Polls the state for events. Optionally returns an event to produce.
-	#[must_use]
-	fn poll_state(&mut self, cx: &mut Context)
-		-> Option<ProtocolsHandlerEvent<RegisteredProtocol, Infallible, LegacyProtoHandlerOut, ConnectionKillError>> {
-		match mem::replace(&mut self.state, ProtocolState::Poisoned) {
-			ProtocolState::Poisoned => {
-				error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state",
-					self.remote_peer_id);
-				self.state = ProtocolState::Poisoned;
-				None
-			}
-
-			ProtocolState::Init { substreams, mut init_deadline } => {
-				match Pin::new(&mut init_deadline).poll(cx) {
-					Poll::Ready(()) => {
-						error!(target: "sub-libp2p", "Handler initialization process is too long \
-							with {:?}", self.remote_peer_id);
-						self.state = ProtocolState::KillAsap;
-					},
-					Poll::Pending => {
-						self.state = ProtocolState::Init { substreams, init_deadline };
-					}
-				}
-
-				None
-			}
-
-			ProtocolState::Opening => {
-				self.state = ProtocolState::Opening;
-				None
-			}
-
-			ProtocolState::Normal { mut substreams, mut shutdown } => {
-				for n in (0..substreams.len()).rev() {
-					let mut substream = substreams.swap_remove(n);
-					match Pin::new(&mut substream).poll_next(cx) {
-						Poll::Pending => substreams.push(substream),
-						Poll::Ready(Some(Ok(RegisteredProtocolEvent::Message(message)))) => {
-							let event = LegacyProtoHandlerOut::CustomMessage {
-								message
-							};
-							substreams.push(substream);
-							self.state = ProtocolState::Normal { substreams, shutdown };
-							return Some(ProtocolsHandlerEvent::Custom(event));
-						},
-						Poll::Ready(Some(Ok(RegisteredProtocolEvent::Clogged))) => {
-							shutdown.push(substream);
-							if substreams.is_empty() {
-								let event = LegacyProtoHandlerOut::CustomProtocolClosed {
-									reason: "Legacy substream clogged".into(),
-								};
-								self.state = ProtocolState::Disabled {
-									shutdown: shutdown.into_iter().collect(),
-									reenable: true
-								};
-								return Some(ProtocolsHandlerEvent::Custom(event));
-							}
-						}
-						Poll::Ready(None) => {
-							shutdown.push(substream);
-							if substreams.is_empty() {
-								let event = LegacyProtoHandlerOut::CustomProtocolClosed {
-									reason: "All substreams have been closed by the remote".into(),
-								};
-								self.state = ProtocolState::Disabled {
-									shutdown: shutdown.into_iter().collect(),
-									reenable: true
-								};
-								return Some(ProtocolsHandlerEvent::Custom(event));
-							}
-						}
-						Poll::Ready(Some(Err(err))) => {
-							if substreams.is_empty() {
-								let event = LegacyProtoHandlerOut::CustomProtocolClosed {
-									reason: format!("Error on the last substream: {:?}", err).into(),
-								};
-								self.state = ProtocolState::Disabled {
-									shutdown: shutdown.into_iter().collect(),
-									reenable: true
-								};
-								return Some(ProtocolsHandlerEvent::Custom(event));
-							} else {
-								debug!(target: "sub-libp2p", "Error on extra substream: {:?}", err);
-							}
-						}
-					}
-				}
-
-				// This code is reached is none if and only if none of the substreams are in a ready state.
-				self.state = ProtocolState::Normal { substreams, shutdown };
-				None
-			}
-
-			ProtocolState::Disabled { mut shutdown, reenable } => {
-				shutdown_list(&mut shutdown, cx);
-				// If `reenable` is `true`, that means we should open the substreams system again
-				// after all the substreams are closed.
-				if reenable && shutdown.is_empty() {
-					self.state = ProtocolState::Opening;
-				} else {
-					self.state = ProtocolState::Disabled { shutdown, reenable };
-				}
-				None
-			}
-
-			ProtocolState::KillAsap => None,
-		}
-	}
-}
-
-impl ProtocolsHandler for LegacyProtoHandler {
-	type InEvent = LegacyProtoHandlerIn;
-	type OutEvent = LegacyProtoHandlerOut;
-	type Error = ConnectionKillError;
-	type InboundProtocol = RegisteredProtocol;
-	type OutboundProtocol = RegisteredProtocol;
-	type OutboundOpenInfo = Infallible;
-	type InboundOpenInfo = ();
-
-	fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol, ()> {
-		SubstreamProtocol::new(self.protocol.clone(), ())
-	}
-
-	fn inject_fully_negotiated_inbound(
-		&mut self,
-		(mut substream, received_handshake): <Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output,
-		(): ()
-	) {
-		self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) {
-			ProtocolState::Poisoned => {
-				error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state",
-					self.remote_peer_id);
-				ProtocolState::Poisoned
-			}
-
-			ProtocolState::Init { mut substreams, init_deadline } => {
-				if substream.endpoint() == Endpoint::Dialer {
-					error!(target: "sub-libp2p", "Opened dialing substream with {:?} before \
-						initialization", self.remote_peer_id);
-				}
-				substreams.push((substream, received_handshake));
-				ProtocolState::Init { substreams, init_deadline }
-			}
-
-			ProtocolState::Opening { .. } => {
-				let event = LegacyProtoHandlerOut::CustomProtocolOpen {
-					version: substream.protocol_version(),
-					received_handshake,
-				};
-				self.events_queue.push_back(ProtocolsHandlerEvent::Custom(event));
-				ProtocolState::Normal {
-					substreams: smallvec![substream],
-					shutdown: SmallVec::new()
-				}
-			}
-
-			ProtocolState::Normal { substreams: mut existing, shutdown } => {
-				existing.push(substream);
-				ProtocolState::Normal { substreams: existing, shutdown }
-			}
-
-			ProtocolState::Disabled { mut shutdown, .. } => {
-				substream.shutdown();
-				shutdown.push(substream);
-				ProtocolState::Disabled { shutdown, reenable: false }
-			}
-
-			ProtocolState::KillAsap => ProtocolState::KillAsap,
-		};
-	}
-
-	fn inject_fully_negotiated_outbound(
-		&mut self,
-		_: <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Output,
-		unreachable: Self::OutboundOpenInfo
-	) {
-		match unreachable {}
-	}
-
-	fn inject_event(&mut self, message: LegacyProtoHandlerIn) {
-		match message {
-			LegacyProtoHandlerIn::Disable => self.disable(),
-			LegacyProtoHandlerIn::Enable => self.enable(),
-		}
-	}
-
-	fn inject_dial_upgrade_error(
-		&mut self,
-		unreachable: Self::OutboundOpenInfo,
-		_: ProtocolsHandlerUpgrErr<io::Error>
-	) {
-		match unreachable {}
-	}
-
-	fn connection_keep_alive(&self) -> KeepAlive {
-		match self.state {
-			ProtocolState::Init { .. } | ProtocolState::Normal { .. } => KeepAlive::Yes,
-			ProtocolState::Opening { .. } | ProtocolState::Disabled { .. } |
-			ProtocolState::Poisoned | ProtocolState::KillAsap => KeepAlive::No,
-		}
-	}
-
-	fn poll(
-		&mut self,
-		cx: &mut Context,
-	) -> Poll<
-		ProtocolsHandlerEvent<Self::OutboundProtocol, Self::OutboundOpenInfo, Self::OutEvent, Self::Error>
-	> {
-		// Flush the events queue if necessary.
-		if let Some(event) = self.events_queue.pop_front() {
-			return Poll::Ready(event)
-		}
-
-		// Kill the connection if needed.
-		if let ProtocolState::KillAsap = self.state {
-			return Poll::Ready(ProtocolsHandlerEvent::Close(ConnectionKillError));
-		}
-
-		// Process all the substreams.
-		if let Some(event) = self.poll_state(cx) {
-			return Poll::Ready(event)
-		}
-
-		Poll::Pending
-	}
-}
-
-impl fmt::Debug for LegacyProtoHandler {
-	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
-		f.debug_struct("LegacyProtoHandler")
-			.finish()
-	}
-}
-
-/// Given a list of substreams, tries to shut them down. The substreams that have been successfully
-/// shut down are removed from the list.
-fn shutdown_list
-	(list: &mut SmallVec<impl smallvec::Array<Item = RegisteredProtocolSubstream<NegotiatedSubstream>>>,
-	cx: &mut Context)
-{
-	'outer: for n in (0..list.len()).rev() {
-		let mut substream = list.swap_remove(n);
-		loop {
-			match substream.poll_next_unpin(cx) {
-				Poll::Ready(Some(Ok(_))) => {}
-				Poll::Pending => break,
-				Poll::Ready(Some(Err(_))) | Poll::Ready(None) => continue 'outer,
-			}
-		}
-		list.push(substream);
-	}
-}
-
-/// Error returned when switching from normal to disabled.
-#[derive(Debug)]
-pub struct ConnectionKillError;
-
-impl error::Error for ConnectionKillError {
-}
-
-impl fmt::Display for ConnectionKillError {
-	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-		write!(f, "Connection kill when switching from normal to disabled")
-	}
-}
diff --git a/substrate/client/network/src/protocol/generic_proto/handler/notif_in.rs b/substrate/client/network/src/protocol/generic_proto/handler/notif_in.rs
deleted file mode 100644
index d3b505e0de3e26fcb6c5cb234fa8e8a7ecd48b10..0000000000000000000000000000000000000000
--- a/substrate/client/network/src/protocol/generic_proto/handler/notif_in.rs
+++ /dev/null
@@ -1,293 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) 2020 Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-//! Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for ingoing
-//! substreams for a single gossiping protocol.
-//!
-//! > **Note**: Each instance corresponds to a single protocol. In order to support multiple
-//! >			protocols, you need to create multiple instances and group them.
-//!
-
-use crate::protocol::generic_proto::upgrade::{NotificationsIn, NotificationsInSubstream};
-use bytes::BytesMut;
-use futures::prelude::*;
-use libp2p::core::{ConnectedPoint, PeerId};
-use libp2p::core::upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade};
-use libp2p::swarm::{
-	ProtocolsHandler, ProtocolsHandlerEvent,
-	IntoProtocolsHandler,
-	KeepAlive,
-	ProtocolsHandlerUpgrErr,
-	SubstreamProtocol,
-	NegotiatedSubstream,
-};
-use log::{error, warn};
-use std::{borrow::Cow, collections::VecDeque, fmt, pin::Pin, task::{Context, Poll}};
-
-/// Implements the `IntoProtocolsHandler` trait of libp2p.
-///
-/// Every time a connection with a remote starts, an instance of this struct is created and
-/// sent to a background task dedicated to this connection. Once the connection is established,
-/// it is turned into a [`NotifsInHandler`].
-pub struct NotifsInHandlerProto {
-	/// Configuration for the protocol upgrade to negotiate.
-	in_protocol: NotificationsIn,
-}
-
-/// The actual handler once the connection has been established.
-pub struct NotifsInHandler {
-	/// Configuration for the protocol upgrade to negotiate for inbound substreams.
-	in_protocol: NotificationsIn,
-
-	/// Substream that is open with the remote.
-	substream: Option<NotificationsInSubstream<NegotiatedSubstream>>,
-
-	/// If the substream is opened and closed rapidly, we can emit several `OpenRequest` and
-	/// `Closed` messages in a row without the handler having time to respond with `Accept` or
-	/// `Refuse`.
-	///
-	/// In order to keep the state consistent, we increment this variable every time an
-	/// `OpenRequest` is emitted and decrement it every time an `Accept` or `Refuse` is received.
-	pending_accept_refuses: usize,
-
-	/// Queue of events to send to the outside.
-	///
-	/// This queue is only ever modified to insert elements at the back, or remove the first
-	/// element.
-	events_queue: VecDeque<ProtocolsHandlerEvent<DeniedUpgrade, (), NotifsInHandlerOut, void::Void>>,
-}
-
-/// Event that can be received by a `NotifsInHandler`.
-#[derive(Debug, Clone)]
-pub enum NotifsInHandlerIn {
-	/// Can be sent back as a response to an `OpenRequest`. Contains the status message to send
-	/// to the remote.
-	///
-	/// After sending this to the handler, the substream is now considered open and `Notif` events
-	/// can be received.
-	Accept(Vec<u8>),
-
-	/// Can be sent back as a response to an `OpenRequest`.
-	Refuse,
-}
-
-/// Event that can be emitted by a `NotifsInHandler`.
-#[derive(Debug)]
-pub enum NotifsInHandlerOut {
-	/// The remote wants to open a substream. Contains the initial message sent by the remote
-	/// when the substream has been opened.
-	///
-	/// Every time this event is emitted, a corresponding `Accepted` or `Refused` **must** be sent
-	/// back even if a `Closed` is received.
-	OpenRequest(Vec<u8>),
-
-	/// The notifications substream has been closed by the remote. In order to avoid race
-	/// conditions, this does **not** cancel any previously-sent `OpenRequest`.
-	Closed,
-
-	/// Received a message on the notifications substream.
-	///
-	/// Can only happen after an `Accept` and before a `Closed`.
-	Notif(BytesMut),
-}
-
-impl NotifsInHandlerProto {
-	/// Builds a new `NotifsInHandlerProto`.
-	pub fn new(
-		protocol_name: impl Into<Cow<'static, str>>
-	) -> Self {
-		NotifsInHandlerProto {
-			in_protocol: NotificationsIn::new(protocol_name),
-		}
-	}
-}
-
-impl IntoProtocolsHandler for NotifsInHandlerProto {
-	type Handler = NotifsInHandler;
-
-	fn inbound_protocol(&self) -> NotificationsIn {
-		self.in_protocol.clone()
-	}
-
-	fn into_handler(self, _: &PeerId, _: &ConnectedPoint) -> Self::Handler {
-		NotifsInHandler {
-			in_protocol: self.in_protocol,
-			substream: None,
-			pending_accept_refuses: 0,
-			events_queue: VecDeque::new(),
-		}
-	}
-}
-
-impl NotifsInHandler {
-	/// Returns the name of the protocol that we accept.
-	pub fn protocol_name(&self) -> &Cow<'static, str> {
-		self.in_protocol.protocol_name()
-	}
-
-	/// Equivalent to the `poll` method of `ProtocolsHandler`, except that it is guaranteed to
-	/// never generate [`NotifsInHandlerOut::Notif`].
-	///
-	/// Use this method in situations where it is not desirable to receive events but still
-	/// necessary to drive any potential incoming handshake or request.
-	pub fn poll_process(
-		&mut self,
-		cx: &mut Context
-	) -> Poll<
-		ProtocolsHandlerEvent<DeniedUpgrade, (), NotifsInHandlerOut, void::Void>
-	> {
-		if let Some(event) = self.events_queue.pop_front() {
-			return Poll::Ready(event)
-		}
-
-		match self.substream.as_mut().map(|s| NotificationsInSubstream::poll_process(Pin::new(s), cx)) {
-			None | Some(Poll::Pending) => {},
-			Some(Poll::Ready(Ok(v))) => match v {},
-			Some(Poll::Ready(Err(_))) => {
-				self.substream = None;
-				return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed));
-			},
-		}
-
-		Poll::Pending
-	}
-}
-
-impl ProtocolsHandler for NotifsInHandler {
-	type InEvent = NotifsInHandlerIn;
-	type OutEvent = NotifsInHandlerOut;
-	type Error = void::Void;
-	type InboundProtocol = NotificationsIn;
-	type OutboundProtocol = DeniedUpgrade;
-	type OutboundOpenInfo = ();
-	type InboundOpenInfo = ();
-
-	fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol, ()> {
-		SubstreamProtocol::new(self.in_protocol.clone(), ())
-	}
-
-	fn inject_fully_negotiated_inbound(
-		&mut self,
-		(msg, proto): <Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output,
-		(): ()
-	) {
-		// If a substream already exists, we drop it and replace it with the new incoming one.
-		if self.substream.is_some() {
-			self.events_queue.push_back(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed));
-		}
-
-		// Note that we drop the existing substream, which will send an equivalent to a TCP "RST"
-		// to the remote and force-close the substream. It might seem like an unclean way to get
-		// rid of a substream. However, keep in mind that it is invalid for the remote to open
-		// multiple such substreams, and therefore sending a "RST" is not an incorrect thing to do.
-		self.substream = Some(proto);
-
-		self.events_queue.push_back(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::OpenRequest(msg)));
-		self.pending_accept_refuses = self.pending_accept_refuses
-			.checked_add(1)
-			.unwrap_or_else(|| {
-				error!(target: "sub-libp2p", "Overflow in pending_accept_refuses");
-				usize::max_value()
-			});
-	}
-
-	fn inject_fully_negotiated_outbound(
-		&mut self,
-		out: <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Output,
-		_: Self::OutboundOpenInfo
-	) {
-		// We never emit any outgoing substream.
-		void::unreachable(out)
-	}
-
-	fn inject_event(&mut self, message: NotifsInHandlerIn) {
-		self.pending_accept_refuses = match self.pending_accept_refuses.checked_sub(1) {
-			Some(v) => v,
-			None => {
-				error!(
-					target: "sub-libp2p",
-					"Inconsistent state: received Accept/Refuse when no pending request exists"
-				);
-				return;
-			}
-		};
-
-		// If we send multiple `OpenRequest`s in a row, we will receive back multiple
-		// `Accept`/`Refuse` messages. All of them are obsolete except the last one.
-		if self.pending_accept_refuses != 0 {
-			return;
-		}
-
-		match (message, self.substream.as_mut()) {
-			(NotifsInHandlerIn::Accept(message), Some(sub)) => sub.send_handshake(message),
-			(NotifsInHandlerIn::Accept(_), None) => {},
-			(NotifsInHandlerIn::Refuse, _) => self.substream = None,
-		}
-	}
-
-	fn inject_dial_upgrade_error(&mut self, _: (), _: ProtocolsHandlerUpgrErr<void::Void>) {
-		error!(target: "sub-libp2p", "Received dial upgrade error in inbound-only handler");
-	}
-
-	fn connection_keep_alive(&self) -> KeepAlive {
-		if self.substream.is_some() {
-			KeepAlive::Yes
-		} else {
-			KeepAlive::No
-		}
-	}
-
-	fn poll(
-		&mut self,
-		cx: &mut Context,
-	) -> Poll<
-		ProtocolsHandlerEvent<Self::OutboundProtocol, Self::OutboundOpenInfo, Self::OutEvent, Self::Error>
-	> {
-		// Flush the events queue if necessary.
-		if let Some(event) = self.events_queue.pop_front() {
-			return Poll::Ready(event)
-		}
-
-		match self.substream.as_mut().map(|s| Stream::poll_next(Pin::new(s), cx)) {
-			None | Some(Poll::Pending) => {},
-			Some(Poll::Ready(Some(Ok(msg)))) => {
-				if self.pending_accept_refuses != 0 {
-					warn!(
-						target: "sub-libp2p",
-						"Bad state in inbound-only handler: notif before accepting substream"
-					);
-				}
-				return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(msg)))
-			},
-			Some(Poll::Ready(None)) | Some(Poll::Ready(Some(Err(_)))) => {
-				self.substream = None;
-				return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed));
-			},
-		}
-
-		Poll::Pending
-	}
-}
-
-impl fmt::Debug for NotifsInHandler {
-	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
-		f.debug_struct("NotifsInHandler")
-			.field("substream_open", &self.substream.is_some())
-			.finish()
-	}
-}
diff --git a/substrate/client/network/src/protocol/generic_proto/handler/notif_out.rs b/substrate/client/network/src/protocol/generic_proto/handler/notif_out.rs
deleted file mode 100644
index 414e62c0d135fb80cde4b5f15bea63cc7e50d6aa..0000000000000000000000000000000000000000
--- a/substrate/client/network/src/protocol/generic_proto/handler/notif_out.rs
+++ /dev/null
@@ -1,444 +0,0 @@
-// Copyright 2019-2020 Parity Technologies (UK) Ltd.
-// This file is part of Substrate.
-
-// Substrate is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Substrate is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Substrate.  If not, see <http://www.gnu.org/licenses/>.
-
-//! Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for outgoing
-//! substreams of a single gossiping protocol.
-//!
-//! > **Note**: Each instance corresponds to a single protocol. In order to support multiple
-//! >			protocols, you need to create multiple instances and group them.
-//!
-
-use crate::protocol::generic_proto::upgrade::{NotificationsOut, NotificationsOutSubstream, NotificationsHandshakeError};
-use futures::prelude::*;
-use libp2p::core::{ConnectedPoint, PeerId};
-use libp2p::core::upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade};
-use libp2p::swarm::{
-	ProtocolsHandler, ProtocolsHandlerEvent,
-	IntoProtocolsHandler,
-	KeepAlive,
-	ProtocolsHandlerUpgrErr,
-	SubstreamProtocol,
-	NegotiatedSubstream,
-};
-use log::{debug, warn, error};
-use std::{
-	borrow::Cow, collections::VecDeque, fmt, mem, pin::Pin, task::{Context, Poll, Waker},
-	time::Duration
-};
-use wasm_timer::Instant;
-
-/// Maximum duration to open a substream and receive the handshake message. After that, we
-/// consider that we failed to open the substream.
-const OPEN_TIMEOUT: Duration = Duration::from_secs(10);
-/// After successfully establishing a connection with the remote, we keep the connection open for
-/// at least this amount of time in order to give the rest of the code the chance to notify us to
-/// open substreams.
-const INITIAL_KEEPALIVE_TIME: Duration = Duration::from_secs(5);
-
-/// Implements the `IntoProtocolsHandler` trait of libp2p.
-///
-/// Every time a connection with a remote starts, an instance of this struct is created and
-/// sent to a background task dedicated to this connection. Once the connection is established,
-/// it is turned into a [`NotifsOutHandler`].
-///
-/// See the documentation of [`NotifsOutHandler`] for more information.
-pub struct NotifsOutHandlerProto {
-	/// Name of the protocol to negotiate.
-	protocol_name: Cow<'static, str>,
-}
-
-impl NotifsOutHandlerProto {
-	/// Builds a new [`NotifsOutHandlerProto`]. Will use the given protocol name for the
-	/// notifications substream.
-	pub fn new(protocol_name: impl Into<Cow<'static, str>>) -> Self {
-		NotifsOutHandlerProto {
-			protocol_name: protocol_name.into(),
-		}
-	}
-}
-
-impl IntoProtocolsHandler for NotifsOutHandlerProto {
-	type Handler = NotifsOutHandler;
-
-	fn inbound_protocol(&self) -> DeniedUpgrade {
-		DeniedUpgrade
-	}
-
-	fn into_handler(self, _: &PeerId, _: &ConnectedPoint) -> Self::Handler {
-		NotifsOutHandler {
-			protocol_name: self.protocol_name,
-			when_connection_open: Instant::now(),
-			state: State::Disabled,
-			events_queue: VecDeque::new(),
-		}
-	}
-}
-
-/// Handler for an outbound notification substream.
-///
-/// When a connection is established, this handler starts in the "disabled" state, meaning that
-/// no substream will be open.
-///
-/// One can try open a substream by sending an [`NotifsOutHandlerIn::Enable`] message to the
-/// handler. Once done, the handler will try to establish then maintain an outbound substream with
-/// the remote for the purpose of sending notifications to it.
-pub struct NotifsOutHandler {
-	/// Name of the protocol to negotiate.
-	protocol_name: Cow<'static, str>,
-
-	/// Relationship with the node we're connected to.
-	state: State,
-
-	/// When the connection with the remote has been successfully established.
-	when_connection_open: Instant,
-
-	/// Queue of events to send to the outside.
-	///
-	/// This queue must only ever be modified to insert elements at the back, or remove the first
-	/// element.
-	events_queue: VecDeque<ProtocolsHandlerEvent<NotificationsOut, (), NotifsOutHandlerOut, void::Void>>,
-}
-
-/// Our relationship with the node we're connected to.
-enum State {
-	/// The handler is disabled and idle. No substream is open.
-	Disabled,
-
-	/// The handler is disabled. A substream is still open and needs to be closed.
-	///
-	/// > **Important**: Having this state means that `poll_close` has been called at least once,
-	/// >				 but the `Sink` API is unclear about whether or not the stream can then
-	/// >				 be recovered. Because of that, we must never switch from the
-	/// >				 `DisabledOpen` state to the `Open` state while keeping the same substream.
-	DisabledOpen(NotificationsOutSubstream<NegotiatedSubstream>),
-
-	/// The handler is disabled but we are still trying to open a substream with the remote.
-	///
-	/// If the handler gets enabled again, we can immediately switch to `Opening`.
-	DisabledOpening,
-
-	/// The handler is enabled and we are trying to open a substream with the remote.
-	Opening {
-		/// The initial message that we sent. Necessary if we need to re-open a substream.
-		initial_message: Vec<u8>,
-	},
-
-	/// The handler is enabled. We have tried opening a substream in the past but the remote
-	/// refused it.
-	Refused,
-
-	/// The handler is enabled and substream is open.
-	Open {
-		/// Substream that is currently open.
-		substream: NotificationsOutSubstream<NegotiatedSubstream>,
-		/// Waker for the last task that got `Poll::Pending` from `poll_ready`, to notify
-		/// when the open substream closes due to being disabled or encountering an
-		/// error, i.e. to notify the task as soon as the substream becomes unavailable,
-		/// without waiting for an underlying I/O task wakeup.
-		close_waker: Option<Waker>,
-		/// The initial message that we sent. Necessary if we need to re-open a substream.
-		initial_message: Vec<u8>,
-	},
-
-	/// Poisoned state. Shouldn't be found in the wild.
-	Poisoned,
-}
-
-/// Event that can be received by a `NotifsOutHandler`.
-#[derive(Debug)]
-pub enum NotifsOutHandlerIn {
-	/// Enables the notifications substream for this node. The handler will try to maintain a
-	/// substream with the remote.
-	Enable {
-		/// Initial message to send to remote nodes when we open substreams.
-		initial_message: Vec<u8>,
-	},
-
-	/// Disables the notifications substream for this node. This is the default state.
-	Disable,
-}
-
-/// Event that can be emitted by a `NotifsOutHandler`.
-#[derive(Debug)]
-pub enum NotifsOutHandlerOut {
-	/// The notifications substream has been accepted by the remote.
-	Open {
-		/// Handshake message sent by the remote after we opened the substream.
-		handshake: Vec<u8>,
-	},
-
-	/// The notifications substream has been closed by the remote.
-	Closed,
-
-	/// We tried to open a notifications substream, but the remote refused it.
-	///
-	/// Can only happen if we're in a closed state.
-	Refused,
-}
-
-impl NotifsOutHandler {
-	/// Returns true if the substream is currently open.
-	pub fn is_open(&self) -> bool {
-		match &self.state {
-			State::Disabled => false,
-			State::DisabledOpening => false,
-			State::DisabledOpen(_) => true,
-			State::Opening { .. } => false,
-			State::Refused => false,
-			State::Open { .. } => true,
-			State::Poisoned => false,
-		}
-	}
-
-	/// Returns `true` if there has been an attempt to open the substream, but the remote refused
-	/// the substream.
-	///
-	/// Always returns `false` if the handler is in a disabled state.
-	pub fn is_refused(&self) -> bool {
-		match &self.state {
-			State::Disabled => false,
-			State::DisabledOpening => false,
-			State::DisabledOpen(_) => false,
-			State::Opening { .. } => false,
-			State::Refused => true,
-			State::Open { .. } => false,
-			State::Poisoned => false,
-		}
-	}
-
-	/// Returns the name of the protocol that we negotiate.
-	pub fn protocol_name(&self) -> &Cow<'static, str> {
-		&self.protocol_name
-	}
-
-	/// Polls whether the outbound substream is ready to send a notification.
-	///
-	/// - Returns `Poll::Pending` if the substream is open but not ready to send a notification.
-	/// - Returns `Poll::Ready(true)` if the substream is ready to send a notification.
-	/// - Returns `Poll::Ready(false)` if the substream is closed.
-	///
-	pub fn poll_ready(&mut self, cx: &mut Context) -> Poll<bool> {
-		if let State::Open { substream, close_waker, .. } = &mut self.state {
-			match substream.poll_ready_unpin(cx) {
-				Poll::Ready(Ok(())) => Poll::Ready(true),
-				Poll::Ready(Err(_)) => Poll::Ready(false),
-				Poll::Pending => {
-					*close_waker = Some(cx.waker().clone());
-					Poll::Pending
-				}
-			}
-		} else {
-			Poll::Ready(false)
-		}
-	}
-
-	/// Sends out a notification.
-	///
-	/// If the substream is closed, or not ready to send out a notification yet, then the
-	/// notification is silently discarded.
-	///
-	/// You are encouraged to call [`NotifsOutHandler::poll_ready`] beforehand to determine
-	/// whether this will succeed. If `Poll::Ready(true)` is returned, then this method will send
-	/// out a notification.
-	pub fn send_or_discard(&mut self, notification: Vec<u8>) {
-		if let State::Open { substream, .. } = &mut self.state {
-			let _ = substream.start_send_unpin(notification);
-		}
-	}
-}
-
-impl ProtocolsHandler for NotifsOutHandler {
-	type InEvent = NotifsOutHandlerIn;
-	type OutEvent = NotifsOutHandlerOut;
-	type Error = void::Void;
-	type InboundProtocol = DeniedUpgrade;
-	type OutboundProtocol = NotificationsOut;
-	type OutboundOpenInfo = ();
-	type InboundOpenInfo = ();
-
-	fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol, ()> {
-		SubstreamProtocol::new(DeniedUpgrade, ())
-	}
-
-	fn inject_fully_negotiated_inbound(
-		&mut self,
-		proto: <Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output,
-		(): ()
-	) {
-		// We should never reach here. `proto` is a `Void`.
-		void::unreachable(proto)
-	}
-
-	fn inject_fully_negotiated_outbound(
-		&mut self,
-		(handshake_msg, substream): <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Output,
-		_: ()
-	) {
-		match mem::replace(&mut self.state, State::Poisoned) {
-			State::Opening { initial_message } => {
-				let ev = NotifsOutHandlerOut::Open { handshake: handshake_msg };
-				self.events_queue.push_back(ProtocolsHandlerEvent::Custom(ev));
-				self.state = State::Open { substream, initial_message, close_waker: None };
-			},
-			// If the handler was disabled while we were negotiating the protocol, immediately
-			// close it.
-			State::DisabledOpening => self.state = State::DisabledOpen(substream),
-
-			// Any other situation should never happen.
-			State::Disabled | State::Refused | State::Open { .. } | State::DisabledOpen(_) =>
-				error!("☎️ State mismatch in notifications handler: substream already open"),
-			State::Poisoned => error!("☎️ Notifications handler in a poisoned state"),
-		}
-	}
-
-	fn inject_event(&mut self, message: NotifsOutHandlerIn) {
-		match message {
-			NotifsOutHandlerIn::Enable { initial_message } => {
-				match mem::replace(&mut self.state, State::Poisoned) {
-					State::Disabled => {
-						let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message.clone());
-						self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest {
-							protocol: SubstreamProtocol::new(proto, ()).with_timeout(OPEN_TIMEOUT),
-						});
-						self.state = State::Opening { initial_message };
-					},
-					State::DisabledOpening => self.state = State::Opening { initial_message },
-					State::DisabledOpen(mut sub) => {
-						// As documented above, in this state we have already called `poll_close`
-						// once on the substream, and it is unclear whether the substream can then
-						// be recovered. When in doubt, let's drop the existing substream and
-						// open a new one.
-						if sub.close().now_or_never().is_none() {
-							warn!(
-								target: "sub-libp2p",
-								"📞 Improperly closed outbound notifications substream"
-							);
-						}
-
-						let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message.clone());
-						self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest {
-							protocol: SubstreamProtocol::new(proto, ()).with_timeout(OPEN_TIMEOUT),
-						});
-						self.state = State::Opening { initial_message };
-					},
-					st @ State::Opening { .. } | st @ State::Refused | st @ State::Open { .. } => {
-						debug!(target: "sub-libp2p",
-							"Tried to enable notifications handler that was already enabled");
-						self.state = st;
-					}
-					State::Poisoned => error!("Notifications handler in a poisoned state"),
-				}
-			}
-
-			NotifsOutHandlerIn::Disable => {
-				match mem::replace(&mut self.state, State::Poisoned) {
-					st @ State::Disabled | st @ State::DisabledOpen(_) | st @ State::DisabledOpening => {
-						debug!(target: "sub-libp2p",
-							"Tried to disable notifications handler that was already disabled");
-						self.state = st;
-					}
-					State::Opening { .. } => self.state = State::DisabledOpening,
-					State::Refused => self.state = State::Disabled,
-					State::Open { substream, close_waker, .. } => {
-						if let Some(close_waker) = close_waker {
-							close_waker.wake();
-						}
-						self.state = State::DisabledOpen(substream)
-					},
-					State::Poisoned => error!("☎️ Notifications handler in a poisoned state"),
-				}
-			}
-		}
-	}
-
-	fn inject_dial_upgrade_error(&mut self, _: (), _: ProtocolsHandlerUpgrErr<NotificationsHandshakeError>) {
-		match mem::replace(&mut self.state, State::Poisoned) {
-			State::Disabled => {},
-			State::DisabledOpen(_) | State::Refused | State::Open { .. } =>
-				error!("☎️ State mismatch in NotificationsOut"),
-			State::Opening { .. } => {
-				self.state = State::Refused;
-				let ev = NotifsOutHandlerOut::Refused;
-				self.events_queue.push_back(ProtocolsHandlerEvent::Custom(ev));
-			},
-			State::DisabledOpening => self.state = State::Disabled,
-			State::Poisoned => error!("☎️ Notifications handler in a poisoned state"),
-		}
-	}
-
-	fn connection_keep_alive(&self) -> KeepAlive {
-		match self.state {
-			// We have a small grace period of `INITIAL_KEEPALIVE_TIME` during which we keep the
-			// connection open no matter what, in order to avoid closing and reopening
-			// connections all the time.
-			State::Disabled | State::DisabledOpen(_) | State::DisabledOpening =>
-				KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME),
-			State::Opening { .. } | State::Open { .. } => KeepAlive::Yes,
-			State::Refused | State::Poisoned => KeepAlive::No,
-		}
-	}
-
-	fn poll(
-		&mut self,
-		cx: &mut Context,
-	) -> Poll<ProtocolsHandlerEvent<Self::OutboundProtocol, Self::OutboundOpenInfo, Self::OutEvent, Self::Error>> {
-		// Flush the events queue if necessary.
-		if let Some(event) = self.events_queue.pop_front() {
-			return Poll::Ready(event)
-		}
-
-		match &mut self.state {
-			State::Open { substream, initial_message, close_waker } =>
-				match Sink::poll_flush(Pin::new(substream), cx) {
-					Poll::Pending | Poll::Ready(Ok(())) => {},
-					Poll::Ready(Err(_)) => {
-						if let Some(close_waker) = close_waker.take() {
-							close_waker.wake();
-						}
-
-						// We try to re-open a substream.
-						let initial_message = mem::replace(initial_message, Vec::new());
-						self.state = State::Opening { initial_message: initial_message.clone() };
-						let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message);
-						self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest {
-							protocol: SubstreamProtocol::new(proto, ()).with_timeout(OPEN_TIMEOUT),
-						});
-						return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed));
-					}
-				},
-
-			State::DisabledOpen(sub) => match Sink::poll_close(Pin::new(sub), cx) {
-				Poll::Pending => {},
-				Poll::Ready(Ok(())) | Poll::Ready(Err(_)) => {
-					self.state = State::Disabled;
-					return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed));
-				},
-			},
-
-			_ => {}
-		}
-
-		Poll::Pending
-	}
-}
-
-impl fmt::Debug for NotifsOutHandler {
-	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
-		f.debug_struct("NotifsOutHandler")
-			.field("open", &self.is_open())
-			.finish()
-	}
-}
diff --git a/substrate/client/network/src/protocol/generic_proto/upgrade/legacy.rs b/substrate/client/network/src/protocol/generic_proto/upgrade/legacy.rs
index 1b2b97253d1aed125a2a39f5d3827e8d4f1e47f8..91282d0cf57dd9e751a208e81d2720f3cf949bb2 100644
--- a/substrate/client/network/src/protocol/generic_proto/upgrade/legacy.rs
+++ b/substrate/client/network/src/protocol/generic_proto/upgrade/legacy.rs
@@ -20,7 +20,7 @@ use crate::config::ProtocolId;
 use bytes::BytesMut;
 use futures::prelude::*;
 use futures_codec::Framed;
-use libp2p::core::{Endpoint, UpgradeInfo, InboundUpgrade, OutboundUpgrade, upgrade::ProtocolName};
+use libp2p::core::{UpgradeInfo, InboundUpgrade, OutboundUpgrade, upgrade::ProtocolName};
 use parking_lot::RwLock;
 use std::{collections::VecDeque, io, pin::Pin, sync::Arc, vec::IntoIter as VecIntoIter};
 use std::task::{Context, Poll};
@@ -85,34 +85,18 @@ impl Clone for RegisteredProtocol {
 pub struct RegisteredProtocolSubstream<TSubstream> {
 	/// If true, we are in the process of closing the sink.
 	is_closing: bool,
-	/// Whether the local node opened this substream (dialer), or we received this substream from
-	/// the remote (listener).
-	endpoint: Endpoint,
 	/// Buffer of packets to send.
 	send_queue: VecDeque<BytesMut>,
 	/// If true, we should call `poll_complete` on the inner sink.
 	requires_poll_flush: bool,
 	/// The underlying substream.
 	inner: stream::Fuse<Framed<TSubstream, UviBytes<BytesMut>>>,
-	/// Version of the protocol that was negotiated.
-	protocol_version: u8,
 	/// If true, we have sent a "remote is clogged" event recently and shouldn't send another one
 	/// unless the buffer empties then fills itself again.
 	clogged_fuse: bool,
 }
 
 impl<TSubstream> RegisteredProtocolSubstream<TSubstream> {
-	/// Returns the version of the protocol that was negotiated.
-	pub fn protocol_version(&self) -> u8 {
-		self.protocol_version
-	}
-
-	/// Returns whether the local node opened this substream (dialer), or we received this
-	/// substream from the remote (listener).
-	pub fn endpoint(&self) -> Endpoint {
-		self.endpoint
-	}
-
 	/// Starts a graceful shutdown process on this substream.
 	///
 	/// Note that "graceful" means that we sent a closing message. We don't wait for any
@@ -246,7 +230,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static,
 	fn upgrade_inbound(
 		self,
 		socket: TSubstream,
-		info: Self::Info,
+		_: Self::Info,
 	) -> Self::Future {
 		Box::pin(async move {
 			let mut framed = {
@@ -262,11 +246,9 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static,
 
 			Ok((RegisteredProtocolSubstream {
 				is_closing: false,
-				endpoint: Endpoint::Listener,
 				send_queue: VecDeque::new(),
 				requires_poll_flush: false,
 				inner: framed.fuse(),
-				protocol_version: info.version,
 				clogged_fuse: false,
 			}, received_handshake.to_vec()))
 		})
@@ -283,7 +265,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static,
 	fn upgrade_outbound(
 		self,
 		socket: TSubstream,
-		info: Self::Info,
+		_: Self::Info,
 	) -> Self::Future {
 		Box::pin(async move {
 			let mut framed = {
@@ -301,11 +283,9 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static,
 
 			Ok((RegisteredProtocolSubstream {
 				is_closing: false,
-				endpoint: Endpoint::Dialer,
 				send_queue: VecDeque::new(),
 				requires_poll_flush: false,
 				inner: framed.fuse(),
-				protocol_version: info.version,
 				clogged_fuse: false,
 			}, received_handshake.to_vec()))
 		})
diff --git a/substrate/client/network/src/service.rs b/substrate/client/network/src/service.rs
index 93abbbad024951cc43623dadcc53cee3ff9d2458..5fc8485947ff5310660d915944ec7bbc8c96fce2 100644
--- a/substrate/client/network/src/service.rs
+++ b/substrate/client/network/src/service.rs
@@ -39,7 +39,7 @@ use crate::{
 	},
 	on_demand_layer::AlwaysBadChecker,
 	light_client_handler, block_requests, finality_requests,
-	protocol::{self, event::Event, NotifsHandlerError, LegacyConnectionKillError, NotificationsSink, Ready, sync::SyncState, PeerInfo, Protocol},
+	protocol::{self, event::Event, NotifsHandlerError, NotificationsSink, Ready, sync::SyncState, PeerInfo, Protocol},
 	transport, ReputationChange,
 };
 use futures::{channel::oneshot, prelude::*};
@@ -1589,9 +1589,6 @@ impl<B: BlockT + 'static, H: ExHashT> Future for NetworkWorker<B, H> {
 							Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A(
 								EitherError::A(EitherError::A(EitherError::A(EitherError::B(
 								EitherError::A(PingFailure::Timeout)))))))))) => "ping-timeout",
-							Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A(
-								EitherError::A(EitherError::A(EitherError::A(EitherError::A(
-								NotifsHandlerError::Legacy(LegacyConnectionKillError)))))))))) => "force-closed",
 							Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A(
 								EitherError::A(EitherError::A(EitherError::A(EitherError::A(
 								NotifsHandlerError::SyncNotificationsClogged))))))))) => "sync-notifications-clogged",