From e71c484d5bd12330e16f568ad582e1fc1c878669 Mon Sep 17 00:00:00 2001
From: Aaro Altonen <48052676+altonen@users.noreply.github.com>
Date: Tue, 28 Nov 2023 20:18:52 +0200
Subject: [PATCH] Rework the event system of `sc-network` (#1370)

This commit introduces a new concept called `NotificationService` which
allows Polkadot protocols to communicate directly with the underlying
notification protocol implementation, without routing events through
`NetworkWorker`. Each protocol thus has its own service for communicating
with remote peers, and each `NotificationService` is bound to a single
notification protocol: the `NotificationService` for the transaction
protocol, for example, can only be used to send and receive
transaction-related notifications.
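
As a rough illustration (a hypothetical protocol, not one from this
patch; error handling and metrics are omitted), a protocol now drives
its own event loop over its `NotificationService`:

```rust
use std::collections::HashMap;

use sc_network::{
	service::traits::{NotificationEvent, ValidationResult},
	MessageSink, NotificationService, PeerId,
};

/// Minimal event loop for a hypothetical protocol.
async fn run_protocol(mut service: Box<dyn NotificationService>) {
	let mut sinks: HashMap<PeerId, Box<dyn MessageSink>> = HashMap::new();

	while let Some(event) = service.next_event().await {
		match event {
			// The protocol itself decides whether an inbound peer is accepted.
			NotificationEvent::ValidateInboundSubstream { result_tx, .. } => {
				let _ = result_tx.send(ValidationResult::Accept);
			},
			NotificationEvent::NotificationStreamOpened { peer, .. } => {
				// Cache a sink so notifications can be sent to `peer` directly.
				if let Some(sink) = service.message_sink(&peer) {
					sinks.insert(peer, sink);
				}
			},
			NotificationEvent::NotificationStreamClosed { peer } => {
				sinks.remove(&peer);
			},
			NotificationEvent::NotificationReceived { peer, notification } => {
				// Decode and handle the protocol-specific payload from `peer`.
				let _ = (peer, notification);
			},
		}
	}
}
```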

The `NotificationService` concept introduces two additional benefits
(illustrated by the sketch after this list):
  * allow protocols to start using custom handshakes
  * allow protocols to accept/reject inbound peers
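
A minimal sketch of both capabilities follows. It assumes
`NotificationService::set_handshake` for installing a protocol-specific
handshake; `MyHandshake` and the genesis-hash check are made up for
illustration:

```rust
use parity_scale_codec::{Decode, Encode};
use sc_network::{
	service::traits::{NotificationEvent, ValidationResult},
	NotificationService,
};

/// Hypothetical, protocol-specific handshake.
#[derive(Encode, Decode)]
struct MyHandshake {
	genesis_hash: [u8; 32],
}

async fn accept_matching_peers(mut service: Box<dyn NotificationService>, local: MyHandshake) {
	// Custom handshake: advertise protocol-specific data to remote peers.
	service.set_handshake(local.encode());

	while let Some(event) = service.next_event().await {
		if let NotificationEvent::ValidateInboundSubstream { handshake, result_tx, .. } = event {
			// Accept only peers whose handshake decodes and matches ours.
			let accept = MyHandshake::decode(&mut &handshake[..])
				.map_or(false, |h| h.genesis_hash == local.genesis_hash);

			// A rejected peer never receives the local handshake, so it can
			// detect that it was turned away.
			let _ = result_tx.send(if accept {
				ValidationResult::Accept
			} else {
				ValidationResult::Reject
			});
		}
	}
}
```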

Previously, validating inbound connections was solely the responsibility
of `ProtocolController`. This caused issues with light peers and
`SyncingEngine`, as `ProtocolController` would accept more peers than
`SyncingEngine` could accept, which caused peers to have differing views
of their own state. `SyncingEngine` would reject the excess peers, but
these rejections were not properly communicated to those peers, causing
them to assume they had been accepted.

With `NotificationService`, the local handshake is not sent to the remote
peer if that peer is rejected, which allows it to detect that it was
rejected.

This commit also deprecates the use of `NetworkEventStream` for all
notification-related events; going forward, only DHT events are provided
through `NetworkEventStream`. If protocols wish to follow each other's
events, they must introduce additional abstractions, as is done for the
GRANDPA and transactions protocols, which follow the syncing protocol
through `SyncEventStream`.
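
As a rough sketch of that pattern, assuming the `SyncEvent` and
`SyncEventStream` types exposed by `sc-network-sync` (exact paths and
signatures may differ):

```rust
use futures::StreamExt;
use sc_network_sync::{SyncEvent, SyncEventStream};

/// Follow peer connect/disconnect events of the syncing protocol instead of
/// relying on the deprecated notification events of `NetworkEventStream`.
async fn follow_sync_peers(sync: impl SyncEventStream) {
	let mut events = sync.event_stream("my-protocol");

	while let Some(event) = events.next().await {
		match event {
			SyncEvent::PeerConnected(peer) => {
				// Start tracking `peer` for this protocol.
				let _ = peer;
			},
			SyncEvent::PeerDisconnected(peer) => {
				// Stop tracking `peer`.
				let _ = peer;
			},
		}
	}
}
```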

Fixes https://github.com/paritytech/polkadot-sdk/issues/512
Fixes https://github.com/paritytech/polkadot-sdk/issues/514
Fixes https://github.com/paritytech/polkadot-sdk/issues/515
Fixes https://github.com/paritytech/polkadot-sdk/issues/554
Fixes https://github.com/paritytech/polkadot-sdk/issues/556

---
These changes are transferred from
https://github.com/paritytech/substrate/pull/14197, but there are no
functional changes compared to that PR.

---------

Co-authored-by: Dmitry Markin <dmitry@markin.tech>
Co-authored-by: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
---
 Cargo.lock                                    |   6 +
 .../relay-chain-minimal-node/Cargo.toml       |   1 +
 .../src/collator_overseer.rs                  |  15 +-
 .../relay-chain-minimal-node/src/lib.rs       |  14 +-
 .../relay-chain-minimal-node/src/network.rs   |  25 +-
 polkadot/node/network/bridge/src/lib.rs       |   1 +
 polkadot/node/network/bridge/src/network.rs   |  96 +-
 polkadot/node/network/bridge/src/rx/mod.rs    | 952 +++++++++--------
 polkadot/node/network/bridge/src/rx/tests.rs  | 335 ++++--
 polkadot/node/network/bridge/src/tx/mod.rs    |  42 +-
 polkadot/node/network/bridge/src/tx/tests.rs  | 130 ++-
 .../network/bridge/src/validator_discovery.rs |  13 +-
 .../node/network/protocol/src/peer_set.rs     |  77 +-
 polkadot/node/service/Cargo.toml              |   1 +
 polkadot/node/service/src/lib.rs              |  56 +-
 polkadot/node/service/src/overseer.rs         |  14 +-
 .../bin/node-template/node/src/service.rs     |   7 +-
 substrate/bin/node/cli/src/service.rs         |  33 +-
 .../consensus/beefy/src/communication/mod.rs  |  12 +-
 substrate/client/consensus/beefy/src/lib.rs   |   6 +-
 substrate/client/consensus/beefy/src/tests.rs |  41 +-
 .../client/consensus/beefy/src/worker.rs      |   4 +
 .../grandpa/src/communication/mod.rs          |   4 +-
 .../grandpa/src/communication/tests.rs        | 232 ++--
 substrate/client/consensus/grandpa/src/lib.rs |  22 +-
 .../client/consensus/grandpa/src/observer.rs  |  23 +-
 .../client/consensus/grandpa/src/tests.rs     |  82 +-
 .../client/executor/wasmtime/src/tests.rs     |   8 +-
 substrate/client/mixnet/Cargo.toml            |   1 +
 .../client/mixnet/src/packet_dispatcher.rs    |  36 +-
 substrate/client/mixnet/src/protocol.rs       |  25 +-
 substrate/client/mixnet/src/run.rs            |  60 +-
 .../client/mixnet/src/sync_with_runtime.rs    |   1 +
 substrate/client/network-gossip/Cargo.toml    |   2 +
 substrate/client/network-gossip/src/bridge.rs | 268 +++--
 .../network-gossip/src/state_machine.rs       | 166 ++-
 substrate/client/network/Cargo.toml           |   2 +
 substrate/client/network/common/src/role.rs   |  14 +-
 substrate/client/network/src/behaviour.rs     |  55 +-
 substrate/client/network/src/config.rs        |  90 +-
 substrate/client/network/src/error.rs         |   9 +
 substrate/client/network/src/event.rs         |  49 +-
 substrate/client/network/src/lib.rs           |  22 +-
 substrate/client/network/src/mock.rs          |   9 +
 substrate/client/network/src/peer_store.rs    |  41 +-
 substrate/client/network/src/protocol.rs      | 441 +++-----
 .../client/network/src/protocol/message.rs    |   1 +
 .../network/src/protocol/notifications.rs     |   4 +
 .../src/protocol/notifications/behaviour.rs   | 994 ++++++++++++++----
 .../src/protocol/notifications/handler.rs     |  73 +-
 .../protocol/notifications/service/metrics.rs | 130 +++
 .../src/protocol/notifications/service/mod.rs | 634 +++++++++++
 .../protocol/notifications/service/tests.rs   | 839 +++++++++++++++
 .../src/protocol/notifications/tests.rs       |  31 +-
 .../client/network/src/protocol_controller.rs |   3 +
 substrate/client/network/src/service.rs       | 218 ++--
 .../client/network/src/service/metrics.rs     |  28 -
 .../client/network/src/service/signature.rs   |   2 +
 .../client/network/src/service/traits.rs      | 205 +++-
 substrate/client/network/statement/src/lib.rs | 123 ++-
 substrate/client/network/sync/src/engine.rs   | 386 ++++---
 .../client/network/sync/src/service/mock.rs   |   2 +
 substrate/client/network/test/src/lib.rs      |  97 +-
 substrate/client/network/test/src/service.rs  | 337 +++---
 substrate/client/network/test/src/sync.rs     |  86 +-
 .../client/network/transactions/src/lib.rs    | 156 ++-
 substrate/client/offchain/src/api.rs          |   6 +-
 substrate/client/offchain/src/lib.rs          |   8 +-
 substrate/client/service/src/builder.rs       |  33 +-
 .../data/account_reentrance_count_call.wat    |   2 +-
 .../data/add_remove_delegate_dependency.wat   |  16 +-
 .../frame/contracts/fixtures/data/balance.wat |   2 +-
 .../frame/contracts/fixtures/data/call.wat    |   2 +-
 .../fixtures/data/call_runtime_and_call.wat   |   2 +-
 .../fixtures/data/caller_contract.wat         |  66 +-
 .../fixtures/data/chain_extension.wat         |   2 +-
 .../data/chain_extension_temp_storage.wat     |   2 +-
 .../fixtures/data/create_storage_and_call.wat |   2 +-
 .../data/create_storage_and_instantiate.wat   |   2 +-
 .../contracts/fixtures/data/crypto_hashes.wat |   6 +-
 .../data/debug_message_invalid_utf8.wat       |   2 +-
 .../data/debug_message_logging_disabled.wat   |   2 +-
 .../fixtures/data/debug_message_works.wat     |   2 +-
 .../contracts/fixtures/data/delegate_call.wat |   6 +-
 .../fixtures/data/delegate_call_lib.wat       |   2 +-
 .../fixtures/data/delegate_call_simple.wat    |   2 +-
 .../fixtures/data/destroy_and_transfer.wat    |   2 +-
 .../frame/contracts/fixtures/data/drain.wat   |   2 +-
 .../contracts/fixtures/data/ecdsa_recover.wat |   2 +-
 .../contracts/fixtures/data/event_size.wat    |   2 +-
 .../contracts/fixtures/data/multi_store.wat   |   2 +-
 .../fixtures/data/reentrance_count_call.wat   |  10 +-
 .../data/reentrance_count_delegated_call.wat  |  12 +-
 .../contracts/fixtures/data/self_destruct.wat |   2 +-
 .../data/self_destructing_constructor.wat     |   2 +-
 .../contracts/fixtures/data/set_code_hash.wat |   6 +-
 .../contracts/fixtures/data/storage_size.wat  |   2 +-
 .../contracts/fixtures/data/store_call.wat    |   2 +-
 .../contracts/fixtures/data/store_deploy.wat  |   2 +-
 .../contracts/fixtures/data/xcm_execute.wat   |   2 +-
 .../contracts/fixtures/data/xcm_send.wat      |   2 +-
 substrate/frame/contracts/src/wasm/mod.rs     |  54 +-
 102 files changed, 5628 insertions(+), 2537 deletions(-)
 create mode 100644 substrate/client/network/src/protocol/notifications/service/metrics.rs
 create mode 100644 substrate/client/network/src/protocol/notifications/service/mod.rs
 create mode 100644 substrate/client/network/src/protocol/notifications/service/tests.rs

diff --git a/Cargo.lock b/Cargo.lock
index f99b579bfe9..4fa9e83a173 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4177,6 +4177,7 @@ dependencies = [
  "cumulus-relay-chain-interface",
  "cumulus-relay-chain-rpc-interface",
  "futures",
+ "parking_lot 0.12.1",
  "polkadot-availability-recovery",
  "polkadot-collator-protocol",
  "polkadot-core-primitives",
@@ -13268,6 +13269,7 @@ dependencies = [
  "pallet-transaction-payment-rpc-runtime-api",
  "parity-db",
  "parity-scale-codec",
+ "parking_lot 0.12.1",
  "polkadot-approval-distribution",
  "polkadot-availability-bitfield-distribution",
  "polkadot-availability-distribution",
@@ -15728,6 +15730,7 @@ dependencies = [
  "array-bytes 4.2.0",
  "arrayvec 0.7.4",
  "blake2 0.10.6",
+ "bytes",
  "futures",
  "futures-timer",
  "libp2p-identity",
@@ -15793,6 +15796,7 @@ dependencies = [
  "tempfile",
  "thiserror",
  "tokio",
+ "tokio-stream",
  "tokio-test",
  "tokio-util",
  "unsigned-varint",
@@ -15848,10 +15852,12 @@ name = "sc-network-gossip"
 version = "0.10.0-dev"
 dependencies = [
  "ahash 0.8.3",
+ "async-trait",
  "futures",
  "futures-timer",
  "libp2p",
  "log",
+ "parity-scale-codec",
  "quickcheck",
  "sc-network",
  "sc-network-common",
diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml
index ce76fc5cd6d..ee93df09ce1 100644
--- a/cumulus/client/relay-chain-minimal-node/Cargo.toml
+++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml
@@ -47,4 +47,5 @@ array-bytes = "6.1"
 tracing = "0.1.37"
 async-trait = "0.1.73"
 futures = "0.3.28"
+parking_lot = "0.12.1"
 
diff --git a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs
index a785a9f6f79..5f5bf338ef9 100644
--- a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs
+++ b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs
@@ -15,7 +15,8 @@
 // along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
 
 use futures::{select, StreamExt};
-use std::sync::Arc;
+use parking_lot::Mutex;
+use std::{collections::HashMap, sync::Arc};
 
 use polkadot_availability_recovery::AvailabilityRecoverySubsystem;
 use polkadot_collator_protocol::{CollatorProtocolSubsystem, ProtocolSide};
@@ -28,7 +29,7 @@ use polkadot_node_core_chain_api::ChainApiSubsystem;
 use polkadot_node_core_prospective_parachains::ProspectiveParachainsSubsystem;
 use polkadot_node_core_runtime_api::RuntimeApiSubsystem;
 use polkadot_node_network_protocol::{
-	peer_set::PeerSetProtocolNames,
+	peer_set::{PeerSet, PeerSetProtocolNames},
 	request_response::{
 		v1::{self, AvailableDataFetchingRequest},
 		v2, IncomingRequestReceiver, ReqProtocolNames,
@@ -42,7 +43,7 @@ use polkadot_overseer::{
 use polkadot_primitives::CollatorPair;
 
 use sc_authority_discovery::Service as AuthorityDiscoveryService;
-use sc_network::NetworkStateInfo;
+use sc_network::{NetworkStateInfo, NotificationService};
 use sc_service::TaskManager;
 use sc_utils::mpsc::tracing_unbounded;
 
@@ -77,6 +78,8 @@ pub(crate) struct CollatorOverseerGenArgs<'a> {
 	pub req_protocol_names: ReqProtocolNames,
 	/// Peerset protocols name mapping
 	pub peer_set_protocol_names: PeerSetProtocolNames,
+	/// Notification services for validation/collation protocols.
+	pub notification_services: HashMap<PeerSet, Box<dyn NotificationService>>,
 }
 
 fn build_overseer(
@@ -94,6 +97,7 @@ fn build_overseer(
 		collator_pair,
 		req_protocol_names,
 		peer_set_protocol_names,
+		notification_services,
 	}: CollatorOverseerGenArgs<'_>,
 ) -> Result<
 	(Overseer<SpawnGlue<sc_service::SpawnTaskHandle>, Arc<BlockChainRpcClient>>, OverseerHandle),
@@ -101,6 +105,8 @@ fn build_overseer(
 > {
 	let spawner = SpawnGlue(spawner);
 	let network_bridge_metrics: NetworkBridgeMetrics = Metrics::register(registry)?;
+	let notification_sinks = Arc::new(Mutex::new(HashMap::new()));
+
 	let builder = Overseer::builder()
 		.availability_distribution(DummySubsystem)
 		.availability_recovery(AvailabilityRecoverySubsystem::for_collator(
@@ -131,6 +137,8 @@ fn build_overseer(
 			sync_oracle,
 			network_bridge_metrics.clone(),
 			peer_set_protocol_names.clone(),
+			notification_services,
+			notification_sinks.clone(),
 		))
 		.network_bridge_tx(NetworkBridgeTxSubsystem::new(
 			network_service,
@@ -138,6 +146,7 @@ fn build_overseer(
 			network_bridge_metrics,
 			req_protocol_names,
 			peer_set_protocol_names,
+			notification_sinks,
 		))
 		.provisioner(DummySubsystem)
 		.runtime_api(RuntimeApiSubsystem::new(
diff --git a/cumulus/client/relay-chain-minimal-node/src/lib.rs b/cumulus/client/relay-chain-minimal-node/src/lib.rs
index 8801f93640c..d121d2d3356 100644
--- a/cumulus/client/relay-chain-minimal-node/src/lib.rs
+++ b/cumulus/client/relay-chain-minimal-node/src/lib.rs
@@ -21,7 +21,7 @@ use cumulus_relay_chain_rpc_interface::{RelayChainRpcClient, RelayChainRpcInterf
 use network::build_collator_network;
 use polkadot_network_bridge::{peer_sets_info, IsAuthority};
 use polkadot_node_network_protocol::{
-	peer_set::PeerSetProtocolNames,
+	peer_set::{PeerSet, PeerSetProtocolNames},
 	request_response::{
 		v1, v2, IncomingRequest, IncomingRequestReceiver, Protocol, ReqProtocolNames,
 	},
@@ -175,10 +175,13 @@ async fn new_minimal_relay_chain(
 	let peer_set_protocol_names =
 		PeerSetProtocolNames::new(genesis_hash, config.chain_spec.fork_id());
 	let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No };
-
-	for config in peer_sets_info(is_authority, &peer_set_protocol_names) {
-		net_config.add_notification_protocol(config);
-	}
+	let notification_services = peer_sets_info(is_authority, &peer_set_protocol_names)
+		.into_iter()
+		.map(|(config, (peerset, service))| {
+			net_config.add_notification_protocol(config);
+			(peerset, service)
+		})
+		.collect::<std::collections::HashMap<PeerSet, Box<dyn sc_network::NotificationService>>>();
 
 	let request_protocol_names = ReqProtocolNames::new(genesis_hash, config.chain_spec.fork_id());
 	let (collation_req_receiver_v1, collation_req_receiver_v2, available_data_req_receiver) =
@@ -218,6 +221,7 @@ async fn new_minimal_relay_chain(
 		collator_pair,
 		req_protocol_names: request_protocol_names,
 		peer_set_protocol_names,
+		notification_services,
 	};
 
 	let overseer_handle =
diff --git a/cumulus/client/relay-chain-minimal-node/src/network.rs b/cumulus/client/relay-chain-minimal-node/src/network.rs
index 813dca47a03..95785063c1a 100644
--- a/cumulus/client/relay-chain-minimal-node/src/network.rs
+++ b/cumulus/client/relay-chain-minimal-node/src/network.rs
@@ -26,10 +26,9 @@ use sc_network::{
 	NetworkService,
 };
 
-use sc_network::config::FullNetworkConfiguration;
+use sc_network::{config::FullNetworkConfiguration, NotificationService};
 use sc_network_common::{role::Roles, sync::message::BlockAnnouncesHandshake};
 use sc_service::{error::Error, Configuration, NetworkStarter, SpawnTaskHandle};
-use sc_utils::mpsc::tracing_unbounded;
 
 use std::{iter, sync::Arc};
 
@@ -45,7 +44,7 @@ pub(crate) fn build_collator_network(
 	Error,
 > {
 	let protocol_id = config.protocol_id();
-	let block_announce_config = get_block_announce_proto_config::<Block>(
+	let (block_announce_config, _notification_service) = get_block_announce_proto_config::<Block>(
 		protocol_id.clone(),
 		&None,
 		Roles::from(&config.role),
@@ -69,8 +68,6 @@ pub(crate) fn build_collator_network(
 	let peer_store_handle = peer_store.handle();
 	spawn_handle.spawn("peer-store", Some("networking"), peer_store.run());
 
-	// RX is not used for anything because syncing is not started for the minimal node
-	let (tx, _rx) = tracing_unbounded("mpsc_syncing_engine_protocol", 100_000);
 	let network_params = sc_network::config::Params::<Block> {
 		role: config.role.clone(),
 		executor: {
@@ -86,7 +83,6 @@ pub(crate) fn build_collator_network(
 		protocol_id,
 		metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()),
 		block_announce_config,
-		tx,
 	};
 
 	let network_worker = sc_network::NetworkWorker::new(network_params)?;
@@ -150,7 +146,7 @@ fn get_block_announce_proto_config<B: BlockT>(
 	best_number: NumberFor<B>,
 	best_hash: B::Hash,
 	genesis_hash: B::Hash,
-) -> NonDefaultSetConfig {
+) -> (NonDefaultSetConfig, Box<dyn NotificationService>) {
 	let block_announces_protocol = {
 		let genesis_hash = genesis_hash.as_ref();
 		if let Some(ref fork_id) = fork_id {
@@ -160,12 +156,11 @@ fn get_block_announce_proto_config<B: BlockT>(
 		}
 	};
 
-	NonDefaultSetConfig {
-		notifications_protocol: block_announces_protocol.into(),
-		fallback_names: iter::once(format!("/{}/block-announces/1", protocol_id.as_ref()).into())
-			.collect(),
-		max_notification_size: 1024 * 1024,
-		handshake: Some(NotificationHandshake::new(BlockAnnouncesHandshake::<B>::build(
+	NonDefaultSetConfig::new(
+		block_announces_protocol.into(),
+		iter::once(format!("/{}/block-announces/1", protocol_id.as_ref()).into()).collect(),
+		1024 * 1024,
+		Some(NotificationHandshake::new(BlockAnnouncesHandshake::<B>::build(
 			roles,
 			best_number,
 			best_hash,
@@ -173,11 +168,11 @@ fn get_block_announce_proto_config<B: BlockT>(
 		))),
 		// NOTE: `set_config` will be ignored by `protocol.rs` as the block announcement
 		// protocol is still hardcoded into the peerset.
-		set_config: SetConfig {
+		SetConfig {
 			in_peers: 0,
 			out_peers: 0,
 			reserved_nodes: Vec::new(),
 			non_reserved_mode: NonReservedPeerMode::Deny,
 		},
-	}
+	)
 }
diff --git a/polkadot/node/network/bridge/src/lib.rs b/polkadot/node/network/bridge/src/lib.rs
index 46d4a00faac..ddce99d5c2a 100644
--- a/polkadot/node/network/bridge/src/lib.rs
+++ b/polkadot/node/network/bridge/src/lib.rs
@@ -83,6 +83,7 @@ pub(crate) enum WireMessage<M> {
 	ViewUpdate(View),
 }
 
+#[derive(Debug)]
 pub(crate) struct PeerData {
 	/// The Latest view sent by the peer.
 	view: View,
diff --git a/polkadot/node/network/bridge/src/network.rs b/polkadot/node/network/bridge/src/network.rs
index c264c94cc19..a9339a5c443 100644
--- a/polkadot/node/network/bridge/src/network.rs
+++ b/polkadot/node/network/bridge/src/network.rs
@@ -14,23 +14,24 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
 
-use std::{collections::HashSet, sync::Arc};
+use std::{
+	collections::{HashMap, HashSet},
+	sync::Arc,
+};
 
 use async_trait::async_trait;
-use futures::{prelude::*, stream::BoxStream};
+use parking_lot::Mutex;
 
 use parity_scale_codec::Encode;
 
 use sc_network::{
-	config::parse_addr, multiaddr::Multiaddr, types::ProtocolName, Event as NetworkEvent,
-	IfDisconnected, NetworkEventStream, NetworkNotification, NetworkPeers, NetworkRequest,
-	NetworkService, OutboundFailure, ReputationChange, RequestFailure,
+	config::parse_addr, multiaddr::Multiaddr, types::ProtocolName, IfDisconnected, MessageSink,
+	NetworkPeers, NetworkRequest, NetworkService, OutboundFailure, ReputationChange,
+	RequestFailure,
 };
 
 use polkadot_node_network_protocol::{
-	peer_set::{
-		CollationVersion, PeerSet, PeerSetProtocolNames, ProtocolVersion, ValidationVersion,
-	},
+	peer_set::{CollationVersion, PeerSet, ProtocolVersion, ValidationVersion},
 	request_response::{OutgoingRequest, Recipient, ReqProtocolNames, Requests},
 	v1 as protocol_v1, v2 as protocol_v2, vstaging as protocol_vstaging, PeerId,
 };
@@ -44,104 +45,94 @@ const LOG_TARGET: &'static str = "parachain::network-bridge-net";
 // Helper function to send a validation v1 message to a list of peers.
 // Messages are always sent via the main protocol, even legacy protocol messages.
 pub(crate) fn send_validation_message_v1(
-	net: &mut impl Network,
 	peers: Vec<PeerId>,
-	peerset_protocol_names: &PeerSetProtocolNames,
 	message: WireMessage<protocol_v1::ValidationProtocol>,
 	metrics: &Metrics,
+	notification_sinks: &Arc<Mutex<HashMap<(PeerSet, PeerId), Box<dyn MessageSink>>>>,
 ) {
 	gum::trace!(target: LOG_TARGET, ?peers, ?message, "Sending validation v1 message to peers",);
 
 	send_message(
-		net,
 		peers,
 		PeerSet::Validation,
 		ValidationVersion::V1.into(),
-		peerset_protocol_names,
 		message,
 		metrics,
+		notification_sinks,
 	);
 }
 
 // Helper function to send a validation vstaging message to a list of peers.
 // Messages are always sent via the main protocol, even legacy protocol messages.
 pub(crate) fn send_validation_message_vstaging(
-	net: &mut impl Network,
 	peers: Vec<PeerId>,
-	peerset_protocol_names: &PeerSetProtocolNames,
 	message: WireMessage<protocol_vstaging::ValidationProtocol>,
 	metrics: &Metrics,
+	notification_sinks: &Arc<Mutex<HashMap<(PeerSet, PeerId), Box<dyn MessageSink>>>>,
 ) {
 	gum::trace!(target: LOG_TARGET, ?peers, ?message, "Sending validation vstaging message to peers",);
 
 	send_message(
-		net,
 		peers,
 		PeerSet::Validation,
 		ValidationVersion::VStaging.into(),
-		peerset_protocol_names,
 		message,
 		metrics,
+		notification_sinks,
 	);
 }
 
 // Helper function to send a validation v2 message to a list of peers.
 // Messages are always sent via the main protocol, even legacy protocol messages.
 pub(crate) fn send_validation_message_v2(
-	net: &mut impl Network,
 	peers: Vec<PeerId>,
-	protocol_names: &PeerSetProtocolNames,
 	message: WireMessage<protocol_v2::ValidationProtocol>,
 	metrics: &Metrics,
+	notification_sinks: &Arc<Mutex<HashMap<(PeerSet, PeerId), Box<dyn MessageSink>>>>,
 ) {
 	send_message(
-		net,
 		peers,
 		PeerSet::Validation,
 		ValidationVersion::V2.into(),
-		protocol_names,
 		message,
 		metrics,
+		notification_sinks,
 	);
 }
 
 // Helper function to send a collation v1 message to a list of peers.
 // Messages are always sent via the main protocol, even legacy protocol messages.
 pub(crate) fn send_collation_message_v1(
-	net: &mut impl Network,
 	peers: Vec<PeerId>,
-	peerset_protocol_names: &PeerSetProtocolNames,
 	message: WireMessage<protocol_v1::CollationProtocol>,
 	metrics: &Metrics,
+	notification_sinks: &Arc<Mutex<HashMap<(PeerSet, PeerId), Box<dyn MessageSink>>>>,
 ) {
 	send_message(
-		net,
 		peers,
 		PeerSet::Collation,
 		CollationVersion::V1.into(),
-		peerset_protocol_names,
 		message,
 		metrics,
+		notification_sinks,
 	);
 }
 
 // Helper function to send a collation v2 message to a list of peers.
 // Messages are always sent via the main protocol, even legacy protocol messages.
 pub(crate) fn send_collation_message_v2(
-	net: &mut impl Network,
 	peers: Vec<PeerId>,
-	peerset_protocol_names: &PeerSetProtocolNames,
 	message: WireMessage<protocol_v2::CollationProtocol>,
 	metrics: &Metrics,
+	notification_sinks: &Arc<Mutex<HashMap<(PeerSet, PeerId), Box<dyn MessageSink>>>>,
 ) {
 	send_message(
-		net,
 		peers,
 		PeerSet::Collation,
 		CollationVersion::V2.into(),
-		peerset_protocol_names,
 		message,
 		metrics,
+		notification_sinks,
 	);
 }
 
@@ -151,19 +142,19 @@ pub(crate) fn send_collation_message_v2(
 /// messages that are compatible with the passed peer set, as that is currently not enforced by
 /// this function. These are messages of type `WireMessage` parameterized on the matching type.
 fn send_message<M>(
-	net: &mut impl Network,
 	mut peers: Vec<PeerId>,
 	peer_set: PeerSet,
 	version: ProtocolVersion,
-	protocol_names: &PeerSetProtocolNames,
 	message: M,
 	metrics: &super::Metrics,
+	network_notification_sinks: &Arc<Mutex<HashMap<(PeerSet, PeerId), Box<dyn MessageSink>>>>,
 ) where
 	M: Encode + Clone,
 {
 	if peers.is_empty() {
 		return
 	}
+
 	let message = {
 		let encoded = message.encode();
 		metrics.on_notification_sent(peer_set, version, encoded.len(), peers.len());
@@ -171,13 +162,13 @@ fn send_message<M>(
 		encoded
 	};
 
-	// optimization: generate the protocol name once.
-	let protocol_name = protocol_names.get_name(peer_set, version);
+	let notification_sinks = network_notification_sinks.lock();
+
 	gum::trace!(
 		target: LOG_TARGET,
 		?peers,
+		?peer_set,
 		?version,
-		?protocol_name,
 		?message,
 		"Sending message to peers",
 	);
@@ -185,29 +176,26 @@ fn send_message<M>(
 	// optimization: avoid cloning the message for the last peer in the
 	// list. The message payload can be quite large. If the underlying
 	// network used `Bytes` this would not be necessary.
+	//
+	// The peer may have gotten disconnected by the time `send_message()` is
+	// called, at which point the sink is not available.
 	let last_peer = peers.pop();
-
-	// We always send messages on the "main" name even when a negotiated
-	// fallback is used. The libp2p implementation handles the fallback
-	// under the hood.
-	let protocol_name = protocol_names.get_main_name(peer_set);
 	peers.into_iter().for_each(|peer| {
-		net.write_notification(peer, protocol_name.clone(), message.clone());
+		if let Some(sink) = notification_sinks.get(&(peer_set, peer)) {
+			sink.send_sync_notification(message.clone());
+		}
 	});
+
 	if let Some(peer) = last_peer {
-		net.write_notification(peer, protocol_name, message);
+		if let Some(sink) = notification_sinks.get(&(peer_set, peer)) {
+			sink.send_sync_notification(message.clone());
+		}
 	}
 }
 
 /// An abstraction over networking for the purposes of this subsystem.
 #[async_trait]
 pub trait Network: Clone + Send + 'static {
-	/// Get a stream of all events occurring on the network. This may include events unrelated
-	/// to the Polkadot protocol - the user of this function should filter only for events related
-	/// to the [`VALIDATION_PROTOCOL_NAME`](VALIDATION_PROTOCOL_NAME)
-	/// or [`COLLATION_PROTOCOL_NAME`](COLLATION_PROTOCOL_NAME)
-	fn event_stream(&mut self) -> BoxStream<'static, NetworkEvent>;
-
 	/// Ask the network to keep a substream open with these nodes and not disconnect from them
 	/// until removed from the protocol's peer set.
 	/// Note that `out_peers` setting has no effect on this.
@@ -239,16 +227,12 @@ pub trait Network: Clone + Send + 'static {
 	/// Disconnect a given peer from the protocol specified without harming reputation.
 	fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName);
 
-	/// Write a notification to a peer on the given protocol.
-	fn write_notification(&self, who: PeerId, protocol: ProtocolName, message: Vec<u8>);
+	/// Get peer role.
+	fn peer_role(&self, who: PeerId, handshake: Vec<u8>) -> Option<sc_network::ObservedRole>;
 }
 
 #[async_trait]
 impl Network for Arc<NetworkService<Block, Hash>> {
-	fn event_stream(&mut self) -> BoxStream<'static, NetworkEvent> {
-		NetworkService::event_stream(self, "polkadot-network-bridge").boxed()
-	}
-
 	async fn set_reserved_peers(
 		&mut self,
 		protocol: ProtocolName,
@@ -273,10 +257,6 @@ impl Network for Arc<NetworkService<Block, Hash>> {
 		NetworkService::disconnect_peer(&**self, who, protocol);
 	}
 
-	fn write_notification(&self, who: PeerId, protocol: ProtocolName, message: Vec<u8>) {
-		NetworkService::write_notification(&**self, who, protocol, message);
-	}
-
 	async fn start_request<AD: AuthorityDiscovery>(
 		&self,
 		authority_discovery: &mut AD,
@@ -348,6 +328,10 @@ impl Network for Arc<NetworkService<Block, Hash>> {
 			if_disconnected,
 		);
 	}
+
+	fn peer_role(&self, who: PeerId, handshake: Vec<u8>) -> Option<sc_network::ObservedRole> {
+		NetworkService::peer_role(self, who, handshake)
+	}
 }
 
 /// We assume one `peer_id` per `authority_id`.
diff --git a/polkadot/node/network/bridge/src/rx/mod.rs b/polkadot/node/network/bridge/src/rx/mod.rs
index 06be57ead00..40cd167a968 100644
--- a/polkadot/node/network/bridge/src/rx/mod.rs
+++ b/polkadot/node/network/bridge/src/rx/mod.rs
@@ -20,11 +20,14 @@ use super::*;
 
 use always_assert::never;
 use bytes::Bytes;
-use futures::stream::{BoxStream, StreamExt};
 use net_protocol::filter_by_peer_version;
 use parity_scale_codec::{Decode, DecodeAll};
+use parking_lot::Mutex;
 
-use sc_network::Event as NetworkEvent;
+use sc_network::{
+	service::traits::{NotificationEvent, ValidationResult},
+	MessageSink, NotificationService,
+};
 use sp_consensus::SyncOracle;
 
 use polkadot_node_network_protocol::{
@@ -88,6 +91,9 @@ pub struct NetworkBridgeRx<N, AD> {
 	shared: Shared,
 	metrics: Metrics,
 	peerset_protocol_names: PeerSetProtocolNames,
+	validation_service: Box<dyn NotificationService>,
+	collation_service: Box<dyn NotificationService>,
+	notification_sinks: Arc<Mutex<HashMap<(PeerSet, PeerId), Box<dyn MessageSink>>>>,
 }
 
 impl<N, AD> NetworkBridgeRx<N, AD> {
@@ -102,8 +108,18 @@ impl<N, AD> NetworkBridgeRx<N, AD> {
 		sync_oracle: Box<dyn SyncOracle + Send>,
 		metrics: Metrics,
 		peerset_protocol_names: PeerSetProtocolNames,
+		mut notification_services: HashMap<PeerSet, Box<dyn NotificationService>>,
+		notification_sinks: Arc<Mutex<HashMap<(PeerSet, PeerId), Box<dyn MessageSink>>>>,
 	) -> Self {
 		let shared = Shared::default();
+
+		let validation_service = notification_services
+			.remove(&PeerSet::Validation)
+			.expect("validation protocol was enabled so `NotificationService` must exist; qed");
+		let collation_service = notification_services
+			.remove(&PeerSet::Collation)
+			.expect("collation protocol was enabled so `NotificationService` must exist; qed");
+
 		Self {
 			network_service,
 			authority_discovery_service,
@@ -111,6 +127,9 @@ impl<N, AD> NetworkBridgeRx<N, AD> {
 			shared,
 			metrics,
 			peerset_protocol_names,
+			validation_service,
+			collation_service,
+			notification_sinks,
 		}
 	}
 }
@@ -121,444 +140,563 @@ where
 	Net: Network + Sync,
 	AD: validator_discovery::AuthorityDiscovery + Clone + Sync,
 {
-	fn start(mut self, ctx: Context) -> SpawnedSubsystem {
-		// The stream of networking events has to be created at initialization, otherwise the
-		// networking might open connections before the stream of events has been grabbed.
-		let network_stream = self.network_service.event_stream();
-
+	fn start(self, ctx: Context) -> SpawnedSubsystem {
 		// Swallow error because failure is fatal to the node and we log with more precision
 		// within `run_network`.
-		let future = run_network_in(self, ctx, network_stream)
+		let future = run_network_in(self, ctx)
 			.map_err(|e| SubsystemError::with_origin("network-bridge", e))
 			.boxed();
 		SpawnedSubsystem { name: "network-bridge-rx-subsystem", future }
 	}
 }
 
-async fn handle_network_messages<AD>(
-	mut sender: impl overseer::NetworkBridgeRxSenderTrait,
-	mut network_service: impl Network,
-	network_stream: BoxStream<'static, NetworkEvent>,
-	mut authority_discovery_service: AD,
-	metrics: Metrics,
-	shared: Shared,
-	peerset_protocol_names: PeerSetProtocolNames,
-) -> Result<(), Error>
-where
+/// Handle notification event received over the validation protocol.
+async fn handle_validation_message<AD>(
+	event: NotificationEvent,
+	network_service: &mut impl Network,
+	sender: &mut impl overseer::NetworkBridgeRxSenderTrait,
+	authority_discovery_service: &mut AD,
+	metrics: &Metrics,
+	shared: &Shared,
+	peerset_protocol_names: &PeerSetProtocolNames,
+	notification_service: &mut Box<dyn NotificationService>,
+	notification_sinks: &mut Arc<Mutex<HashMap<(PeerSet, PeerId), Box<dyn MessageSink>>>>,
+) where
 	AD: validator_discovery::AuthorityDiscovery + Send,
 {
-	let mut network_stream = network_stream.fuse();
-	loop {
-		match network_stream.next().await {
-			None => return Err(Error::EventStreamConcluded),
-			Some(NetworkEvent::Dht(_)) => {},
-			Some(NetworkEvent::NotificationStreamOpened {
-				remote: peer,
-				protocol,
-				role,
-				negotiated_fallback,
-				received_handshake: _,
-			}) => {
-				let role = ObservedRole::from(role);
-				let (peer_set, version) = {
-					let (peer_set, version) =
-						match peerset_protocol_names.try_get_protocol(&protocol) {
-							None => continue,
-							Some(p) => p,
-						};
-
-					if let Some(fallback) = negotiated_fallback {
-						match peerset_protocol_names.try_get_protocol(&fallback) {
-							None => {
+	match event {
+		NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx, .. } => {
+			// only accept peers whose role can be determined
+			let result = network_service
+				.peer_role(peer, handshake)
+				.map_or(ValidationResult::Reject, |_| ValidationResult::Accept);
+			let _ = result_tx.send(result);
+		},
+		NotificationEvent::NotificationStreamOpened {
+			peer,
+			handshake,
+			negotiated_fallback,
+			..
+		} => {
+			let role = match network_service.peer_role(peer, handshake) {
+				Some(role) => ObservedRole::from(role),
+				None => {
+					gum::debug!(
+						target: LOG_TARGET,
+						?peer,
+						"Failed to determine peer role for validation protocol",
+					);
+					return
+				},
+			};
+
+			let (peer_set, version) = {
+				let (peer_set, version) =
+					(PeerSet::Validation, PeerSet::Validation.get_main_version());
+
+				if let Some(fallback) = negotiated_fallback {
+					match peerset_protocol_names.try_get_protocol(&fallback) {
+						None => {
+							gum::debug!(
+								target: LOG_TARGET,
+								fallback = &*fallback,
+								?peer,
+								peerset = ?peer_set,
+								"Unknown fallback",
+							);
+
+							return
+						},
+						Some((p2, v2)) => {
+							if p2 != peer_set {
 								gum::debug!(
 									target: LOG_TARGET,
 									fallback = &*fallback,
-									?peer,
-									?peer_set,
-									"Unknown fallback",
+									fallback_peerset = ?p2,
+									peerset = ?peer_set,
+									"Fallback mismatched peer-set",
 								);
 
-								continue
-							},
-							Some((p2, v2)) => {
-								if p2 != peer_set {
-									gum::debug!(
-										target: LOG_TARGET,
-										fallback = &*fallback,
-										fallback_peerset = ?p2,
-										protocol = &*protocol,
-										peerset = ?peer_set,
-										"Fallback mismatched peer-set",
-									);
-
-									continue
-								}
-
-								(p2, v2)
-							},
-						}
-					} else {
-						(peer_set, version)
-					}
-				};
-
-				gum::debug!(
-					target: LOG_TARGET,
-					action = "PeerConnected",
-					peer_set = ?peer_set,
-					version = %version,
-					peer = ?peer,
-					role = ?role
-				);
-
-				let local_view = {
-					let mut shared = shared.0.lock();
-					let peer_map = match peer_set {
-						PeerSet::Validation => &mut shared.validation_peers,
-						PeerSet::Collation => &mut shared.collation_peers,
-					};
+								return
+							}
 
-					match peer_map.entry(peer) {
-						hash_map::Entry::Occupied(_) => continue,
-						hash_map::Entry::Vacant(vacant) => {
-							vacant.insert(PeerData { view: View::default(), version });
+							(p2, v2)
 						},
 					}
+				} else {
+					(peer_set, version)
+				}
+			};
+			// Store the notification sink in `notification_sinks` so that both
+			// `NetworkBridgeRx` and `NetworkBridgeTx` can send messages to the peer.
+			match notification_service.message_sink(&peer) {
+				Some(sink) => {
+					notification_sinks.lock().insert((peer_set, peer), sink);
+				},
+				None => {
+					gum::warn!(
+						target: LOG_TARGET,
+						peerset = ?peer_set,
+						version = %version,
+						?peer,
+						?role,
+						"Message sink not available for peer",
+					);
+					return
+				},
+			}
 
-					metrics.on_peer_connected(peer_set, version);
-					metrics.note_peer_count(peer_set, version, peer_map.len());
-
-					shared.local_view.clone().unwrap_or(View::default())
-				};
-
-				let maybe_authority =
-					authority_discovery_service.get_authority_ids_by_peer_id(peer).await;
-
-				match peer_set {
-					PeerSet::Validation => {
-						dispatch_validation_events_to_all(
-							vec![
-								NetworkBridgeEvent::PeerConnected(
-									peer,
-									role,
-									version,
-									maybe_authority,
-								),
-								NetworkBridgeEvent::PeerViewChange(peer, View::default()),
-							],
-							&mut sender,
-							&metrics,
-						)
-						.await;
-
-						match ValidationVersion::try_from(version)
-							.expect("try_get_protocol has already checked version is known; qed")
-						{
-							ValidationVersion::V1 => send_validation_message_v1(
-								&mut network_service,
-								vec![peer],
-								&peerset_protocol_names,
-								WireMessage::<protocol_v1::ValidationProtocol>::ViewUpdate(
-									local_view,
-								),
-								&metrics,
-							),
-							ValidationVersion::VStaging => send_validation_message_vstaging(
-								&mut network_service,
-								vec![peer],
-								&peerset_protocol_names,
-								WireMessage::<protocol_vstaging::ValidationProtocol>::ViewUpdate(
-									local_view,
-								),
-								&metrics,
-							),
-							ValidationVersion::V2 => send_validation_message_v2(
-								&mut network_service,
-								vec![peer],
-								&peerset_protocol_names,
-								WireMessage::<protocol_v2::ValidationProtocol>::ViewUpdate(
-									local_view,
-								),
-								&metrics,
-							),
-						}
-					},
-					PeerSet::Collation => {
-						dispatch_collation_events_to_all(
-							vec![
-								NetworkBridgeEvent::PeerConnected(
-									peer,
-									role,
-									version,
-									maybe_authority,
-								),
-								NetworkBridgeEvent::PeerViewChange(peer, View::default()),
-							],
-							&mut sender,
-						)
-						.await;
-
-						match CollationVersion::try_from(version)
-							.expect("try_get_protocol has already checked version is known; qed")
-						{
-							CollationVersion::V1 => send_collation_message_v1(
-								&mut network_service,
-								vec![peer],
-								&peerset_protocol_names,
-								WireMessage::<protocol_v1::CollationProtocol>::ViewUpdate(
-									local_view,
-								),
-								&metrics,
-							),
-							CollationVersion::V2 => send_collation_message_v2(
-								&mut network_service,
-								vec![peer],
-								&peerset_protocol_names,
-								WireMessage::<protocol_v2::CollationProtocol>::ViewUpdate(
-									local_view,
-								),
-								&metrics,
-							),
-						}
+			gum::debug!(
+				target: LOG_TARGET,
+				action = "PeerConnected",
+				peer_set = ?peer_set,
+				version = %version,
+				peer = ?peer,
+				role = ?role
+			);
+
+			let local_view = {
+				let mut shared = shared.0.lock();
+				let peer_map = &mut shared.validation_peers;
+
+				match peer_map.entry(peer) {
+					hash_map::Entry::Occupied(_) => return,
+					hash_map::Entry::Vacant(vacant) => {
+						vacant.insert(PeerData { view: View::default(), version });
 					},
 				}
-			},
-			Some(NetworkEvent::NotificationStreamClosed { remote: peer, protocol }) => {
-				let (peer_set, version) = match peerset_protocol_names.try_get_protocol(&protocol) {
-					None => continue,
-					Some(peer_set) => peer_set,
-				};
 
-				gum::debug!(
-					target: LOG_TARGET,
-					action = "PeerDisconnected",
-					peer_set = ?peer_set,
-					peer = ?peer
-				);
+				metrics.on_peer_connected(peer_set, version);
+				metrics.note_peer_count(peer_set, version, peer_map.len());
 
-				let was_connected = {
-					let mut shared = shared.0.lock();
-					let peer_map = match peer_set {
-						PeerSet::Validation => &mut shared.validation_peers,
-						PeerSet::Collation => &mut shared.collation_peers,
-					};
+				shared.local_view.clone().unwrap_or(View::default())
+			};
 
-					let w = peer_map.remove(&peer).is_some();
+			let maybe_authority =
+				authority_discovery_service.get_authority_ids_by_peer_id(peer).await;
 
-					metrics.on_peer_disconnected(peer_set, version);
-					metrics.note_peer_count(peer_set, version, peer_map.len());
+			dispatch_validation_events_to_all(
+				vec![
+					NetworkBridgeEvent::PeerConnected(peer, role, version, maybe_authority),
+					NetworkBridgeEvent::PeerViewChange(peer, View::default()),
+				],
+				sender,
+				&metrics,
+			)
+			.await;
 
-					w
-				};
+			match ValidationVersion::try_from(version)
+				.expect("try_get_protocol has already checked version is known; qed")
+			{
+				ValidationVersion::V1 => send_validation_message_v1(
+					vec![peer],
+					WireMessage::<protocol_v1::ValidationProtocol>::ViewUpdate(local_view),
+					metrics,
+					notification_sinks,
+				),
+				ValidationVersion::VStaging => send_validation_message_vstaging(
+					vec![peer],
+					WireMessage::<protocol_vstaging::ValidationProtocol>::ViewUpdate(local_view),
+					metrics,
+					notification_sinks,
+				),
+				ValidationVersion::V2 => send_validation_message_v2(
+					vec![peer],
+					WireMessage::<protocol_v2::ValidationProtocol>::ViewUpdate(local_view),
+					metrics,
+					notification_sinks,
+				),
+			}
+		},
+		NotificationEvent::NotificationStreamClosed { peer } => {
+			let (peer_set, version) = (PeerSet::Validation, PeerSet::Validation.get_main_version());
 
-				if was_connected && version == peer_set.get_main_version() {
-					match peer_set {
-						PeerSet::Validation =>
-							dispatch_validation_event_to_all(
-								NetworkBridgeEvent::PeerDisconnected(peer),
-								&mut sender,
-								&metrics,
-							)
-							.await,
-						PeerSet::Collation =>
-							dispatch_collation_event_to_all(
-								NetworkBridgeEvent::PeerDisconnected(peer),
-								&mut sender,
-							)
-							.await,
-					}
+			gum::debug!(
+				target: LOG_TARGET,
+				action = "PeerDisconnected",
+				?peer_set,
+				?peer
+			);
+
+			let was_connected = {
+				let mut shared = shared.0.lock();
+				let peer_map = &mut shared.validation_peers;
+
+				let w = peer_map.remove(&peer).is_some();
+
+				metrics.on_peer_disconnected(peer_set, version);
+				metrics.note_peer_count(peer_set, version, peer_map.len());
+
+				w
+			};
+
+			notification_sinks.lock().remove(&(peer_set, peer));
+
+			if was_connected && version == peer_set.get_main_version() {
+				dispatch_validation_event_to_all(
+					NetworkBridgeEvent::PeerDisconnected(peer),
+					sender,
+					&metrics,
+				)
+				.await;
+			}
+		},
+		NotificationEvent::NotificationReceived { peer, notification } => {
+			let expected_versions = {
+				let mut versions = PerPeerSet::<Option<ProtocolVersion>>::default();
+				let shared = shared.0.lock();
+
+				if let Some(peer_data) = shared.validation_peers.get(&peer) {
+					versions[PeerSet::Validation] = Some(peer_data.version);
 				}
-			},
-			Some(NetworkEvent::NotificationsReceived { remote, messages }) => {
-				let expected_versions = {
-					let mut versions = PerPeerSet::<Option<ProtocolVersion>>::default();
-					let shared = shared.0.lock();
-					if let Some(peer_data) = shared.validation_peers.get(&remote) {
-						versions[PeerSet::Validation] = Some(peer_data.version);
-					}
 
-					if let Some(peer_data) = shared.collation_peers.get(&remote) {
-						versions[PeerSet::Collation] = Some(peer_data.version);
-					}
+				versions
+			};
 
-					versions
+			gum::trace!(
+				target: LOG_TARGET,
+				action = "PeerMessage",
+				peerset = ?PeerSet::Validation,
+				?peer,
+			);
+
+			let (events, reports) =
+				if expected_versions[PeerSet::Validation] == Some(ValidationVersion::V1.into()) {
+					handle_peer_messages::<protocol_v1::ValidationProtocol, _>(
+						peer,
+						PeerSet::Validation,
+						&mut shared.0.lock().validation_peers,
+						vec![notification.into()],
+						metrics,
+					)
+				} else if expected_versions[PeerSet::Validation] ==
+					Some(ValidationVersion::V2.into())
+				{
+					handle_peer_messages::<protocol_v2::ValidationProtocol, _>(
+						peer,
+						PeerSet::Validation,
+						&mut shared.0.lock().validation_peers,
+						vec![notification.into()],
+						metrics,
+					)
+				} else if expected_versions[PeerSet::Validation] ==
+					Some(ValidationVersion::VStaging.into())
+				{
+					handle_peer_messages::<protocol_vstaging::ValidationProtocol, _>(
+						peer,
+						PeerSet::Validation,
+						&mut shared.0.lock().validation_peers,
+						vec![notification.into()],
+						metrics,
+					)
+				} else {
+					gum::warn!(
+						target: LOG_TARGET,
+						version = ?expected_versions[PeerSet::Validation],
+						"Major logic bug. Peer somehow has unsupported validation protocol version."
+					);
+
+					never!("Only versions 1 and 2 are supported; peer set connection checked above; qed");
+
+					// If a peer somehow triggers this, we'll disconnect them
+					// eventually.
+					(Vec::new(), vec![UNCONNECTED_PEERSET_COST])
 				};
 
-				// non-decoded, but version-checked validation messages.
-				let v_messages: Result<Vec<_>, _> = messages
-					.iter()
-					.filter_map(|(protocol, msg_bytes)| {
-						// version doesn't matter because we always receive on the 'correct'
-						// protocol name, not the negotiated fallback.
-						let (peer_set, version) =
-							peerset_protocol_names.try_get_protocol(protocol)?;
-						gum::trace!(
-							target: LOG_TARGET,
-							?peer_set,
-							?protocol,
-							?version,
-							"Received notification"
-						);
+			for report in reports {
+				network_service.report_peer(peer, report.into());
+			}
 
-						if peer_set == PeerSet::Validation {
-							if expected_versions[PeerSet::Validation].is_none() {
-								return Some(Err(UNCONNECTED_PEERSET_COST))
-							}
+			dispatch_validation_events_to_all(events, sender, &metrics).await;
+		},
+	}
+}
+
+/// Handle notification event received over the collation protocol.
+async fn handle_collation_message<AD>(
+	event: NotificationEvent,
+	network_service: &mut impl Network,
+	sender: &mut impl overseer::NetworkBridgeRxSenderTrait,
+	authority_discovery_service: &mut AD,
+	metrics: &Metrics,
+	shared: &Shared,
+	peerset_protocol_names: &PeerSetProtocolNames,
+	notification_service: &mut Box<dyn NotificationService>,
+	notification_sinks: &mut Arc<Mutex<HashMap<(PeerSet, PeerId), Box<dyn MessageSink>>>>,
+) where
+	AD: validator_discovery::AuthorityDiscovery + Send,
+{
+	match event {
+		NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx, .. } => {
+			// only accept peers whose role can be determined
+			let result = network_service
+				.peer_role(peer, handshake)
+				.map_or(ValidationResult::Reject, |_| ValidationResult::Accept);
+			let _ = result_tx.send(result);
+		},
+		NotificationEvent::NotificationStreamOpened {
+			peer,
+			handshake,
+			negotiated_fallback,
+			..
+		} => {
+			let role = match network_service.peer_role(peer, handshake) {
+				Some(role) => ObservedRole::from(role),
+				None => {
+					gum::debug!(
+						target: LOG_TARGET,
+						?peer,
+						"Failed to determine peer role for collation protocol",
+					);
+					return
+				},
+			};
+
+			let (peer_set, version) = {
+				let (peer_set, version) =
+					(PeerSet::Collation, PeerSet::Collation.get_main_version());
+
+				if let Some(fallback) = negotiated_fallback {
+					match peerset_protocol_names.try_get_protocol(&fallback) {
+						None => {
+							gum::debug!(
+								target: LOG_TARGET,
+								fallback = &*fallback,
+								?peer,
+								?peer_set,
+								"Unknown fallback",
+							);
+
+							return
+						},
+						Some((p2, v2)) => {
+							if p2 != peer_set {
+								gum::debug!(
+									target: LOG_TARGET,
+									fallback = &*fallback,
+									fallback_peerset = ?p2,
+									peerset = ?peer_set,
+									"Fallback mismatched peer-set",
+								);
 
-							Some(Ok(msg_bytes.clone()))
-						} else {
-							None
-						}
-					})
-					.collect();
+								return
+							}
 
-				let v_messages = match v_messages {
-					Err(rep) => {
-						gum::debug!(target: LOG_TARGET, action = "ReportPeer");
-						network_service.report_peer(remote, rep.into());
+							(p2, v2)
+						},
+					}
+				} else {
+					(peer_set, version)
+				}
+			};
+
+			// Store the notification sink in `notification_sinks` so that both
+			// `NetworkBridgeRx` and `NetworkBridgeTx` can send messages to the peer.
+			match notification_service.message_sink(&peer) {
+				Some(sink) => {
+					notification_sinks.lock().insert((peer_set, peer), sink);
+				},
+				None => {
+					gum::warn!(
+						target: LOG_TARGET,
+						peer_set = ?peer_set,
+						version = %version,
+						peer = ?peer,
+						role = ?role,
+						"Message sink not available for peer",
+					);
+					return
+				},
+			}
 
-						continue
+			gum::debug!(
+				target: LOG_TARGET,
+				action = "PeerConnected",
+				peer_set = ?peer_set,
+				version = %version,
+				peer = ?peer,
+				role = ?role
+			);
+
+			let local_view = {
+				let mut shared = shared.0.lock();
+				let peer_map = &mut shared.collation_peers;
+
+				match peer_map.entry(peer) {
+					hash_map::Entry::Occupied(_) => return,
+					hash_map::Entry::Vacant(vacant) => {
+						vacant.insert(PeerData { view: View::default(), version });
 					},
-					Ok(v) => v,
-				};
+				}
 
-				// non-decoded, but version-checked collation messages.
-				let c_messages: Result<Vec<_>, _> = messages
-					.iter()
-					.filter_map(|(protocol, msg_bytes)| {
-						// version doesn't matter because we always receive on the 'correct'
-						// protocol name, not the negotiated fallback.
-						let (peer_set, _version) =
-							peerset_protocol_names.try_get_protocol(protocol)?;
-
-						if peer_set == PeerSet::Collation {
-							if expected_versions[PeerSet::Collation].is_none() {
-								return Some(Err(UNCONNECTED_PEERSET_COST))
-							}
+				metrics.on_peer_connected(peer_set, version);
+				metrics.note_peer_count(peer_set, version, peer_map.len());
 
-							Some(Ok(msg_bytes.clone()))
-						} else {
-							None
-						}
-					})
-					.collect();
+				shared.local_view.clone().unwrap_or(View::default())
+			};
 
-				let c_messages = match c_messages {
-					Err(rep) => {
-						gum::debug!(target: LOG_TARGET, action = "ReportPeer");
-						network_service.report_peer(remote, rep.into());
+			let maybe_authority =
+				authority_discovery_service.get_authority_ids_by_peer_id(peer).await;
 
-						continue
-					},
-					Ok(v) => v,
-				};
+			dispatch_collation_events_to_all(
+				vec![
+					NetworkBridgeEvent::PeerConnected(peer, role, version, maybe_authority),
+					NetworkBridgeEvent::PeerViewChange(peer, View::default()),
+				],
+				sender,
+			)
+			.await;
 
-				if v_messages.is_empty() && c_messages.is_empty() {
-					continue
-				}
+			match CollationVersion::try_from(version)
+				.expect("try_get_protocol has already checked version is known; qed")
+			{
+				CollationVersion::V1 => send_collation_message_v1(
+					vec![peer],
+					WireMessage::<protocol_v1::CollationProtocol>::ViewUpdate(local_view),
+					metrics,
+					notification_sinks,
+				),
+				CollationVersion::V2 => send_collation_message_v2(
+					vec![peer],
+					WireMessage::<protocol_v2::CollationProtocol>::ViewUpdate(local_view),
+					metrics,
+					notification_sinks,
+				),
+			}
+		},
+		NotificationEvent::NotificationStreamClosed { peer } => {
+			let (peer_set, version) = (PeerSet::Collation, PeerSet::Collation.get_main_version());
 
-				gum::trace!(
-					target: LOG_TARGET,
-					action = "PeerMessages",
-					peer = ?remote,
-					num_validation_messages = %v_messages.len(),
-					num_collation_messages = %c_messages.len()
-				);
+			gum::debug!(
+				target: LOG_TARGET,
+				action = "PeerDisconnected",
+				?peer_set,
+				?peer
+			);
 
-				if !v_messages.is_empty() {
-					let (events, reports) = if expected_versions[PeerSet::Validation] ==
-						Some(ValidationVersion::V1.into())
-					{
-						handle_peer_messages::<protocol_v1::ValidationProtocol, _>(
-							remote,
-							PeerSet::Validation,
-							&mut shared.0.lock().validation_peers,
-							v_messages,
-							&metrics,
-						)
-					} else if expected_versions[PeerSet::Validation] ==
-						Some(ValidationVersion::V2.into())
-					{
-						handle_peer_messages::<protocol_v2::ValidationProtocol, _>(
-							remote,
-							PeerSet::Validation,
-							&mut shared.0.lock().validation_peers,
-							v_messages,
-							&metrics,
-						)
-					} else if expected_versions[PeerSet::Validation] ==
-						Some(ValidationVersion::VStaging.into())
-					{
-						handle_peer_messages::<protocol_vstaging::ValidationProtocol, _>(
-							remote,
-							PeerSet::Validation,
-							&mut shared.0.lock().validation_peers,
-							v_messages,
-							&metrics,
-						)
-					} else {
-						gum::warn!(
-							target: LOG_TARGET,
-							version = ?expected_versions[PeerSet::Validation],
-							"Major logic bug. Peer somehow has unsupported validation protocol version."
-						);
+			let was_connected = {
+				let mut shared = shared.0.lock();
+				let peer_map = &mut shared.collation_peers;
 
-						never!("Only versions 1 and 2 are supported; peer set connection checked above; qed");
+				let w = peer_map.remove(&peer).is_some();
 
-						// If a peer somehow triggers this, we'll disconnect them
-						// eventually.
-						(Vec::new(), vec![UNCONNECTED_PEERSET_COST])
-					};
+				metrics.on_peer_disconnected(peer_set, version);
+				metrics.note_peer_count(peer_set, version, peer_map.len());
 
-					for report in reports {
-						network_service.report_peer(remote, report.into());
-					}
+				w
+			};
 
-					dispatch_validation_events_to_all(events, &mut sender, &metrics).await;
+			notification_sinks.lock().remove(&(peer_set, peer));
+
+			if was_connected && version == peer_set.get_main_version() {
+				dispatch_collation_event_to_all(NetworkBridgeEvent::PeerDisconnected(peer), sender)
+					.await;
+			}
+		},
+		NotificationEvent::NotificationReceived { peer, notification } => {
+			let expected_versions = {
+				let mut versions = PerPeerSet::<Option<ProtocolVersion>>::default();
+				let shared = shared.0.lock();
+
+				if let Some(peer_data) = shared.collation_peers.get(&peer) {
+					versions[PeerSet::Collation] = Some(peer_data.version);
 				}
 
-				if !c_messages.is_empty() {
-					let (events, reports) = if expected_versions[PeerSet::Collation] ==
-						Some(CollationVersion::V1.into())
-					{
-						handle_peer_messages::<protocol_v1::CollationProtocol, _>(
-							remote,
-							PeerSet::Collation,
-							&mut shared.0.lock().collation_peers,
-							c_messages,
-							&metrics,
-						)
-					} else if expected_versions[PeerSet::Collation] ==
-						Some(CollationVersion::V2.into())
-					{
-						handle_peer_messages::<protocol_v2::CollationProtocol, _>(
-							remote,
-							PeerSet::Collation,
-							&mut shared.0.lock().collation_peers,
-							c_messages,
-							&metrics,
-						)
-					} else {
-						gum::warn!(
-							target: LOG_TARGET,
-							version = ?expected_versions[PeerSet::Collation],
-							"Major logic bug. Peer somehow has unsupported collation protocol version."
-						);
+				versions
+			};
 
-						never!("Only versions 1 and 2 are supported; peer set connection checked above; qed");
+			gum::trace!(
+				target: LOG_TARGET,
+				action = "PeerMessage",
+				peerset = ?PeerSet::Collation,
+				?peer,
+			);
+
+			let (events, reports) =
+				if expected_versions[PeerSet::Collation] == Some(CollationVersion::V1.into()) {
+					handle_peer_messages::<protocol_v1::CollationProtocol, _>(
+						peer,
+						PeerSet::Collation,
+						&mut shared.0.lock().collation_peers,
+						vec![notification.into()],
+						metrics,
+					)
+				} else if expected_versions[PeerSet::Collation] == Some(CollationVersion::V2.into())
+				{
+					handle_peer_messages::<protocol_v2::CollationProtocol, _>(
+						peer,
+						PeerSet::Collation,
+						&mut shared.0.lock().collation_peers,
+						vec![notification.into()],
+						metrics,
+					)
+				} else {
+					gum::warn!(
+						target: LOG_TARGET,
+						version = ?expected_versions[PeerSet::Collation],
+						"Major logic bug. Peer somehow has unsupported collation protocol version."
+					);
 
-						// If a peer somehow triggers this, we'll disconnect them
-						// eventually.
-						(Vec::new(), vec![UNCONNECTED_PEERSET_COST])
-					};
+					never!("Only versions 1 and 2 are supported; peer set connection checked above; qed");
 
-					for report in reports {
-						network_service.report_peer(remote, report.into());
-					}
+					// If a peer somehow triggers this, we'll disconnect them
+					// eventually.
+					(Vec::new(), vec![UNCONNECTED_PEERSET_COST])
+				};
 
-					dispatch_collation_events_to_all(events, &mut sender).await;
-				}
+			for report in reports {
+				network_service.report_peer(peer, report.into());
+			}
+
+			dispatch_collation_events_to_all(events, sender).await;
+		},
+	}
+}
+
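+/// Poll the validation and collation notification services in parallel and dispatch their
+/// events to the corresponding handlers, until either event stream concludes.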
+async fn handle_network_messages<AD>(
+	mut sender: impl overseer::NetworkBridgeRxSenderTrait,
+	mut network_service: impl Network,
+	mut authority_discovery_service: AD,
+	metrics: Metrics,
+	shared: Shared,
+	peerset_protocol_names: PeerSetProtocolNames,
+	mut validation_service: Box<dyn NotificationService>,
+	mut collation_service: Box<dyn NotificationService>,
+	mut notification_sinks: Arc<Mutex<HashMap<(PeerSet, PeerId), Box<dyn MessageSink>>>>,
+) -> Result<(), Error>
+where
+	AD: validator_discovery::AuthorityDiscovery + Send,
+{
+	loop {
+		futures::select! {
+			event = validation_service.next_event().fuse() => match event {
+				Some(event) => handle_validation_message(
+					event,
+					&mut network_service,
+					&mut sender,
+					&mut authority_discovery_service,
+					&metrics,
+					&shared,
+					&peerset_protocol_names,
+					&mut validation_service,
+					&mut notification_sinks,
+				).await,
+				None => return Err(Error::EventStreamConcluded),
 			},
+			event = collation_service.next_event().fuse() => match event {
+				Some(event) => handle_collation_message(
+					event,
+					&mut network_service,
+					&mut sender,
+					&mut authority_discovery_service,
+					&metrics,
+					&shared,
+					&peerset_protocol_names,
+					&mut collation_service,
+					&mut notification_sinks,
+				).await,
+				None => return Err(Error::EventStreamConcluded),
+			}
 		}
 	}
 }
@@ -593,17 +731,15 @@ where
 }
 
 #[overseer::contextbounds(NetworkBridgeRx, prefix = self::overseer)]
-async fn run_incoming_orchestra_signals<Context, N, AD>(
+async fn run_incoming_orchestra_signals<Context, AD>(
 	mut ctx: Context,
-	mut network_service: N,
 	mut authority_discovery_service: AD,
 	shared: Shared,
 	sync_oracle: Box<dyn SyncOracle + Send>,
 	metrics: Metrics,
-	peerset_protocol_names: PeerSetProtocolNames,
+	notification_sinks: Arc<Mutex<HashMap<(PeerSet, PeerId), Box<dyn MessageSink>>>>,
 ) -> Result<(), Error>
 where
-	N: Network,
 	AD: validator_discovery::AuthorityDiscovery + Clone,
 {
 	// This is kept sorted, descending, by block number.
@@ -695,13 +831,12 @@ where
 						mode = Mode::Active;
 
 						update_our_view(
-							&mut network_service,
 							&mut ctx,
 							&live_heads,
 							&shared,
 							finalized_number,
 							&metrics,
-							&peerset_protocol_names,
+							&notification_sinks,
 						);
 					}
 				}
@@ -735,7 +870,6 @@ where
 async fn run_network_in<N, AD, Context>(
 	bridge: NetworkBridgeRx<N, AD>,
 	mut ctx: Context,
-	network_stream: BoxStream<'static, NetworkEvent>,
 ) -> Result<(), Error>
 where
 	N: Network,
@@ -748,16 +882,21 @@ where
 		sync_oracle,
 		shared,
 		peerset_protocol_names,
+		validation_service,
+		collation_service,
+		notification_sinks,
 	} = bridge;
 
 	let (task, network_event_handler) = handle_network_messages(
 		ctx.sender().clone(),
 		network_service.clone(),
-		network_stream,
 		authority_discovery_service.clone(),
 		metrics.clone(),
 		shared.clone(),
 		peerset_protocol_names.clone(),
+		validation_service,
+		collation_service,
+		notification_sinks.clone(),
 	)
 	.remote_handle();
 
@@ -766,12 +905,11 @@ where
 
 	let orchestra_signal_handler = run_incoming_orchestra_signals(
 		ctx,
-		network_service,
 		authority_discovery_service,
 		shared,
 		sync_oracle,
 		metrics,
-		peerset_protocol_names,
+		notification_sinks,
 	);
 
 	futures::pin_mut!(orchestra_signal_handler);
@@ -791,17 +929,14 @@ fn construct_view(
 }
 
 #[overseer::contextbounds(NetworkBridgeRx, prefix = self::overseer)]
-fn update_our_view<Net, Context>(
-	net: &mut Net,
+fn update_our_view<Context>(
 	ctx: &mut Context,
 	live_heads: &[ActivatedLeaf],
 	shared: &Shared,
 	finalized_number: BlockNumber,
 	metrics: &Metrics,
-	peerset_protocol_names: &PeerSetProtocolNames,
-) where
-	Net: Network,
-{
+	notification_sinks: &Arc<Mutex<HashMap<(PeerSet, PeerId), Box<dyn MessageSink>>>>,
+) {
 	let new_view = construct_view(live_heads.iter().map(|v| v.hash), finalized_number);
 
 	let (validation_peers, collation_peers) = {
@@ -849,43 +984,38 @@ fn update_our_view<Net, Context>(
 		filter_by_peer_version(&validation_peers, ValidationVersion::VStaging.into());
 
 	send_validation_message_v1(
-		net,
 		v1_validation_peers,
-		peerset_protocol_names,
 		WireMessage::ViewUpdate(new_view.clone()),
 		metrics,
+		notification_sinks,
 	);
 
 	send_collation_message_v1(
-		net,
 		v1_collation_peers,
-		peerset_protocol_names,
 		WireMessage::ViewUpdate(new_view.clone()),
 		metrics,
+		notification_sinks,
 	);
 
 	send_validation_message_v2(
-		net,
 		v2_validation_peers,
-		peerset_protocol_names,
 		WireMessage::ViewUpdate(new_view.clone()),
 		metrics,
+		notification_sinks,
 	);
 
 	send_collation_message_v2(
-		net,
 		v2_collation_peers,
-		peerset_protocol_names,
 		WireMessage::ViewUpdate(new_view.clone()),
 		metrics,
+		notification_sinks,
 	);
 
 	send_validation_message_vstaging(
-		net,
 		vstaging_validation_peers,
-		peerset_protocol_names,
 		WireMessage::ViewUpdate(new_view.clone()),
 		metrics,
+		notification_sinks,
 	);
 
 	let our_view = OurView::new(
diff --git a/polkadot/node/network/bridge/src/rx/tests.rs b/polkadot/node/network/bridge/src/rx/tests.rs
index f784e78a7f2..e0b86feb644 100644
--- a/polkadot/node/network/bridge/src/rx/tests.rs
+++ b/polkadot/node/network/bridge/src/rx/tests.rs
@@ -15,7 +15,7 @@
 // along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
 
 use super::*;
-use futures::{channel::oneshot, executor, stream::BoxStream};
+use futures::{channel::oneshot, executor};
 use overseer::jaeger;
 use polkadot_node_network_protocol::{self as net_protocol, OurView};
 use polkadot_node_subsystem::messages::NetworkBridgeEvent;
@@ -26,10 +26,13 @@ use parking_lot::Mutex;
 use std::{
 	collections::HashSet,
 	sync::atomic::{AtomicBool, Ordering},
-	task::Poll,
 };
 
-use sc_network::{Event as NetworkEvent, IfDisconnected, ProtocolName, ReputationChange};
+use sc_network::{
+	service::traits::{Direction, MessageSink, NotificationService},
+	IfDisconnected, Multiaddr, ObservedRole as SubstrateObservedRole, ProtocolName,
+	ReputationChange, Roles,
+};
 
 use polkadot_node_network_protocol::{
 	peer_set::PeerSetProtocolNames,
@@ -47,9 +50,8 @@ use polkadot_node_subsystem_test_helpers::{
 	mock::new_leaf, SingleItemSink, SingleItemStream, TestSubsystemContextHandle,
 };
 use polkadot_node_subsystem_util::metered;
-use polkadot_primitives::{AuthorityDiscoveryId, CandidateHash, Hash};
+use polkadot_primitives::{AuthorityDiscoveryId, Hash};
 
-use sc_network::Multiaddr;
 use sp_keyring::Sr25519Keyring;
 
 use crate::{network::Network, validator_discovery::AuthorityDiscovery};
@@ -64,10 +66,9 @@ pub enum NetworkAction {
 	WriteNotification(PeerId, PeerSet, Vec<u8>),
 }
 
-// The subsystem's view of the network - only supports a single call to `event_stream`.
+// The subsystem's view of the network.
 #[derive(Clone)]
 struct TestNetwork {
-	net_events: Arc<Mutex<Option<SingleItemStream<NetworkEvent>>>>,
 	action_tx: Arc<Mutex<metered::UnboundedMeteredSender<NetworkAction>>>,
 	protocol_names: Arc<PeerSetProtocolNames>,
 }
@@ -79,37 +80,42 @@ struct TestAuthorityDiscovery;
 // of `NetworkAction`s.
 struct TestNetworkHandle {
 	action_rx: metered::UnboundedMeteredReceiver<NetworkAction>,
-	net_tx: SingleItemSink<NetworkEvent>,
-	protocol_names: PeerSetProtocolNames,
+	validation_tx: SingleItemSink<NotificationEvent>,
+	collation_tx: SingleItemSink<NotificationEvent>,
 }
 
 fn new_test_network(
 	protocol_names: PeerSetProtocolNames,
-) -> (TestNetwork, TestNetworkHandle, TestAuthorityDiscovery) {
-	let (net_tx, net_rx) = polkadot_node_subsystem_test_helpers::single_item_sink();
+) -> (
+	TestNetwork,
+	TestNetworkHandle,
+	TestAuthorityDiscovery,
+	Box<dyn NotificationService>,
+	Box<dyn NotificationService>,
+) {
 	let (action_tx, action_rx) = metered::unbounded();
+	let (validation_tx, validation_rx) = polkadot_node_subsystem_test_helpers::single_item_sink();
+	let (collation_tx, collation_rx) = polkadot_node_subsystem_test_helpers::single_item_sink();
+	let action_tx = Arc::new(Mutex::new(action_tx));
 
 	(
 		TestNetwork {
-			net_events: Arc::new(Mutex::new(Some(net_rx))),
-			action_tx: Arc::new(Mutex::new(action_tx)),
+			action_tx: action_tx.clone(),
 			protocol_names: Arc::new(protocol_names.clone()),
 		},
-		TestNetworkHandle { action_rx, net_tx, protocol_names },
+		TestNetworkHandle { action_rx, validation_tx, collation_tx },
 		TestAuthorityDiscovery,
+		Box::new(TestNotificationService::new(
+			PeerSet::Validation,
+			action_tx.clone(),
+			validation_rx,
+		)),
+		Box::new(TestNotificationService::new(PeerSet::Collation, action_tx, collation_rx)),
 	)
 }
 
 #[async_trait]
 impl Network for TestNetwork {
-	fn event_stream(&mut self) -> BoxStream<'static, NetworkEvent> {
-		self.net_events
-			.lock()
-			.take()
-			.expect("Subsystem made more than one call to `event_stream`")
-			.boxed()
-	}
-
 	async fn set_reserved_peers(
 		&mut self,
 		_protocol: ProtocolName,
@@ -143,7 +149,8 @@ impl Network for TestNetwork {
 	}
 
 	fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName) {
-		let (peer_set, _) = self.protocol_names.try_get_protocol(&protocol).unwrap();
+		let (peer_set, version) = self.protocol_names.try_get_protocol(&protocol).unwrap();
+		assert_eq!(version, peer_set.get_main_version());
 
 		self.action_tx
 			.lock()
@@ -151,13 +158,10 @@ impl Network for TestNetwork {
 			.unwrap();
 	}
 
-	fn write_notification(&self, who: PeerId, protocol: ProtocolName, message: Vec<u8>) {
-		let (peer_set, _) = self.protocol_names.try_get_protocol(&protocol).unwrap();
-
-		self.action_tx
-			.lock()
-			.unbounded_send(NetworkAction::WriteNotification(who, peer_set, message))
-			.unwrap();
+	fn peer_role(&self, _peer_id: PeerId, handshake: Vec<u8>) -> Option<SubstrateObservedRole> {
+		Roles::decode_all(&mut &handshake[..])
+			.ok()
+			.map(SubstrateObservedRole::from)
 	}
 }
 
@@ -201,35 +205,85 @@ impl TestNetworkHandle {
 		peer_set: PeerSet,
 		role: ObservedRole,
 	) {
-		let protocol_version = ProtocolVersion::from(protocol_version);
-		self.send_network_event(NetworkEvent::NotificationStreamOpened {
-			remote: peer,
-			protocol: self.protocol_names.get_name(peer_set, protocol_version),
-			negotiated_fallback: None,
-			role: role.into(),
-			received_handshake: vec![],
-		})
-		.await;
+		fn observed_role_to_handshake(role: &ObservedRole) -> Vec<u8> {
+			match role {
+				&ObservedRole::Light => Roles::LIGHT.encode(),
+				&ObservedRole::Authority => Roles::AUTHORITY.encode(),
+				&ObservedRole::Full => Roles::FULL.encode(),
+			}
+		}
+
+		// Because of how protocol negotiation works, if two peers support at least one common
+		// protocol, the substream is negotiated over the main protocol (`ValidationVersion::V2`).
+		// If either peer had to use a fallback protocol for the negotiation (meaning it doesn't
+		// support the main protocol but only some older version of it), `negotiated_fallback` is
+		// set to that protocol.
+		let negotiated_fallback = match protocol_version {
+			ValidationVersion::V2 => None,
+			ValidationVersion::V1 => match peer_set {
+				PeerSet::Validation => Some(ProtocolName::from("/polkadot/validation/1")),
+				PeerSet::Collation => Some(ProtocolName::from("/polkadot/collation/1")),
+			},
+			ValidationVersion::VStaging => match peer_set {
+				PeerSet::Validation => Some(ProtocolName::from("/polkadot/validation/3")),
+				PeerSet::Collation => unreachable!(),
+			},
+		};
+
+		match peer_set {
+			PeerSet::Validation => {
+				self.validation_tx
+					.send(NotificationEvent::NotificationStreamOpened {
+						peer,
+						direction: Direction::Inbound,
+						handshake: observed_role_to_handshake(&role),
+						negotiated_fallback,
+					})
+					.await
+					.expect("subsystem concluded early");
+			},
+			PeerSet::Collation => {
+				self.collation_tx
+					.send(NotificationEvent::NotificationStreamOpened {
+						peer,
+						direction: Direction::Inbound,
+						handshake: observed_role_to_handshake(&role),
+						negotiated_fallback,
+					})
+					.await
+					.expect("subsystem concluded early");
+			},
+		}
 	}
 
 	async fn disconnect_peer(&mut self, peer: PeerId, peer_set: PeerSet) {
-		self.send_network_event(NetworkEvent::NotificationStreamClosed {
-			remote: peer,
-			protocol: self.protocol_names.get_main_name(peer_set),
-		})
-		.await;
+		match peer_set {
+			PeerSet::Validation => self
+				.validation_tx
+				.send(NotificationEvent::NotificationStreamClosed { peer })
+				.await
+				.expect("subsystem concluded early"),
+			PeerSet::Collation => self
+				.collation_tx
+				.send(NotificationEvent::NotificationStreamClosed { peer })
+				.await
+				.expect("subsystem concluded early"),
+		}
 	}
 
 	async fn peer_message(&mut self, peer: PeerId, peer_set: PeerSet, message: Vec<u8>) {
-		self.send_network_event(NetworkEvent::NotificationsReceived {
-			remote: peer,
-			messages: vec![(self.protocol_names.get_main_name(peer_set), message.into())],
-		})
-		.await;
-	}
-
-	async fn send_network_event(&mut self, event: NetworkEvent) {
-		self.net_tx.send(event).await.expect("subsystem concluded early");
+		match peer_set {
+			PeerSet::Validation => self
+				.validation_tx
+				.send(NotificationEvent::NotificationReceived { peer, notification: message })
+				.await
+				.expect("subsystem concluded early"),
+			PeerSet::Collation => self
+				.collation_tx
+				.send(NotificationEvent::NotificationReceived { peer, notification: message })
+				.await
+				.expect("subsystem concluded early"),
+		}
 	}
 }
 
@@ -240,6 +294,121 @@ fn assert_network_actions_contains(actions: &[NetworkAction], action: &NetworkAc
 	}
 }
 
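+/// Test implementation of `NotificationService` that yields events injected by the test via a
+/// `SingleItemStream` and hands out `TestMessageSink`s which record sent notifications as
+/// `NetworkAction`s.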
+struct TestNotificationService {
+	peer_set: PeerSet,
+	action_tx: Arc<Mutex<metered::UnboundedMeteredSender<NetworkAction>>>,
+	rx: SingleItemStream<NotificationEvent>,
+}
+
+impl std::fmt::Debug for TestNotificationService {
+	fn fmt(&self, _: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+		Ok(())
+	}
+}
+
+impl TestNotificationService {
+	pub fn new(
+		peer_set: PeerSet,
+		action_tx: Arc<Mutex<metered::UnboundedMeteredSender<NetworkAction>>>,
+		rx: SingleItemStream<NotificationEvent>,
+	) -> Self {
+		Self { peer_set, action_tx, rx }
+	}
+}
+
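+/// Message sink recording synchronously sent notifications as `NetworkAction::WriteNotification`.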
+struct TestMessageSink {
+	peer: PeerId,
+	peer_set: PeerSet,
+	action_tx: Arc<Mutex<metered::UnboundedMeteredSender<NetworkAction>>>,
+}
+
+impl TestMessageSink {
+	fn new(
+		peer: PeerId,
+		peer_set: PeerSet,
+		action_tx: Arc<Mutex<metered::UnboundedMeteredSender<NetworkAction>>>,
+	) -> TestMessageSink {
+		Self { peer, peer_set, action_tx }
+	}
+}
+
+#[async_trait::async_trait]
+impl MessageSink for TestMessageSink {
+	fn send_sync_notification(&self, notification: Vec<u8>) {
+		self.action_tx
+			.lock()
+			.unbounded_send(NetworkAction::WriteNotification(
+				self.peer,
+				self.peer_set,
+				notification,
+			))
+			.unwrap();
+	}
+
+	async fn send_async_notification(
+		&self,
+		_notification: Vec<u8>,
+	) -> Result<(), sc_network::error::Error> {
+		unimplemented!();
+	}
+}
+
+#[async_trait::async_trait]
+impl NotificationService for TestNotificationService {
+	/// Instruct `Notifications` to open a new substream for `peer`.
+	async fn open_substream(&mut self, _peer: PeerId) -> Result<(), ()> {
+		unimplemented!();
+	}
+
+	/// Instruct `Notifications` to close substream for `peer`.
+	async fn close_substream(&mut self, _peer: PeerId) -> Result<(), ()> {
+		unimplemented!();
+	}
+
+	/// Send synchronous `notification` to `peer`.
+	fn send_sync_notification(&self, _peer: &PeerId, _notification: Vec<u8>) {
+		unimplemented!();
+	}
+
+	/// Send asynchronous `notification` to `peer`, allowing sender to exercise backpressure.
+	async fn send_async_notification(
+		&self,
+		_peer: &PeerId,
+		_notification: Vec<u8>,
+	) -> Result<(), sc_network::error::Error> {
+		unimplemented!();
+	}
+
+	/// Set handshake for the notification protocol replacing the old handshake.
+	async fn set_handshake(&mut self, _handshake: Vec<u8>) -> Result<(), ()> {
+		unimplemented!();
+	}
+
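+	/// Attempt to set the handshake without blocking.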
+	fn try_set_handshake(&mut self, _handshake: Vec<u8>) -> Result<(), ()> {
+		unimplemented!();
+	}
+
+	/// Get next event from the `Notifications` event stream.
+	async fn next_event(&mut self) -> Option<NotificationEvent> {
+		self.rx.next().await
+	}
+
+	/// Clone [`NotificationService`].
+	fn clone(&mut self) -> Result<Box<dyn NotificationService>, ()> {
+		unimplemented!();
+	}
+
+	/// Get protocol name.
+	fn protocol(&self) -> &ProtocolName {
+		unimplemented!();
+	}
+
+	/// Get notification sink of the peer.
+	fn message_sink(&self, peer: &PeerId) -> Option<Box<dyn MessageSink>> {
+		Some(Box::new(TestMessageSink::new(*peer, self.peer_set, self.action_tx.clone())))
+	}
+}
+
 #[derive(Clone)]
 struct TestSyncOracle {
 	is_major_syncing: Arc<AtomicBool>,
@@ -335,10 +504,11 @@ fn test_harness<T: Future<Output = VirtualOverseer>>(
 	let peerset_protocol_names = PeerSetProtocolNames::new(genesis_hash, fork_id);
 
 	let pool = sp_core::testing::TaskExecutor::new();
-	let (mut network, network_handle, discovery) = new_test_network(peerset_protocol_names.clone());
+	let (network, network_handle, discovery, validation_service, collation_service) =
+		new_test_network(peerset_protocol_names.clone());
 	let (context, virtual_overseer) =
 		polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
-	let network_stream = network.event_stream();
+	let notification_sinks = Arc::new(Mutex::new(HashMap::new()));
 	let shared = Shared::default();
 
 	let bridge = NetworkBridgeRx {
@@ -348,9 +518,12 @@ fn test_harness<T: Future<Output = VirtualOverseer>>(
 		sync_oracle,
 		shared: shared.clone(),
 		peerset_protocol_names,
+		validation_service,
+		collation_service,
+		notification_sinks,
 	};
 
-	let network_bridge = run_network_in(bridge, context, network_stream)
+	let network_bridge = run_network_in(bridge, context)
 		.map_err(|_| panic!("subsystem execution failed"))
 		.map(|_| ());
 
@@ -942,8 +1115,6 @@ fn relays_collation_protocol_messages() {
 			.await;
 		}
 
-		// peer A gets reported for sending a collation message.
-
 		let collator_protocol_message = protocol_v1::CollatorProtocolMessage::Declare(
 			Sr25519Keyring::Alice.public().into(),
 			Default::default(),
@@ -953,19 +1124,23 @@ fn relays_collation_protocol_messages() {
 		let message_v1 =
 			protocol_v1::CollationProtocol::CollatorProtocol(collator_protocol_message.clone());
 
-		network_handle
-			.peer_message(
-				peer_a,
-				PeerSet::Collation,
-				WireMessage::ProtocolMessage(message_v1.clone()).encode(),
-			)
-			.await;
-
-		let actions = network_handle.next_network_actions(3).await;
-		assert_network_actions_contains(
-			&actions,
-			&NetworkAction::ReputationChange(peer_a, UNCONNECTED_PEERSET_COST.into()),
-		);
+		// Previously, peer A got reported for sending a collation message.
+		// NOTE: this is no longer possible since peer A cannot send a collation message
+		// without having opened a collation protocol substream first.
+
+		// network_handle
+		// 	.peer_message(
+		// 		peer_a,
+		// 		PeerSet::Collation,
+		// 		WireMessage::ProtocolMessage(message_v1.clone()).encode(),
+		// 	)
+		// 	.await;
+
+		// let actions = network_handle.next_network_actions(3).await;
+		// assert_network_actions_contains(
+		// 	&actions,
+		// 	&NetworkAction::ReputationChange(peer_a, UNCONNECTED_PEERSET_COST.into()),
+		// );
 
 		// peer B has the message relayed.
 
@@ -1212,7 +1387,7 @@ fn our_view_updates_decreasing_order_and_limited_to_max() {
 fn network_protocol_versioning_view_update() {
 	let (oracle, handle) = make_sync_oracle(false);
 	test_harness(Box::new(oracle), |test_harness| async move {
-		let TestHarness { mut network_handle, mut virtual_overseer, .. } = test_harness;
+		let TestHarness { mut network_handle, mut virtual_overseer, shared } = test_harness;
 
 		let peer_ids: Vec<_> = (0..4).map(|_| PeerId::random()).collect();
 		let peers = [
@@ -1231,12 +1406,22 @@ fn network_protocol_versioning_view_update() {
 
 		handle.await_mode_switch().await;
 
+		let mut total_validation_peers = 0;
+		let mut total_collation_peers = 0;
+
 		for &(peer_id, peer_set, version) in &peers {
 			network_handle
 				.connect_peer(peer_id, version, peer_set, ObservedRole::Full)
 				.await;
+
+			match peer_set {
+				PeerSet::Validation => total_validation_peers += 1,
+				PeerSet::Collation => total_collation_peers += 1,
+			}
 		}
 
+		await_peer_connections(&shared, total_validation_peers, total_collation_peers).await;
+
 		let view = view![head];
 		let actions = network_handle.next_network_actions(4).await;
 
@@ -1264,15 +1449,19 @@ fn network_protocol_versioning_view_update() {
 
 #[test]
 fn network_protocol_versioning_subsystem_msg() {
+	use polkadot_primitives::CandidateHash;
+	use std::task::Poll;
+
 	let (oracle, _handle) = make_sync_oracle(false);
 	test_harness(Box::new(oracle), |test_harness| async move {
-		let TestHarness { mut network_handle, mut virtual_overseer, .. } = test_harness;
+		let TestHarness { mut network_handle, mut virtual_overseer, shared } = test_harness;
 
 		let peer = PeerId::random();
 
 		network_handle
 			.connect_peer(peer, ValidationVersion::V2, PeerSet::Validation, ObservedRole::Full)
 			.await;
+		await_peer_connections(&shared, 1, 0).await;
 
 		// bridge will inform about all connected peers.
 		{
diff --git a/polkadot/node/network/bridge/src/tx/mod.rs b/polkadot/node/network/bridge/src/tx/mod.rs
index 5f222ad59c7..bdcd1574e33 100644
--- a/polkadot/node/network/bridge/src/tx/mod.rs
+++ b/polkadot/node/network/bridge/src/tx/mod.rs
@@ -32,7 +32,7 @@ use polkadot_node_subsystem::{
 /// To be passed to [`FullNetworkConfiguration::add_notification_protocol`]().
 pub use polkadot_node_network_protocol::peer_set::{peer_sets_info, IsAuthority};
 use polkadot_node_network_protocol::request_response::Requests;
-use sc_network::ReputationChange;
+use sc_network::{MessageSink, ReputationChange};
 
 use crate::validator_discovery;
 
@@ -60,6 +60,7 @@ pub struct NetworkBridgeTx<N, AD> {
 	metrics: Metrics,
 	req_protocol_names: ReqProtocolNames,
 	peerset_protocol_names: PeerSetProtocolNames,
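+	/// Per-peer notification sinks used for sending notifications; shared with `NetworkBridgeRx`.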
+	notification_sinks: Arc<Mutex<HashMap<(PeerSet, PeerId), Box<dyn MessageSink>>>>,
 }
 
 impl<N, AD> NetworkBridgeTx<N, AD> {
@@ -74,6 +75,7 @@ impl<N, AD> NetworkBridgeTx<N, AD> {
 		metrics: Metrics,
 		req_protocol_names: ReqProtocolNames,
 		peerset_protocol_names: PeerSetProtocolNames,
+		notification_sinks: Arc<Mutex<HashMap<(PeerSet, PeerId), Box<dyn MessageSink>>>>,
 	) -> Self {
 		Self {
 			network_service,
@@ -81,6 +83,7 @@ impl<N, AD> NetworkBridgeTx<N, AD> {
 			metrics,
 			req_protocol_names,
 			peerset_protocol_names,
+			notification_sinks,
 		}
 	}
 }
@@ -107,6 +110,7 @@ async fn handle_subsystem_messages<Context, N, AD>(
 	metrics: Metrics,
 	req_protocol_names: ReqProtocolNames,
 	peerset_protocol_names: PeerSetProtocolNames,
+	notification_sinks: Arc<Mutex<HashMap<(PeerSet, PeerId), Box<dyn MessageSink>>>>,
 ) -> Result<(), Error>
 where
 	N: Network,
@@ -130,6 +134,7 @@ where
 						&metrics,
 						&req_protocol_names,
 						&peerset_protocol_names,
+						&notification_sinks,
 					)
 					.await;
 			},
@@ -140,13 +145,14 @@ where
 #[overseer::contextbounds(NetworkBridgeTx, prefix = self::overseer)]
 async fn handle_incoming_subsystem_communication<Context, N, AD>(
 	_ctx: &mut Context,
-	mut network_service: N,
+	network_service: N,
 	validator_discovery: &mut validator_discovery::Service<N, AD>,
 	mut authority_discovery_service: AD,
 	msg: NetworkBridgeTxMessage,
 	metrics: &Metrics,
 	req_protocol_names: &ReqProtocolNames,
 	peerset_protocol_names: &PeerSetProtocolNames,
+	notification_sinks: &Arc<Mutex<HashMap<(PeerSet, PeerId), Box<dyn MessageSink>>>>,
 ) -> (N, AD)
 where
 	N: Network,
@@ -194,25 +200,22 @@ where
 
 			match msg {
 				Versioned::V1(msg) => send_validation_message_v1(
-					&mut network_service,
 					peers,
-					peerset_protocol_names,
 					WireMessage::ProtocolMessage(msg),
 					&metrics,
+					notification_sinks,
 				),
 				Versioned::VStaging(msg) => send_validation_message_vstaging(
-					&mut network_service,
 					peers,
-					peerset_protocol_names,
 					WireMessage::ProtocolMessage(msg),
 					&metrics,
+					notification_sinks,
 				),
 				Versioned::V2(msg) => send_validation_message_v2(
-					&mut network_service,
 					peers,
-					peerset_protocol_names,
 					WireMessage::ProtocolMessage(msg),
 					&metrics,
+					notification_sinks,
 				),
 			}
 		},
@@ -227,25 +230,22 @@ where
 			for (peers, msg) in msgs {
 				match msg {
 					Versioned::V1(msg) => send_validation_message_v1(
-						&mut network_service,
 						peers,
-						peerset_protocol_names,
 						WireMessage::ProtocolMessage(msg),
 						&metrics,
+						notification_sinks,
 					),
 					Versioned::VStaging(msg) => send_validation_message_vstaging(
-						&mut network_service,
 						peers,
-						peerset_protocol_names,
 						WireMessage::ProtocolMessage(msg),
 						&metrics,
+						notification_sinks,
 					),
 					Versioned::V2(msg) => send_validation_message_v2(
-						&mut network_service,
 						peers,
-						peerset_protocol_names,
 						WireMessage::ProtocolMessage(msg),
 						&metrics,
+						notification_sinks,
 					),
 				}
 			}
@@ -259,18 +259,16 @@ where
 
 			match msg {
 				Versioned::V1(msg) => send_collation_message_v1(
-					&mut network_service,
 					peers,
-					peerset_protocol_names,
 					WireMessage::ProtocolMessage(msg),
 					&metrics,
+					notification_sinks,
 				),
 				Versioned::V2(msg) | Versioned::VStaging(msg) => send_collation_message_v2(
-					&mut network_service,
 					peers,
-					peerset_protocol_names,
 					WireMessage::ProtocolMessage(msg),
 					&metrics,
+					notification_sinks,
 				),
 			}
 		},
@@ -284,18 +282,16 @@ where
 			for (peers, msg) in msgs {
 				match msg {
 					Versioned::V1(msg) => send_collation_message_v1(
-						&mut network_service,
 						peers,
-						peerset_protocol_names,
 						WireMessage::ProtocolMessage(msg),
 						&metrics,
+						notification_sinks,
 					),
 					Versioned::V2(msg) | Versioned::VStaging(msg) => send_collation_message_v2(
-						&mut network_service,
 						peers,
-						peerset_protocol_names,
 						WireMessage::ProtocolMessage(msg),
 						&metrics,
+						notification_sinks,
 					),
 				}
 			}
@@ -389,6 +385,7 @@ where
 		metrics,
 		req_protocol_names,
 		peerset_protocol_names,
+		notification_sinks,
 	} = bridge;
 
 	handle_subsystem_messages(
@@ -398,6 +395,7 @@ where
 		metrics,
 		req_protocol_names,
 		peerset_protocol_names,
+		notification_sinks,
 	)
 	.await?;
 
diff --git a/polkadot/node/network/bridge/src/tx/tests.rs b/polkadot/node/network/bridge/src/tx/tests.rs
index 1a2d9a7a424..c3cf0f322f6 100644
--- a/polkadot/node/network/bridge/src/tx/tests.rs
+++ b/polkadot/node/network/bridge/src/tx/tests.rs
@@ -15,15 +15,18 @@
 // along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
 
 use super::*;
-use futures::{executor, stream::BoxStream};
+use futures::executor;
 use polkadot_node_subsystem_util::TimeoutExt;
 
 use async_trait::async_trait;
 use parking_lot::Mutex;
 use std::collections::HashSet;
 
-use sc_network::{Event as NetworkEvent, IfDisconnected, ProtocolName, ReputationChange};
+use sc_network::{
+	IfDisconnected, ObservedRole as SubstrateObservedRole, ProtocolName, ReputationChange, Roles,
+};
 
+use parity_scale_codec::DecodeAll;
 use polkadot_node_network_protocol::{
 	peer_set::{PeerSetProtocolNames, ValidationVersion},
 	request_response::{outgoing::Requests, ReqProtocolNames},
@@ -51,10 +54,9 @@ pub enum NetworkAction {
 	WriteNotification(PeerId, PeerSet, Vec<u8>),
 }
 
-// The subsystem's view of the network - only supports a single call to `event_stream`.
+// The subsystem's view of the network.
 #[derive(Clone)]
 struct TestNetwork {
-	net_events: Arc<Mutex<Option<metered::MeteredReceiver<NetworkEvent>>>>,
 	action_tx: Arc<Mutex<metered::UnboundedMeteredSender<NetworkAction>>>,
 	peerset_protocol_names: Arc<PeerSetProtocolNames>,
 }
@@ -66,37 +68,78 @@ struct TestAuthorityDiscovery;
 // of `NetworkAction`s.
 struct TestNetworkHandle {
 	action_rx: metered::UnboundedMeteredReceiver<NetworkAction>,
-	net_tx: metered::MeteredSender<NetworkEvent>,
-	peerset_protocol_names: PeerSetProtocolNames,
+	_peerset_protocol_names: PeerSetProtocolNames,
+	notification_sinks: Arc<Mutex<HashMap<(PeerSet, PeerId), Box<dyn MessageSink>>>>,
+	action_tx: Arc<Mutex<metered::UnboundedMeteredSender<NetworkAction>>>,
+}
+
+struct TestMessageSink {
+	peer: PeerId,
+	peer_set: PeerSet,
+	action_tx: Arc<Mutex<metered::UnboundedMeteredSender<NetworkAction>>>,
+}
+
+impl TestMessageSink {
+	fn new(
+		peer: PeerId,
+		peer_set: PeerSet,
+		action_tx: Arc<Mutex<metered::UnboundedMeteredSender<NetworkAction>>>,
+	) -> TestMessageSink {
+		Self { peer, peer_set, action_tx }
+	}
+}
+
+#[async_trait::async_trait]
+impl MessageSink for TestMessageSink {
+	fn send_sync_notification(&self, notification: Vec<u8>) {
+		self.action_tx
+			.lock()
+			.unbounded_send(NetworkAction::WriteNotification(
+				self.peer,
+				self.peer_set,
+				notification,
+			))
+			.unwrap();
+	}
+
+	async fn send_async_notification(
+		&self,
+		_notification: Vec<u8>,
+	) -> Result<(), sc_network::error::Error> {
+		unimplemented!();
+	}
 }
 
 fn new_test_network(
 	peerset_protocol_names: PeerSetProtocolNames,
-) -> (TestNetwork, TestNetworkHandle, TestAuthorityDiscovery) {
-	let (net_tx, net_rx) = metered::channel(10);
+) -> (
+	TestNetwork,
+	TestNetworkHandle,
+	TestAuthorityDiscovery,
+	Arc<Mutex<HashMap<(PeerSet, PeerId), Box<dyn MessageSink>>>>,
+) {
 	let (action_tx, action_rx) = metered::unbounded();
+	let notification_sinks = Arc::new(Mutex::new(HashMap::new()));
+	let action_tx = Arc::new(Mutex::new(action_tx));
 
 	(
 		TestNetwork {
-			net_events: Arc::new(Mutex::new(Some(net_rx))),
-			action_tx: Arc::new(Mutex::new(action_tx)),
+			action_tx: action_tx.clone(),
 			peerset_protocol_names: Arc::new(peerset_protocol_names.clone()),
 		},
-		TestNetworkHandle { action_rx, net_tx, peerset_protocol_names },
+		TestNetworkHandle {
+			action_rx,
+			_peerset_protocol_names: peerset_protocol_names,
+			action_tx,
+			notification_sinks: notification_sinks.clone(),
+		},
 		TestAuthorityDiscovery,
+		notification_sinks,
 	)
 }
 
 #[async_trait]
 impl Network for TestNetwork {
-	fn event_stream(&mut self) -> BoxStream<'static, NetworkEvent> {
-		self.net_events
-			.lock()
-			.take()
-			.expect("Subsystem made more than one call to `event_stream`")
-			.boxed()
-	}
-
 	async fn set_reserved_peers(
 		&mut self,
 		_protocol: ProtocolName,
@@ -130,7 +173,8 @@ impl Network for TestNetwork {
 	}
 
 	fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName) {
-		let (peer_set, _) = self.peerset_protocol_names.try_get_protocol(&protocol).unwrap();
+		let (peer_set, version) = self.peerset_protocol_names.try_get_protocol(&protocol).unwrap();
+		assert_eq!(version, peer_set.get_main_version());
 
 		self.action_tx
 			.lock()
@@ -138,13 +182,10 @@ impl Network for TestNetwork {
 			.unwrap();
 	}
 
-	fn write_notification(&self, who: PeerId, protocol: ProtocolName, message: Vec<u8>) {
-		let (peer_set, _) = self.peerset_protocol_names.try_get_protocol(&protocol).unwrap();
-
-		self.action_tx
-			.lock()
-			.unbounded_send(NetworkAction::WriteNotification(who, peer_set, message))
-			.unwrap();
+	fn peer_role(&self, _peer_id: PeerId, handshake: Vec<u8>) -> Option<SubstrateObservedRole> {
+		Roles::decode_all(&mut &handshake[..])
+			.ok()
+			.map(SubstrateObservedRole::from)
 	}
 }
 
@@ -174,23 +215,14 @@ impl TestNetworkHandle {
 	async fn connect_peer(
 		&mut self,
 		peer: PeerId,
-		protocol_version: ValidationVersion,
+		_protocol_version: ValidationVersion,
 		peer_set: PeerSet,
-		role: ObservedRole,
+		_role: ObservedRole,
 	) {
-		let protocol_version = ProtocolVersion::from(protocol_version);
-		self.send_network_event(NetworkEvent::NotificationStreamOpened {
-			remote: peer,
-			protocol: self.peerset_protocol_names.get_name(peer_set, protocol_version),
-			negotiated_fallback: None,
-			role: role.into(),
-			received_handshake: vec![],
-		})
-		.await;
-	}
-
-	async fn send_network_event(&mut self, event: NetworkEvent) {
-		self.net_tx.send(event).await.expect("subsystem concluded early");
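+		// Register a message sink for the peer so notifications sent by the bridge are recorded
+		// as `NetworkAction`s.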
+		self.notification_sinks.lock().insert(
+			(peer_set, peer),
+			Box::new(TestMessageSink::new(peer, peer_set, self.action_tx.clone())),
+		);
 	}
 }
 
@@ -208,7 +240,8 @@ fn test_harness<T: Future<Output = VirtualOverseer>>(test: impl FnOnce(TestHarne
 	let peerset_protocol_names = PeerSetProtocolNames::new(genesis_hash, fork_id);
 
 	let pool = sp_core::testing::TaskExecutor::new();
-	let (network, network_handle, discovery) = new_test_network(peerset_protocol_names.clone());
+	let (network, network_handle, discovery, network_notification_sinks) =
+		new_test_network(peerset_protocol_names.clone());
 
 	let (context, virtual_overseer) =
 		polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
@@ -219,6 +252,7 @@ fn test_harness<T: Future<Output = VirtualOverseer>>(test: impl FnOnce(TestHarne
 		Metrics(None),
 		req_protocol_names,
 		peerset_protocol_names,
+		network_notification_sinks,
 	);
 
 	let network_bridge_out_fut = run_network_out(bridge_out, context)
@@ -364,9 +398,9 @@ fn network_protocol_versioning_send() {
 				approval_distribution_message.clone(),
 			);
 
-			// Note that bridge doesn't ensure neither peer's protocol version
-			// or peer set match the message.
-			let receivers = vec![peer_ids[0], peer_ids[3]];
+			// Only `peer_ids[0]` opened validation protocol v2,
+			// so only that peer is sent a notification.
+			let receivers = vec![peer_ids[0]];
 			virtual_overseer
 				.send(FromOrchestra::Communication {
 					msg: NetworkBridgeTxMessage::SendValidationMessage(
@@ -406,7 +440,9 @@ fn network_protocol_versioning_send() {
 			let msg =
 				protocol_v2::CollationProtocol::CollatorProtocol(collator_protocol_message.clone());
 
-			let receivers = vec![peer_ids[1], peer_ids[2]];
+			// Only `peer_ids[1]` opened collation protocol v2,
+			// so only that peer is sent a notification.
+			let receivers = vec![peer_ids[1]];
 
 			virtual_overseer
 				.send(FromOrchestra::Communication {
diff --git a/polkadot/node/network/bridge/src/validator_discovery.rs b/polkadot/node/network/bridge/src/validator_discovery.rs
index 86e861fbc5b..b11af8a8a08 100644
--- a/polkadot/node/network/bridge/src/validator_discovery.rs
+++ b/polkadot/node/network/bridge/src/validator_discovery.rs
@@ -169,13 +169,12 @@ mod tests {
 	use crate::network::Network;
 
 	use async_trait::async_trait;
-	use futures::stream::BoxStream;
 	use polkadot_node_network_protocol::{
 		request_response::{outgoing::Requests, ReqProtocolNames},
 		PeerId,
 	};
 	use polkadot_primitives::Hash;
-	use sc_network::{Event as NetworkEvent, IfDisconnected, ProtocolName, ReputationChange};
+	use sc_network::{IfDisconnected, ProtocolName, ReputationChange};
 	use sp_keyring::Sr25519Keyring;
 	use std::collections::{HashMap, HashSet};
 
@@ -224,10 +223,6 @@ mod tests {
 
 	#[async_trait]
 	impl Network for TestNetwork {
-		fn event_stream(&mut self) -> BoxStream<'static, NetworkEvent> {
-			panic!()
-		}
-
 		async fn set_reserved_peers(
 			&mut self,
 			_protocol: ProtocolName,
@@ -263,7 +258,11 @@ mod tests {
 			panic!()
 		}
 
-		fn write_notification(&self, _: PeerId, _: ProtocolName, _: Vec<u8>) {
+		fn peer_role(
+			&self,
+			_peer_id: PeerId,
+			_handshake: Vec<u8>,
+		) -> Option<sc_network::ObservedRole> {
 			panic!()
 		}
 	}
diff --git a/polkadot/node/network/protocol/src/peer_set.rs b/polkadot/node/network/protocol/src/peer_set.rs
index eb483dec970..7e257d508b5 100644
--- a/polkadot/node/network/protocol/src/peer_set.rs
+++ b/polkadot/node/network/protocol/src/peer_set.rs
@@ -21,6 +21,7 @@ use polkadot_primitives::Hash;
 use sc_network::{
 	config::{NonDefaultSetConfig, SetConfig},
 	types::ProtocolName,
+	NotificationService,
 };
 use std::{
 	collections::{hash_map::Entry, HashMap},
@@ -68,7 +69,7 @@ impl PeerSet {
 		self,
 		is_authority: IsAuthority,
 		peerset_protocol_names: &PeerSetProtocolNames,
-	) -> NonDefaultSetConfig {
+	) -> (NonDefaultSetConfig, (PeerSet, Box<dyn NotificationService>)) {
 		// Networking layer relies on `get_main_name()` being the main name of the protocol
 		// for peersets and connection management.
 		let protocol = peerset_protocol_names.get_main_name(self);
@@ -76,39 +77,47 @@ impl PeerSet {
 		let max_notification_size = self.get_max_notification_size(is_authority);
 
 		match self {
-			PeerSet::Validation => NonDefaultSetConfig {
-				notifications_protocol: protocol,
-				fallback_names,
-				max_notification_size,
-				handshake: None,
-				set_config: SetConfig {
-					// we allow full nodes to connect to validators for gossip
-					// to ensure any `MIN_GOSSIP_PEERS` always include reserved peers
-					// we limit the amount of non-reserved slots to be less
-					// than `MIN_GOSSIP_PEERS` in total
-					in_peers: super::MIN_GOSSIP_PEERS as u32 / 2 - 1,
-					out_peers: super::MIN_GOSSIP_PEERS as u32 / 2 - 1,
-					reserved_nodes: Vec::new(),
-					non_reserved_mode: sc_network::config::NonReservedPeerMode::Accept,
-				},
+			PeerSet::Validation => {
+				let (config, notification_service) = NonDefaultSetConfig::new(
+					protocol,
+					fallback_names,
+					max_notification_size,
+					None,
+					SetConfig {
+						// We allow full nodes to connect to validators for gossip. To ensure any
+						// `MIN_GOSSIP_PEERS` always include reserved peers, we limit the number of
+						// non-reserved slots to less than `MIN_GOSSIP_PEERS` in total.
+						in_peers: super::MIN_GOSSIP_PEERS as u32 / 2 - 1,
+						out_peers: super::MIN_GOSSIP_PEERS as u32 / 2 - 1,
+						reserved_nodes: Vec::new(),
+						non_reserved_mode: sc_network::config::NonReservedPeerMode::Accept,
+					},
+				);
+
+				(config, (PeerSet::Validation, notification_service))
 			},
-			PeerSet::Collation => NonDefaultSetConfig {
-				notifications_protocol: protocol,
-				fallback_names,
-				max_notification_size,
-				handshake: None,
-				set_config: SetConfig {
-					// Non-authority nodes don't need to accept incoming connections on this peer
-					// set:
-					in_peers: if is_authority == IsAuthority::Yes { 100 } else { 0 },
-					out_peers: 0,
-					reserved_nodes: Vec::new(),
-					non_reserved_mode: if is_authority == IsAuthority::Yes {
-						sc_network::config::NonReservedPeerMode::Accept
-					} else {
-						sc_network::config::NonReservedPeerMode::Deny
+			PeerSet::Collation => {
+				let (config, notification_service) = NonDefaultSetConfig::new(
+					protocol,
+					fallback_names,
+					max_notification_size,
+					None,
+					SetConfig {
+						// Non-authority nodes don't need to accept incoming connections on this
+						// peer set:
+						in_peers: if is_authority == IsAuthority::Yes { 100 } else { 0 },
+						out_peers: 0,
+						reserved_nodes: Vec::new(),
+						non_reserved_mode: if is_authority == IsAuthority::Yes {
+							sc_network::config::NonReservedPeerMode::Accept
+						} else {
+							sc_network::config::NonReservedPeerMode::Deny
+						},
 					},
-				},
+				);
+
+				(config, (PeerSet::Collation, notification_service))
 			},
 		}
 	}
@@ -204,7 +213,7 @@ impl<T> IndexMut<PeerSet> for PerPeerSet<T> {
 pub fn peer_sets_info(
 	is_authority: IsAuthority,
 	peerset_protocol_names: &PeerSetProtocolNames,
-) -> Vec<NonDefaultSetConfig> {
+) -> Vec<(NonDefaultSetConfig, (PeerSet, Box<dyn NotificationService>))> {
 	PeerSet::iter()
 		.map(|s| s.get_info(is_authority, &peerset_protocol_names))
 		.collect()
@@ -286,7 +295,7 @@ impl From<CollationVersion> for ProtocolVersion {
 }
 
 /// On the wire protocol name to [`PeerSet`] mapping.
-#[derive(Clone)]
+#[derive(Debug, Clone)]
 pub struct PeerSetProtocolNames {
 	protocols: HashMap<ProtocolName, (PeerSet, ProtocolVersion)>,
 	names: HashMap<(PeerSet, ProtocolVersion), ProtocolName>,
diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml
index e7a4f4a825c..19efd1b66c4 100644
--- a/polkadot/node/service/Cargo.toml
+++ b/polkadot/node/service/Cargo.toml
@@ -89,6 +89,7 @@ kvdb = "0.13.0"
 kvdb-rocksdb = { version = "0.19.0", optional = true }
 parity-db = { version = "0.4.8", optional = true }
 codec = { package = "parity-scale-codec", version = "3.6.1" }
+parking_lot = "0.12.1"
 
 # Polkadot
 polkadot-core-primitives = { path = "../../core-primitives" }
diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs
index 5069ec467c9..70159301fc4 100644
--- a/polkadot/node/service/src/lib.rs
+++ b/polkadot/node/service/src/lib.rs
@@ -51,7 +51,8 @@ use {
 	},
 	polkadot_node_core_dispute_coordinator::Config as DisputeCoordinatorConfig,
 	polkadot_node_network_protocol::{
-		peer_set::PeerSetProtocolNames, request_response::ReqProtocolNames,
+		peer_set::{PeerSet, PeerSetProtocolNames},
+		request_response::ReqProtocolNames,
 	},
 	sc_client_api::BlockBackend,
 	sc_transaction_pool_api::OffchainTransactionPoolFactory,
@@ -74,7 +75,7 @@ pub use {
 #[cfg(feature = "full-node")]
 use polkadot_node_subsystem::jaeger;
 
-use std::{path::PathBuf, sync::Arc, time::Duration};
+use std::{collections::HashMap, path::PathBuf, sync::Arc, time::Duration};
 
 use prometheus_endpoint::Registry;
 #[cfg(feature = "full-node")]
@@ -809,9 +810,9 @@ pub fn new_full<OverseerGenerator: OverseerGen>(
 	// anything in terms of behaviour, but makes the logs more consistent with the other
 	// Substrate nodes.
 	let grandpa_protocol_name = grandpa::protocol_standard_name(&genesis_hash, &config.chain_spec);
-	net_config.add_notification_protocol(grandpa::grandpa_peers_set_config(
-		grandpa_protocol_name.clone(),
-	));
+	let (grandpa_protocol_config, grandpa_notification_service) =
+		grandpa::grandpa_peers_set_config(grandpa_protocol_name.clone());
+	net_config.add_notification_protocol(grandpa_protocol_config);
 
 	let beefy_gossip_proto_name =
 		beefy::gossip_protocol_name(&genesis_hash, config.chain_spec.fork_id());
@@ -824,12 +825,17 @@ pub fn new_full<OverseerGenerator: OverseerGen>(
 			client.clone(),
 			prometheus_registry.clone(),
 		);
-	if enable_beefy {
-		net_config.add_notification_protocol(beefy::communication::beefy_peers_set_config(
-			beefy_gossip_proto_name.clone(),
-		));
-		net_config.add_request_response_protocol(beefy_req_resp_cfg);
-	}
+	let beefy_notification_service = match enable_beefy {
+		false => None,
+		true => {
+			let (beefy_notification_config, beefy_notification_service) =
+				beefy::communication::beefy_peers_set_config(beefy_gossip_proto_name.clone());
+
+			net_config.add_notification_protocol(beefy_notification_config);
+			net_config.add_request_response_protocol(beefy_req_resp_cfg);
+			Some(beefy_notification_service)
+		},
+	};
 
 	// validation/collation protocols are enabled only if `Overseer` is enabled
 	let peerset_protocol_names =
@@ -840,13 +846,21 @@ pub fn new_full<OverseerGenerator: OverseerGen>(
 	//
 	// Collators and parachain full nodes require the collator and validator networking to send
 	// collations and to be able to recover PoVs.
-	if role.is_authority() || is_parachain_node.is_running_alongside_parachain_node() {
-		use polkadot_network_bridge::{peer_sets_info, IsAuthority};
-		let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No };
-		for config in peer_sets_info(is_authority, &peerset_protocol_names) {
-			net_config.add_notification_protocol(config);
-		}
-	}
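+	// Register the validation/collation notification protocols and collect their notification
+	// services, which are handed to the overseer below.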
+	let notification_services =
+		if role.is_authority() || is_parachain_node.is_running_alongside_parachain_node() {
+			use polkadot_network_bridge::{peer_sets_info, IsAuthority};
+			let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No };
+
+			peer_sets_info(is_authority, &peerset_protocol_names)
+				.into_iter()
+				.map(|(config, (peerset, service))| {
+					net_config.add_notification_protocol(config);
+					(peerset, service)
+				})
+				.collect::<HashMap<PeerSet, Box<dyn sc_network::NotificationService>>>()
+		} else {
+			std::collections::HashMap::new()
+		};
 
 	let req_protocol_names = ReqProtocolNames::new(&genesis_hash, config.chain_spec.fork_id());
 
@@ -1078,6 +1092,7 @@ pub fn new_full<OverseerGenerator: OverseerGen>(
 					offchain_transaction_pool_factory: OffchainTransactionPoolFactory::new(
 						transaction_pool.clone(),
 					),
+					notification_services,
 				},
 			)
 			.map_err(|e| {
@@ -1179,13 +1194,15 @@ pub fn new_full<OverseerGenerator: OverseerGen>(
 	// need a keystore, regardless of which protocol we use below.
 	let keystore_opt = if role.is_authority() { Some(keystore_container.keystore()) } else { None };
 
-	if enable_beefy {
+	// beefy is enabled if its notification service exists
+	if let Some(notification_service) = beefy_notification_service {
 		let justifications_protocol_name = beefy_on_demand_justifications_handler.protocol_name();
 		let network_params = beefy::BeefyNetworkParams {
 			network: network.clone(),
 			sync: sync_service.clone(),
 			gossip_protocol_name: beefy_gossip_proto_name,
 			justifications_protocol_name,
+			notification_service,
 			_phantom: core::marker::PhantomData::<Block>,
 		};
 		let payload_provider = beefy_primitives::mmr::MmrRootProvider::new(client.clone());
@@ -1265,6 +1282,7 @@ pub fn new_full<OverseerGenerator: OverseerGen>(
 			prometheus_registry: prometheus_registry.clone(),
 			shared_voter_state,
 			telemetry: telemetry.as_ref().map(|x| x.handle()),
+			notification_service: grandpa_notification_service,
 			offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()),
 		};
 
diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs
index fd618863eea..599563d6454 100644
--- a/polkadot/node/service/src/overseer.rs
+++ b/polkadot/node/service/src/overseer.rs
@@ -26,7 +26,7 @@ use polkadot_node_core_candidate_validation::Config as CandidateValidationConfig
 use polkadot_node_core_chain_selection::Config as ChainSelectionConfig;
 use polkadot_node_core_dispute_coordinator::Config as DisputeCoordinatorConfig;
 use polkadot_node_network_protocol::{
-	peer_set::PeerSetProtocolNames,
+	peer_set::{PeerSet, PeerSetProtocolNames},
 	request_response::{
 		v1 as request_v1, v2 as request_v2, IncomingRequestReceiver, ReqProtocolNames,
 	},
@@ -41,15 +41,16 @@ use polkadot_overseer::{
 	OverseerConnector, OverseerHandle, SpawnGlue,
 };
 
+use parking_lot::Mutex;
 use polkadot_primitives::runtime_api::ParachainHost;
 use sc_authority_discovery::Service as AuthorityDiscoveryService;
 use sc_client_api::AuxStore;
 use sc_keystore::LocalKeystore;
-use sc_network::NetworkStateInfo;
+use sc_network::{NetworkStateInfo, NotificationService};
 use sp_api::ProvideRuntimeApi;
 use sp_blockchain::HeaderBackend;
 use sp_consensus_babe::BabeApi;
-use std::sync::Arc;
+use std::{collections::HashMap, sync::Arc};
 
 pub use polkadot_approval_distribution::ApprovalDistribution as ApprovalDistributionSubsystem;
 pub use polkadot_availability_bitfield_distribution::BitfieldDistribution as BitfieldDistributionSubsystem;
@@ -140,6 +141,8 @@ where
 	pub peerset_protocol_names: PeerSetProtocolNames,
 	/// The offchain transaction pool factory.
 	pub offchain_transaction_pool_factory: OffchainTransactionPoolFactory<Block>,
+	/// Notification services for validation/collation protocols.
+	pub notification_services: HashMap<PeerSet, Box<dyn NotificationService>>,
 }
 
 /// Obtain a prepared `OverseerBuilder`, that is initialized
@@ -173,6 +176,7 @@ pub fn prepared_overseer_builder<Spawner, RuntimeClient>(
 		req_protocol_names,
 		peerset_protocol_names,
 		offchain_transaction_pool_factory,
+		notification_services,
 	}: OverseerGenArgs<Spawner, RuntimeClient>,
 ) -> Result<
 	InitializedOverseerBuilder<
@@ -218,6 +222,7 @@ where
 	use polkadot_node_subsystem_util::metrics::Metrics;
 
 	let metrics = <OverseerMetrics as MetricsTrait>::register(registry)?;
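+	// Per-peer notification sinks shared by the tx and rx network bridge subsystems.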
+	let notification_sinks = Arc::new(Mutex::new(HashMap::new()));
 
 	let spawner = SpawnGlue(spawner);
 
@@ -235,6 +240,7 @@ where
 			network_bridge_metrics.clone(),
 			req_protocol_names,
 			peerset_protocol_names.clone(),
+			notification_sinks.clone(),
 		))
 		.network_bridge_rx(NetworkBridgeRxSubsystem::new(
 			network_service.clone(),
@@ -242,6 +248,8 @@ where
 			Box::new(sync_service.clone()),
 			network_bridge_metrics,
 			peerset_protocol_names,
+			notification_services,
+			notification_sinks,
 		))
 		.availability_distribution(AvailabilityDistributionSubsystem::new(
 			keystore.clone(),
diff --git a/substrate/bin/node-template/node/src/service.rs b/substrate/bin/node-template/node/src/service.rs
index 40320282924..e69428d8190 100644
--- a/substrate/bin/node-template/node/src/service.rs
+++ b/substrate/bin/node-template/node/src/service.rs
@@ -163,9 +163,9 @@ pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
 		&client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"),
 		&config.chain_spec,
 	);
-	net_config.add_notification_protocol(sc_consensus_grandpa::grandpa_peers_set_config(
-		grandpa_protocol_name.clone(),
-	));
+	let (grandpa_protocol_config, grandpa_notification_service) =
+		sc_consensus_grandpa::grandpa_peers_set_config(grandpa_protocol_name.clone());
+	net_config.add_notification_protocol(grandpa_protocol_config);
 
 	let warp_sync = Arc::new(sc_consensus_grandpa::warp_proof::NetworkProvider::new(
 		backend.clone(),
@@ -316,6 +316,7 @@ pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
 			link: grandpa_link,
 			network,
 			sync: Arc::new(sync_service),
+			notification_service: grandpa_notification_service,
 			voting_rule: sc_consensus_grandpa::VotingRulesBuilder::default().build(),
 			prometheus_registry,
 			shared_voter_state: SharedVoterState::empty(),
diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs
index 1c71b5a3956..a746de8de84 100644
--- a/substrate/bin/node/cli/src/service.rs
+++ b/substrate/bin/node/cli/src/service.rs
@@ -370,28 +370,28 @@ pub fn new_full_base(
 	let shared_voter_state = rpc_setup;
 	let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht;
 	let mut net_config = sc_network::config::FullNetworkConfiguration::new(&config.network);
-
 	let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed");
 
 	let grandpa_protocol_name = grandpa::protocol_standard_name(&genesis_hash, &config.chain_spec);
-	net_config.add_notification_protocol(grandpa::grandpa_peers_set_config(
-		grandpa_protocol_name.clone(),
-	));
+	let (grandpa_protocol_config, grandpa_notification_service) =
+		grandpa::grandpa_peers_set_config(grandpa_protocol_name.clone());
+	net_config.add_notification_protocol(grandpa_protocol_config);
 
-	let statement_handler_proto = sc_network_statement::StatementHandlerPrototype::new(
-		genesis_hash,
-		config.chain_spec.fork_id(),
-	);
-	net_config.add_notification_protocol(statement_handler_proto.set_config());
+	let (statement_handler_proto, statement_config) =
+		sc_network_statement::StatementHandlerPrototype::new(
+			genesis_hash,
+			config.chain_spec.fork_id(),
+		);
+	net_config.add_notification_protocol(statement_config);
 
 	let mixnet_protocol_name =
 		sc_mixnet::protocol_name(genesis_hash.as_ref(), config.chain_spec.fork_id());
-	if let Some(mixnet_config) = &mixnet_config {
-		net_config.add_notification_protocol(sc_mixnet::peers_set_config(
-			mixnet_protocol_name.clone(),
-			mixnet_config,
-		));
-	}
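+	// Register the mixnet notification protocol only when mixnet is enabled, keeping its
+	// notification service for the mixnet task.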
+	let mixnet_notification_service = mixnet_config.as_ref().map(|mixnet_config| {
+		let (config, notification_service) =
+			sc_mixnet::peers_set_config(mixnet_protocol_name.clone(), mixnet_config);
+		net_config.add_notification_protocol(config);
+		notification_service
+	});
 
 	let warp_sync = Arc::new(grandpa::warp_proof::NetworkProvider::new(
 		backend.clone(),
@@ -422,6 +422,8 @@ pub fn new_full_base(
 			mixnet_protocol_name,
 			transaction_pool.clone(),
 			Some(keystore_container.keystore()),
+			mixnet_notification_service
+				.expect("`NotificationService` exists since mixnet was enabled; qed"),
 		);
 		task_manager.spawn_handle().spawn("mixnet", None, mixnet);
 	}
@@ -590,6 +592,7 @@ pub fn new_full_base(
 			link: grandpa_link,
 			network: network.clone(),
 			sync: Arc::new(sync_service.clone()),
+			notification_service: grandpa_notification_service,
 			telemetry: telemetry.as_ref().map(|x| x.handle()),
 			voting_rule: grandpa::VotingRulesBuilder::default().build(),
 			prometheus_registry: prometheus_registry.clone(),
diff --git a/substrate/client/consensus/beefy/src/communication/mod.rs b/substrate/client/consensus/beefy/src/communication/mod.rs
index 10a6071aae6..3827559057d 100644
--- a/substrate/client/consensus/beefy/src/communication/mod.rs
+++ b/substrate/client/consensus/beefy/src/communication/mod.rs
@@ -67,10 +67,16 @@ pub(crate) mod beefy_protocol_name {
 /// For standard protocol name see [`beefy_protocol_name::gossip_protocol_name`].
 pub fn beefy_peers_set_config(
 	gossip_protocol_name: sc_network::ProtocolName,
-) -> sc_network::config::NonDefaultSetConfig {
-	let mut cfg = sc_network::config::NonDefaultSetConfig::new(gossip_protocol_name, 1024 * 1024);
+) -> (sc_network::config::NonDefaultSetConfig, Box<dyn sc_network::NotificationService>) {
+	let (mut cfg, notification_service) = sc_network::config::NonDefaultSetConfig::new(
+		gossip_protocol_name,
+		Vec::new(),
+		1024 * 1024,
+		None,
+		Default::default(),
+	);
 	cfg.allow_non_reserved(25, 25);
-	cfg
+	(cfg, notification_service)
 }
 
 // cost scalars for reporting peers.
diff --git a/substrate/client/consensus/beefy/src/lib.rs b/substrate/client/consensus/beefy/src/lib.rs
index 3d104f13250..b3ff11add27 100644
--- a/substrate/client/consensus/beefy/src/lib.rs
+++ b/substrate/client/consensus/beefy/src/lib.rs
@@ -38,7 +38,7 @@ use parking_lot::Mutex;
 use prometheus::Registry;
 use sc_client_api::{Backend, BlockBackend, BlockchainEvents, FinalityNotifications, Finalizer};
 use sc_consensus::BlockImport;
-use sc_network::{NetworkRequest, ProtocolName};
+use sc_network::{NetworkRequest, NotificationService, ProtocolName};
 use sc_network_gossip::{GossipEngine, Network as GossipNetwork, Syncing as GossipSyncing};
 use sp_api::ProvideRuntimeApi;
 use sp_blockchain::{
@@ -178,6 +178,8 @@ pub struct BeefyNetworkParams<B: Block, N, S> {
 	pub network: Arc<N>,
 	/// Syncing service implementing a sync oracle and an event stream for peers.
 	pub sync: Arc<S>,
+	/// Handle for receiving notification events.
+	pub notification_service: Box<dyn NotificationService>,
 	/// Chain specific BEEFY gossip protocol name. See
 	/// [`communication::beefy_protocol_name::gossip_protocol_name`].
 	pub gossip_protocol_name: ProtocolName,
@@ -243,6 +245,7 @@ pub async fn start_beefy_gadget<B, BE, C, N, P, R, S>(
 	let BeefyNetworkParams {
 		network,
 		sync,
+		notification_service,
 		gossip_protocol_name,
 		justifications_protocol_name,
 		..
@@ -264,6 +267,7 @@ pub async fn start_beefy_gadget<B, BE, C, N, P, R, S>(
 	let gossip_engine = GossipEngine::new(
 		network.clone(),
 		sync.clone(),
+		notification_service,
 		gossip_protocol_name.clone(),
 		gossip_validator.clone(),
 		None,
diff --git a/substrate/client/consensus/beefy/src/tests.rs b/substrate/client/consensus/beefy/src/tests.rs
index 3aaa59cbfa1..3f800166e26 100644
--- a/substrate/client/consensus/beefy/src/tests.rs
+++ b/substrate/client/consensus/beefy/src/tests.rs
@@ -72,7 +72,7 @@ use substrate_test_runtime_client::{BlockBuilderExt, ClientExt};
 use tokio::time::Duration;
 
 const GENESIS_HASH: H256 = H256::zero();
-fn beefy_gossip_proto_name() -> ProtocolName {
+pub(crate) fn beefy_gossip_proto_name() -> ProtocolName {
 	gossip_protocol_name(GENESIS_HASH, None)
 }
 
@@ -371,6 +371,7 @@ async fn voter_init_setup(
 	let mut gossip_engine = sc_network_gossip::GossipEngine::new(
 		net.peer(0).network_service().clone(),
 		net.peer(0).sync_service().clone(),
+		net.peer(0).take_notification_service(&beefy_gossip_proto_name()).unwrap(),
 		"/beefy/whatever",
 		gossip_validator,
 		None,
@@ -392,6 +393,14 @@ where
 {
 	let tasks = FuturesUnordered::new();
 
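+	// Take each peer's BEEFY notification service up front; each is handed to that peer's
+	// `BeefyNetworkParams` below.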
+	let mut notification_services = peers
+		.iter()
+		.map(|(peer_id, _, _)| {
+			let peer = &mut net.peers[*peer_id];
+			(*peer_id, peer.take_notification_service(&beefy_gossip_proto_name()).unwrap())
+		})
+		.collect::<std::collections::HashMap<_, _>>();
+
 	for (peer_id, key, api) in peers.into_iter() {
 		let peer = &net.peers[peer_id];
 
@@ -409,6 +418,7 @@ where
 		let network_params = crate::BeefyNetworkParams {
 			network: peer.network_service().clone(),
 			sync: peer.sync_service().clone(),
+			notification_service: notification_services.remove(&peer_id).unwrap(),
 			gossip_protocol_name: beefy_gossip_proto_name(),
 			justifications_protocol_name: on_demand_justif_handler.protocol_name(),
 			_phantom: PhantomData,
@@ -1045,7 +1055,25 @@ async fn should_initialize_voter_at_custom_genesis() {
 	net.peer(0).client().as_client().finalize_block(hashes[8], None).unwrap();
 
 	// load persistent state - nothing in DB, should init at genesis
-	let persisted_state = voter_init_setup(&mut net, &mut finality, &api).await.unwrap();
+	//
+	// NOTE: the code from `voter_init_setup()` is inlined here because the new network event
+	// system doesn't allow creating a second `GossipEngine` for the same protocol: the
+	// notification handle is consumed by the first one.
+	let known_peers = Arc::new(Mutex::new(KnownPeers::new()));
+	let (gossip_validator, _) = GossipValidator::new(known_peers);
+	let gossip_validator = Arc::new(gossip_validator);
+	let mut gossip_engine = sc_network_gossip::GossipEngine::new(
+		net.peer(0).network_service().clone(),
+		net.peer(0).sync_service().clone(),
+		net.peer(0).take_notification_service(&beefy_gossip_proto_name()).unwrap(),
+		"/beefy/whatever",
+		gossip_validator,
+		None,
+	);
+	let (beefy_genesis, best_grandpa) =
+		wait_for_runtime_pallet(&api, &mut gossip_engine, &mut finality).await.unwrap();
+	let persisted_state =
+		load_or_init_voter_state(&*backend, &api, beefy_genesis, best_grandpa, 1).unwrap();
 
 	// Test initialization at session boundary.
 	// verify voter initialized with single session starting at block `custom_pallet_genesis` (7)
@@ -1075,7 +1103,11 @@ async fn should_initialize_voter_at_custom_genesis() {
 
 	net.peer(0).client().as_client().finalize_block(hashes[10], None).unwrap();
 	// load persistent state - state preset in DB, but with different pallet genesis
-	let new_persisted_state = voter_init_setup(&mut net, &mut finality, &api).await.unwrap();
+	// the network state persists, so reuse the `GossipEngine` already initialized for `peer(0)`
+	let (beefy_genesis, best_grandpa) =
+		wait_for_runtime_pallet(&api, &mut gossip_engine, &mut finality).await.unwrap();
+	let new_persisted_state =
+		load_or_init_voter_state(&*backend, &api, beefy_genesis, best_grandpa, 1).unwrap();
 
 	// verify voter initialized with single session starting at block `new_pallet_genesis` (10)
 	let sessions = new_persisted_state.voting_oracle().sessions();
@@ -1371,7 +1403,7 @@ async fn gossipped_finality_proofs() {
 	let api = Arc::new(TestApi::with_validator_set(&validator_set));
 	let beefy_peers = peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect();
 
-	let charlie = &net.peers[2];
+	let charlie = &mut net.peers[2];
 	let known_peers = Arc::new(Mutex::new(KnownPeers::<Block>::new()));
 	// Charlie will run just the gossip engine and not the full voter.
 	let (gossip_validator, _) = GossipValidator::new(known_peers);
@@ -1384,6 +1416,7 @@ async fn gossipped_finality_proofs() {
 	let mut charlie_gossip_engine = sc_network_gossip::GossipEngine::new(
 		charlie.network_service().clone(),
 		charlie.sync_service().clone(),
+		charlie.take_notification_service(&beefy_gossip_proto_name()).unwrap(),
 		beefy_gossip_proto_name(),
 		charlie_gossip_validator.clone(),
 		None,
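
Worth noting for the test changes above: each test peer hands out its per-protocol handle exactly once, which is why `voter_init_setup()` can no longer be reused to build a second `GossipEngine` for the same peer. A small sketch of the assumed take-once semantics of the test helper:

```rust
// Assumed take-once semantics: the first call yields the handle, a later call
// for the same protocol yields `None`.
let _svc = net
	.peer(0)
	.take_notification_service(&beefy_gossip_proto_name())
	.expect("handle still available");
assert!(net.peer(0).take_notification_service(&beefy_gossip_proto_name()).is_none());
```
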
diff --git a/substrate/client/consensus/beefy/src/worker.rs b/substrate/client/consensus/beefy/src/worker.rs
index 0eea5647e51..1fbda974053 100644
--- a/substrate/client/consensus/beefy/src/worker.rs
+++ b/substrate/client/consensus/beefy/src/worker.rs
@@ -1145,12 +1145,16 @@ pub(crate) mod tests {
 		let api = Arc::new(TestApi::with_validator_set(&genesis_validator_set));
 		let network = peer.network_service().clone();
 		let sync = peer.sync_service().clone();
+		let notification_service = peer
+			.take_notification_service(&crate::tests::beefy_gossip_proto_name())
+			.unwrap();
 		let known_peers = Arc::new(Mutex::new(KnownPeers::new()));
 		let (gossip_validator, gossip_report_stream) = GossipValidator::new(known_peers.clone());
 		let gossip_validator = Arc::new(gossip_validator);
 		let gossip_engine = GossipEngine::new(
 			network.clone(),
 			sync.clone(),
+			notification_service,
 			"/beefy/1",
 			gossip_validator.clone(),
 			None,
diff --git a/substrate/client/consensus/grandpa/src/communication/mod.rs b/substrate/client/consensus/grandpa/src/communication/mod.rs
index 6d9e956b41b..5c7e1276297 100644
--- a/substrate/client/consensus/grandpa/src/communication/mod.rs
+++ b/substrate/client/consensus/grandpa/src/communication/mod.rs
@@ -46,7 +46,7 @@ use finality_grandpa::{
 	Message::{Precommit, Prevote, PrimaryPropose},
 };
 use parity_scale_codec::{Decode, DecodeAll, Encode};
-use sc_network::{NetworkBlock, NetworkSyncForkRequest, ReputationChange};
+use sc_network::{NetworkBlock, NetworkSyncForkRequest, NotificationService, ReputationChange};
 use sc_network_gossip::{GossipEngine, Network as GossipNetwork};
 use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO};
 use sp_keystore::KeystorePtr;
@@ -247,6 +247,7 @@ impl<B: BlockT, N: Network<B>, S: Syncing<B>> NetworkBridge<B, N, S> {
 	pub(crate) fn new(
 		service: N,
 		sync: S,
+		notification_service: Box<dyn NotificationService>,
 		config: crate::Config,
 		set_state: crate::environment::SharedVoterSetState<B>,
 		prometheus_registry: Option<&Registry>,
@@ -260,6 +261,7 @@ impl<B: BlockT, N: Network<B>, S: Syncing<B>> NetworkBridge<B, N, S> {
 		let gossip_engine = Arc::new(Mutex::new(GossipEngine::new(
 			service.clone(),
 			sync.clone(),
+			notification_service,
 			protocol,
 			validator.clone(),
 			prometheus_registry,
diff --git a/substrate/client/consensus/grandpa/src/communication/tests.rs b/substrate/client/consensus/grandpa/src/communication/tests.rs
index 4a869d0f515..b76b1af93da 100644
--- a/substrate/client/consensus/grandpa/src/communication/tests.rs
+++ b/substrate/client/consensus/grandpa/src/communication/tests.rs
@@ -24,16 +24,17 @@ use super::{
 };
 use crate::{communication::grandpa_protocol_name, environment::SharedVoterSetState};
 use futures::prelude::*;
-use parity_scale_codec::Encode;
+use parity_scale_codec::{DecodeAll, Encode};
 use sc_network::{
 	config::{MultiaddrWithPeerId, Role},
 	event::Event as NetworkEvent,
+	service::traits::{Direction, MessageSink, NotificationEvent, NotificationService},
 	types::ProtocolName,
 	Multiaddr, NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers,
 	NetworkSyncForkRequest, NotificationSenderError, NotificationSenderT as NotificationSender,
 	PeerId, ReputationChange,
 };
-use sc_network_common::role::ObservedRole;
+use sc_network_common::role::{ObservedRole, Roles};
 use sc_network_gossip::Validator;
 use sc_network_sync::{SyncEvent as SyncStreamEvent, SyncEventStream};
 use sc_network_test::{Block, Hash};
@@ -123,6 +124,12 @@ impl NetworkPeers for TestNetwork {
 	fn sync_num_connected(&self) -> usize {
 		unimplemented!();
 	}
+
+	fn peer_role(&self, _peer_id: PeerId, handshake: Vec<u8>) -> Option<ObservedRole> {
+		Roles::decode_all(&mut &handshake[..])
+			.ok()
+			.map(ObservedRole::from)
+	}
 }
 
 impl NetworkEventStream for TestNetwork {
@@ -211,10 +218,70 @@ impl NetworkSyncForkRequest<Hash, NumberFor<Block>> for TestSync {
 	fn set_sync_fork_request(&self, _peers: Vec<PeerId>, _hash: Hash, _number: NumberFor<Block>) {}
 }
 
+#[derive(Debug)]
+pub(crate) struct TestNotificationService {
+	sender: TracingUnboundedSender<Event>,
+	rx: TracingUnboundedReceiver<NotificationEvent>,
+}
+
+#[async_trait::async_trait]
+impl NotificationService for TestNotificationService {
+	/// Instruct `Notifications` to open a new substream for `peer`.
+	async fn open_substream(&mut self, _peer: PeerId) -> Result<(), ()> {
+		unimplemented!();
+	}
+
+	/// Instruct `Notifications` to close substream for `peer`.
+	async fn close_substream(&mut self, _peer: PeerId) -> Result<(), ()> {
+		unimplemented!();
+	}
+
+	/// Send synchronous `notification` to `peer`.
+	fn send_sync_notification(&self, peer: &PeerId, notification: Vec<u8>) {
+		let _ = self.sender.unbounded_send(Event::WriteNotification(*peer, notification));
+	}
+
+	/// Send asynchronous `notification` to `peer`, allowing sender to exercise backpressure.
+	async fn send_async_notification(
+		&self,
+		_peer: &PeerId,
+		_notification: Vec<u8>,
+	) -> Result<(), sc_network::error::Error> {
+		unimplemented!();
+	}
+
+	/// Set handshake for the notification protocol replacing the old handshake.
+	async fn set_handshake(&mut self, _handshake: Vec<u8>) -> Result<(), ()> {
+		unimplemented!();
+	}
+
+	fn try_set_handshake(&mut self, _handshake: Vec<u8>) -> Result<(), ()> {
+		unimplemented!();
+	}
+
+	/// Get next event from the `Notifications` event stream.
+	async fn next_event(&mut self) -> Option<NotificationEvent> {
+		self.rx.next().await
+	}
+
+	fn clone(&mut self) -> Result<Box<dyn NotificationService>, ()> {
+		unimplemented!();
+	}
+
+	fn protocol(&self) -> &ProtocolName {
+		unimplemented!();
+	}
+
+	fn message_sink(&self, _peer: &PeerId) -> Option<Box<dyn MessageSink>> {
+		unimplemented!();
+	}
+}
+
 pub(crate) struct Tester {
 	pub(crate) net_handle: super::NetworkBridge<Block, TestNetwork, TestSync>,
 	gossip_validator: Arc<GossipValidator<Block>>,
 	pub(crate) events: TracingUnboundedReceiver<Event>,
+	pub(crate) notification_tx: TracingUnboundedSender<NotificationEvent>,
 }
 
 impl Tester {
@@ -279,6 +346,9 @@ fn voter_set_state() -> SharedVoterSetState<Block> {
 // needs to run in a tokio runtime.
 pub(crate) fn make_test_network() -> (impl Future<Output = Tester>, TestNetwork) {
 	let (tx, rx) = tracing_unbounded("test", 100_000);
+	let (notification_tx, notification_rx) = tracing_unbounded("test-notification", 100_000);
+
+	let notification_service = TestNotificationService { rx: notification_rx, sender: tx.clone() };
 	let net = TestNetwork { sender: tx };
 	let sync = TestSync {};
 
@@ -293,14 +363,22 @@ pub(crate) fn make_test_network() -> (impl Future<Output = Tester>, TestNetwork)
 		}
 	}
 
-	let bridge =
-		super::NetworkBridge::new(net.clone(), sync, config(), voter_set_state(), None, None);
+	let bridge = super::NetworkBridge::new(
+		net.clone(),
+		sync,
+		Box::new(notification_service),
+		config(),
+		voter_set_state(),
+		None,
+		None,
+	);
 
 	(
 		futures::future::ready(Tester {
 			gossip_validator: bridge.validator.clone(),
 			net_handle: bridge,
 			events: rx,
+			notification_tx,
 		}),
 		net,
 	)
@@ -385,63 +463,62 @@ fn good_commit_leads_to_relay() {
 			let commit_to_send = encoded_commit.clone();
 			let network_bridge = tester.net_handle.clone();
 
-			// asking for global communication will cause the test network
-			// to send us an event asking us for a stream. use it to
-			// send a message.
+			// `NetworkBridge` is operational as soon as it's created and starts
+			// waiting for events from the network. Send it events informing it that
+			// a notification stream was opened and that a notification was received.
+			//
+			// Since each protocol has its own notification stream, events need not be filtered.
 			let sender_id = id;
-			let send_message = tester.filter_network_events(move |event| match event {
-				Event::EventStream(sender) => {
-					// Add the sending peer and send the commit
-					let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened {
-						remote: sender_id,
-						protocol: grandpa_protocol_name::NAME.into(),
+
+			let send_message = async move {
+				let _ = tester.notification_tx.unbounded_send(
+					NotificationEvent::NotificationStreamOpened {
+						peer: sender_id,
+						direction: Direction::Inbound,
 						negotiated_fallback: None,
-						role: ObservedRole::Full,
-						received_handshake: vec![],
+						handshake: Roles::FULL.encode(),
+					},
+				);
+				let _ = tester.notification_tx.unbounded_send(
+					NotificationEvent::NotificationReceived {
+						peer: sender_id,
+						notification: commit_to_send.clone(),
+					},
+				);
+
+				// Add a random peer which will be the recipient of this message
+				let receiver_id = PeerId::random();
+				let _ = tester.notification_tx.unbounded_send(
+					NotificationEvent::NotificationStreamOpened {
+						peer: receiver_id,
+						direction: Direction::Inbound,
+						negotiated_fallback: None,
+						handshake: Roles::FULL.encode(),
+					},
+				);
+
+				// Announce that its local set is on the current set id through a neighbor
+				// packet, otherwise it won't be eligible to receive the commit
+				let _ = {
+					let update = gossip::VersionedNeighborPacket::V1(gossip::NeighborPacket {
+						round: Round(round),
+						set_id: SetId(set_id),
+						commit_finalized_height: 1,
 					});
 
-					let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived {
-						remote: sender_id,
-						messages: vec![(
-							grandpa_protocol_name::NAME.into(),
-							commit_to_send.clone().into(),
-						)],
-					});
+					let msg = gossip::GossipMessage::<Block>::Neighbor(update);
 
-					// Add a random peer which will be the recipient of this message
-					let receiver_id = PeerId::random();
-					let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened {
-						remote: receiver_id,
-						protocol: grandpa_protocol_name::NAME.into(),
-						negotiated_fallback: None,
-						role: ObservedRole::Full,
-						received_handshake: vec![],
-					});
+					let _ = tester.notification_tx.unbounded_send(
+						NotificationEvent::NotificationReceived {
+							peer: receiver_id,
+							notification: msg.encode(),
+						},
+					);
+				};
 
-					// Announce its local set has being on the current set id through a neighbor
-					// packet, otherwise it won't be eligible to receive the commit
-					let _ = {
-						let update = gossip::VersionedNeighborPacket::V1(gossip::NeighborPacket {
-							round: Round(round),
-							set_id: SetId(set_id),
-							commit_finalized_height: 1,
-						});
-
-						let msg = gossip::GossipMessage::<Block>::Neighbor(update);
-
-						sender.unbounded_send(NetworkEvent::NotificationsReceived {
-							remote: receiver_id,
-							messages: vec![(
-								grandpa_protocol_name::NAME.into(),
-								msg.encode().into(),
-							)],
-						})
-					};
-
-					true
-				},
-				_ => false,
-			});
+				tester
+			}
+			.boxed();
 
 			// when the commit comes in, we'll tell the callback it was good.
 			let handle_commit = commits_in.into_future().map(|(item, _)| match item.unwrap() {
@@ -537,31 +614,32 @@ fn bad_commit_leads_to_report() {
 			let commit_to_send = encoded_commit.clone();
 			let network_bridge = tester.net_handle.clone();
 
-			// asking for global communication will cause the test network
-			// to send us an event asking us for a stream. use it to
-			// send a message.
+			// `NetworkBridge` is operational as soon as it's created and starts
+			// waiting for events from the network. Send it events informing it that
+			// a notification stream was opened and that a notification was received.
+			//
+			// Since each protocol has its own notification stream, events need not be filtered.
 			let sender_id = id;
-			let send_message = tester.filter_network_events(move |event| match event {
-				Event::EventStream(sender) => {
-					let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened {
-						remote: sender_id,
-						protocol: grandpa_protocol_name::NAME.into(),
+
+			let send_message = async move {
+				let _ = tester.notification_tx.unbounded_send(
+					NotificationEvent::NotificationStreamOpened {
+						peer: sender_id,
+						direction: Direction::Inbound,
 						negotiated_fallback: None,
-						role: ObservedRole::Full,
-						received_handshake: vec![],
-					});
-					let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived {
-						remote: sender_id,
-						messages: vec![(
-							grandpa_protocol_name::NAME.into(),
-							commit_to_send.clone().into(),
-						)],
-					});
+						handshake: Roles::FULL.encode(),
+					},
+				);
+				let _ = tester.notification_tx.unbounded_send(
+					NotificationEvent::NotificationReceived {
+						peer: sender_id,
+						notification: commit_to_send.clone(),
+					},
+				);
 
-					true
-				},
-				_ => false,
-			});
+				tester
+			}
+			.boxed();
 
 			// when the commit comes in, we'll tell the callback it was bad.
 			let handle_commit = commits_in.into_future().map(|(item, _)| match item.unwrap() {
diff --git a/substrate/client/consensus/grandpa/src/lib.rs b/substrate/client/consensus/grandpa/src/lib.rs
index a4584e6fc80..b7cfc9f5b60 100644
--- a/substrate/client/consensus/grandpa/src/lib.rs
+++ b/substrate/client/consensus/grandpa/src/lib.rs
@@ -67,7 +67,7 @@ use sc_client_api::{
 	BlockchainEvents, CallExecutor, ExecutorProvider, Finalizer, LockImportRun, StorageProvider,
 };
 use sc_consensus::BlockImport;
-use sc_network::types::ProtocolName;
+use sc_network::{types::ProtocolName, NotificationService};
 use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO};
 use sc_transaction_pool_api::OffchainTransactionPoolFactory;
 use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver};
@@ -687,6 +687,8 @@ pub struct GrandpaParams<Block: BlockT, C, N, S, SC, VR> {
 	pub network: N,
 	/// Event stream for syncing-related events.
 	pub sync: S,
+	/// Handle for interacting with `Notifications`.
+	pub notification_service: Box<dyn NotificationService>,
 	/// A voting rule used to potentially restrict target votes.
 	pub voting_rule: VR,
 	/// The prometheus metrics registry.
@@ -707,21 +709,21 @@ pub struct GrandpaParams<Block: BlockT, C, N, S, SC, VR> {
 /// For standard protocol name see [`crate::protocol_standard_name`].
 pub fn grandpa_peers_set_config(
 	protocol_name: ProtocolName,
-) -> sc_network::config::NonDefaultSetConfig {
+) -> (sc_network::config::NonDefaultSetConfig, Box<dyn NotificationService>) {
 	use communication::grandpa_protocol_name;
-	sc_network::config::NonDefaultSetConfig {
-		notifications_protocol: protocol_name,
-		fallback_names: grandpa_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect(),
+	sc_network::config::NonDefaultSetConfig::new(
+		protocol_name,
+		grandpa_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect(),
 		// Notifications reach ~256kiB in size at the time of writing on Kusama and Polkadot.
-		max_notification_size: 1024 * 1024,
-		handshake: None,
-		set_config: sc_network::config::SetConfig {
+		1024 * 1024,
+		None,
+		sc_network::config::SetConfig {
 			in_peers: 0,
 			out_peers: 0,
 			reserved_nodes: Vec::new(),
 			non_reserved_mode: sc_network::config::NonReservedPeerMode::Deny,
 		},
-	}
+	)
 }
 
 /// Run a GRANDPA voter as a task. Provide configuration and a link to a
@@ -744,6 +746,7 @@ where
 		link,
 		network,
 		sync,
+		notification_service,
 		voting_rule,
 		prometheus_registry,
 		shared_voter_state,
@@ -770,6 +773,7 @@ where
 	let network = NetworkBridge::new(
 		network,
 		sync,
+		notification_service,
 		config.clone(),
 		persistent_data.set_state.clone(),
 		prometheus_registry.as_ref(),
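
Callers of `grandpa_peers_set_config` now receive the service handle along with the set config and must thread it into `GrandpaParams`. A sketch of the node-side wiring; the `net_config.add_notification_protocol` call is an assumption, since the registration step is not part of this file's diff:

```rust
// Sketch: register the set config with the network, keep the handle for GRANDPA.
let (grandpa_cfg, grandpa_notification_service) =
	grandpa_peers_set_config(grandpa_protocol_name.clone());
net_config.add_notification_protocol(grandpa_cfg);

// Later, the handle is moved into the voter's parameters:
//   GrandpaParams { network, sync, notification_service: grandpa_notification_service, .. }
// and `run_grandpa_voter` forwards it to `NetworkBridge::new`, which hands it
// to the `GossipEngine` it creates.
```
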
diff --git a/substrate/client/consensus/grandpa/src/observer.rs b/substrate/client/consensus/grandpa/src/observer.rs
index 8541baa822b..608ff5e46a0 100644
--- a/substrate/client/consensus/grandpa/src/observer.rs
+++ b/substrate/client/consensus/grandpa/src/observer.rs
@@ -28,6 +28,7 @@ use futures::prelude::*;
 use log::{debug, info, warn};
 
 use sc_client_api::backend::Backend;
+use sc_network::NotificationService;
 use sc_telemetry::TelemetryHandle;
 use sc_utils::mpsc::TracingUnboundedReceiver;
 use sp_blockchain::HeaderMetadata;
@@ -168,6 +169,7 @@ pub fn run_grandpa_observer<BE, Block: BlockT, Client, N, S, SC>(
 	link: LinkHalf<Block, Client, SC>,
 	network: N,
 	sync: S,
+	notification_service: Box<dyn NotificationService>,
 ) -> sp_blockchain::Result<impl Future<Output = ()> + Send>
 where
 	BE: Backend<Block> + Unpin + 'static,
@@ -189,6 +191,7 @@ where
 	let network = NetworkBridge::new(
 		network,
 		sync,
+		notification_service,
 		config.clone(),
 		persistent_data.set_state.clone(),
 		None,
@@ -414,14 +417,14 @@ mod tests {
 
 	use futures::executor;
 
-	/// Ensure `Future` implementation of `ObserverWork` is polling its `NetworkBridge`. Regression
-	/// test for bug introduced in d4fbb897c and fixed in b7af8b339.
+	/// Ensure `Future` implementation of `ObserverWork` is polling its `NetworkBridge`.
+	/// Regression test for bug introduced in d4fbb897c and fixed in b7af8b339.
 	///
-	/// When polled, `NetworkBridge` forwards reputation change requests from the `GossipValidator`
-	/// to the underlying `dyn Network`. This test triggers a reputation change by calling
-	/// `GossipValidator::validate` with an invalid gossip message. After polling the `ObserverWork`
-	/// which should poll the `NetworkBridge`, the reputation change should be forwarded to the test
-	/// network.
+	/// When polled, `NetworkBridge` forwards reputation change requests from the
+	/// `GossipValidator` to the underlying `dyn Network`. This test triggers a reputation change
+	/// by calling `GossipValidator::validate` with an invalid gossip message. After polling the
+	/// `ObserverWork` which should poll the `NetworkBridge`, the reputation change should be
+	/// forwarded to the test network.
 	#[test]
 	fn observer_work_polls_underlying_network_bridge() {
 		// Create a test network.
@@ -463,12 +466,6 @@ mod tests {
 			// validator to the test network.
 			assert!(observer.now_or_never().is_none());
 
-			// Ignore initial event stream request by gossip engine.
-			match tester.events.next().now_or_never() {
-				Some(Some(Event::EventStream(_))) => {},
-				_ => panic!("expected event stream request"),
-			};
-
 			assert_matches!(tester.events.next().now_or_never(), Some(Some(Event::Report(_, _))));
 		});
 	}
diff --git a/substrate/client/consensus/grandpa/src/tests.rs b/substrate/client/consensus/grandpa/src/tests.rs
index 644befe9885..7e42c2d45c7 100644
--- a/substrate/client/consensus/grandpa/src/tests.rs
+++ b/substrate/client/consensus/grandpa/src/tests.rs
@@ -317,6 +317,9 @@ fn initialize_grandpa(
 			(net.peers[peer_id].network_service().clone(), link)
 		};
 		let sync = net.peers[peer_id].sync_service().clone();
+		let notification_service = net.peers[peer_id]
+			.take_notification_service(&grandpa_protocol_name::NAME.into())
+			.unwrap();
 
 		let grandpa_params = GrandpaParams {
 			config: Config {
@@ -332,6 +335,7 @@ fn initialize_grandpa(
 			link,
 			network: net_service,
 			sync,
+			notification_service,
 			voting_rule: (),
 			prometheus_registry: None,
 			shared_voter_state: SharedVoterState::empty(),
@@ -472,6 +476,9 @@ async fn finalize_3_voters_1_full_observer() {
 		let net_service = net.peers[peer_id].network_service().clone();
 		let sync = net.peers[peer_id].sync_service().clone();
 		let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed");
+		let notification_service = net.peers[peer_id]
+			.take_notification_service(&grandpa_protocol_name::NAME.into())
+			.unwrap();
 
 		let grandpa_params = GrandpaParams {
 			config: Config {
@@ -487,6 +494,7 @@ async fn finalize_3_voters_1_full_observer() {
 			link,
 			network: net_service,
 			sync,
+			notification_service,
 			voting_rule: (),
 			prometheus_registry: None,
 			shared_voter_state: SharedVoterState::empty(),
@@ -557,14 +565,17 @@ async fn transition_3_voters_twice_1_full_observer() {
 	for (peer_id, local_key) in all_peers.clone().into_iter().enumerate() {
 		let keystore = create_keystore(local_key);
 
-		let (net_service, link, sync) = {
-			let net = net.lock();
+		let (net_service, link, sync, notification_service) = {
+			let mut net = net.lock();
 			let link =
 				net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed");
 			(
 				net.peers[peer_id].network_service().clone(),
 				link,
 				net.peers[peer_id].sync_service().clone(),
+				net.peers[peer_id]
+					.take_notification_service(&grandpa_protocol_name::NAME.into())
+					.unwrap(),
 			)
 		};
 
@@ -582,6 +593,7 @@ async fn transition_3_voters_twice_1_full_observer() {
 			link,
 			network: net_service,
 			sync,
+			notification_service,
 			voting_rule: (),
 			prometheus_registry: None,
 			shared_voter_state: SharedVoterState::empty(),
@@ -1025,6 +1037,9 @@ async fn voter_persists_its_votes() {
 		communication::NetworkBridge::new(
 			net.peers[1].network_service().clone(),
 			net.peers[1].sync_service().clone(),
+			net.peers[1]
+				.take_notification_service(&grandpa_protocol_name::NAME.into())
+				.unwrap(),
 			config.clone(),
 			set_state,
 			None,
@@ -1043,6 +1058,9 @@ async fn voter_persists_its_votes() {
 			(net.peers[0].network_service().clone(), link)
 		};
 		let sync = net.peers[0].sync_service().clone();
+		let notification_service = net.peers[0]
+			.take_notification_service(&grandpa_protocol_name::NAME.into())
+			.unwrap();
 
 		let grandpa_params = GrandpaParams {
 			config: Config {
@@ -1058,6 +1076,7 @@ async fn voter_persists_its_votes() {
 			link,
 			network: net_service,
 			sync,
+			notification_service,
 			voting_rule: VotingRulesBuilder::default().build(),
 			prometheus_registry: None,
 			shared_voter_state: SharedVoterState::empty(),
@@ -1082,6 +1101,9 @@ async fn voter_persists_its_votes() {
 		net.add_authority_peer();
 		let net_service = net.peers[2].network_service().clone();
 		let sync = net.peers[2].sync_service().clone();
+		let notification_service = net.peers[2]
+			.take_notification_service(&grandpa_protocol_name::NAME.into())
+			.unwrap();
 		// but we'll reuse the client from the first peer (alice_voter1)
 		// since we want to share the same database, so that we can
 		// read the persisted state after aborting alice_voter1.
@@ -1104,6 +1126,7 @@ async fn voter_persists_its_votes() {
 			link,
 			network: net_service,
 			sync,
+			notification_service,
 			voting_rule: VotingRulesBuilder::default().build(),
 			prometheus_registry: None,
 			shared_voter_state: SharedVoterState::empty(),
@@ -1255,6 +1278,9 @@ async fn finalize_3_voters_1_light_observer() {
 
 	let mut net = GrandpaTestNet::new(TestApi::new(voters), 3, 1);
 	let voters = initialize_grandpa(&mut net, authorities);
+	let notification_service = net.peers[3]
+		.take_notification_service(&grandpa_protocol_name::NAME.into())
+		.unwrap();
 	let observer = observer::run_grandpa_observer(
 		Config {
 			gossip_duration: TEST_GOSSIP_DURATION,
@@ -1269,6 +1295,7 @@ async fn finalize_3_voters_1_light_observer() {
 		net.peers[3].data.lock().take().expect("link initialized at startup; qed"),
 		net.peers[3].network_service().clone(),
 		net.peers[3].sync_service().clone(),
+		notification_service,
 	)
 	.unwrap();
 	net.peer(0).push_blocks(20, false);
@@ -1317,6 +1344,10 @@ async fn voter_catches_up_to_latest_round_when_behind() {
 			link,
 			network: net.peer(peer_id).network_service().clone(),
 			sync: net.peer(peer_id).sync_service().clone(),
+			notification_service: net
+				.peer(peer_id)
+				.take_notification_service(&grandpa_protocol_name::NAME.into())
+				.unwrap(),
 			voting_rule: (),
 			prometheus_registry: None,
 			shared_voter_state: SharedVoterState::empty(),
@@ -1409,6 +1440,7 @@ fn test_environment_with_select_chain<N, S, VR, SC>(
 	keystore: Option<KeystorePtr>,
 	network_service: N,
 	sync_service: S,
+	notification_service: Box<dyn NotificationService>,
 	select_chain: SC,
 	voting_rule: VR,
 ) -> TestEnvironment<N, S, SC, VR>
@@ -1433,6 +1465,7 @@ where
 	let network = NetworkBridge::new(
 		network_service.clone(),
 		sync_service,
+		notification_service,
 		config.clone(),
 		set_state.clone(),
 		None,
@@ -1462,6 +1495,7 @@ fn test_environment<N, S, VR>(
 	keystore: Option<KeystorePtr>,
 	network_service: N,
 	sync_service: S,
+	notification_service: Box<dyn NotificationService>,
 	voting_rule: VR,
 ) -> TestEnvironment<N, S, LongestChain<substrate_test_runtime_client::Backend, Block>, VR>
 where
@@ -1474,6 +1508,7 @@ where
 		keystore,
 		network_service,
 		sync_service,
+		notification_service,
 		link.select_chain.clone(),
 		voting_rule,
 	)
@@ -1490,14 +1525,22 @@ async fn grandpa_environment_respects_voting_rules() {
 	let peer = net.peer(0);
 	let network_service = peer.network_service().clone();
 	let sync_service = peer.sync_service().clone();
+	let mut notification_service =
+		peer.take_notification_service(&grandpa_protocol_name::NAME.into()).unwrap();
 	let link = peer.data.lock().take().unwrap();
 
 	// add 21 blocks
 	let hashes = peer.push_blocks(21, false);
 
 	// create an environment with no voting rule restrictions
-	let unrestricted_env =
-		test_environment(&link, None, network_service.clone(), sync_service.clone(), ());
+	let unrestricted_env = test_environment(
+		&link,
+		None,
+		network_service.clone(),
+		sync_service.clone(),
+		notification_service.clone().unwrap(),
+		(),
+	);
 
 	// another with 3/4 unfinalized chain voting rule restriction
 	let three_quarters_env = test_environment(
@@ -1505,6 +1548,7 @@ async fn grandpa_environment_respects_voting_rules() {
 		None,
 		network_service.clone(),
 		sync_service.clone(),
+		notification_service.clone().unwrap(),
 		voting_rule::ThreeQuartersOfTheUnfinalizedChain,
 	);
 
@@ -1515,6 +1559,7 @@ async fn grandpa_environment_respects_voting_rules() {
 		None,
 		network_service.clone(),
 		sync_service,
+		notification_service,
 		VotingRulesBuilder::default().build(),
 	);
 
@@ -1608,6 +1653,8 @@ async fn grandpa_environment_passes_actual_best_block_to_voting_rules() {
 	let peer = net.peer(0);
 	let network_service = peer.network_service().clone();
 	let sync_service = peer.sync_service().clone();
+	let notification_service =
+		peer.take_notification_service(&grandpa_protocol_name::NAME.into()).unwrap();
 	let link = peer.data.lock().take().unwrap();
 	let client = peer.client().as_client().clone();
 	let select_chain = MockSelectChain::default();
@@ -1622,6 +1669,7 @@ async fn grandpa_environment_passes_actual_best_block_to_voting_rules() {
 		None,
 		network_service.clone(),
 		sync_service,
+		notification_service,
 		select_chain.clone(),
 		voting_rule::BeforeBestBlockBy(5),
 	);
@@ -1669,6 +1717,8 @@ async fn grandpa_environment_checks_if_best_block_is_descendent_of_finality_targ
 	let peer = net.peer(0);
 	let network_service = peer.network_service().clone();
 	let sync_service = peer.sync_service().clone();
+	let notification_service =
+		peer.take_notification_service(&grandpa_protocol_name::NAME.into()).unwrap();
 	let link = peer.data.lock().take().unwrap();
 	let client = peer.client().as_client().clone();
 	let select_chain = MockSelectChain::default();
@@ -1678,6 +1728,7 @@ async fn grandpa_environment_checks_if_best_block_is_descendent_of_finality_targ
 		None,
 		network_service.clone(),
 		sync_service.clone(),
+		notification_service,
 		select_chain.clone(),
 		voting_rule.clone(),
 	);
@@ -1780,11 +1831,19 @@ async fn grandpa_environment_never_overwrites_round_voter_state() {
 	let peer = net.peer(0);
 	let network_service = peer.network_service().clone();
 	let sync_service = peer.sync_service().clone();
+	let notification_service =
+		peer.take_notification_service(&grandpa_protocol_name::NAME.into()).unwrap();
 	let link = peer.data.lock().take().unwrap();
 
 	let keystore = create_keystore(peers[0]);
-	let environment =
-		test_environment(&link, Some(keystore), network_service.clone(), sync_service, ());
+	let environment = test_environment(
+		&link,
+		Some(keystore),
+		network_service.clone(),
+		sync_service,
+		notification_service,
+		(),
+	);
 
 	let round_state = || finality_grandpa::round::State::genesis(Default::default());
 	let base = || Default::default();
@@ -2012,9 +2071,18 @@ async fn grandpa_environment_doesnt_send_equivocation_reports_for_itself() {
 		let peer = net.peer(0);
 		let network_service = peer.network_service().clone();
 		let sync_service = peer.sync_service().clone();
+		let notification_service =
+			peer.take_notification_service(&grandpa_protocol_name::NAME.into()).unwrap();
 		let link = peer.data.lock().take().unwrap();
 		let keystore = create_keystore(alice);
-		test_environment(&link, Some(keystore), network_service.clone(), sync_service, ())
+		test_environment(
+			&link,
+			Some(keystore),
+			network_service.clone(),
+			sync_service,
+			notification_service,
+			(),
+		)
 	};
 
 	let signed_prevote = {
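
A note on the cloning seen in `grandpa_environment_respects_voting_rules` above: `NotificationService::clone` returns a `Result`, so tests that build several environments from a single peer clone the handle explicitly and move the original into the last one. Roughly:

```rust
// Rough sketch of the pattern used in the test above.
let mut notification_service =
	peer.take_notification_service(&grandpa_protocol_name::NAME.into()).unwrap();
let _handle_for_unrestricted_env = notification_service.clone().unwrap();
let _handle_for_three_quarters_env = notification_service.clone().unwrap();
// `notification_service` itself is moved into the final environment.
```
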
diff --git a/substrate/client/executor/wasmtime/src/tests.rs b/substrate/client/executor/wasmtime/src/tests.rs
index e185754b076..1c06da1e3c1 100644
--- a/substrate/client/executor/wasmtime/src/tests.rs
+++ b/substrate/client/executor/wasmtime/src/tests.rs
@@ -384,7 +384,9 @@ fn test_max_memory_pages(
 									)
 									(i32.const -1)
 								)
-								(unreachable)
+								(then
+									(unreachable)
+								)
 							)
 
 							(i64.const 0)
@@ -421,7 +423,9 @@ fn test_max_memory_pages(
 										)
 										(i32.const -1)
 									)
-									(unreachable)
+									(then
+										(unreachable)
+									)
 								)
 
 								(i64.const 0)
diff --git a/substrate/client/mixnet/Cargo.toml b/substrate/client/mixnet/Cargo.toml
index 86c5a37754a..d11cb1805ff 100644
--- a/substrate/client/mixnet/Cargo.toml
+++ b/substrate/client/mixnet/Cargo.toml
@@ -16,6 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 array-bytes = "4.1"
 arrayvec = "0.7.2"
 blake2 = "0.10.4"
+bytes = "1"
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 futures = "0.3.25"
 futures-timer = "3.0.2"
diff --git a/substrate/client/mixnet/src/packet_dispatcher.rs b/substrate/client/mixnet/src/packet_dispatcher.rs
index 856208ecb34..420e0c68847 100644
--- a/substrate/client/mixnet/src/packet_dispatcher.rs
+++ b/substrate/client/mixnet/src/packet_dispatcher.rs
@@ -24,7 +24,7 @@ use libp2p_identity::PeerId;
 use log::{debug, warn};
 use mixnet::core::{AddressedPacket, NetworkStatus, Packet, PeerId as CorePeerId};
 use parking_lot::Mutex;
-use sc_network::{NetworkNotification, ProtocolName};
+use sc_network::NotificationService;
 use std::{collections::HashMap, future::Future, sync::Arc};
 
 const LOG_TARGET: &str = "mixnet";
@@ -77,41 +77,37 @@ pub struct ReadyPeer {
 }
 
 impl ReadyPeer {
-	/// If a future is returned, and if that future returns `Some`, this function should be called
-	/// again to send the next packet queued for the peer; `self` is placed in the `Some` to make
-	/// this straightforward. Otherwise, we have either sent or dropped all packets queued for the
-	/// peer, and it can be forgotten about for the time being.
+	/// If a future is returned, and if that future returns `Some`, this function should be
+	/// called again to send the next packet queued for the peer; `self` is placed in the `Some`
+	/// to make this straightforward. Otherwise, we have either sent or dropped all packets
+	/// queued for the peer, and it can be forgotten about for the time being.
 	pub fn send_packet(
 		self,
-		network: &impl NetworkNotification,
-		protocol_name: ProtocolName,
+		notification_service: &Box<dyn NotificationService>,
 	) -> Option<impl Future<Output = Option<Self>>> {
-		match network.notification_sender(self.id, protocol_name) {
-			Err(err) => {
+		match notification_service.message_sink(&self.id) {
+			None => {
 				debug!(
 					target: LOG_TARGET,
-					"Failed to get notification sender for peer ID {}: {err}", self.id
+					"Failed to get message sink for peer ID {}", self.id,
 				);
 				self.queue.clear();
 				None
 			},
-			Ok(sender) => Some(async move {
-				match sender.ready().await.and_then(|mut ready| {
-					let (packet, more_packets) = self.queue.pop();
-					let packet =
-						packet.expect("Should only be called if there is a packet to send");
-					ready.send((packet as Box<[_]>).into())?;
-					Ok(more_packets)
-				}) {
+			Some(sink) => Some(async move {
+				let (packet, more_packets) = self.queue.pop();
+				let packet = packet.expect("Should only be called if there is a packet to send");
+
+				match sink.send_async_notification((packet as Box<[_]>).into()).await {
+					Ok(_) => more_packets.then_some(self),
 					Err(err) => {
 						debug!(
 							target: LOG_TARGET,
-							"Notification sender for peer ID {} failed: {err}", self.id
+							"Failed to send packet to peer ID {}: {err}", self.id,
 						);
 						self.queue.clear();
 						None
 					},
-					Ok(more_packets) => more_packets.then(|| self),
 				}
 			}),
 		}
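
The send path above replaces `NetworkNotification::notification_sender` with a per-peer `MessageSink` obtained from the `NotificationService`; `send_async_notification` waits for channel capacity and therefore gives the caller backpressure. A reduced sketch of the same flow, assuming `notification_service`, `peer` and a `packet: Vec<u8>` are in scope:

```rust
// Reduced form of `ReadyPeer::send_packet` above: fetch the sink, send with
// backpressure, and log the failure otherwise.
match notification_service.message_sink(&peer) {
	None => log::debug!(target: "mixnet", "no message sink for peer {peer}"),
	Some(sink) =>
		if let Err(err) = sink.send_async_notification(packet).await {
			log::debug!(target: "mixnet", "sending to {peer} failed: {err}");
		},
}
```
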
diff --git a/substrate/client/mixnet/src/protocol.rs b/substrate/client/mixnet/src/protocol.rs
index 555c267b86e..955502a4856 100644
--- a/substrate/client/mixnet/src/protocol.rs
+++ b/substrate/client/mixnet/src/protocol.rs
@@ -18,7 +18,10 @@
 
 use super::config::Config;
 use mixnet::core::PACKET_SIZE;
-use sc_network::{config::NonDefaultSetConfig, ProtocolName};
+use sc_network::{
+	config::{NonDefaultSetConfig, NonReservedPeerMode, SetConfig},
+	NotificationService, ProtocolName,
+};
 
 /// Returns the protocol name to use for the mixnet controlled by the given chain.
 pub fn protocol_name(genesis_hash: &[u8], fork_id: Option<&str>) -> ProtocolName {
@@ -31,12 +34,26 @@ pub fn protocol_name(genesis_hash: &[u8], fork_id: Option<&str>) -> ProtocolName
 }
 
 /// Returns the peers set configuration for the mixnet protocol.
-pub fn peers_set_config(name: ProtocolName, config: &Config) -> NonDefaultSetConfig {
-	let mut set_config = NonDefaultSetConfig::new(name, PACKET_SIZE as u64);
+pub fn peers_set_config(
+	name: ProtocolName,
+	config: &Config,
+) -> (NonDefaultSetConfig, Box<dyn NotificationService>) {
+	let (mut set_config, service) = NonDefaultSetConfig::new(
+		name,
+		Vec::new(),
+		PACKET_SIZE as u64,
+		None,
+		SetConfig {
+			in_peers: 0,
+			out_peers: 0,
+			reserved_nodes: Vec::new(),
+			non_reserved_mode: NonReservedPeerMode::Deny,
+		},
+	);
 	if config.substrate.num_gateway_slots != 0 {
 		// out_peers is always 0; we are only interested in connecting to mixnodes, which we do by
 		// setting them as reserved nodes
 		set_config.allow_non_reserved(config.substrate.num_gateway_slots, 0);
 	}
-	set_config
+	(set_config, service)
 }
diff --git a/substrate/client/mixnet/src/run.rs b/substrate/client/mixnet/src/run.rs
index 09020469d5e..14d188df097 100644
--- a/substrate/client/mixnet/src/run.rs
+++ b/substrate/client/mixnet/src/run.rs
@@ -29,11 +29,12 @@ use super::{
 	request::{extrinsic_delay, Request, SUBMIT_EXTRINSIC},
 	sync_with_runtime::sync_with_runtime,
 };
+use bytes::Bytes;
 use codec::{Decode, DecodeAll, Encode};
 use futures::{
 	future::{pending, Either},
 	stream::FuturesUnordered,
-	StreamExt,
+	FutureExt, StreamExt,
 };
 use log::{debug, error, trace, warn};
 use mixnet::{
@@ -43,8 +44,8 @@ use mixnet::{
 };
 use sc_client_api::{BlockchainEvents, HeaderBackend};
 use sc_network::{
-	Event::{NotificationStreamClosed, NotificationStreamOpened, NotificationsReceived},
-	NetworkEventStream, NetworkNotification, NetworkPeers, NetworkStateInfo, ProtocolName,
+	service::traits::{NotificationEvent, ValidationResult},
+	NetworkNotification, NetworkPeers, NetworkStateInfo, NotificationService, ProtocolName,
 };
 use sc_transaction_pool_api::{
 	LocalTransactionPool, OffchainTransactionPoolFactory, TransactionPool,
@@ -154,12 +155,13 @@ pub async fn run<B, C, S, N, P>(
 	protocol_name: ProtocolName,
 	transaction_pool: Arc<P>,
 	keystore: Option<KeystorePtr>,
+	mut notification_service: Box<dyn NotificationService>,
 ) where
 	B: Block,
 	C: BlockchainEvents<B> + ProvideRuntimeApi<B> + HeaderBackend<B>,
 	C::Api: MixnetApi<B>,
 	S: SyncOracle,
-	N: NetworkStateInfo + NetworkEventStream + NetworkNotification + NetworkPeers,
+	N: NetworkStateInfo + NetworkNotification + NetworkPeers,
 	P: TransactionPool<Block = B> + LocalTransactionPool<Block = B> + 'static,
 {
 	let local_peer_id = network.local_peer_id();
@@ -189,7 +191,6 @@ pub async fn run<B, C, S, N, P>(
 	} else {
 		None
 	};
-	let mut network_events = network.event_stream("mixnet").fuse();
 	let mut next_forward_packet_delay = MaybeInfDelay::new(None);
 	let mut next_authored_packet_delay = MaybeInfDelay::new(None);
 	let mut ready_peers = FuturesUnordered::new();
@@ -248,33 +249,36 @@ pub async fn run<B, C, S, N, P>(
 				}
 			}
 
-			event = network_events.select_next_some() => match event {
-				NotificationStreamOpened { remote, protocol, .. }
-					if protocol == protocol_name => packet_dispatcher.add_peer(&remote),
-				NotificationStreamClosed { remote, protocol }
-					if protocol == protocol_name => packet_dispatcher.remove_peer(&remote),
-				NotificationsReceived { remote, messages } => {
-					for message in messages {
-						if message.0 == protocol_name {
-							match message.1.as_ref().try_into() {
-								Ok(packet) => handle_packet(packet,
-									&mut mixnet, &mut request_manager, &mut reply_manager,
-									&mut extrinsic_queue, &config.substrate),
-								Err(_) => debug!(target: LOG_TARGET,
-									"Dropped incorrectly sized packet ({} bytes) from {remote}",
-									message.1.len(),
-								),
-							}
-						}
+			event = notification_service.next_event().fuse() => match event {
+				None => todo!(),
+				Some(NotificationEvent::ValidateInboundSubstream { result_tx, .. }) => {
+					let _ = result_tx.send(ValidationResult::Accept);
+				},
+				Some(NotificationEvent::NotificationStreamOpened { peer, .. }) => {
+					packet_dispatcher.add_peer(&peer);
+				},
+				Some(NotificationEvent::NotificationStreamClosed { peer }) => {
+					packet_dispatcher.remove_peer(&peer);
+				},
+				Some(NotificationEvent::NotificationReceived { peer, notification }) => {
+					let notification: Bytes = notification.into();
+
+					match notification.as_ref().try_into() {
+						Ok(packet) => handle_packet(packet,
+							&mut mixnet, &mut request_manager, &mut reply_manager,
+							&mut extrinsic_queue, &config.substrate),
+						Err(_) => debug!(target: LOG_TARGET,
+							"Dropped incorrectly sized packet ({} bytes) from {peer}",
+							notification.len(),
+						),
 					}
-				}
-				_ => ()
+				},
 			},
 
 			_ = next_forward_packet_delay => {
 				if let Some(packet) = mixnet.pop_next_forward_packet() {
 					if let Some(ready_peer) = packet_dispatcher.dispatch(packet) {
-						if let Some(fut) = ready_peer.send_packet(&*network, protocol_name.clone()) {
+						if let Some(fut) = ready_peer.send_packet(&notification_service) {
 							ready_peers.push(fut);
 						}
 					}
@@ -288,7 +292,7 @@ pub async fn run<B, C, S, N, P>(
 			_ = next_authored_packet_delay => {
 				if let Some(packet) = mixnet.pop_next_authored_packet(&packet_dispatcher) {
 					if let Some(ready_peer) = packet_dispatcher.dispatch(packet) {
-						if let Some(fut) = ready_peer.send_packet(&*network, protocol_name.clone()) {
+						if let Some(fut) = ready_peer.send_packet(&notification_service) {
 							ready_peers.push(fut);
 						}
 					}
@@ -297,7 +301,7 @@ pub async fn run<B, C, S, N, P>(
 
 			ready_peer = ready_peers.select_next_some() => {
 				if let Some(ready_peer) = ready_peer {
-					if let Some(fut) = ready_peer.send_packet(&*network, protocol_name.clone()) {
+					if let Some(fut) = ready_peer.send_packet(&notification_service) {
 						ready_peers.push(fut);
 					}
 				}
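
Taken together, the changes above replace the filtered `NetworkEventStream` loop with a per-protocol event loop over `NotificationService::next_event()`. A condensed sketch of that shape, outside the surrounding `select!`; `connected_peers` and `handle_notification` are hypothetical placeholders:

```rust
// Condensed per-protocol event loop; every event already belongs to this
// protocol, so no filtering by protocol name is needed.
while let Some(event) = notification_service.next_event().await {
	match event {
		// Inbound substreams are now accepted or rejected by the protocol itself.
		NotificationEvent::ValidateInboundSubstream { result_tx, .. } => {
			let _ = result_tx.send(ValidationResult::Accept);
		},
		NotificationEvent::NotificationStreamOpened { peer, .. } => {
			connected_peers.insert(peer);
		},
		NotificationEvent::NotificationStreamClosed { peer } => {
			connected_peers.remove(&peer);
		},
		NotificationEvent::NotificationReceived { peer, notification } => {
			handle_notification(peer, notification);
		},
	}
}
```
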
diff --git a/substrate/client/mixnet/src/sync_with_runtime.rs b/substrate/client/mixnet/src/sync_with_runtime.rs
index 4a80b3c75f4..f3be9602541 100644
--- a/substrate/client/mixnet/src/sync_with_runtime.rs
+++ b/substrate/client/mixnet/src/sync_with_runtime.rs
@@ -196,6 +196,7 @@ where
 #[cfg(test)]
 mod tests {
 	use super::*;
+	use multiaddr::multiaddr;
 
 	#[test]
 	fn fixup_empty_external_addresses() {
diff --git a/substrate/client/network-gossip/Cargo.toml b/substrate/client/network-gossip/Cargo.toml
index 95e26a232c1..5006d5d0e3e 100644
--- a/substrate/client/network-gossip/Cargo.toml
+++ b/substrate/client/network-gossip/Cargo.toml
@@ -29,5 +29,7 @@ sp-runtime = { path = "../../primitives/runtime" }
 
 [dev-dependencies]
 tokio = "1.22.0"
+async-trait = "0.1.73"
+codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive" ] }
 quickcheck = { version = "1.0.3", default-features = false }
 substrate-test-runtime-client = { path = "../../test-utils/runtime/client" }
diff --git a/substrate/client/network-gossip/src/bridge.rs b/substrate/client/network-gossip/src/bridge.rs
index 8f7d490757b..c1bc414c3a3 100644
--- a/substrate/client/network-gossip/src/bridge.rs
+++ b/substrate/client/network-gossip/src/bridge.rs
@@ -21,7 +21,11 @@ use crate::{
 	Network, Syncing, Validator,
 };
 
-use sc_network::{event::Event, types::ProtocolName, ReputationChange};
+use sc_network::{
+	service::traits::{NotificationEvent, ValidationResult},
+	types::ProtocolName,
+	NotificationService, ReputationChange,
+};
 use sc_network_sync::SyncEvent;
 
 use futures::{
@@ -48,10 +52,10 @@ pub struct GossipEngine<B: BlockT> {
 	periodic_maintenance_interval: futures_timer::Delay,
 	protocol: ProtocolName,
 
-	/// Incoming events from the network.
-	network_event_stream: Pin<Box<dyn Stream<Item = Event> + Send>>,
 	/// Incoming events from the syncing service.
 	sync_event_stream: Pin<Box<dyn Stream<Item = SyncEvent> + Send>>,
+	/// Handle for polling notification-related events.
+	notification_service: Box<dyn NotificationService>,
 	/// Outgoing events to the consumer.
 	message_sinks: HashMap<B::Hash, Vec<Sender<TopicNotification>>>,
 	/// Buffered messages (see [`ForwardingState`]).
@@ -81,6 +85,7 @@ impl<B: BlockT> GossipEngine<B> {
 	pub fn new<N, S>(
 		network: N,
 		sync: S,
+		notification_service: Box<dyn NotificationService>,
 		protocol: impl Into<ProtocolName>,
 		validator: Arc<dyn Validator<B>>,
 		metrics_registry: Option<&Registry>,
@@ -91,17 +96,16 @@ impl<B: BlockT> GossipEngine<B> {
 		S: Syncing<B> + Send + Clone + 'static,
 	{
 		let protocol = protocol.into();
-		let network_event_stream = network.event_stream("network-gossip");
 		let sync_event_stream = sync.event_stream("network-gossip");
 
 		GossipEngine {
 			state_machine: ConsensusGossip::new(validator, protocol.clone(), metrics_registry),
 			network: Box::new(network),
 			sync: Box::new(sync),
+			notification_service,
 			periodic_maintenance_interval: futures_timer::Delay::new(PERIODIC_MAINTENANCE_INTERVAL),
 			protocol,
 
-			network_event_stream,
 			sync_event_stream,
 			message_sinks: HashMap::new(),
 			forwarding_state: ForwardingState::Idle,
@@ -125,7 +129,7 @@ impl<B: BlockT> GossipEngine<B> {
 
 	/// Broadcast all messages with given topic.
 	pub fn broadcast_topic(&mut self, topic: B::Hash, force: bool) {
-		self.state_machine.broadcast_topic(&mut *self.network, topic, force);
+		self.state_machine.broadcast_topic(&mut self.notification_service, topic, force);
 	}
 
 	/// Get data of valid, incoming messages for a topic (but might have expired meanwhile).
@@ -150,19 +154,21 @@ impl<B: BlockT> GossipEngine<B> {
 
 	/// Send all messages with given topic to a peer.
 	pub fn send_topic(&mut self, who: &PeerId, topic: B::Hash, force: bool) {
-		self.state_machine.send_topic(&mut *self.network, who, topic, force)
+		self.state_machine.send_topic(&mut self.notification_service, who, topic, force)
 	}
 
 	/// Multicast a message to all peers.
 	pub fn gossip_message(&mut self, topic: B::Hash, message: Vec<u8>, force: bool) {
-		self.state_machine.multicast(&mut *self.network, topic, message, force)
+		self.state_machine
+			.multicast(&mut self.notification_service, topic, message, force)
 	}
 
 	/// Send addressed message to the given peers. The message is not kept or multicast
 	/// later on.
 	pub fn send_message(&mut self, who: Vec<PeerId>, data: Vec<u8>) {
 		for who in &who {
-			self.state_machine.send_message(&mut *self.network, who, data.clone());
+			self.state_machine
+				.send_message(&mut self.notification_service, who, data.clone());
 		}
 	}
 
@@ -173,6 +179,11 @@ impl<B: BlockT> GossipEngine<B> {
 	pub fn announce(&self, block: B::Hash, associated_data: Option<Vec<u8>>) {
 		self.sync.announce_block(block, associated_data);
 	}
+
+	/// Consume [`GossipEngine`] and return the notification service.
+	pub fn take_notification_service(self) -> Box<dyn NotificationService> {
+		self.notification_service
+	}
 }
 
 impl<B: BlockT> Future for GossipEngine<B> {
@@ -184,46 +195,56 @@ impl<B: BlockT> Future for GossipEngine<B> {
 		'outer: loop {
 			match &mut this.forwarding_state {
 				ForwardingState::Idle => {
-					let net_event_stream = this.network_event_stream.poll_next_unpin(cx);
+					let next_notification_event =
+						this.notification_service.next_event().poll_unpin(cx);
 					let sync_event_stream = this.sync_event_stream.poll_next_unpin(cx);
 
-					if net_event_stream.is_pending() && sync_event_stream.is_pending() {
+					if next_notification_event.is_pending() && sync_event_stream.is_pending() {
 						break
 					}
 
-					match net_event_stream {
+					match next_notification_event {
 						Poll::Ready(Some(event)) => match event {
-							Event::NotificationStreamOpened { remote, protocol, role, .. } =>
-								if protocol == this.protocol {
-									this.state_machine.new_peer(&mut *this.network, remote, role);
-								},
-							Event::NotificationStreamClosed { remote, protocol } => {
-								if protocol == this.protocol {
-									this.state_machine
-										.peer_disconnected(&mut *this.network, remote);
-								}
+							NotificationEvent::ValidateInboundSubstream {
+								peer,
+								handshake,
+								result_tx,
+								..
+							} => {
+								// only accept peers whose role can be determined
+								let result = this
+									.network
+									.peer_role(peer, handshake)
+									.map_or(ValidationResult::Reject, |_| ValidationResult::Accept);
+								let _ = result_tx.send(result);
 							},
-							Event::NotificationsReceived { remote, messages } => {
-								let messages = messages
-									.into_iter()
-									.filter_map(|(engine, data)| {
-										if engine == this.protocol {
-											Some(data.to_vec())
-										} else {
-											None
-										}
-									})
-									.collect();
-
+							NotificationEvent::NotificationStreamOpened {
+								peer, handshake, ..
+							} => {
+								let Some(role) = this.network.peer_role(peer, handshake) else {
+									log::debug!(target: "gossip", "role for {peer} couldn't be determined");
+									continue
+								};
+
+								this.state_machine.new_peer(
+									&mut this.notification_service,
+									peer,
+									role,
+								);
+							},
+							NotificationEvent::NotificationStreamClosed { peer } => {
+								this.state_machine
+									.peer_disconnected(&mut this.notification_service, peer);
+							},
+							NotificationEvent::NotificationReceived { peer, notification } => {
 								let to_forward = this.state_machine.on_incoming(
 									&mut *this.network,
-									remote,
-									messages,
+									&mut this.notification_service,
+									peer,
+									vec![notification],
 								);
-
 								this.forwarding_state = ForwardingState::Busy(to_forward.into());
 							},
-							Event::Dht(_) => {},
 						},
 						// The network event stream closed. Do the same for [`GossipValidator`].
 						Poll::Ready(None) => {
@@ -306,7 +327,7 @@ impl<B: BlockT> Future for GossipEngine<B> {
 
 		while let Poll::Ready(()) = this.periodic_maintenance_interval.poll_unpin(cx) {
 			this.periodic_maintenance_interval.reset(PERIODIC_MAINTENANCE_INTERVAL);
-			this.state_machine.tick(&mut *this.network);
+			this.state_machine.tick(&mut this.notification_service);
 
 			this.message_sinks.retain(|_, sinks| {
 				sinks.retain(|sink| !sink.is_closed());
@@ -328,15 +349,19 @@ impl<B: BlockT> futures::future::FusedFuture for GossipEngine<B> {
 mod tests {
 	use super::*;
 	use crate::{multiaddr::Multiaddr, ValidationResult, ValidatorContext};
+	use codec::{DecodeAll, Encode};
 	use futures::{
-		channel::mpsc::{unbounded, UnboundedSender},
+		channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender},
 		executor::{block_on, block_on_stream},
 		future::poll_fn,
 	};
 	use quickcheck::{Arbitrary, Gen, QuickCheck};
 	use sc_network::{
-		config::MultiaddrWithPeerId, NetworkBlock, NetworkEventStream, NetworkNotification,
-		NetworkPeers, NotificationSenderError, NotificationSenderT as NotificationSender,
+		config::MultiaddrWithPeerId,
+		service::traits::{Direction, MessageSink, NotificationEvent},
+		Event, NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers,
+		NotificationSenderError, NotificationSenderT as NotificationSender, NotificationService,
+		Roles,
 	};
 	use sc_network_common::role::ObservedRole;
 	use sc_network_sync::SyncEventStream;
@@ -351,14 +376,10 @@ mod tests {
 	use substrate_test_runtime_client::runtime::Block;
 
 	#[derive(Clone, Default)]
-	struct TestNetwork {
-		inner: Arc<Mutex<TestNetworkInner>>,
-	}
+	struct TestNetwork {}
 
 	#[derive(Clone, Default)]
-	struct TestNetworkInner {
-		event_senders: Vec<UnboundedSender<Event>>,
-	}
+	struct TestNetworkInner {}
 
 	impl NetworkPeers for TestNetwork {
 		fn set_authorized_peers(&self, _peers: HashSet<PeerId>) {
@@ -422,14 +443,17 @@ mod tests {
 		fn sync_num_connected(&self) -> usize {
 			unimplemented!();
 		}
+
+		fn peer_role(&self, _peer_id: PeerId, handshake: Vec<u8>) -> Option<ObservedRole> {
+			Roles::decode_all(&mut &handshake[..])
+				.ok()
+				.map(ObservedRole::from)
+		}
 	}
 
 	impl NetworkEventStream for TestNetwork {
 		fn event_stream(&self, _name: &'static str) -> Pin<Box<dyn Stream<Item = Event> + Send>> {
-			let (tx, rx) = unbounded();
-			self.inner.lock().unwrap().event_senders.push(tx);
-
-			Box::pin(rx)
+			unimplemented!();
 		}
 	}
 
@@ -501,6 +525,58 @@ mod tests {
 		}
 	}
 
+	#[derive(Debug)]
+	pub(crate) struct TestNotificationService {
+		rx: UnboundedReceiver<NotificationEvent>,
+	}
+
+	#[async_trait::async_trait]
+	impl sc_network::service::traits::NotificationService for TestNotificationService {
+		async fn open_substream(&mut self, _peer: PeerId) -> Result<(), ()> {
+			unimplemented!();
+		}
+
+		async fn close_substream(&mut self, _peer: PeerId) -> Result<(), ()> {
+			unimplemented!();
+		}
+
+		fn send_sync_notification(&self, _peer: &PeerId, _notification: Vec<u8>) {
+			unimplemented!();
+		}
+
+		async fn send_async_notification(
+			&self,
+			_peer: &PeerId,
+			_notification: Vec<u8>,
+		) -> Result<(), sc_network::error::Error> {
+			unimplemented!();
+		}
+
+		async fn set_handshake(&mut self, _handshake: Vec<u8>) -> Result<(), ()> {
+			unimplemented!();
+		}
+
+		fn try_set_handshake(&mut self, _handshake: Vec<u8>) -> Result<(), ()> {
+			unimplemented!();
+		}
+
+		async fn next_event(&mut self) -> Option<NotificationEvent> {
+			self.rx.next().await
+		}
+
+		fn clone(&mut self) -> Result<Box<dyn NotificationService>, ()> {
+			unimplemented!();
+		}
+
+		fn protocol(&self) -> &ProtocolName {
+			unimplemented!();
+		}
+
+		fn message_sink(&self, _peer: &PeerId) -> Option<Box<dyn MessageSink>> {
+			unimplemented!();
+		}
+	}
+
 	struct AllowAll;
 	impl Validator<Block> for AllowAll {
 		fn validate(
@@ -521,16 +597,19 @@ mod tests {
 	fn returns_when_network_event_stream_closes() {
 		let network = TestNetwork::default();
 		let sync = Arc::new(TestSync::default());
+		let (tx, rx) = unbounded();
+		let notification_service = Box::new(TestNotificationService { rx });
 		let mut gossip_engine = GossipEngine::<Block>::new(
 			network.clone(),
 			sync,
+			notification_service,
 			"/my_protocol",
 			Arc::new(AllowAll {}),
 			None,
 		);
 
-		// Drop network event stream sender side.
-		drop(network.inner.lock().unwrap().event_senders.pop());
+		// drop notification service sender side.
+		drop(tx);
 
 		block_on(poll_fn(move |ctx| {
 			if let Poll::Pending = gossip_engine.poll_unpin(ctx) {
@@ -550,42 +629,37 @@ mod tests {
 		let remote_peer = PeerId::random();
 		let network = TestNetwork::default();
 		let sync = Arc::new(TestSync::default());
+		let (mut tx, rx) = unbounded();
+		let notification_service = Box::new(TestNotificationService { rx });
 
 		let mut gossip_engine = GossipEngine::<Block>::new(
 			network.clone(),
 			sync.clone(),
+			notification_service,
 			protocol.clone(),
 			Arc::new(AllowAll {}),
 			None,
 		);
 
-		let mut event_sender = network.inner.lock().unwrap().event_senders.pop().unwrap();
-
 		// Register the remote peer.
-		event_sender
-			.start_send(Event::NotificationStreamOpened {
-				remote: remote_peer,
-				protocol: protocol.clone(),
-				negotiated_fallback: None,
-				role: ObservedRole::Authority,
-				received_handshake: vec![],
-			})
-			.expect("Event stream is unbounded; qed.");
+		tx.send(NotificationEvent::NotificationStreamOpened {
+			peer: remote_peer,
+			direction: Direction::Inbound,
+			negotiated_fallback: None,
+			handshake: Roles::FULL.encode(),
+		})
+		.await
+		.unwrap();
 
 		let messages = vec![vec![1], vec![2]];
-		let events = messages
-			.iter()
-			.cloned()
-			.map(|m| Event::NotificationsReceived {
-				remote: remote_peer,
-				messages: vec![(protocol.clone(), m.into())],
-			})
-			.collect::<Vec<_>>();
 
 		// Send first event before subscribing.
-		event_sender
-			.start_send(events[0].clone())
-			.expect("Event stream is unbounded; qed.");
+		tx.send(NotificationEvent::NotificationReceived {
+			peer: remote_peer,
+			notification: messages[0].clone().into(),
+		})
+		.await
+		.unwrap();
 
 		let mut subscribers = vec![];
 		for _ in 0..2 {
@@ -593,9 +667,12 @@ mod tests {
 		}
 
 		// Send second event after subscribing.
-		event_sender
-			.start_send(events[1].clone())
-			.expect("Event stream is unbounded; qed.");
+		tx.send(NotificationEvent::NotificationReceived {
+			peer: remote_peer,
+			notification: messages[1].clone().into(),
+		})
+		.await
+		.unwrap();
 
 		tokio::spawn(gossip_engine);
 
@@ -672,6 +749,8 @@ mod tests {
 			let remote_peer = PeerId::random();
 			let network = TestNetwork::default();
 			let sync = Arc::new(TestSync::default());
+			let (mut tx, rx) = unbounded();
+			let notification_service = Box::new(TestNotificationService { rx });
 
 			let num_channels_per_topic = channels.iter().fold(
 				HashMap::new(),
@@ -699,6 +778,7 @@ mod tests {
 			let mut gossip_engine = GossipEngine::<Block>::new(
 				network.clone(),
 				sync.clone(),
+				notification_service,
 				protocol.clone(),
 				Arc::new(TestValidator {}),
 				None,
@@ -724,22 +804,18 @@ mod tests {
 				}
 			}
 
-			let mut event_sender = network.inner.lock().unwrap().event_senders.pop().unwrap();
-
 			// Register the remote peer.
-			event_sender
-				.start_send(Event::NotificationStreamOpened {
-					remote: remote_peer,
-					protocol: protocol.clone(),
-					negotiated_fallback: None,
-					role: ObservedRole::Authority,
-					received_handshake: vec![],
-				})
-				.expect("Event stream is unbounded; qed.");
+			tx.start_send(NotificationEvent::NotificationStreamOpened {
+				peer: remote_peer,
+				direction: Direction::Inbound,
+				negotiated_fallback: None,
+				handshake: Roles::FULL.encode(),
+			})
+			.unwrap();
 
 			// Send messages into the network event stream.
 			for (i_notification, messages) in notifications.iter().enumerate() {
-				let messages = messages
+				let messages: Vec<Vec<u8>> = messages
 					.into_iter()
 					.enumerate()
 					.map(|(i_message, Message { topic })| {
@@ -752,13 +828,17 @@ mod tests {
 						message.push(i_notification.try_into().unwrap());
 						message.push(i_message.try_into().unwrap());
 
-						(protocol.clone(), message.into())
+						message.into()
 					})
 					.collect();
 
-				event_sender
-					.start_send(Event::NotificationsReceived { remote: remote_peer, messages })
-					.expect("Event stream is unbounded; qed.");
+				for message in messages {
+					tx.start_send(NotificationEvent::NotificationReceived {
+						peer: remote_peer,
+						notification: message,
+					})
+					.unwrap();
+				}
 			}
 
 			let mut received_msgs_per_topic_all_chan = HashMap::<H256, _>::new();
diff --git a/substrate/client/network-gossip/src/state_machine.rs b/substrate/client/network-gossip/src/state_machine.rs
index 4bfb5a7d37f..91b56b0f097 100644
--- a/substrate/client/network-gossip/src/state_machine.rs
+++ b/substrate/client/network-gossip/src/state_machine.rs
@@ -23,7 +23,7 @@ use libp2p::PeerId;
 use schnellru::{ByLength, LruMap};
 
 use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64};
-use sc_network::types::ProtocolName;
+use sc_network::{types::ProtocolName, NotificationService};
 use sc_network_common::role::ObservedRole;
 use sp_runtime::traits::{Block as BlockT, Hash, HashingFor};
 use std::{collections::HashMap, iter, sync::Arc, time, time::Instant};
@@ -74,33 +74,33 @@ struct MessageEntry<B: BlockT> {
 /// Local implementation of `ValidatorContext`.
 struct NetworkContext<'g, 'p, B: BlockT> {
 	gossip: &'g mut ConsensusGossip<B>,
-	network: &'p mut dyn Network<B>,
+	notification_service: &'p mut Box<dyn NotificationService>,
 }
 
 impl<'g, 'p, B: BlockT> ValidatorContext<B> for NetworkContext<'g, 'p, B> {
 	/// Broadcast all messages with given topic to peers that do not have it yet.
 	fn broadcast_topic(&mut self, topic: B::Hash, force: bool) {
-		self.gossip.broadcast_topic(self.network, topic, force);
+		self.gossip.broadcast_topic(self.notification_service, topic, force);
 	}
 
 	/// Broadcast a message to all peers that have not received it previously.
 	fn broadcast_message(&mut self, topic: B::Hash, message: Vec<u8>, force: bool) {
-		self.gossip.multicast(self.network, topic, message, force);
+		self.gossip.multicast(self.notification_service, topic, message, force);
 	}
 
 	/// Send addressed message to a peer.
 	fn send_message(&mut self, who: &PeerId, message: Vec<u8>) {
-		self.network.write_notification(*who, self.gossip.protocol.clone(), message);
+		self.notification_service.send_sync_notification(who, message);
 	}
 
 	/// Send all messages with given topic to a peer.
 	fn send_topic(&mut self, who: &PeerId, topic: B::Hash, force: bool) {
-		self.gossip.send_topic(self.network, who, topic, force);
+		self.gossip.send_topic(self.notification_service, who, topic, force);
 	}
 }
 
 fn propagate<'a, B: BlockT, I>(
-	network: &mut dyn Network<B>,
+	notification_service: &mut Box<dyn NotificationService>,
 	protocol: ProtocolName,
 	messages: I,
 	intent: MessageIntent,
@@ -147,7 +147,7 @@ where
 				?message,
 				"Propagating message",
 			);
-			network.write_notification(*id, protocol.clone(), message.clone());
+			notification_service.send_sync_notification(id, message.clone());
 		}
 	}
 }
@@ -191,7 +191,12 @@ impl<B: BlockT> ConsensusGossip<B> {
 	}
 
 	/// Handle new connected peer.
-	pub fn new_peer(&mut self, network: &mut dyn Network<B>, who: PeerId, role: ObservedRole) {
+	pub fn new_peer(
+		&mut self,
+		notification_service: &mut Box<dyn NotificationService>,
+		who: PeerId,
+		role: ObservedRole,
+	) {
 		tracing::trace!(
 			target:"gossip",
 			%who,
@@ -202,7 +207,7 @@ impl<B: BlockT> ConsensusGossip<B> {
 		self.peers.insert(who, PeerConsensus { known_messages: Default::default() });
 
 		let validator = self.validator.clone();
-		let mut context = NetworkContext { gossip: self, network };
+		let mut context = NetworkContext { gossip: self, notification_service };
 		validator.new_peer(&mut context, &who, role);
 	}
 
@@ -233,30 +238,35 @@ impl<B: BlockT> ConsensusGossip<B> {
 	}
 
 	/// Call when a peer has been disconnected to stop tracking gossip status.
-	pub fn peer_disconnected(&mut self, network: &mut dyn Network<B>, who: PeerId) {
+	pub fn peer_disconnected(
+		&mut self,
+		notification_service: &mut Box<dyn NotificationService>,
+		who: PeerId,
+	) {
 		let validator = self.validator.clone();
-		let mut context = NetworkContext { gossip: self, network };
+		let mut context = NetworkContext { gossip: self, notification_service };
 		validator.peer_disconnected(&mut context, &who);
 		self.peers.remove(&who);
 	}
 
 	/// Perform periodic maintenance
-	pub fn tick(&mut self, network: &mut dyn Network<B>) {
+	pub fn tick(&mut self, notification_service: &mut Box<dyn NotificationService>) {
 		self.collect_garbage();
 		if Instant::now() >= self.next_broadcast {
-			self.rebroadcast(network);
+			self.rebroadcast(notification_service);
 			self.next_broadcast = Instant::now() + REBROADCAST_INTERVAL;
 		}
 	}
 
 	/// Rebroadcast all messages to all peers.
-	fn rebroadcast(&mut self, network: &mut dyn Network<B>) {
+	fn rebroadcast(&mut self, notification_service: &mut Box<dyn NotificationService>) {
 		let messages = self
 			.messages
 			.iter()
 			.map(|entry| (&entry.message_hash, &entry.topic, &entry.message));
+
 		propagate(
-			network,
+			notification_service,
 			self.protocol.clone(),
 			messages,
 			MessageIntent::PeriodicRebroadcast,
@@ -266,7 +276,12 @@ impl<B: BlockT> ConsensusGossip<B> {
 	}
 
 	/// Broadcast all messages with given topic.
-	pub fn broadcast_topic(&mut self, network: &mut dyn Network<B>, topic: B::Hash, force: bool) {
+	pub fn broadcast_topic(
+		&mut self,
+		notification_service: &mut Box<dyn NotificationService>,
+		topic: B::Hash,
+		force: bool,
+	) {
 		let messages = self.messages.iter().filter_map(|entry| {
 			if entry.topic == topic {
 				Some((&entry.message_hash, &entry.topic, &entry.message))
@@ -276,7 +291,7 @@ impl<B: BlockT> ConsensusGossip<B> {
 		});
 		let intent = if force { MessageIntent::ForcedBroadcast } else { MessageIntent::Broadcast };
 		propagate(
-			network,
+			notification_service,
 			self.protocol.clone(),
 			messages,
 			intent,
@@ -327,6 +342,7 @@ impl<B: BlockT> ConsensusGossip<B> {
 	pub fn on_incoming(
 		&mut self,
 		network: &mut dyn Network<B>,
+		notification_service: &mut Box<dyn NotificationService>,
 		who: PeerId,
 		messages: Vec<Vec<u8>>,
 	) -> Vec<(B::Hash, TopicNotification)> {
@@ -367,7 +383,7 @@ impl<B: BlockT> ConsensusGossip<B> {
 			// validate the message
 			let validation = {
 				let validator = self.validator.clone();
-				let mut context = NetworkContext { gossip: self, network };
+				let mut context = NetworkContext { gossip: self, notification_service };
 				validator.validate(&mut context, &who, &message)
 			};
 
@@ -414,7 +430,7 @@ impl<B: BlockT> ConsensusGossip<B> {
 	/// Send all messages with given topic to a peer.
 	pub fn send_topic(
 		&mut self,
-		network: &mut dyn Network<B>,
+		notification_service: &mut Box<dyn NotificationService>,
 		who: &PeerId,
 		topic: B::Hash,
 		force: bool,
@@ -443,7 +459,7 @@ impl<B: BlockT> ConsensusGossip<B> {
 					?entry.message,
 					"Sending topic message",
 				);
-				network.write_notification(*who, self.protocol.clone(), entry.message.clone());
+				notification_service.send_sync_notification(who, entry.message.clone());
 			}
 		}
 	}
@@ -451,7 +467,7 @@ impl<B: BlockT> ConsensusGossip<B> {
 	/// Multicast a message to all peers.
 	pub fn multicast(
 		&mut self,
-		network: &mut dyn Network<B>,
+		notification_service: &mut Box<dyn NotificationService>,
 		topic: B::Hash,
 		message: Vec<u8>,
 		force: bool,
@@ -460,7 +476,7 @@ impl<B: BlockT> ConsensusGossip<B> {
 		self.register_message_hashed(message_hash, topic, message.clone(), None);
 		let intent = if force { MessageIntent::ForcedBroadcast } else { MessageIntent::Broadcast };
 		propagate(
-			network,
+			notification_service,
 			self.protocol.clone(),
 			iter::once((&message_hash, &topic, &message)),
 			intent,
@@ -471,7 +487,12 @@ impl<B: BlockT> ConsensusGossip<B> {
 
 	/// Send addressed message to a peer. The message is not kept or multicast
 	/// later on.
-	pub fn send_message(&mut self, network: &mut dyn Network<B>, who: &PeerId, message: Vec<u8>) {
+	pub fn send_message(
+		&mut self,
+		notification_service: &mut Box<dyn NotificationService>,
+		who: &PeerId,
+		message: Vec<u8>,
+	) {
 		let peer = match self.peers.get_mut(who) {
 			None => return,
 			Some(peer) => peer,
@@ -488,7 +509,7 @@ impl<B: BlockT> ConsensusGossip<B> {
 		);
 
 		peer.known_messages.insert(message_hash);
-		network.write_notification(*who, self.protocol.clone(), message);
+		notification_service.send_sync_notification(who, message)
 	}
 }
 
@@ -524,9 +545,9 @@ mod tests {
 	use crate::multiaddr::Multiaddr;
 	use futures::prelude::*;
 	use sc_network::{
-		config::MultiaddrWithPeerId, event::Event, NetworkBlock, NetworkEventStream,
-		NetworkNotification, NetworkPeers, NotificationSenderError,
-		NotificationSenderT as NotificationSender, ReputationChange,
+		config::MultiaddrWithPeerId, event::Event, service::traits::NotificationEvent, MessageSink,
+		NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers,
+		NotificationSenderError, NotificationSenderT as NotificationSender, ReputationChange,
 	};
 	use sp_runtime::{
 		testing::{Block as RawBlock, ExtrinsicWrapper, H256},
@@ -651,6 +672,10 @@ mod tests {
 		fn sync_num_connected(&self) -> usize {
 			unimplemented!();
 		}
+
+		fn peer_role(&self, _peer_id: PeerId, _handshake: Vec<u8>) -> Option<ObservedRole> {
+			None
+		}
 	}
 
 	impl NetworkEventStream for NoOpNetwork {
@@ -691,6 +716,62 @@ mod tests {
 		}
 	}
 
+	#[derive(Debug, Default)]
+	struct NoOpNotificationService {}
+
+	#[async_trait::async_trait]
+	impl NotificationService for NoOpNotificationService {
+		/// Instruct `Notifications` to open a new substream for `peer`.
+		async fn open_substream(&mut self, _peer: PeerId) -> Result<(), ()> {
+			unimplemented!();
+		}
+
+		/// Instruct `Notifications` to close substream for `peer`.
+		async fn close_substream(&mut self, _peer: PeerId) -> Result<(), ()> {
+			unimplemented!();
+		}
+
+		/// Send synchronous `notification` to `peer`.
+		fn send_sync_notification(&self, _peer: &PeerId, _notification: Vec<u8>) {
+			unimplemented!();
+		}
+
+		/// Send asynchronous `notification` to `peer`, allowing sender to exercise backpressure.
+		async fn send_async_notification(
+			&self,
+			_peer: &PeerId,
+			_notification: Vec<u8>,
+		) -> Result<(), sc_network::error::Error> {
+			unimplemented!();
+		}
+
+		/// Set handshake for the notification protocol replacing the old handshake.
+		async fn set_handshake(&mut self, _handshake: Vec<u8>) -> Result<(), ()> {
+			unimplemented!();
+		}
+
+		fn try_set_handshake(&mut self, _handshake: Vec<u8>) -> Result<(), ()> {
+			unimplemented!();
+		}
+
+		/// Get next event from the `Notifications` event stream.
+		async fn next_event(&mut self) -> Option<NotificationEvent> {
+			None
+		}
+
+		fn clone(&mut self) -> Result<Box<dyn NotificationService>, ()> {
+			unimplemented!();
+		}
+
+		fn protocol(&self) -> &ProtocolName {
+			unimplemented!();
+		}
+
+		fn message_sink(&self, _peer: &PeerId) -> Option<Box<dyn MessageSink>> {
+			unimplemented!();
+		}
+	}
+
 	#[test]
 	fn collects_garbage() {
 		struct AllowOne;
@@ -773,20 +854,28 @@ mod tests {
 	fn peer_is_removed_on_disconnect() {
 		let mut consensus = ConsensusGossip::<Block>::new(Arc::new(AllowAll), "/foo".into(), None);
 
-		let mut network = NoOpNetwork::default();
+		let mut notification_service: Box<dyn NotificationService> =
+			Box::new(NoOpNotificationService::default());
 
 		let peer_id = PeerId::random();
-		consensus.new_peer(&mut network, peer_id, ObservedRole::Full);
+		consensus.new_peer(&mut notification_service, peer_id, ObservedRole::Full);
 		assert!(consensus.peers.contains_key(&peer_id));
 
-		consensus.peer_disconnected(&mut network, peer_id);
+		consensus.peer_disconnected(&mut notification_service, peer_id);
 		assert!(!consensus.peers.contains_key(&peer_id));
 	}
 
 	#[test]
 	fn on_incoming_ignores_discarded_messages() {
+		let mut notification_service: Box<dyn NotificationService> =
+			Box::new(NoOpNotificationService::default());
 		let to_forward = ConsensusGossip::<Block>::new(Arc::new(DiscardAll), "/foo".into(), None)
-			.on_incoming(&mut NoOpNetwork::default(), PeerId::random(), vec![vec![1, 2, 3]]);
+			.on_incoming(
+				&mut NoOpNetwork::default(),
+				&mut notification_service,
+				PeerId::random(),
+				vec![vec![1, 2, 3]],
+			);
 
 		assert!(
 			to_forward.is_empty(),
@@ -798,11 +887,14 @@ mod tests {
 	#[test]
 	fn on_incoming_ignores_unregistered_peer() {
 		let mut network = NoOpNetwork::default();
+		let mut notification_service: Box<dyn NotificationService> =
+			Box::new(NoOpNotificationService::default());
 		let remote = PeerId::random();
 
 		let to_forward = ConsensusGossip::<Block>::new(Arc::new(AllowAll), "/foo".into(), None)
 			.on_incoming(
 				&mut network,
+				&mut notification_service,
 				// Unregistered peer.
 				remote,
 				vec![vec![1, 2, 3]],
@@ -822,18 +914,20 @@ mod tests {
 		let mut consensus = ConsensusGossip::<Block>::new(Arc::new(AllowAll), "/foo".into(), None);
 
 		let mut network = NoOpNetwork::default();
+		let mut notification_service: Box<dyn NotificationService> =
+			Box::new(NoOpNotificationService::default());
 
 		let peer_id = PeerId::random();
-		consensus.new_peer(&mut network, peer_id, ObservedRole::Full);
+		consensus.new_peer(&mut notification_service, peer_id, ObservedRole::Full);
 		assert!(consensus.peers.contains_key(&peer_id));
 
 		let peer_id2 = PeerId::random();
-		consensus.new_peer(&mut network, peer_id2, ObservedRole::Full);
+		consensus.new_peer(&mut notification_service, peer_id2, ObservedRole::Full);
 		assert!(consensus.peers.contains_key(&peer_id2));
 
 		let message = vec![vec![1, 2, 3]];
-		consensus.on_incoming(&mut network, peer_id, message.clone());
-		consensus.on_incoming(&mut network, peer_id2, message.clone());
+		consensus.on_incoming(&mut network, &mut notification_service, peer_id, message.clone());
+		consensus.on_incoming(&mut network, &mut notification_service, peer_id2, message.clone());
 
 		assert_eq!(
 			vec![(peer_id, rep::GOSSIP_SUCCESS)],
diff --git a/substrate/client/network/Cargo.toml b/substrate/client/network/Cargo.toml
index 7b0536addda..8b599f058f7 100644
--- a/substrate/client/network/Cargo.toml
+++ b/substrate/client/network/Cargo.toml
@@ -37,6 +37,8 @@ serde = { version = "1.0.188", features = ["derive"] }
 serde_json = "1.0.108"
 smallvec = "1.11.0"
 thiserror = "1.0"
+tokio = { version = "1.22.0", features = ["macros", "sync"] }
+tokio-stream = "0.1.7"
 unsigned-varint = { version = "0.7.1", features = ["futures", "asynchronous_codec"] }
 zeroize = "1.4.3"
 prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" }
diff --git a/substrate/client/network/common/src/role.rs b/substrate/client/network/common/src/role.rs
index fd02c00e232..11b7a7924c4 100644
--- a/substrate/client/network/common/src/role.rs
+++ b/substrate/client/network/common/src/role.rs
@@ -28,7 +28,7 @@ use codec::{self, Encode, EncodeLike, Input, Output};
 /// > **Note**: This enum is different from the `Role` enum. The `Role` enum indicates what a
 /// >			node says about itself, while `ObservedRole` is a `Role` merged with the
 /// >			information known locally about that node.
-#[derive(Debug, Clone)]
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
 pub enum ObservedRole {
 	/// Full node.
 	Full,
@@ -45,6 +45,18 @@ impl ObservedRole {
 	}
 }
 
+impl From<Roles> for ObservedRole {
+	fn from(roles: Roles) -> Self {
+		if roles.is_authority() {
+			ObservedRole::Authority
+		} else if roles.is_full() {
+			ObservedRole::Full
+		} else {
+			ObservedRole::Light
+		}
+	}
+}
+
 /// Role of the local node.
 #[derive(Debug, Clone)]
 pub enum Role {
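
The `From<Roles>` conversion above is what the new `peer_role` implementations build on: a remote's handshake is decoded into `Roles` and then mapped onto the coarser `ObservedRole`. A minimal sketch of that pattern, assuming the handshake is a SCALE-encoded `Roles` value (the helper name is illustrative, not part of the patch):

    use codec::DecodeAll;
    use sc_network_common::role::{ObservedRole, Roles};

    /// Try to derive an `ObservedRole` from a raw handshake.
    fn role_from_handshake(handshake: &[u8]) -> Option<ObservedRole> {
        Roles::decode_all(&mut &handshake[..]).ok().map(ObservedRole::from)
    }
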
diff --git a/substrate/client/network/src/behaviour.rs b/substrate/client/network/src/behaviour.rs
index 0aa724818e0..9f770bc3ba7 100644
--- a/substrate/client/network/src/behaviour.rs
+++ b/substrate/client/network/src/behaviour.rs
@@ -22,12 +22,13 @@ use crate::{
 	peer_info,
 	peer_store::PeerStoreHandle,
 	protocol::{CustomMessageOutcome, NotificationsSink, Protocol},
+	protocol_controller::SetId,
 	request_responses::{self, IfDisconnected, ProtocolConfig, RequestFailure},
+	service::traits::Direction,
 	types::ProtocolName,
 	ReputationChange,
 };
 
-use bytes::Bytes;
 use futures::channel::oneshot;
 use libp2p::{
 	core::Multiaddr, identify::Info as IdentifyInfo, identity::PublicKey, kad::RecordKey,
@@ -35,7 +36,6 @@ use libp2p::{
 };
 
 use parking_lot::Mutex;
-use sc_network_common::role::{ObservedRole, Roles};
 use sp_runtime::traits::Block as BlockT;
 use std::{collections::HashSet, sync::Arc, time::Duration};
 
@@ -97,8 +97,10 @@ pub enum BehaviourOut {
 	NotificationStreamOpened {
 		/// Node we opened the substream with.
 		remote: PeerId,
-		/// The concerned protocol. Each protocol uses a different substream.
-		protocol: ProtocolName,
+		/// Set ID.
+		set_id: SetId,
+		/// Direction of the stream.
+		direction: Direction,
 		/// If the negotiation didn't use the main name of the protocol (the one in
 		/// `notifications_protocol`), then this field contains which name has actually been
 		/// used.
@@ -106,8 +108,6 @@ pub enum BehaviourOut {
 		negotiated_fallback: Option<ProtocolName>,
 		/// Object that permits sending notifications to the peer.
 		notifications_sink: NotificationsSink,
-		/// Role of the remote.
-		role: ObservedRole,
 		/// Received handshake.
 		received_handshake: Vec<u8>,
 	},
@@ -120,8 +120,8 @@ pub enum BehaviourOut {
 	NotificationStreamReplaced {
 		/// Id of the peer we are connected to.
 		remote: PeerId,
-		/// The concerned protocol. Each protocol uses a different substream.
-		protocol: ProtocolName,
+		/// Set ID.
+		set_id: SetId,
 		/// Replacement for the previous [`NotificationsSink`].
 		notifications_sink: NotificationsSink,
 	},
@@ -131,16 +131,18 @@ pub enum BehaviourOut {
 	NotificationStreamClosed {
 		/// Node we closed the substream with.
 		remote: PeerId,
-		/// The concerned protocol. Each protocol uses a different substream.
-		protocol: ProtocolName,
+		/// Set ID.
+		set_id: SetId,
 	},
 
 	/// Received one or more messages from the given node using the given protocol.
 	NotificationsReceived {
 		/// Node we received the message from.
 		remote: PeerId,
+		/// Set ID.
+		set_id: SetId,
 		/// Concerned protocol and associated message.
-		messages: Vec<(ProtocolName, Bytes)>,
+		notification: Vec<u8>,
 	},
 
 	/// We have obtained identity information from a peer, including the addresses it is listening
@@ -272,44 +274,33 @@ impl<B: BlockT> Behaviour<B> {
 	}
 }
 
-fn reported_roles_to_observed_role(roles: Roles) -> ObservedRole {
-	if roles.is_authority() {
-		ObservedRole::Authority
-	} else if roles.is_full() {
-		ObservedRole::Full
-	} else {
-		ObservedRole::Light
-	}
-}
-
 impl From<CustomMessageOutcome> for BehaviourOut {
 	fn from(event: CustomMessageOutcome) -> Self {
 		match event {
 			CustomMessageOutcome::NotificationStreamOpened {
 				remote,
-				protocol,
+				set_id,
+				direction,
 				negotiated_fallback,
-				roles,
 				received_handshake,
 				notifications_sink,
 			} => BehaviourOut::NotificationStreamOpened {
 				remote,
-				protocol,
+				set_id,
+				direction,
 				negotiated_fallback,
-				role: reported_roles_to_observed_role(roles),
 				received_handshake,
 				notifications_sink,
 			},
 			CustomMessageOutcome::NotificationStreamReplaced {
 				remote,
-				protocol,
+				set_id,
 				notifications_sink,
-			} => BehaviourOut::NotificationStreamReplaced { remote, protocol, notifications_sink },
-			CustomMessageOutcome::NotificationStreamClosed { remote, protocol } =>
-				BehaviourOut::NotificationStreamClosed { remote, protocol },
-			CustomMessageOutcome::NotificationsReceived { remote, messages } =>
-				BehaviourOut::NotificationsReceived { remote, messages },
-			CustomMessageOutcome::None => BehaviourOut::None,
+			} => BehaviourOut::NotificationStreamReplaced { remote, set_id, notifications_sink },
+			CustomMessageOutcome::NotificationStreamClosed { remote, set_id } =>
+				BehaviourOut::NotificationStreamClosed { remote, set_id },
+			CustomMessageOutcome::NotificationsReceived { remote, set_id, notification } =>
+				BehaviourOut::NotificationsReceived { remote, set_id, notification },
 		}
 	}
 }
diff --git a/substrate/client/network/src/config.rs b/substrate/client/network/src/config.rs
index 124d73a74db..24e96843c32 100644
--- a/substrate/client/network/src/config.rs
+++ b/substrate/client/network/src/config.rs
@@ -23,10 +23,11 @@
 
 pub use crate::{
 	discovery::DEFAULT_KADEMLIA_REPLICATION_FACTOR,
-	protocol::NotificationsSink,
+	protocol::{notification_service, NotificationsSink, ProtocolHandlePair},
 	request_responses::{
 		IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig,
 	},
+	service::traits::NotificationService,
 	types::ProtocolName,
 };
 
@@ -47,7 +48,6 @@ pub use sc_network_common::{
 	ExHashT,
 };
 
-use sc_utils::mpsc::TracingUnboundedSender;
 use sp_runtime::traits::Block as BlockT;
 
 use std::{
@@ -454,14 +454,14 @@ impl Default for SetConfig {
 ///
 /// > **Note**: As new fields might be added in the future, please consider using the `new` method
 /// >			and modifiers instead of creating this struct manually.
-#[derive(Clone, Debug)]
+#[derive(Debug)]
 pub struct NonDefaultSetConfig {
 	/// Name of the notifications protocols of this set. A substream on this set will be
 	/// considered established once this protocol is open.
 	///
 	/// > **Note**: This field isn't present for the default set, as this is handled internally
 	/// > by the networking code.
-	pub notifications_protocol: ProtocolName,
+	protocol_name: ProtocolName,
 
 	/// If the remote reports that it doesn't support the protocol indicated in the
 	/// `notifications_protocol` field, then each of these fallback names will be tried one by
@@ -469,37 +469,84 @@ pub struct NonDefaultSetConfig {
 	///
 	/// If a fallback is used, it will be reported in
 	/// `sc_network::protocol::event::Event::NotificationStreamOpened::negotiated_fallback`
-	pub fallback_names: Vec<ProtocolName>,
+	fallback_names: Vec<ProtocolName>,
 
 	/// Handshake of the protocol
 	///
 	/// NOTE: Currently custom handshakes are not fully supported. See issue #5685 for more
 	/// details. This field is temporarily used to allow moving the hardcoded block announcement
 	/// protocol out of `protocol.rs`.
-	pub handshake: Option<NotificationHandshake>,
+	handshake: Option<NotificationHandshake>,
 
 	/// Maximum allowed size of single notifications.
-	pub max_notification_size: u64,
+	max_notification_size: u64,
 
 	/// Base configuration.
-	pub set_config: SetConfig,
+	set_config: SetConfig,
+
+	/// Notification handle.
+	///
+	/// The notification handle is created when the `NonDefaultSetConfig` is created. Its other
+	/// half, `Box<dyn NotificationService>`, is given to the protocol that created the config,
+	/// while the `ProtocolHandle` is given to `Notifications` when it initializes itself. This
+	/// handle allows `Notifications` to communicate with the protocol directly without relaying
+	/// events through `sc-network`.
+	protocol_handle_pair: ProtocolHandlePair,
 }
 
 impl NonDefaultSetConfig {
 	/// Creates a new [`NonDefaultSetConfig`]. Zero slots and accepts only reserved nodes.
-	pub fn new(notifications_protocol: ProtocolName, max_notification_size: u64) -> Self {
-		Self {
-			notifications_protocol,
-			max_notification_size,
-			fallback_names: Vec::new(),
-			handshake: None,
-			set_config: SetConfig {
-				in_peers: 0,
-				out_peers: 0,
-				reserved_nodes: Vec::new(),
-				non_reserved_mode: NonReservedPeerMode::Deny,
+	/// Also returns an object which allows the protocol to communicate with `Notifications`.
+	pub fn new(
+		protocol_name: ProtocolName,
+		fallback_names: Vec<ProtocolName>,
+		max_notification_size: u64,
+		handshake: Option<NotificationHandshake>,
+		set_config: SetConfig,
+	) -> (Self, Box<dyn NotificationService>) {
+		let (protocol_handle_pair, notification_service) =
+			notification_service(protocol_name.clone());
+		(
+			Self {
+				protocol_name,
+				max_notification_size,
+				fallback_names,
+				handshake,
+				set_config,
+				protocol_handle_pair,
 			},
-		}
+			notification_service,
+		)
+	}
+
+	/// Get reference to protocol name.
+	pub fn protocol_name(&self) -> &ProtocolName {
+		&self.protocol_name
+	}
+
+	/// Get reference to fallback protocol names.
+	pub fn fallback_names(&self) -> impl Iterator<Item = &ProtocolName> {
+		self.fallback_names.iter()
+	}
+
+	/// Get reference to handshake.
+	pub fn handshake(&self) -> &Option<NotificationHandshake> {
+		&self.handshake
+	}
+
+	/// Get maximum notification size.
+	pub fn max_notification_size(&self) -> u64 {
+		self.max_notification_size
+	}
+
+	/// Get reference to `SetConfig`.
+	pub fn set_config(&self) -> &SetConfig {
+		&self.set_config
+	}
+
+	/// Take the `ProtocolHandlePair` from the `NonDefaultSetConfig`.
+	pub fn take_protocol_handle(self) -> ProtocolHandlePair {
+		self.protocol_handle_pair
 	}
 
 	/// Modifies the configuration to allow non-reserved nodes.
@@ -703,9 +750,6 @@ pub struct Params<Block: BlockT> {
 
 	/// Block announce protocol configuration
 	pub block_announce_config: NonDefaultSetConfig,
-
-	/// TX channel for direct communication with `SyncingEngine` and `Protocol`.
-	pub tx: TracingUnboundedSender<crate::event::SyncEvent<Block>>,
 }
 
 /// Full network configuration.
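
To illustrate how a protocol consumes the reworked `NonDefaultSetConfig`, here is a rough sketch of the intended flow: the config half is passed on through the network configuration, while the protocol keeps the `NotificationService` half and polls it for events. The protocol name, the size limit and `run_my_protocol` are placeholders, and only the event variants exercised elsewhere in this patch are matched explicitly:

    use sc_network::config::{NonDefaultSetConfig, SetConfig};
    use sc_network::service::traits::{NotificationEvent, NotificationService};

    async fn run_my_protocol() {
        let (config, mut service): (NonDefaultSetConfig, Box<dyn NotificationService>) =
            NonDefaultSetConfig::new(
                "/my-protocol/1".into(), // protocol name (placeholder)
                Vec::new(),              // no fallback names
                64 * 1024,               // max notification size (placeholder)
                None,                    // no custom handshake
                SetConfig::default(),
            );
        let _ = config; // handed to the network builder in real code

        while let Some(event) = service.next_event().await {
            match event {
                NotificationEvent::NotificationStreamOpened { peer, .. } => {
                    // start tracking `peer`
                },
                NotificationEvent::NotificationReceived { peer, notification } => {
                    // handle `notification` received from `peer`
                },
                // remaining variants (stream closed, inbound validation, ...) elided
                _ => {},
            }
        }
    }
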
diff --git a/substrate/client/network/src/error.rs b/substrate/client/network/src/error.rs
index f0828fb821f..01e8356fb55 100644
--- a/substrate/client/network/src/error.rs
+++ b/substrate/client/network/src/error.rs
@@ -68,6 +68,15 @@ pub enum Error {
 		/// Name of the protocol registered multiple times.
 		protocol: ProtocolName,
 	},
+	/// Peer does not exist.
+	#[error("Peer `{0}` does not exist.")]
+	PeerDoesntExist(PeerId),
+	/// Channel closed.
+	#[error("Channel closed")]
+	ChannelClosed,
+	/// Connection closed.
+	#[error("Connection closed")]
+	ConnectionClosed,
 }
 
 // Make `Debug` use the `Display` implementation.
diff --git a/substrate/client/network/src/event.rs b/substrate/client/network/src/event.rs
index 2913f0b5522..dc4fd53a49a 100644
--- a/substrate/client/network/src/event.rs
+++ b/substrate/client/network/src/event.rs
@@ -19,14 +19,12 @@
 //! Network event types. These are not part of the protocol, but rather
 //! events that happen on the network like DHT get/put results received.
 
-use crate::{types::ProtocolName, NotificationsSink};
+use crate::types::ProtocolName;
 
 use bytes::Bytes;
-use futures::channel::oneshot;
 use libp2p::{kad::record::Key, PeerId};
 
-use sc_network_common::{role::ObservedRole, sync::message::BlockAnnouncesHandshake};
-use sp_runtime::traits::Block as BlockT;
+use sc_network_common::role::ObservedRole;
 
 /// Events generated by DHT as a response to get_value and put_value requests.
 #[derive(Debug, Clone)]
@@ -92,46 +90,3 @@ pub enum Event {
 		messages: Vec<(ProtocolName, Bytes)>,
 	},
 }
-
-/// Event sent to `SyncingEngine`
-// TODO: remove once `NotificationService` is implemented.
-pub enum SyncEvent<B: BlockT> {
-	/// Opened a substream with the given node with the given notifications protocol.
-	///
-	/// The protocol is always one of the notification protocols that have been registered.
-	NotificationStreamOpened {
-		/// Node we opened the substream with.
-		remote: PeerId,
-		/// Received handshake.
-		received_handshake: BlockAnnouncesHandshake<B>,
-		/// Notification sink.
-		sink: NotificationsSink,
-		/// Is the connection inbound.
-		inbound: bool,
-		/// Channel for reporting accept/reject of the substream.
-		tx: oneshot::Sender<bool>,
-	},
-
-	/// Closed a substream with the given node. Always matches a corresponding previous
-	/// `NotificationStreamOpened` message.
-	NotificationStreamClosed {
-		/// Node we closed the substream with.
-		remote: PeerId,
-	},
-
-	/// Notification sink was replaced.
-	NotificationSinkReplaced {
-		/// Node we closed the substream with.
-		remote: PeerId,
-		/// Notification sink.
-		sink: NotificationsSink,
-	},
-
-	/// Received one or more messages from the given node using the given protocol.
-	NotificationsReceived {
-		/// Node we received the message from.
-		remote: PeerId,
-		/// Concerned protocol and associated message.
-		messages: Vec<Bytes>,
-	},
-}
diff --git a/substrate/client/network/src/lib.rs b/substrate/client/network/src/lib.rs
index 4dc9bdb4cc1..4c39c57e8df 100644
--- a/substrate/client/network/src/lib.rs
+++ b/substrate/client/network/src/lib.rs
@@ -244,7 +244,6 @@
 
 mod behaviour;
 mod protocol;
-mod service;
 
 #[cfg(test)]
 mod mock;
@@ -258,25 +257,30 @@ pub mod peer_info;
 pub mod peer_store;
 pub mod protocol_controller;
 pub mod request_responses;
+pub mod service;
 pub mod transport;
 pub mod types;
 pub mod utils;
 
-pub use event::{DhtEvent, Event, SyncEvent};
+pub use event::{DhtEvent, Event};
 #[doc(inline)]
 pub use libp2p::{multiaddr, Multiaddr, PeerId};
 pub use request_responses::{Config, IfDisconnected, RequestFailure};
-pub use sc_network_common::{role::ObservedRole, types::ReputationChange};
+pub use sc_network_common::{
+	role::{ObservedRole, Roles},
+	types::ReputationChange,
+};
 pub use service::{
 	signature::Signature,
 	traits::{
-		KademliaKey, NetworkBlock, NetworkDHTProvider, NetworkEventStream, NetworkNotification,
-		NetworkPeers, NetworkRequest, NetworkSigner, NetworkStateInfo, NetworkStatus,
-		NetworkStatusProvider, NetworkSyncForkRequest, NotificationSender as NotificationSenderT,
-		NotificationSenderError, NotificationSenderReady,
+		KademliaKey, MessageSink, NetworkBlock, NetworkDHTProvider, NetworkEventStream,
+		NetworkNotification, NetworkPeers, NetworkRequest, NetworkSigner, NetworkStateInfo,
+		NetworkStatus, NetworkStatusProvider, NetworkSyncForkRequest,
+		NotificationSender as NotificationSenderT, NotificationSenderError,
+		NotificationSenderReady, NotificationService,
 	},
-	DecodingError, Keypair, NetworkService, NetworkWorker, NotificationSender, NotificationsSink,
-	OutboundFailure, PublicKey,
+	DecodingError, Keypair, NetworkService, NetworkWorker, NotificationSender, OutboundFailure,
+	PublicKey,
 };
 pub use types::ProtocolName;
 
diff --git a/substrate/client/network/src/mock.rs b/substrate/client/network/src/mock.rs
index bc596b0fa57..534b8118970 100644
--- a/substrate/client/network/src/mock.rs
+++ b/substrate/client/network/src/mock.rs
@@ -20,6 +20,7 @@
 
 use crate::{peer_store::PeerStoreProvider, protocol_controller::ProtocolHandle, ReputationChange};
 use libp2p::PeerId;
+use sc_network_common::role::ObservedRole;
 use std::collections::HashSet;
 
 /// No-op `PeerStore`.
@@ -49,6 +50,14 @@ impl PeerStoreProvider for MockPeerStore {
 		0
 	}
 
+	fn peer_role(&self, _peer_id: &PeerId) -> Option<ObservedRole> {
+		None
+	}
+
+	fn set_peer_role(&mut self, _peer_id: &PeerId, _role: ObservedRole) {
+		unimplemented!();
+	}
+
 	fn outgoing_candidates(&self, _count: usize, _ignored: HashSet<&PeerId>) -> Vec<PeerId> {
 		unimplemented!()
 	}
diff --git a/substrate/client/network/src/peer_store.rs b/substrate/client/network/src/peer_store.rs
index 35d17e588cb..4b28b8e7544 100644
--- a/substrate/client/network/src/peer_store.rs
+++ b/substrate/client/network/src/peer_store.rs
@@ -23,7 +23,7 @@ use libp2p::PeerId;
 use log::trace;
 use parking_lot::Mutex;
 use partial_sort::PartialSort;
-use sc_network_common::types::ReputationChange;
+use sc_network_common::{role::ObservedRole, types::ReputationChange};
 use std::{
 	cmp::{Ord, Ordering, PartialOrd},
 	collections::{hash_map::Entry, HashMap, HashSet},
@@ -66,9 +66,15 @@ pub trait PeerStoreProvider: Debug + Send {
 	/// Adjust peer reputation.
 	fn report_peer(&mut self, peer_id: PeerId, change: ReputationChange);
 
+	/// Set peer role.
+	fn set_peer_role(&mut self, peer_id: &PeerId, role: ObservedRole);
+
 	/// Get peer reputation.
 	fn peer_reputation(&self, peer_id: &PeerId) -> i32;
 
+	/// Get peer role, if available.
+	fn peer_role(&self, peer_id: &PeerId) -> Option<ObservedRole>;
+
 	/// Get candidates with highest reputations for initiating outgoing connections.
 	fn outgoing_candidates(&self, count: usize, ignored: HashSet<&PeerId>) -> Vec<PeerId>;
 }
@@ -96,10 +102,18 @@ impl PeerStoreProvider for PeerStoreHandle {
 		self.inner.lock().report_peer(peer_id, change)
 	}
 
+	fn set_peer_role(&mut self, peer_id: &PeerId, role: ObservedRole) {
+		self.inner.lock().set_peer_role(peer_id, role)
+	}
+
 	fn peer_reputation(&self, peer_id: &PeerId) -> i32 {
 		self.inner.lock().peer_reputation(peer_id)
 	}
 
+	fn peer_role(&self, peer_id: &PeerId) -> Option<ObservedRole> {
+		self.inner.lock().peer_role(peer_id)
+	}
+
 	fn outgoing_candidates(&self, count: usize, ignored: HashSet<&PeerId>) -> Vec<PeerId> {
 		self.inner.lock().outgoing_candidates(count, ignored)
 	}
@@ -122,13 +136,19 @@ impl PeerStoreHandle {
 
 #[derive(Debug, Clone, Copy)]
 struct PeerInfo {
+	/// Reputation of the peer.
 	reputation: i32,
+
+	/// Instant when the peer was last updated.
 	last_updated: Instant,
+
+	/// Role of the peer, if known.
+	role: Option<ObservedRole>,
 }
 
 impl Default for PeerInfo {
 	fn default() -> Self {
-		Self { reputation: 0, last_updated: Instant::now() }
+		Self { reputation: 0, last_updated: Instant::now(), role: None }
 	}
 }
 
@@ -242,10 +262,27 @@ impl PeerStoreInner {
 		}
 	}
 
+	fn set_peer_role(&mut self, peer_id: &PeerId, role: ObservedRole) {
+		log::trace!(target: LOG_TARGET, "Set {peer_id} role to {role:?}");
+
+		match self.peers.entry(*peer_id) {
+			Entry::Occupied(mut entry) => {
+				entry.get_mut().role = Some(role);
+			},
+			Entry::Vacant(entry) => {
+				entry.insert(PeerInfo { role: Some(role), ..Default::default() });
+			},
+		}
+	}
+
 	fn peer_reputation(&self, peer_id: &PeerId) -> i32 {
 		self.peers.get(peer_id).map_or(0, |info| info.reputation)
 	}
 
+	fn peer_role(&self, peer_id: &PeerId) -> Option<ObservedRole> {
+		self.peers.get(peer_id).and_then(|info| info.role)
+	}
+
 	fn outgoing_candidates(&self, count: usize, ignored: HashSet<&PeerId>) -> Vec<PeerId> {
 		let mut candidates = self
 			.peers
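
The role recorded here is the value `Protocol::role_available` (later in this patch) falls back to when a peer's handshake cannot be decoded into `Roles`. A small usage sketch against the `PeerStoreProvider` trait, assuming the handle is backed by the real `PeerStore` (the mock in `mock.rs` deliberately returns `None`); the function name is illustrative:

    use libp2p::PeerId;
    use sc_network::peer_store::PeerStoreProvider;
    use sc_network_common::role::ObservedRole;

    fn remember_and_query_role(handle: &mut dyn PeerStoreProvider, peer: PeerId) {
        // Record the role observed from a decodable handshake...
        handle.set_peer_role(&peer, ObservedRole::Authority);
        // ...and read it back later, e.g. when a subsequent handshake is opaque.
        assert_eq!(handle.peer_role(&peer), Some(ObservedRole::Authority));
    }
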
diff --git a/substrate/client/network/src/protocol.rs b/substrate/client/network/src/protocol.rs
index 9b94f288352..ea7977cc9ae 100644
--- a/substrate/client/network/src/protocol.rs
+++ b/substrate/client/network/src/protocol.rs
@@ -20,12 +20,11 @@ use crate::{
 	config, error,
 	peer_store::{PeerStoreHandle, PeerStoreProvider},
 	protocol_controller::{self, SetId},
+	service::traits::Direction,
 	types::ProtocolName,
 };
 
-use bytes::Bytes;
-use codec::{DecodeAll, Encode};
-use futures::{channel::oneshot, stream::FuturesUnordered, StreamExt};
+use codec::Encode;
 use libp2p::{
 	core::Endpoint,
 	swarm::{
@@ -34,24 +33,23 @@ use libp2p::{
 	},
 	Multiaddr, PeerId,
 };
-use log::{debug, error, warn};
+use log::warn;
 
-use sc_network_common::{role::Roles, sync::message::BlockAnnouncesHandshake};
-use sc_utils::mpsc::{TracingUnboundedReceiver, TracingUnboundedSender};
+use codec::DecodeAll;
+use prometheus_endpoint::Registry;
+use sc_network_common::role::Roles;
+use sc_utils::mpsc::TracingUnboundedReceiver;
 use sp_runtime::traits::Block as BlockT;
 
-use std::{
-	collections::{HashMap, HashSet},
-	future::Future,
-	iter,
-	pin::Pin,
-	task::Poll,
-};
+use std::{collections::HashSet, iter, task::Poll};
 
-use message::{generic::Message as GenericMessage, Message};
 use notifications::{Notifications, NotificationsOut};
 
-pub use notifications::{NotificationsSink, NotifsHandlerError, Ready};
+pub(crate) use notifications::ProtocolHandle;
+
+pub use notifications::{
+	notification_service, NotificationsSink, NotifsHandlerError, ProtocolHandlePair, Ready,
+};
 
 mod notifications;
 
@@ -64,85 +62,93 @@ pub(crate) const BLOCK_ANNOUNCES_TRANSACTIONS_SUBSTREAM_SIZE: u64 = 16 * 1024 *
 /// Identifier of the peerset for the block announces protocol.
 const HARDCODED_PEERSETS_SYNC: SetId = SetId::from(0);
 
-mod rep {
-	use crate::ReputationChange as Rep;
-	/// We received a message that failed to decode.
-	pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message");
-}
-
-type PendingSyncSubstreamValidation =
-	Pin<Box<dyn Future<Output = Result<(PeerId, Roles), PeerId>> + Send>>;
-
 // Lock must always be taken in order declared here.
 pub struct Protocol<B: BlockT> {
-	/// Used to report reputation changes.
-	peer_store_handle: PeerStoreHandle,
 	/// Handles opening the unique substream and sending and receiving raw messages.
 	behaviour: Notifications,
 	/// List of notifications protocols that have been registered.
 	notification_protocols: Vec<ProtocolName>,
-	/// If we receive a new "substream open" event that contains an invalid handshake, we ask the
-	/// inner layer to force-close the substream. Force-closing the substream will generate a
-	/// "substream closed" event. This is a problem: since we can't propagate the "substream open"
-	/// event to the outer layers, we also shouldn't propagate this "substream closed" event. To
-	/// solve this, an entry is added to this map whenever an invalid handshake is received.
-	/// Entries are removed when the corresponding "substream closed" is later received.
-	bad_handshake_substreams: HashSet<(PeerId, SetId)>,
-	/// Connected peers on sync protocol.
-	peers: HashMap<PeerId, Roles>,
-	sync_substream_validations: FuturesUnordered<PendingSyncSubstreamValidation>,
-	tx: TracingUnboundedSender<crate::event::SyncEvent<B>>,
+	/// Handle to `PeerStore`.
+	peer_store_handle: PeerStoreHandle,
+	/// Streams for peers whose handshake couldn't be determined.
+	bad_handshake_streams: HashSet<PeerId>,
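+	/// Handle for reporting block announces (syncing) substream events directly to the protocol.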
+	sync_handle: ProtocolHandle,
 	_marker: std::marker::PhantomData<B>,
 }
 
 impl<B: BlockT> Protocol<B> {
 	/// Create a new instance.
-	pub fn new(
+	pub(crate) fn new(
 		roles: Roles,
+		registry: &Option<Registry>,
 		notification_protocols: Vec<config::NonDefaultSetConfig>,
 		block_announces_protocol: config::NonDefaultSetConfig,
 		peer_store_handle: PeerStoreHandle,
 		protocol_controller_handles: Vec<protocol_controller::ProtocolHandle>,
 		from_protocol_controllers: TracingUnboundedReceiver<protocol_controller::Message>,
-		tx: TracingUnboundedSender<crate::event::SyncEvent<B>>,
-	) -> error::Result<Self> {
-		let behaviour = {
-			Notifications::new(
-				protocol_controller_handles,
-				from_protocol_controllers,
-				// NOTE: Block announcement protocol is still very much hardcoded into `Protocol`.
-				// 	This protocol must be the first notification protocol given to
-				// `Notifications`
-				iter::once(notifications::ProtocolConfig {
-					name: block_announces_protocol.notifications_protocol.clone(),
-					fallback_names: block_announces_protocol.fallback_names.clone(),
-					handshake: block_announces_protocol.handshake.as_ref().unwrap().to_vec(),
-					max_notification_size: block_announces_protocol.max_notification_size,
-				})
-				.chain(notification_protocols.iter().map(|s| notifications::ProtocolConfig {
-					name: s.notifications_protocol.clone(),
-					fallback_names: s.fallback_names.clone(),
-					handshake: s.handshake.as_ref().map_or(roles.encode(), |h| (*h).to_vec()),
-					max_notification_size: s.max_notification_size,
-				})),
+	) -> error::Result<(Self, Vec<ProtocolHandle>)> {
+		let (behaviour, notification_protocols, handles) = {
+			let installed_protocols = iter::once(block_announces_protocol.protocol_name().clone())
+				.chain(notification_protocols.iter().map(|p| p.protocol_name().clone()))
+				.collect::<Vec<_>>();
+
+			// NOTE: The block announcement protocol is still very much hardcoded into
+			// `Protocol`. This protocol must be the first notification protocol given to
+			// `Notifications`.
+			let (protocol_configs, handles): (Vec<_>, Vec<_>) = iter::once({
+				let config = notifications::ProtocolConfig {
+					name: block_announces_protocol.protocol_name().clone(),
+					fallback_names: block_announces_protocol.fallback_names().cloned().collect(),
+					handshake: block_announces_protocol.handshake().as_ref().unwrap().to_vec(),
+					max_notification_size: block_announces_protocol.max_notification_size(),
+				};
+
+				let (handle, command_stream) =
+					block_announces_protocol.take_protocol_handle().split();
+
+				((config, handle.clone(), command_stream), handle)
+			})
+			.chain(notification_protocols.into_iter().map(|s| {
+				let config = notifications::ProtocolConfig {
+					name: s.protocol_name().clone(),
+					fallback_names: s.fallback_names().cloned().collect(),
+					handshake: s.handshake().as_ref().map_or(roles.encode(), |h| (*h).to_vec()),
+					max_notification_size: s.max_notification_size(),
+				};
+
+				let (handle, command_stream) = s.take_protocol_handle().split();
+
+				((config, handle.clone(), command_stream), handle)
+			}))
+			.unzip();
+
+			(
+				Notifications::new(
+					protocol_controller_handles,
+					from_protocol_controllers,
+					registry,
+					protocol_configs.into_iter(),
+				),
+				installed_protocols,
+				handles,
 			)
 		};
 
 		let protocol = Self {
-			peer_store_handle,
 			behaviour,
-			notification_protocols: iter::once(block_announces_protocol.notifications_protocol)
-				.chain(notification_protocols.iter().map(|s| s.notifications_protocol.clone()))
-				.collect(),
-			bad_handshake_substreams: Default::default(),
-			peers: HashMap::new(),
-			sync_substream_validations: FuturesUnordered::new(),
-			tx,
+			sync_handle: handles[0].clone(),
+			peer_store_handle,
+			notification_protocols,
+			bad_handshake_streams: HashSet::new(),
 			// TODO: remove when `BlockAnnouncesHandshake` is moved away from `Protocol`
 			_marker: Default::default(),
 		};
 
-		Ok(protocol)
+		Ok((protocol, handles))
+	}
+
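+	/// Returns the number of peers we're connected to on the syncing protocol.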
+	pub fn num_sync_peers(&self) -> usize {
+		self.sync_handle.num_peers()
 	}
 
 	/// Returns the list of all the peers we have an open channel to.
@@ -163,21 +169,12 @@ impl<B: BlockT> Protocol<B> {
 		}
 	}
 
-	/// Returns the number of peers we're connected to on sync protocol.
-	pub fn num_connected_peers(&self) -> usize {
-		self.peers.len()
-	}
-
-	/// Set handshake for the notification protocol.
-	pub fn set_notification_handshake(&mut self, protocol: ProtocolName, handshake: Vec<u8>) {
-		if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) {
-			self.behaviour.set_notif_protocol_handshake(SetId::from(index), handshake);
-		} else {
-			error!(
-				target: "sub-libp2p",
-				"set_notification_handshake with unknown protocol: {}",
-				protocol
-			);
+	/// Check if a role is available for `peer_id` by attempting to decode the handshake into
+	/// `Roles` and, if that fails, by checking whether a role has been registered in `PeerStore`.
+	fn role_available(&self, peer_id: &PeerId, handshake: &Vec<u8>) -> bool {
+		match Roles::decode_all(&mut &handshake[..]) {
+			Ok(_) => true,
+			Err(_) => self.peer_store_handle.peer_role(peer_id).is_some(),
 		}
 	}
 }
@@ -189,25 +186,42 @@ pub enum CustomMessageOutcome {
 	/// Notification protocols have been opened with a remote.
 	NotificationStreamOpened {
 		remote: PeerId,
-		protocol: ProtocolName,
+		/// Set ID.
+		set_id: SetId,
+		/// Direction of the stream.
+		direction: Direction,
 		/// See [`crate::Event::NotificationStreamOpened::negotiated_fallback`].
 		negotiated_fallback: Option<ProtocolName>,
-		roles: Roles,
+		/// Received handshake.
 		received_handshake: Vec<u8>,
+		/// Notification sink.
 		notifications_sink: NotificationsSink,
 	},
 	/// The [`NotificationsSink`] of some notification protocols need an update.
 	NotificationStreamReplaced {
+		/// Peer ID.
 		remote: PeerId,
-		protocol: ProtocolName,
+		/// Set ID.
+		set_id: SetId,
+		/// New notification sink.
 		notifications_sink: NotificationsSink,
 	},
 	/// Notification protocols have been closed with a remote.
-	NotificationStreamClosed { remote: PeerId, protocol: ProtocolName },
+	NotificationStreamClosed {
+		/// Peer ID.
+		remote: PeerId,
+		/// Set ID.
+		set_id: SetId,
+	},
 	/// Messages have been received on one or more notifications protocols.
-	NotificationsReceived { remote: PeerId, messages: Vec<(ProtocolName, Bytes)> },
-	/// Now connected to a new peer for syncing purposes.
-	None,
+	NotificationsReceived {
+		/// Peer ID.
+		remote: PeerId,
+		/// Set ID.
+		set_id: SetId,
+		/// Received notification.
+		notification: Vec<u8>,
+	},
 }
 
 impl<B: BlockT> NetworkBehaviour for Protocol<B> {
@@ -274,23 +288,6 @@ impl<B: BlockT> NetworkBehaviour for Protocol<B> {
 		cx: &mut std::task::Context,
 		params: &mut impl PollParameters,
 	) -> Poll<ToSwarm<Self::OutEvent, THandlerInEvent<Self>>> {
-		while let Poll::Ready(Some(validation_result)) =
-			self.sync_substream_validations.poll_next_unpin(cx)
-		{
-			match validation_result {
-				Ok((peer, roles)) => {
-					self.peers.insert(peer, roles);
-				},
-				Err(peer) => {
-					log::debug!(
-						target: "sub-libp2p",
-						"`SyncingEngine` rejected stream"
-					);
-					self.behaviour.disconnect_peer(&peer, HARDCODED_PEERSETS_SYNC);
-				},
-			}
-		}
-
 		let event = match self.behaviour.poll(cx, params) {
 			Poll::Pending => return Poll::Pending,
 			Poll::Ready(ToSwarm::GenerateEvent(ev)) => ev,
@@ -307,204 +304,86 @@ impl<B: BlockT> NetworkBehaviour for Protocol<B> {
 			NotificationsOut::CustomProtocolOpen {
 				peer_id,
 				set_id,
+				direction,
 				received_handshake,
 				notifications_sink,
 				negotiated_fallback,
-				inbound,
-			} => {
-				// Set number 0 is hardcoded the default set of peers we sync from.
+				..
+			} =>
 				if set_id == HARDCODED_PEERSETS_SYNC {
-					// `received_handshake` can be either a `Status` message if received from the
-					// legacy substream ,or a `BlockAnnouncesHandshake` if received from the block
-					// announces substream.
-					match <Message<B> as DecodeAll>::decode_all(&mut &received_handshake[..]) {
-						Ok(GenericMessage::Status(handshake)) => {
-							let roles = handshake.roles;
-							let handshake = BlockAnnouncesHandshake::<B> {
-								roles: handshake.roles,
-								best_number: handshake.best_number,
-								best_hash: handshake.best_hash,
-								genesis_hash: handshake.genesis_hash,
-							};
-
-							let (tx, rx) = oneshot::channel();
-							let _ = self.tx.unbounded_send(
-								crate::SyncEvent::NotificationStreamOpened {
-									inbound,
-									remote: peer_id,
-									received_handshake: handshake,
-									sink: notifications_sink,
-									tx,
-								},
-							);
-							self.sync_substream_validations.push(Box::pin(async move {
-								match rx.await {
-									Ok(accepted) =>
-										if accepted {
-											Ok((peer_id, roles))
-										} else {
-											Err(peer_id)
-										},
-									Err(_) => Err(peer_id),
-								}
-							}));
-
-							CustomMessageOutcome::None
-						},
-						Ok(msg) => {
-							debug!(
-								target: "sync",
-								"Expected Status message from {}, but got {:?}",
-								peer_id,
-								msg,
-							);
-							self.peer_store_handle.report_peer(peer_id, rep::BAD_MESSAGE);
-							CustomMessageOutcome::None
-						},
-						Err(err) => {
-							match <BlockAnnouncesHandshake<B> as DecodeAll>::decode_all(
-								&mut &received_handshake[..],
-							) {
-								Ok(handshake) => {
-									let roles = handshake.roles;
-
-									let (tx, rx) = oneshot::channel();
-									let _ = self.tx.unbounded_send(
-										crate::SyncEvent::NotificationStreamOpened {
-											inbound,
-											remote: peer_id,
-											received_handshake: handshake,
-											sink: notifications_sink,
-											tx,
-										},
-									);
-									self.sync_substream_validations.push(Box::pin(async move {
-										match rx.await {
-											Ok(accepted) =>
-												if accepted {
-													Ok((peer_id, roles))
-												} else {
-													Err(peer_id)
-												},
-											Err(_) => Err(peer_id),
-										}
-									}));
-									CustomMessageOutcome::None
-								},
-								Err(err2) => {
-									log::debug!(
-										target: "sync",
-										"Couldn't decode handshake sent by {}: {:?}: {} & {}",
-										peer_id,
-										received_handshake,
-										err,
-										err2,
-									);
-									self.peer_store_handle.report_peer(peer_id, rep::BAD_MESSAGE);
-									CustomMessageOutcome::None
-								},
-							}
-						},
-					}
+					let _ = self.sync_handle.report_substream_opened(
+						peer_id,
+						direction,
+						received_handshake,
+						negotiated_fallback,
+						notifications_sink,
+					);
+					None
 				} else {
-					match (
-						Roles::decode_all(&mut &received_handshake[..]),
-						self.peers.get(&peer_id),
-					) {
-						(Ok(roles), _) => CustomMessageOutcome::NotificationStreamOpened {
+					match self.role_available(&peer_id, &received_handshake) {
+						true => Some(CustomMessageOutcome::NotificationStreamOpened {
 							remote: peer_id,
-							protocol: self.notification_protocols[usize::from(set_id)].clone(),
+							set_id,
+							direction,
 							negotiated_fallback,
-							roles,
 							received_handshake,
 							notifications_sink,
-						},
-						(Err(_), Some(roles)) if received_handshake.is_empty() => {
-							// As a convenience, we allow opening substreams for "external"
-							// notification protocols with an empty handshake. This fetches the
-							// roles from the locally-known roles.
-							// TODO: remove this after https://github.com/paritytech/substrate/issues/5685
-							CustomMessageOutcome::NotificationStreamOpened {
-								remote: peer_id,
-								protocol: self.notification_protocols[usize::from(set_id)].clone(),
-								negotiated_fallback,
-								roles: *roles,
-								received_handshake,
-								notifications_sink,
-							}
-						},
-						(Err(err), _) => {
-							debug!(target: "sync", "Failed to parse remote handshake: {}", err);
-							self.bad_handshake_substreams.insert((peer_id, set_id));
-							self.behaviour.disconnect_peer(&peer_id, set_id);
-							self.peer_store_handle.report_peer(peer_id, rep::BAD_MESSAGE);
-							CustomMessageOutcome::None
+						}),
+						false => {
+							self.bad_handshake_streams.insert(peer_id);
+							None
 						},
 					}
-				}
-			},
+				},
 			NotificationsOut::CustomProtocolReplaced { peer_id, notifications_sink, set_id } =>
-				if self.bad_handshake_substreams.contains(&(peer_id, set_id)) {
-					CustomMessageOutcome::None
-				} else if set_id == HARDCODED_PEERSETS_SYNC {
-					let _ = self.tx.unbounded_send(crate::SyncEvent::NotificationSinkReplaced {
-						remote: peer_id,
-						sink: notifications_sink,
-					});
-					CustomMessageOutcome::None
+				if set_id == HARDCODED_PEERSETS_SYNC {
+					let _ = self
+						.sync_handle
+						.report_notification_sink_replaced(peer_id, notifications_sink);
+					None
 				} else {
-					CustomMessageOutcome::NotificationStreamReplaced {
-						remote: peer_id,
-						protocol: self.notification_protocols[usize::from(set_id)].clone(),
-						notifications_sink,
-					}
+					(!self.bad_handshake_streams.contains(&peer_id)).then_some(
+						CustomMessageOutcome::NotificationStreamReplaced {
+							remote: peer_id,
+							set_id,
+							notifications_sink,
+						},
+					)
 				},
 			NotificationsOut::CustomProtocolClosed { peer_id, set_id } => {
-				if self.bad_handshake_substreams.remove(&(peer_id, set_id)) {
-					// The substream that has just been closed had been opened with a bad
-					// handshake. The outer layers have never received an opening event about this
-					// substream, and consequently shouldn't receive a closing event either.
-					CustomMessageOutcome::None
-				} else if set_id == HARDCODED_PEERSETS_SYNC {
-					let _ = self.tx.unbounded_send(crate::SyncEvent::NotificationStreamClosed {
-						remote: peer_id,
-					});
-					self.peers.remove(&peer_id);
-					CustomMessageOutcome::None
+				if set_id == HARDCODED_PEERSETS_SYNC {
+					let _ = self.sync_handle.report_substream_closed(peer_id);
+					None
 				} else {
-					CustomMessageOutcome::NotificationStreamClosed {
-						remote: peer_id,
-						protocol: self.notification_protocols[usize::from(set_id)].clone(),
-					}
+					(!self.bad_handshake_streams.remove(&peer_id)).then_some(
+						CustomMessageOutcome::NotificationStreamClosed { remote: peer_id, set_id },
+					)
 				}
 			},
 			NotificationsOut::Notification { peer_id, set_id, message } => {
-				if self.bad_handshake_substreams.contains(&(peer_id, set_id)) {
-					CustomMessageOutcome::None
-				} else if set_id == HARDCODED_PEERSETS_SYNC {
-					let _ = self.tx.unbounded_send(crate::SyncEvent::NotificationsReceived {
-						remote: peer_id,
-						messages: vec![message.freeze()],
-					});
-					CustomMessageOutcome::None
+				if set_id == HARDCODED_PEERSETS_SYNC {
+					let _ = self
+						.sync_handle
+						.report_notification_received(peer_id, message.freeze().into());
+					None
 				} else {
-					let protocol_name = self.notification_protocols[usize::from(set_id)].clone();
-					CustomMessageOutcome::NotificationsReceived {
-						remote: peer_id,
-						messages: vec![(protocol_name, message.freeze())],
-					}
+					(!self.bad_handshake_streams.contains(&peer_id)).then_some(
+						CustomMessageOutcome::NotificationsReceived {
+							remote: peer_id,
+							set_id,
+							notification: message.freeze().into(),
+						},
+					)
 				}
 			},
 		};
 
-		if !matches!(outcome, CustomMessageOutcome::None) {
-			return Poll::Ready(ToSwarm::GenerateEvent(outcome))
+		match outcome {
+			Some(event) => Poll::Ready(ToSwarm::GenerateEvent(event)),
+			None => {
+				cx.waker().wake_by_ref();
+				Poll::Pending
+			},
 		}
-
-		// This block can only be reached if an event was pulled from the behaviour and that
-		// resulted in `CustomMessageOutcome::None`. Since there might be another pending
-		// message from the behaviour, the task is scheduled again.
-		cx.waker().wake_by_ref();
-		Poll::Pending
 	}
 }
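
The rewritten poll arm above replaces the `CustomMessageOutcome::None` sentinel with `Option<CustomMessageOutcome>`: events for the hardcoded sync set are forwarded through `sync_handle` and produce `None`, while events for other sets are emitted only if the peer is not tracked in `bad_handshake_streams`. When nothing is surfaced, the waker is re-armed so further queued behaviour events are still processed. A minimal, self-contained sketch of that wake-and-retry pattern (illustrative only, not taken from the patch):

    use std::task::{Context, Poll};

    /// If an internal event produced nothing visible to the caller, wake the task
    /// again so the next queued event gets processed, and report `Pending`.
    fn emit_or_reschedule<T>(outcome: Option<T>, cx: &mut Context<'_>) -> Poll<T> {
        match outcome {
            Some(event) => Poll::Ready(event),
            None => {
                cx.waker().wake_by_ref();
                Poll::Pending
            },
        }
    }
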
diff --git a/substrate/client/network/src/protocol/message.rs b/substrate/client/network/src/protocol/message.rs
index 66dca297537..247580083f9 100644
--- a/substrate/client/network/src/protocol/message.rs
+++ b/substrate/client/network/src/protocol/message.rs
@@ -29,6 +29,7 @@ use sc_network_common::message::RequestId;
 use sp_runtime::traits::{Block as BlockT, Header as HeaderT};
 
 /// Type alias for using the message type using block type parameters.
+#[allow(unused)]
 pub type Message<B> = generic::Message<
 	<B as BlockT>::Header,
 	<B as BlockT>::Hash,
diff --git a/substrate/client/network/src/protocol/notifications.rs b/substrate/client/network/src/protocol/notifications.rs
index aa49cfcf9d4..10fa329097d 100644
--- a/substrate/client/network/src/protocol/notifications.rs
+++ b/substrate/client/network/src/protocol/notifications.rs
@@ -22,9 +22,13 @@
 pub use self::{
 	behaviour::{Notifications, NotificationsOut, ProtocolConfig},
 	handler::{NotificationsSink, NotifsHandlerError, Ready},
+	service::{notification_service, ProtocolHandlePair},
 };
 
+pub(crate) use self::service::ProtocolHandle;
+
 mod behaviour;
 mod handler;
+mod service;
 mod tests;
 mod upgrade;
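
The `service` module added here is the protocol-facing entry point: `notification_service()` creates the `NotificationService` handed to the protocol together with a `ProtocolHandlePair` that is later split into the behaviour-side `ProtocolHandle` and the command stream polled by `Notifications`. A rough sketch of the wiring, mirroring the `development_notifs()` test helper further down (the protocol name is illustrative):

    // Protocol-facing half and behaviour-facing half of a notification protocol.
    let (protocol_handle_pair, notification_service) =
        notification_service("/proto/1".into());

    // The behaviour consumes the handle and the command stream; the protocol keeps
    // the `NotificationService` for sending/receiving notifications and for
    // validating inbound substreams.
    let (protocol_handle, command_stream) = protocol_handle_pair.split();
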
diff --git a/substrate/client/network/src/protocol/notifications/behaviour.rs b/substrate/client/network/src/protocol/notifications/behaviour.rs
index b78f15f8529..ef0c6540eee 100644
--- a/substrate/client/network/src/protocol/notifications/behaviour.rs
+++ b/substrate/client/network/src/protocol/notifications/behaviour.rs
@@ -17,16 +17,18 @@
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 use crate::{
-	protocol::notifications::handler::{
-		self, NotificationsSink, NotifsHandler, NotifsHandlerIn, NotifsHandlerOut,
+	protocol::notifications::{
+		handler::{self, NotificationsSink, NotifsHandler, NotifsHandlerIn, NotifsHandlerOut},
+		service::{metrics, NotificationCommand, ProtocolHandle, ValidationCallResult},
 	},
 	protocol_controller::{self, IncomingIndex, Message, SetId},
+	service::traits::{Direction, ValidationResult},
 	types::ProtocolName,
 };
 
 use bytes::BytesMut;
 use fnv::FnvHashMap;
-use futures::prelude::*;
+use futures::{future::BoxFuture, prelude::*, stream::FuturesUnordered};
 use libp2p::{
 	core::{ConnectedPoint, Endpoint, Multiaddr},
 	swarm::{
@@ -36,11 +38,15 @@ use libp2p::{
 	},
 	PeerId,
 };
-use log::{debug, error, info, trace, warn};
+use log::{debug, error, trace, warn};
 use parking_lot::RwLock;
+use prometheus_endpoint::Registry;
 use rand::distributions::{Distribution as _, Uniform};
 use sc_utils::mpsc::TracingUnboundedReceiver;
 use smallvec::SmallVec;
+use tokio::sync::oneshot::error::RecvError;
+use tokio_stream::StreamMap;
+
 use std::{
 	cmp,
 	collections::{hash_map::Entry, VecDeque},
@@ -51,6 +57,13 @@ use std::{
 	time::{Duration, Instant},
 };
 
+/// Type representing a pending substream validation.
+type PendingInboundValidation =
+	BoxFuture<'static, (Result<ValidationResult, RecvError>, IncomingIndex)>;
+
+/// Logging target for the file.
+const LOG_TARGET: &str = "sub-libp2p";
+
 /// Network behaviour that handles opening substreams for custom protocols with other peers.
 ///
 /// # How it works
@@ -106,6 +119,12 @@ pub struct Notifications {
 	/// Notification protocols. Entries never change after initialization.
 	notif_protocols: Vec<handler::ProtocolConfig>,
 
+	/// Protocol handles.
+	protocol_handles: Vec<ProtocolHandle>,
+
+	/// Command streams.
+	command_streams: StreamMap<usize, Box<dyn Stream<Item = NotificationCommand> + Send + Unpin>>,
+
 	/// Protocol controllers are responsible for peer connections management.
 	protocol_controller_handles: Vec<protocol_controller::ProtocolHandle>,
 
@@ -138,6 +157,18 @@ pub struct Notifications {
 
 	/// Events to produce from `poll()`.
 	events: VecDeque<ToSwarm<NotificationsOut, NotifsHandlerIn>>,
+
+	/// Pending inbound substream validations.
+	//
+	// NOTE: it's possible to read a stale response from `pending_inbound_validations`
+	// as the substream may get closed by the remote peer before the protocol has had
+	// a chance to validate it. [`Notifications`] must compare the `crate::peerset::IncomingIndex`
+	// returned by the completed future against the `crate::peerset::IncomingIndex` stored in
+	// `PeerState::Incoming` to check whether the completed future is stale or not.
+	pending_inbound_validations: FuturesUnordered<PendingInboundValidation>,
+
+	/// Metrics for notifications.
+	metrics: Option<metrics::Metrics>,
 }
 
 /// Configuration for a notifications protocol.
@@ -235,6 +266,9 @@ enum PeerState {
 		/// Incoming index tracking this connection.
 		incoming_index: IncomingIndex,
 
+		/// Peerset has signaled it wants the substream closed.
+		peerset_rejected: bool,
+
 		/// List of connections with this peer, and their state.
 		connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>,
 	},
@@ -303,6 +337,8 @@ struct IncomingPeer {
 	alive: bool,
 	/// Id that the we sent to the peerset.
 	incoming_id: IncomingIndex,
+	/// Received handshake.
+	handshake: Vec<u8>,
 }
 
 /// Event that can be emitted by the `Notifications`.
@@ -314,6 +350,8 @@ pub enum NotificationsOut {
 		peer_id: PeerId,
 		/// Peerset set ID the substream is tied to.
 		set_id: SetId,
+		/// Direction of the stream.
+		direction: Direction,
 		/// If `Some`, a fallback protocol name has been used rather the main protocol name.
 		/// Always matches one of the fallback names passed at initialization.
 		negotiated_fallback: Option<ProtocolName>,
@@ -364,24 +402,52 @@ pub enum NotificationsOut {
 
 impl Notifications {
 	/// Creates a `CustomProtos`.
-	pub fn new(
+	pub(crate) fn new(
 		protocol_controller_handles: Vec<protocol_controller::ProtocolHandle>,
 		from_protocol_controllers: TracingUnboundedReceiver<Message>,
-		notif_protocols: impl Iterator<Item = ProtocolConfig>,
+		registry: &Option<Registry>,
+		notif_protocols: impl Iterator<
+			Item = (
+				ProtocolConfig,
+				ProtocolHandle,
+				Box<dyn Stream<Item = NotificationCommand> + Send + Unpin>,
+			),
+		>,
 	) -> Self {
-		let notif_protocols = notif_protocols
-			.map(|cfg| handler::ProtocolConfig {
-				name: cfg.name,
-				fallback_names: cfg.fallback_names,
-				handshake: Arc::new(RwLock::new(cfg.handshake)),
-				max_notification_size: cfg.max_notification_size,
+		let (notif_protocols, protocol_handles): (Vec<_>, Vec<_>) = notif_protocols
+			.map(|(cfg, protocol_handle, command_stream)| {
+				(
+					handler::ProtocolConfig {
+						name: cfg.name,
+						fallback_names: cfg.fallback_names,
+						handshake: Arc::new(RwLock::new(cfg.handshake)),
+						max_notification_size: cfg.max_notification_size,
+					},
+					(protocol_handle, command_stream),
+				)
 			})
-			.collect::<Vec<_>>();
-
+			.unzip();
 		assert!(!notif_protocols.is_empty());
 
+		let metrics = registry.as_ref().and_then(|registry| metrics::register(&registry).ok());
+		let (mut protocol_handles, command_streams): (Vec<_>, Vec<_>) = protocol_handles
+			.into_iter()
+			.enumerate()
+			.map(|(set_id, (mut protocol_handle, command_stream))| {
+				protocol_handle.set_metrics(metrics.clone());
+
+				(protocol_handle, (set_id, command_stream))
+			})
+			.unzip();
+
+		protocol_handles.iter_mut().skip(1).for_each(|handle| {
+			handle.delegate_to_peerset(true);
+		});
+
 		Self {
 			notif_protocols,
+			protocol_handles,
+			command_streams: StreamMap::from_iter(command_streams.into_iter()),
 			protocol_controller_handles,
 			from_protocol_controllers,
 			peers: FnvHashMap::default(),
@@ -390,6 +456,8 @@ impl Notifications {
 			incoming: SmallVec::new(),
 			next_incoming_index: IncomingIndex(0),
 			events: VecDeque::new(),
+			pending_inbound_validations: FuturesUnordered::new(),
+			metrics,
 		}
 	}
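
`Notifications::new` now takes, per protocol, a tuple of the handler `ProtocolConfig`, the `ProtocolHandle` created by `notification_service()`, and the protocol's command stream, plus an optional Prometheus registry used for notification metrics. The call shape, following the updated `development_notifs()` helper in the tests below (handle names are illustrative):

    let notifications = Notifications::new(
        vec![protocol_controller_handle],
        from_protocol_controllers,
        // No Prometheus registry in this sketch.
        &None,
        iter::once((
            ProtocolConfig {
                name: "/proto/1".into(),
                fallback_names: Vec::new(),
                handshake: vec![1, 2, 3, 4],
                max_notification_size: u64::MAX,
            },
            protocol_handle,
            command_stream,
        )),
    );
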
 
@@ -807,14 +875,21 @@ impl Notifications {
 				*entry.into_mut() = PeerState::Backoff { timer, timer_deadline }
 			},
 
-			// Invalid state transitions.
-			st @ PeerState::Incoming { .. } => {
-				info!(
+			// `ProtocolController` disconnected the peer while it was still being validated by
+			// the protocol. Mark the connection as rejected and, once the validation result is
+			// received from the protocol, reject the substream.
+			PeerState::Incoming { backoff_until, connections, incoming_index, .. } => {
+				debug!(
 					target: "sub-libp2p",
 					"PSM => Drop({}, {:?}): Ignoring obsolete disconnect, we are awaiting accept/reject.",
 					entry.key().0, set_id,
 				);
-				*entry.into_mut() = st;
+				*entry.into_mut() = PeerState::Incoming {
+					backoff_until,
+					connections,
+					incoming_index,
+					peerset_rejected: true,
+				};
 			},
 			PeerState::Poisoned => {
 				error!(target: "sub-libp2p", "State of {:?} is poisoned", entry.key());
@@ -823,20 +898,71 @@ impl Notifications {
 		}
 	}
 
+	/// Substream has been accepted by the `ProtocolController` and must now be sent
+	/// to the protocol for validation.
+	fn peerset_report_preaccept(&mut self, index: IncomingIndex) {
+		let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) else {
+			error!(target: LOG_TARGET, "PSM => Preaccept({:?}): Invalid index", index);
+			return
+		};
+
+		trace!(
+			target: LOG_TARGET,
+			"PSM => Preaccept({:?}): Sent to protocol for validation",
+			index
+		);
+		let incoming = &self.incoming[pos];
+
+		match self.protocol_handles[usize::from(incoming.set_id)]
+			.report_incoming_substream(incoming.peer_id, incoming.handshake.clone())
+		{
+			Ok(ValidationCallResult::Delegated) => {
+				self.protocol_report_accept(index);
+			},
+			Ok(ValidationCallResult::WaitForValidation(rx)) => {
+				self.pending_inbound_validations
+					.push(Box::pin(async move { (rx.await, index) }));
+			},
+			Err(err) => {
+				// Parachain collators enable the syncing protocol but the `NotificationService`
+				// for `SyncingEngine` is never created, which causes `report_incoming_substream()`
+				// to fail. Normally a missing `NotificationService` is a fatal error indicating
+				// that the protocol has exited, but in this case it is not. Until the parachain
+				// collator issue is fixed, just report an error and reject the peer.
+				debug!(target: LOG_TARGET, "protocol has exited: {err:?} {:?}", incoming.set_id);
+
+				self.protocol_report_reject(index);
+			},
+		}
+	}
+
 	/// Function that is called when the peerset wants us to accept a connection
 	/// request from a peer.
-	fn peerset_report_accept(&mut self, index: IncomingIndex) {
-		let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index)
-		{
-			self.incoming.remove(pos)
-		} else {
-			error!(target: "sub-libp2p", "PSM => Accept({:?}): Invalid index", index);
-			return
+	fn protocol_report_accept(&mut self, index: IncomingIndex) {
+		let (pos, incoming) =
+			if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) {
+				(pos, self.incoming.get(pos))
+			} else {
+				error!(target: "sub-libp2p", "PSM => Accept({:?}): Invalid index", index);
+				return
+			};
+
+		let Some(incoming) = incoming else {
+			error!(target: "sub-libp2p", "Incoming connection ({:?}) doesn't exist", index);
+			debug_assert!(false);
+			return;
 		};
 
 		if !incoming.alive {
-			trace!(target: "sub-libp2p", "PSM => Accept({:?}, {}, {:?}): Obsolete incoming",
-				index, incoming.peer_id, incoming.set_id);
+			trace!(
+				target: "sub-libp2p",
+				"PSM => Accept({:?}, {}, {:?}): Obsolete incoming",
+				index,
+				incoming.peer_id,
+				incoming.set_id,
+			);
+
 			match self.peers.get_mut(&(incoming.peer_id, incoming.set_id)) {
 				Some(PeerState::DisabledPendingEnable { .. }) | Some(PeerState::Enabled { .. }) => {
 				},
@@ -847,26 +973,42 @@ impl Notifications {
 						.dropped(incoming.peer_id);
 				},
 			}
+
+			self.incoming.remove(pos);
 			return
 		}
 
 		let state = match self.peers.get_mut(&(incoming.peer_id, incoming.set_id)) {
 			Some(s) => s,
 			None => {
-				debug_assert!(false);
+				log::debug!(
+					target: "sub-libp2p",
+					"Connection to {:?} closed, ({:?} {:?}), ignoring accept",
+					incoming.peer_id,
+					incoming.set_id,
+					index,
+				);
+				self.incoming.remove(pos);
 				return
 			},
 		};
 
 		match mem::replace(state, PeerState::Poisoned) {
 			// Incoming => Enabled
-			PeerState::Incoming { mut connections, incoming_index, .. } => {
+			PeerState::Incoming {
+				mut connections,
+				incoming_index,
+				peerset_rejected,
+				backoff_until,
+			} => {
 				if index < incoming_index {
 					warn!(
 						target: "sub-libp2p",
 						"PSM => Accept({:?}, {}, {:?}): Ignoring obsolete incoming index, we are already awaiting {:?}.",
 						index, incoming.peer_id, incoming.set_id, incoming_index
 					);
+
+					self.incoming.remove(pos);
 					return
 				} else if index > incoming_index {
 					error!(
@@ -874,12 +1016,39 @@ impl Notifications {
 						"PSM => Accept({:?}, {}, {:?}): Ignoring incoming index from the future, we are awaiting {:?}.",
 						index, incoming.peer_id, incoming.set_id, incoming_index
 					);
+
+					self.incoming.remove(pos);
 					debug_assert!(false);
 					return
 				}
 
-				trace!(target: "sub-libp2p", "PSM => Accept({:?}, {}, {:?}): Enabling connections.",
-					index, incoming.peer_id, incoming.set_id);
+				// while the substream was being validated by the protocol, `Peerset` requested
+				// that it be closed, so reject the substream now
+				if peerset_rejected {
+					trace!(
+						target: "sub-libp2p",
+						"Protocol accepted ({:?} {:?} {:?}) but Peerset had requested disconnection, rejecting",
+						index,
+						incoming.peer_id,
+						incoming.set_id
+					);
+
+					*state = PeerState::Incoming {
+						connections,
+						backoff_until,
+						peerset_rejected,
+						incoming_index,
+					};
+					let _ = self.report_reject(index);
+					return
+				}
+
+				trace!(
+					target: "sub-libp2p",
+					"PSM => Accept({:?}, {}, {:?}): Enabling connections.",
+					index,
+					incoming.peer_id,
+					incoming.set_id
+				);
 
 				debug_assert!(connections
 					.iter()
@@ -898,53 +1067,85 @@ impl Notifications {
 					*connec_state = ConnectionState::Opening;
 				}
 
+				self.incoming.remove(pos);
 				*state = PeerState::Enabled { connections };
 			},
-
+			st @ PeerState::Disabled { .. } | st @ PeerState::Backoff { .. } => {
+				self.incoming.remove(pos);
+				*state = st;
+			},
 			// Any state other than `Incoming` is invalid.
 			peer => {
-				error!(target: "sub-libp2p",
+				error!(
+					target: "sub-libp2p",
 					"State mismatch in libp2p: Expected alive incoming. Got {:?}.",
-					peer);
+					peer
+				);
+
+				self.incoming.remove(pos);
 				debug_assert!(false);
 			},
 		}
 	}
 
-	/// Function that is called when the peerset wants us to reject an incoming peer.
+	/// Function that is called when `ProtocolController` wants us to reject an incoming peer.
 	fn peerset_report_reject(&mut self, index: IncomingIndex) {
+		let _ = self.report_reject(index);
+	}
+
+	/// Function that is called when the protocol wants us to reject an incoming peer.
+	fn protocol_report_reject(&mut self, index: IncomingIndex) {
+		if let Some((set_id, peer_id)) = self.report_reject(index) {
+			self.protocol_controller_handles[usize::from(set_id)].dropped(peer_id)
+		}
+	}
+
+	/// Function that is called when the peerset wants us to reject an incoming peer.
+	fn report_reject(&mut self, index: IncomingIndex) -> Option<(SetId, PeerId)> {
 		let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index)
 		{
 			self.incoming.remove(pos)
 		} else {
 			error!(target: "sub-libp2p", "PSM => Reject({:?}): Invalid index", index);
-			return
+			return None
 		};
 
 		if !incoming.alive {
-			trace!(target: "sub-libp2p", "PSM => Reject({:?}, {}, {:?}): Obsolete incoming, \
-				ignoring", index, incoming.peer_id, incoming.set_id);
-			return
+			trace!(
+				target: "sub-libp2p",
+				"PSM => Reject({:?}, {}, {:?}): Obsolete incoming, ignoring",
+				index,
+				incoming.peer_id,
+				incoming.set_id,
+			);
+
+			return None
 		}
 
 		let state = match self.peers.get_mut(&(incoming.peer_id, incoming.set_id)) {
 			Some(s) => s,
 			None => {
-				debug_assert!(false);
-				return
+				log::debug!(
+					target: "sub-libp2p",
+					"Connection to {:?} closed, ({:?} {:?}), ignoring reject",
+					incoming.peer_id,
+					incoming.set_id,
+					index,
+				);
+				return None
 			},
 		};
 
 		match mem::replace(state, PeerState::Poisoned) {
 			// Incoming => Disabled
-			PeerState::Incoming { mut connections, backoff_until, incoming_index } => {
+			PeerState::Incoming { mut connections, backoff_until, incoming_index, .. } => {
 				if index < incoming_index {
 					warn!(
 						target: "sub-libp2p",
 						"PSM => Reject({:?}, {}, {:?}): Ignoring obsolete incoming index, we are already awaiting {:?}.",
 						index, incoming.peer_id, incoming.set_id, incoming_index
 					);
-					return
+					return None
 				} else if index > incoming_index {
 					error!(
 						target: "sub-libp2p",
@@ -952,7 +1153,7 @@ impl Notifications {
 						index, incoming.peer_id, incoming.set_id, incoming_index
 					);
 					debug_assert!(false);
-					return
+					return None
 				}
 
 				trace!(target: "sub-libp2p", "PSM => Reject({:?}, {}, {:?}): Rejecting connections.",
@@ -976,10 +1177,20 @@ impl Notifications {
 				}
 
 				*state = PeerState::Disabled { connections, backoff_until };
+				Some((incoming.set_id, incoming.peer_id))
+			},
+			// connection to peer may have been closed already
+			st @ PeerState::Disabled { .. } | st @ PeerState::Backoff { .. } => {
+				*state = st;
+				None
+			},
+			peer => {
+				error!(
+					target: LOG_TARGET,
+					"State mismatch in libp2p: Expected alive incoming. Got {peer:?}.",
+				);
+				None
 			},
-			peer => error!(target: "sub-libp2p",
-				"State mismatch in libp2p: Expected alive incoming. Got {:?}.",
-				peer),
 		}
 	}
 }
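
Accepting an inbound substream is now a two-step process: `Message::Accept` from `ProtocolController` lands in `peerset_report_preaccept`, the stored handshake is forwarded to the protocol via `report_incoming_substream()`, and the final decision arrives either immediately (`ValidationCallResult::Delegated`) or later through a oneshot channel that `poll()` resolves into `protocol_report_accept`/`protocol_report_reject`. A self-contained illustration of the deferred path, using stand-in types for the crate-internal `ValidationResult` and `IncomingIndex`:

    use futures::{future::BoxFuture, stream::FuturesUnordered, StreamExt};
    use tokio::sync::oneshot;

    #[derive(Debug)]
    enum ValidationResult { Accept, Reject }

    #[derive(Debug)]
    struct IncomingIndex(u64);

    #[tokio::main]
    async fn main() {
        // One pending future per inbound substream awaiting validation by its protocol.
        let mut pending: FuturesUnordered<
            BoxFuture<'static, (Result<ValidationResult, oneshot::error::RecvError>, IncomingIndex)>,
        > = FuturesUnordered::new();

        // Behaviour side: keep the receiver, give the sender to the protocol.
        let (tx, rx) = oneshot::channel();
        let index = IncomingIndex(0);
        pending.push(Box::pin(async move { (rx.await, index) }));

        // Protocol side: validate the handshake and report the verdict.
        tx.send(ValidationResult::Accept).unwrap();

        // Behaviour side (inside `poll()` in the real code): map the result onto
        // `protocol_report_accept` / `protocol_report_reject`.
        if let Some((result, index)) = pending.next().await {
            match result {
                Ok(ValidationResult::Accept) => println!("accept {index:?}"),
                Ok(ValidationResult::Reject) => println!("reject {index:?}"),
                Err(_) => println!("protocol has shut down"),
            }
        }
    }
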
@@ -1021,6 +1232,7 @@ impl NetworkBehaviour for Notifications {
 				send_back_addr: remote_addr.clone(),
 			},
 			self.notif_protocols.clone(),
+			self.metrics.clone(),
 		))
 	}
 
@@ -1035,6 +1247,7 @@ impl NetworkBehaviour for Notifications {
 			peer,
 			ConnectedPoint::Dialer { address: addr.clone(), role_override },
 			self.notif_protocols.clone(),
+			self.metrics.clone(),
 		))
 	}
 
@@ -1195,7 +1408,12 @@ impl NetworkBehaviour for Notifications {
 						},
 
 						// Incoming => Incoming | Disabled | Backoff | Ø
-						PeerState::Incoming { mut connections, backoff_until, incoming_index } => {
+						PeerState::Incoming {
+							mut connections,
+							backoff_until,
+							incoming_index,
+							peerset_rejected,
+						} => {
 							trace!(
 								target: "sub-libp2p",
 								"Libp2p => Disconnected({}, {:?}, {:?}): OpenDesiredByRemote.",
@@ -1274,6 +1492,7 @@ impl NetworkBehaviour for Notifications {
 									connections,
 									backoff_until,
 									incoming_index,
+									peerset_rejected,
 								};
 							}
 						},
@@ -1313,7 +1532,7 @@ impl NetworkBehaviour for Notifications {
 											let event = NotificationsOut::CustomProtocolReplaced {
 												peer_id,
 												set_id,
-												notifications_sink: replacement_sink,
+												notifications_sink: replacement_sink.clone(),
 											};
 											self.events.push_back(ToSwarm::GenerateEvent(event));
 										}
@@ -1474,7 +1693,7 @@ impl NetworkBehaviour for Notifications {
 		event: THandlerOutEvent<Self>,
 	) {
 		match event {
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index } => {
+			NotifsHandlerOut::OpenDesiredByRemote { protocol_index, handshake } => {
 				let set_id = SetId::from(protocol_index);
 
 				trace!(target: "sub-libp2p",
@@ -1495,7 +1714,12 @@ impl NetworkBehaviour for Notifications {
 
 				match mem::replace(entry.get_mut(), PeerState::Poisoned) {
 					// Incoming => Incoming
-					PeerState::Incoming { mut connections, backoff_until, incoming_index } => {
+					PeerState::Incoming {
+						mut connections,
+						backoff_until,
+						incoming_index,
+						peerset_rejected,
+					} => {
 						debug_assert!(connections
 							.iter()
 							.any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)));
@@ -1523,8 +1747,12 @@ impl NetworkBehaviour for Notifications {
 							debug_assert!(false);
 						}
 
-						*entry.into_mut() =
-							PeerState::Incoming { connections, backoff_until, incoming_index };
+						*entry.into_mut() = PeerState::Incoming {
+							connections,
+							backoff_until,
+							incoming_index,
+							peerset_rejected,
+						};
 					},
 
 					PeerState::Enabled { mut connections } => {
@@ -1588,11 +1816,13 @@ impl NetworkBehaviour for Notifications {
 									set_id,
 									alive: true,
 									incoming_id,
+									handshake,
 								});
 
 								*entry.into_mut() = PeerState::Incoming {
 									connections,
 									backoff_until,
+									peerset_rejected: false,
 									incoming_index: incoming_id,
 								};
 							} else {
@@ -1725,7 +1955,7 @@ impl NetworkBehaviour for Notifications {
 								let event = NotificationsOut::CustomProtocolReplaced {
 									peer_id,
 									set_id,
-									notifications_sink: replacement_sink,
+									notifications_sink: replacement_sink.clone(),
 								};
 								self.events.push_back(ToSwarm::GenerateEvent(event));
 							}
@@ -1830,8 +2060,13 @@ impl NetworkBehaviour for Notifications {
 									peer_id,
 									set_id,
 									inbound,
-									negotiated_fallback,
-									received_handshake,
+									direction: if inbound {
+										Direction::Inbound
+									} else {
+										Direction::Outbound
+									},
+									received_handshake: received_handshake.clone(),
+									negotiated_fallback: negotiated_fallback.clone(),
 									notifications_sink: notifications_sink.clone(),
 								};
 								self.events.push_back(ToSwarm::GenerateEvent(event));
@@ -1979,8 +2214,11 @@ impl NetworkBehaviour for Notifications {
 						peer_id,
 						set_id,
 					);
-					let event = NotificationsOut::Notification { peer_id, set_id, message };
-
+					let event = NotificationsOut::Notification {
+						peer_id,
+						set_id,
+						message: message.clone(),
+					};
 					self.events.push_back(ToSwarm::GenerateEvent(event));
 				} else {
 					trace!(
@@ -2009,10 +2247,10 @@ impl NetworkBehaviour for Notifications {
 		loop {
 			match futures::Stream::poll_next(Pin::new(&mut self.from_protocol_controllers), cx) {
 				Poll::Ready(Some(Message::Accept(index))) => {
-					self.peerset_report_accept(index);
+					self.peerset_report_preaccept(index);
 				},
 				Poll::Ready(Some(Message::Reject(index))) => {
-					self.peerset_report_reject(index);
+					let _ = self.peerset_report_reject(index);
 				},
 				Poll::Ready(Some(Message::Connect { peer_id, set_id, .. })) => {
 					self.peerset_report_connect(peer_id, set_id);
@@ -2031,6 +2269,43 @@ impl NetworkBehaviour for Notifications {
 			}
 		}
 
+		// poll commands from protocols
+		loop {
+			match futures::Stream::poll_next(Pin::new(&mut self.command_streams), cx) {
+				Poll::Ready(Some((set_id, command))) => match command {
+					NotificationCommand::SetHandshake(handshake) => {
+						self.set_notif_protocol_handshake(set_id.into(), handshake);
+					},
+					NotificationCommand::OpenSubstream(_peer) |
+					NotificationCommand::CloseSubstream(_peer) => {
+						todo!("substream control not implemented");
+					},
+				},
+				Poll::Ready(None) => {
+					error!(target: LOG_TARGET, "Protocol command streams have been shut down");
+					break
+				},
+				Poll::Pending => break,
+			}
+		}
+
+		while let Poll::Ready(Some((result, index))) =
+			self.pending_inbound_validations.poll_next_unpin(cx)
+		{
+			match result {
+				Ok(ValidationResult::Accept) => {
+					self.protocol_report_accept(index);
+				},
+				Ok(ValidationResult::Reject) => {
+					self.protocol_report_reject(index);
+				},
+				Err(_) => {
+					error!(target: LOG_TARGET, "Protocol has shut down");
+					break
+				},
+			}
+		}
+
 		while let Poll::Ready(Some((delay_id, peer_id, set_id))) =
 			Pin::new(&mut self.delays).poll_next(cx)
 		{
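
Commands issued by protocols reach the behaviour through a single `tokio_stream::StreamMap` keyed by set id, so one `poll_next` call multiplexes every protocol's command stream and yields `(set_id, command)` pairs; only `SetHandshake` is handled so far, and explicit substream open/close is still a `todo!()`. A small standalone illustration of the `StreamMap` multiplexing (the command type here is a placeholder):

    use tokio_stream::{StreamExt, StreamMap};

    #[derive(Debug)]
    enum Command { SetHandshake(Vec<u8>) }

    #[tokio::main]
    async fn main() {
        // One command stream per notification protocol, keyed by its set id.
        let streams = vec![
            (0usize, tokio_stream::iter(vec![Command::SetHandshake(vec![1])])),
            (1usize, tokio_stream::iter(vec![Command::SetHandshake(vec![2])])),
        ];
        let mut commands = StreamMap::from_iter(streams);

        // Each yielded item carries the set id of the stream it came from, which is
        // how `poll()` knows whose handshake to update.
        while let Some((set_id, command)) = commands.next().await {
            println!("set {set_id}: {command:?}");
        }
    }
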
@@ -2153,7 +2428,10 @@ mod tests {
 		}
 	}
 
-	fn development_notifs() -> (Notifications, ProtocolController) {
+	fn development_notifs(
+	) -> (Notifications, ProtocolController, Box<dyn crate::service::traits::NotificationService>) {
+		let (protocol_handle_pair, notif_service) =
+			crate::protocol::notifications::service::notification_service("/proto/1".into());
 		let (to_notifications, from_controller) =
 			tracing_unbounded("test_controller_to_notifications", 10_000);
 
@@ -2169,24 +2447,31 @@ mod tests {
 			Box::new(MockPeerStore {}),
 		);
 
+		let (notif_handle, command_stream) = protocol_handle_pair.split();
 		(
 			Notifications::new(
 				vec![handle],
 				from_controller,
-				iter::once(ProtocolConfig {
-					name: "/foo".into(),
-					fallback_names: Vec::new(),
-					handshake: vec![1, 2, 3, 4],
-					max_notification_size: u64::MAX,
-				}),
+				&None,
+				iter::once((
+					ProtocolConfig {
+						name: "/foo".into(),
+						fallback_names: Vec::new(),
+						handshake: vec![1, 2, 3, 4],
+						max_notification_size: u64::MAX,
+					},
+					notif_handle,
+					command_stream,
+				)),
 			),
 			controller,
+			notif_service,
 		)
 	}
 
 	#[test]
 	fn update_handshake() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 
 		let inner = notif.notif_protocols.get_mut(0).unwrap().handshake.read().clone();
 		assert_eq!(inner, vec![1, 2, 3, 4]);
@@ -2201,14 +2486,14 @@ mod tests {
 	#[should_panic]
 	#[cfg(debug_assertions)]
 	fn update_unknown_handshake() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 
 		notif.set_notif_protocol_handshake(1337.into(), vec![5, 6, 7, 8]);
 	}
 
 	#[test]
 	fn disconnect_backoff_peer() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 
 		let peer = PeerId::random();
 		notif.peers.insert(
@@ -2225,7 +2510,7 @@ mod tests {
 
 	#[test]
 	fn disconnect_pending_request() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 
 		notif.peers.insert(
@@ -2242,7 +2527,7 @@ mod tests {
 
 	#[test]
 	fn disconnect_requested_peer() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 
 		let peer = PeerId::random();
 		notif.peers.insert((peer, 0.into()), PeerState::Requested);
@@ -2253,7 +2538,7 @@ mod tests {
 
 	#[test]
 	fn disconnect_disabled_peer() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		notif.peers.insert(
 			(peer, 0.into()),
@@ -2269,7 +2554,7 @@ mod tests {
 
 	#[test]
 	fn remote_opens_connection_and_substream() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
 		let connected = ConnectedPoint::Listener {
@@ -2299,7 +2584,10 @@ mod tests {
 		notif.on_connection_handler_event(
 			peer,
 			conn,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
 		);
 
 		if let Some(&PeerState::Incoming { ref connections, backoff_until: None, .. }) =
@@ -2319,7 +2607,7 @@ mod tests {
 
 	#[tokio::test]
 	async fn disconnect_remote_substream_before_handled_by_controller() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
 		let connected = ConnectedPoint::Listener {
@@ -2339,7 +2627,10 @@ mod tests {
 		notif.on_connection_handler_event(
 			peer,
 			conn,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
 		);
 		notif.disconnect_peer(&peer, 0.into());
 
@@ -2355,7 +2646,7 @@ mod tests {
 
 	#[test]
 	fn peerset_report_connect_backoff() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let set_id = SetId::from(0);
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
@@ -2393,7 +2684,7 @@ mod tests {
 				peer_id: peer,
 				connection_id: conn,
 				endpoint: &connected.clone(),
-				handler: NotifsHandler::new(peer, connected, vec![]),
+				handler: NotifsHandler::new(peer, connected, vec![], None),
 				remaining_established: 0usize,
 			},
 		));
@@ -2420,7 +2711,7 @@ mod tests {
 
 	#[test]
 	fn peerset_connect_incoming() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
 		let set_id = SetId::from(0);
@@ -2444,19 +2735,22 @@ mod tests {
 		notif.on_connection_handler_event(
 			peer,
 			conn,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
 		);
 
 		// attempt to connect to the peer and verify that the peer state is `Enabled`;
 		// we rely on implementation detail that incoming indices are counted from 0
 		// to not mock the `Peerset`
-		notif.peerset_report_accept(IncomingIndex(0));
+		notif.protocol_report_accept(IncomingIndex(0));
 		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. })));
 	}
 
 	#[test]
 	fn peerset_disconnect_disable_pending_enable() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let set_id = SetId::from(0);
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
@@ -2503,7 +2797,7 @@ mod tests {
 
 	#[test]
 	fn peerset_disconnect_enabled() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
 		let set_id = SetId::from(0);
@@ -2525,11 +2819,14 @@ mod tests {
 		notif.on_connection_handler_event(
 			peer,
 			conn,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
 		);
 		// we rely on the implementation detail that incoming indices are counted from 0
 		// to not mock the `Peerset`
-		notif.peerset_report_accept(IncomingIndex(0));
+		notif.protocol_report_accept(IncomingIndex(0));
 		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. })));
 
 		// disconnect peer and verify that the state is `Disabled`
@@ -2539,7 +2836,7 @@ mod tests {
 
 	#[test]
 	fn peerset_disconnect_requested() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let set_id = SetId::from(0);
 
@@ -2554,7 +2851,7 @@ mod tests {
 
 	#[test]
 	fn peerset_disconnect_pending_request() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let set_id = SetId::from(0);
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
@@ -2587,7 +2884,7 @@ mod tests {
 				peer_id: peer,
 				connection_id: conn,
 				endpoint: &connected.clone(),
-				handler: NotifsHandler::new(peer, connected, vec![]),
+				handler: NotifsHandler::new(peer, connected, vec![], None),
 				remaining_established: 0usize,
 			},
 		));
@@ -2607,7 +2904,7 @@ mod tests {
 
 	#[test]
 	fn peerset_accept_peer_not_alive() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
 		let set_id = SetId::from(0);
@@ -2631,7 +2928,10 @@ mod tests {
 		notif.on_connection_handler_event(
 			peer,
 			conn,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
 		);
 		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. })));
 
@@ -2647,14 +2947,14 @@ mod tests {
 			IncomingPeer { alive: false, incoming_id: IncomingIndex(0), .. },
 		));
 
-		notif.peerset_report_accept(IncomingIndex(0));
+		notif.protocol_report_accept(IncomingIndex(0));
 		assert_eq!(notif.incoming.len(), 0);
 		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(PeerState::Disabled { .. })));
 	}
 
 	#[test]
 	fn secondary_connection_peer_state_incoming() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
 		let conn2 = ConnectionId::new_unchecked(1);
@@ -2678,7 +2978,10 @@ mod tests {
 		notif.on_connection_handler_event(
 			peer,
 			conn,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
 		);
 		if let Some(PeerState::Incoming { connections, .. }) = notif.peers.get(&(peer, set_id)) {
 			assert_eq!(connections.len(), 1);
@@ -2709,7 +3012,7 @@ mod tests {
 
 	#[test]
 	fn close_connection_for_disabled_peer() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
 		let set_id = SetId::from(0);
@@ -2734,7 +3037,7 @@ mod tests {
 				peer_id: peer,
 				connection_id: conn,
 				endpoint: &connected.clone(),
-				handler: NotifsHandler::new(peer, connected, vec![]),
+				handler: NotifsHandler::new(peer, connected, vec![], None),
 				remaining_established: 0usize,
 			},
 		));
@@ -2743,7 +3046,7 @@ mod tests {
 
 	#[test]
 	fn close_connection_for_incoming_peer_one_connection() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
 		let set_id = SetId::from(0);
@@ -2766,7 +3069,10 @@ mod tests {
 		notif.on_connection_handler_event(
 			peer,
 			conn,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
 		);
 		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. })));
 
@@ -2775,7 +3081,7 @@ mod tests {
 				peer_id: peer,
 				connection_id: conn,
 				endpoint: &connected.clone(),
-				handler: NotifsHandler::new(peer, connected, vec![]),
+				handler: NotifsHandler::new(peer, connected, vec![], None),
 				remaining_established: 0usize,
 			},
 		));
@@ -2788,7 +3094,7 @@ mod tests {
 
 	#[test]
 	fn close_connection_for_incoming_peer_two_connections() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
 		let conn1 = ConnectionId::new_unchecked(1);
@@ -2815,7 +3121,10 @@ mod tests {
 		notif.on_connection_handler_event(
 			peer,
 			conn,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
 		);
 		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. })));
 
@@ -2842,7 +3151,7 @@ mod tests {
 				peer_id: peer,
 				connection_id: conn,
 				endpoint: &connected.clone(),
-				handler: NotifsHandler::new(peer, connected, vec![]),
+				handler: NotifsHandler::new(peer, connected, vec![], None),
 				remaining_established: 0usize,
 			},
 		));
@@ -2857,7 +3166,7 @@ mod tests {
 
 	#[test]
 	fn connection_and_substream_open() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
 		let set_id = SetId::from(0);
@@ -2882,13 +3191,16 @@ mod tests {
 		notif.on_connection_handler_event(
 			peer,
 			conn,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
 		);
 		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. })));
 
 		// We rely on the implementation detail that incoming indices are counted
 		// from 0 to not mock the `Peerset`.
-		notif.peerset_report_accept(IncomingIndex(0));
+		notif.protocol_report_accept(IncomingIndex(0));
 		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. })));
 
 		// open new substream
@@ -2911,7 +3223,7 @@ mod tests {
 
 	#[test]
 	fn connection_closed_sink_replaced() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let conn1 = ConnectionId::new_unchecked(0);
 		let conn2 = ConnectionId::new_unchecked(1);
@@ -2947,7 +3259,10 @@ mod tests {
 		notif.on_connection_handler_event(
 			peer,
 			conn2,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
 		);
 
 		if let Some(PeerState::Enabled { connections, .. }) = notif.peers.get(&(peer, set_id)) {
@@ -2984,7 +3299,7 @@ mod tests {
 				peer_id: peer,
 				connection_id: conn1,
 				endpoint: &connected.clone(),
-				handler: NotifsHandler::new(peer, connected, vec![]),
+				handler: NotifsHandler::new(peer, connected, vec![], None),
 				remaining_established: 0usize,
 			},
 		));
@@ -3005,7 +3320,7 @@ mod tests {
 
 	#[test]
 	fn dial_failure_for_requested_peer() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let set_id = SetId::from(0);
 
@@ -3028,7 +3343,7 @@ mod tests {
 
 	#[tokio::test]
 	async fn write_notification() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
 		let set_id = SetId::from(0);
@@ -3077,7 +3392,7 @@ mod tests {
 
 	#[test]
 	fn peerset_report_connect_backoff_expired() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let set_id = SetId::from(0);
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
@@ -3110,7 +3425,7 @@ mod tests {
 				peer_id: peer,
 				connection_id: conn,
 				endpoint: &connected.clone(),
-				handler: NotifsHandler::new(peer, connected, vec![]),
+				handler: NotifsHandler::new(peer, connected, vec![], None),
 				remaining_established: 0usize,
 			},
 		));
@@ -3125,7 +3440,7 @@ mod tests {
 
 	#[test]
 	fn peerset_report_disconnect_disabled() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let set_id = SetId::from(0);
 		let conn = ConnectionId::new_unchecked(0);
@@ -3151,7 +3466,7 @@ mod tests {
 
 	#[test]
 	fn peerset_report_disconnect_backoff() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let set_id = SetId::from(0);
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
@@ -3184,7 +3499,7 @@ mod tests {
 				peer_id: peer,
 				connection_id: conn,
 				endpoint: &connected.clone(),
-				handler: NotifsHandler::new(peer, connected, vec![]),
+				handler: NotifsHandler::new(peer, connected, vec![], None),
 				remaining_established: 0usize,
 			},
 		));
@@ -3197,7 +3512,7 @@ mod tests {
 
 	#[test]
 	fn peer_is_backed_off_if_both_connections_get_closed_while_peer_is_disabled_with_back_off() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let set_id = SetId::from(0);
 		let peer = PeerId::random();
 		let conn1 = ConnectionId::new_unchecked(0);
@@ -3247,7 +3562,7 @@ mod tests {
 				peer_id: peer,
 				connection_id: conn1,
 				endpoint: &connected.clone(),
-				handler: NotifsHandler::new(peer, connected.clone(), vec![]),
+				handler: NotifsHandler::new(peer, connected.clone(), vec![], None),
 				remaining_established: 0usize,
 			},
 		));
@@ -3261,7 +3576,7 @@ mod tests {
 				peer_id: peer,
 				connection_id: conn2,
 				endpoint: &connected.clone(),
-				handler: NotifsHandler::new(peer, connected, vec![]),
+				handler: NotifsHandler::new(peer, connected, vec![], None),
 				remaining_established: 0usize,
 			},
 		));
@@ -3270,7 +3585,7 @@ mod tests {
 
 	#[test]
 	fn inject_connection_closed_incoming_with_backoff() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let set_id = SetId::from(0);
 		let conn = ConnectionId::new_unchecked(0);
@@ -3294,7 +3609,10 @@ mod tests {
 		notif.on_connection_handler_event(
 			peer,
 			conn,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
 		);
 
 		// manually add backoff for the entry
@@ -3312,7 +3630,7 @@ mod tests {
 				peer_id: peer,
 				connection_id: conn,
 				endpoint: &connected.clone(),
-				handler: NotifsHandler::new(peer, connected, vec![]),
+				handler: NotifsHandler::new(peer, connected, vec![], None),
 				remaining_established: 0usize,
 			},
 		));
@@ -3321,7 +3639,7 @@ mod tests {
 
 	#[test]
 	fn two_connections_inactive_connection_gets_closed_peer_state_is_still_incoming() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let conn1 = ConnectionId::new_unchecked(0);
 		let conn2 = ConnectionId::new_unchecked(1);
@@ -3355,7 +3673,10 @@ mod tests {
 		notif.on_connection_handler_event(
 			peer,
 			conn1,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
 		);
 		assert!(std::matches!(
 			notif.peers.get_mut(&(peer, 0.into())),
@@ -3367,7 +3688,7 @@ mod tests {
 				peer_id: peer,
 				connection_id: conn2,
 				endpoint: &connected.clone(),
-				handler: NotifsHandler::new(peer, connected, vec![]),
+				handler: NotifsHandler::new(peer, connected, vec![], None),
 				remaining_established: 0usize,
 			},
 		));
@@ -3376,7 +3697,7 @@ mod tests {
 
 	#[test]
 	fn two_connections_active_connection_gets_closed_peer_state_is_disabled() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let conn1 = ConnectionId::new_unchecked(0);
 		let conn2 = ConnectionId::new_unchecked(1);
@@ -3413,7 +3734,10 @@ mod tests {
 		notif.on_connection_handler_event(
 			peer,
 			conn1,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
 		);
 		assert!(std::matches!(
 			notif.peers.get_mut(&(peer, 0.into())),
@@ -3425,7 +3749,7 @@ mod tests {
 				peer_id: peer,
 				connection_id: conn1,
 				endpoint: &connected.clone(),
-				handler: NotifsHandler::new(peer, connected, vec![]),
+				handler: NotifsHandler::new(peer, connected, vec![], None),
 				remaining_established: 0usize,
 			},
 		));
@@ -3434,7 +3758,7 @@ mod tests {
 
 	#[test]
 	fn inject_connection_closed_for_active_connection() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let conn1 = ConnectionId::new_unchecked(0);
 		let conn2 = ConnectionId::new_unchecked(1);
@@ -3494,7 +3818,7 @@ mod tests {
 				peer_id: peer,
 				connection_id: conn1,
 				endpoint: &connected.clone(),
-				handler: NotifsHandler::new(peer, connected, vec![]),
+				handler: NotifsHandler::new(peer, connected, vec![], None),
 				remaining_established: 0usize,
 			},
 		));
@@ -3502,7 +3826,7 @@ mod tests {
 
 	#[test]
 	fn inject_dial_failure_for_pending_request() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let set_id = SetId::from(0);
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
@@ -3535,7 +3859,7 @@ mod tests {
 				peer_id: peer,
 				connection_id: conn,
 				endpoint: &connected.clone(),
-				handler: NotifsHandler::new(peer, connected, vec![]),
+				handler: NotifsHandler::new(peer, connected, vec![], None),
 				remaining_established: 0usize,
 			},
 		));
@@ -3565,7 +3889,7 @@ mod tests {
 
 	#[test]
 	fn peerstate_incoming_open_desired_by_remote() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let set_id = SetId::from(0);
 		let conn1 = ConnectionId::new_unchecked(0);
@@ -3599,7 +3923,10 @@ mod tests {
 		notif.on_connection_handler_event(
 			peer,
 			conn1,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
 		);
 		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. })));
 
@@ -3607,7 +3934,10 @@ mod tests {
 		notif.on_connection_handler_event(
 			peer,
 			conn2,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
 		);
 
 		if let Some(PeerState::Incoming { ref connections, .. }) = notif.peers.get(&(peer, set_id))
@@ -3619,7 +3949,7 @@ mod tests {
 
 	#[tokio::test]
 	async fn remove_backoff_peer_after_timeout() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let set_id = SetId::from(0);
 		let conn = ConnectionId::new_unchecked(0);
@@ -3652,7 +3982,7 @@ mod tests {
 				peer_id: peer,
 				connection_id: conn,
 				endpoint: &connected.clone(),
-				handler: NotifsHandler::new(peer, connected, vec![]),
+				handler: NotifsHandler::new(peer, connected, vec![], None),
 				remaining_established: 0usize,
 			},
 		));
@@ -3697,7 +4027,7 @@ mod tests {
 
 	#[tokio::test]
 	async fn reschedule_disabled_pending_enable_when_connection_not_closed() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
 		let set_id = SetId::from(0);
@@ -3726,13 +4056,16 @@ mod tests {
 		notif.on_connection_handler_event(
 			peer,
 			conn,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
 		);
 		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. })));
 
 		// we rely on the implementation detail that incoming indices are counted from 0
 		// to not mock the `Peerset`
-		notif.peerset_report_accept(IncomingIndex(0));
+		notif.protocol_report_accept(IncomingIndex(0));
 		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. })));
 
 		let event = conn_yielder.open_substream(peer, 0, connected, vec![1, 2, 3, 4]);
@@ -3815,7 +4148,7 @@ mod tests {
 	#[should_panic]
 	#[cfg(debug_assertions)]
 	fn peerset_report_connect_with_enabled_peer() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
 		let set_id = SetId::from(0);
@@ -3840,7 +4173,10 @@ mod tests {
 		notif.on_connection_handler_event(
 			peer,
 			conn,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
 		);
 		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. })));
 
@@ -3865,7 +4201,7 @@ mod tests {
 	#[test]
 	#[cfg(debug_assertions)]
 	fn peerset_report_connect_with_disabled_pending_enable_peer() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let set_id = SetId::from(0);
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
@@ -3911,7 +4247,7 @@ mod tests {
 	#[test]
 	#[cfg(debug_assertions)]
 	fn peerset_report_connect_with_requested_peer() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let set_id = SetId::from(0);
 
@@ -3927,7 +4263,7 @@ mod tests {
 	#[test]
 	#[cfg(debug_assertions)]
 	fn peerset_report_connect_with_pending_requested() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let set_id = SetId::from(0);
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
@@ -3960,7 +4296,7 @@ mod tests {
 				peer_id: peer,
 				connection_id: conn,
 				endpoint: &connected.clone(),
-				handler: NotifsHandler::new(peer, connected, vec![]),
+				handler: NotifsHandler::new(peer, connected, vec![], None),
 				remaining_established: 0usize,
 			},
 		));
@@ -3984,7 +4320,7 @@ mod tests {
 	#[test]
 	#[cfg(debug_assertions)]
 	fn peerset_report_connect_with_incoming_peer() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let set_id = SetId::from(0);
 		let conn = ConnectionId::new_unchecked(0);
@@ -4008,7 +4344,10 @@ mod tests {
 		notif.on_connection_handler_event(
 			peer,
 			conn,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
 		);
 		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. })));
 
@@ -4019,7 +4358,7 @@ mod tests {
 	#[test]
 	#[cfg(debug_assertions)]
 	fn peerset_report_disconnect_with_incoming_peer() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let set_id = SetId::from(0);
 		let conn = ConnectionId::new_unchecked(0);
@@ -4043,7 +4382,10 @@ mod tests {
 		notif.on_connection_handler_event(
 			peer,
 			conn,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
 		);
 		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. })));
 
@@ -4052,13 +4394,68 @@ mod tests {
 	}
 
 	#[test]
-	#[should_panic]
 	#[cfg(debug_assertions)]
-	fn peerset_report_accept_incoming_peer() {
-		let (mut notif, _controller) = development_notifs();
+	fn peerset_report_disconnect_with_incoming_peer_protocol_accepts() {
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
+		let set_id = SetId::from(0);
 		let conn = ConnectionId::new_unchecked(0);
+		let connected = ConnectedPoint::Listener {
+			local_addr: Multiaddr::empty(),
+			send_back_addr: Multiaddr::empty(),
+		};
+
+		notif.on_swarm_event(FromSwarm::ConnectionEstablished(
+			libp2p::swarm::behaviour::ConnectionEstablished {
+				peer_id: peer,
+				connection_id: conn,
+				endpoint: &connected,
+				failed_addresses: &[],
+				other_established: 0usize,
+			},
+		));
+		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Disabled { .. })));
+
+		// remote opens a substream, verify that peer state is updated to `Incoming`
+		notif.on_connection_handler_event(
+			peer,
+			conn,
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
+		);
+		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. })));
+
+		// `Peerset` wants to disconnect the peer but since it's still under validation,
+		// it won't be disabled automatically
+		notif.peerset_report_disconnect(peer, set_id);
+
+		let incoming_index = match notif.peers.get(&(peer, set_id)) {
+			Some(&PeerState::Incoming { peerset_rejected, incoming_index, .. }) => {
+				assert!(peerset_rejected);
+				incoming_index
+			},
+			state => panic!("invalid state: {state:?}"),
+		};
+
+		// the protocol accepted the peer but since `Peerset` wanted to disconnect it, the
+		// peer will be disabled
+		notif.protocol_report_accept(incoming_index);
+
+		match notif.peers.get(&(peer, set_id)) {
+			Some(&PeerState::Disabled { .. }) => {},
+			state => panic!("invalid state: {state:?}"),
+		};
+	}
+
+	#[test]
+	#[cfg(debug_assertions)]
+	fn peer_disconnected_protocol_accepts() {
+		let (mut notif, _controller, _notif_service) = development_notifs();
+		let peer = PeerId::random();
 		let set_id = SetId::from(0);
+		let conn = ConnectionId::new_unchecked(0);
 		let connected = ConnectedPoint::Listener {
 			local_addr: Multiaddr::empty(),
 			send_back_addr: Multiaddr::empty(),
@@ -4079,24 +4476,188 @@ mod tests {
 		notif.on_connection_handler_event(
 			peer,
 			conn,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
 		);
 		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. })));
 
-		assert!(std::matches!(
-			notif.incoming[0],
-			IncomingPeer { alive: true, incoming_id: IncomingIndex(0), .. },
+		assert!(notif.incoming.iter().any(|entry| entry.incoming_id == IncomingIndex(0)));
+		notif.disconnect_peer(&peer, set_id);
+
+		// the peer was already disconnected so the accept is obsolete; the peer state
+		// stays `Disabled` and the stale incoming entry is removed
+		notif.protocol_report_accept(IncomingIndex(0));
+
+		match notif.peers.get(&(peer, set_id)) {
+			Some(&PeerState::Disabled { .. }) => {},
+			state => panic!("invalid state: {state:?}"),
+		};
+
+		assert!(!notif.incoming.iter().any(|entry| entry.incoming_id == IncomingIndex(0)));
+	}
+
+	#[test]
+	#[cfg(debug_assertions)]
+	fn connection_closed_protocol_accepts() {
+		let (mut notif, _controller, _notif_service) = development_notifs();
+		let peer = PeerId::random();
+		let set_id = SetId::from(0);
+		let conn = ConnectionId::new_unchecked(0);
+		let connected = ConnectedPoint::Listener {
+			local_addr: Multiaddr::empty(),
+			send_back_addr: Multiaddr::empty(),
+		};
+
+		notif.on_swarm_event(FromSwarm::ConnectionEstablished(
+			libp2p::swarm::behaviour::ConnectionEstablished {
+				peer_id: peer,
+				connection_id: conn,
+				endpoint: &connected,
+				failed_addresses: &[],
+				other_established: 0usize,
+			},
+		));
+		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Disabled { .. })));
+
+		// remote opens a substream, verify that peer state is updated to `Incoming`
+		notif.on_connection_handler_event(
+			peer,
+			conn,
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
+		);
+		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. })));
+
+		notif.on_swarm_event(FromSwarm::ConnectionClosed(
+			libp2p::swarm::behaviour::ConnectionClosed {
+				peer_id: peer,
+				connection_id: ConnectionId::new_unchecked(0),
+				endpoint: &connected.clone(),
+				handler: NotifsHandler::new(peer, connected, vec![], None),
+				remaining_established: 0usize,
+			},
 		));
 
-		notif.peers.remove(&(peer, set_id));
-		notif.peerset_report_accept(IncomingIndex(0));
+		// connection closed, nothing to do
+		notif.protocol_report_accept(IncomingIndex(0));
+
+		match notif.peers.get(&(peer, set_id)) {
+			None => {},
+			state => panic!("invalid state: {state:?}"),
+		};
+	}
+
+	#[test]
+	#[cfg(debug_assertions)]
+	fn peer_disconnected_protocol_reject() {
+		let (mut notif, _controller, _notif_service) = development_notifs();
+		let peer = PeerId::random();
+		let set_id = SetId::from(0);
+		let conn = ConnectionId::new_unchecked(0);
+		let connected = ConnectedPoint::Listener {
+			local_addr: Multiaddr::empty(),
+			send_back_addr: Multiaddr::empty(),
+		};
+
+		notif.on_swarm_event(FromSwarm::ConnectionEstablished(
+			libp2p::swarm::behaviour::ConnectionEstablished {
+				peer_id: peer,
+				connection_id: conn,
+				endpoint: &connected,
+				failed_addresses: &[],
+				other_established: 0usize,
+			},
+		));
+		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Disabled { .. })));
+
+		// remote opens a substream, verify that peer state is updated to `Incoming`
+		notif.on_connection_handler_event(
+			peer,
+			conn,
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
+		);
+		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. })));
+
+		assert!(notif.incoming.iter().any(|entry| entry.incoming_id == IncomingIndex(0)));
+		notif.disconnect_peer(&peer, set_id);
+
+		// the peer was already disconnected, so the late reject has no effect on the
+		// peer state
+		notif.protocol_report_reject(IncomingIndex(0));
+
+		match notif.peers.get(&(peer, set_id)) {
+			Some(&PeerState::Disabled { .. }) => {},
+			state => panic!("invalid state: {state:?}"),
+		};
+
+		assert!(!notif.incoming.iter().any(|entry| entry.incoming_id == IncomingIndex(0)));
+	}
+
+	#[test]
+	#[cfg(debug_assertions)]
+	fn connection_closed_protocol_rejects() {
+		let (mut notif, _controller, _notif_service) = development_notifs();
+		let peer = PeerId::random();
+		let set_id = SetId::from(0);
+		let conn = ConnectionId::new_unchecked(0);
+		let connected = ConnectedPoint::Listener {
+			local_addr: Multiaddr::empty(),
+			send_back_addr: Multiaddr::empty(),
+		};
+
+		notif.on_swarm_event(FromSwarm::ConnectionEstablished(
+			libp2p::swarm::behaviour::ConnectionEstablished {
+				peer_id: peer,
+				connection_id: conn,
+				endpoint: &connected,
+				failed_addresses: &[],
+				other_established: 0usize,
+			},
+		));
+		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Disabled { .. })));
+
+		// remote opens a substream, verify that peer state is updated to `Incoming`
+		notif.on_connection_handler_event(
+			peer,
+			conn,
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
+		);
+		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. })));
+
+		notif.on_swarm_event(FromSwarm::ConnectionClosed(
+			libp2p::swarm::behaviour::ConnectionClosed {
+				peer_id: peer,
+				connection_id: ConnectionId::new_unchecked(0),
+				endpoint: &connected.clone(),
+				handler: NotifsHandler::new(peer, connected, vec![], None),
+				remaining_established: 0usize,
+			},
+		));
+
+		// connection closed, nothing to do
+		notif.protocol_report_reject(IncomingIndex(0));
+
+		match notif.peers.get(&(peer, set_id)) {
+			None => {},
+			state => panic!("invalid state: {state:?}"),
+		};
 	}
 
 	#[test]
 	#[should_panic]
 	#[cfg(debug_assertions)]
-	fn peerset_report_accept_not_incoming_peer() {
-		let (mut notif, _controller) = development_notifs();
+	fn protocol_report_accept_not_incoming_peer() {
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
 		let set_id = SetId::from(0);
@@ -4121,7 +4682,10 @@ mod tests {
 		notif.on_connection_handler_event(
 			peer,
 			conn,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
 		);
 		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. })));
 
@@ -4138,14 +4702,14 @@ mod tests {
 
 		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. })));
 		notif.incoming[0].alive = true;
-		notif.peerset_report_accept(IncomingIndex(0));
+		notif.protocol_report_accept(IncomingIndex(0));
 	}
 
 	#[test]
 	#[should_panic]
 	#[cfg(debug_assertions)]
 	fn inject_connection_closed_non_existent_peer() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let endpoint = ConnectedPoint::Listener {
 			local_addr: Multiaddr::empty(),
@@ -4157,7 +4721,7 @@ mod tests {
 				peer_id: peer,
 				connection_id: ConnectionId::new_unchecked(0),
 				endpoint: &endpoint.clone(),
-				handler: NotifsHandler::new(peer, endpoint, vec![]),
+				handler: NotifsHandler::new(peer, endpoint, vec![], None),
 				remaining_established: 0usize,
 			},
 		));
@@ -4165,7 +4729,7 @@ mod tests {
 
 	#[test]
 	fn disconnect_non_existent_peer() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let set_id = SetId::from(0);
 
@@ -4177,9 +4741,9 @@ mod tests {
 
 	#[test]
 	fn accept_non_existent_connection() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 
-		notif.peerset_report_accept(0.into());
+		notif.protocol_report_accept(0.into());
 
 		assert!(notif.peers.is_empty());
 		assert!(notif.incoming.is_empty());
@@ -4187,9 +4751,9 @@ mod tests {
 
 	#[test]
 	fn reject_non_existent_connection() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 
-		notif.peerset_report_reject(0.into());
+		notif.protocol_report_reject(0.into());
 
 		assert!(notif.peers.is_empty());
 		assert!(notif.incoming.is_empty());
@@ -4197,7 +4761,7 @@ mod tests {
 
 	#[test]
 	fn reject_non_active_connection() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
 		let set_id = SetId::from(0);
@@ -4221,61 +4785,24 @@ mod tests {
 		notif.on_connection_handler_event(
 			peer,
 			conn,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
 		);
 		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. })));
 
 		notif.incoming[0].alive = false;
-		notif.peerset_report_reject(0.into());
-
-		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. })));
-	}
-
-	#[test]
-	#[should_panic]
-	#[cfg(debug_assertions)]
-	fn reject_non_existent_peer_but_alive_connection() {
-		let (mut notif, _controller) = development_notifs();
-		let peer = PeerId::random();
-		let conn = ConnectionId::new_unchecked(0);
-		let set_id = SetId::from(0);
-		let connected = ConnectedPoint::Listener {
-			local_addr: Multiaddr::empty(),
-			send_back_addr: Multiaddr::empty(),
-		};
-
-		notif.on_swarm_event(FromSwarm::ConnectionEstablished(
-			libp2p::swarm::behaviour::ConnectionEstablished {
-				peer_id: peer,
-				connection_id: conn,
-				endpoint: &connected,
-				failed_addresses: &[],
-				other_established: 0usize,
-			},
-		));
-		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Disabled { .. })));
+		notif.protocol_report_reject(0.into());
 
-		// remote opens a substream, verify that peer state is updated to `Incoming`
-		notif.on_connection_handler_event(
-			peer,
-			conn,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
-		);
 		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. })));
-		assert!(std::matches!(
-			notif.incoming[0],
-			IncomingPeer { alive: true, incoming_id: IncomingIndex(0), .. },
-		));
-
-		notif.peers.remove(&(peer, set_id));
-		notif.peerset_report_reject(0.into());
 	}
 
 	#[test]
 	#[should_panic]
 	#[cfg(debug_assertions)]
 	fn inject_non_existent_connection_closed_for_incoming_peer() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
 		let set_id = SetId::from(0);
@@ -4299,7 +4826,10 @@ mod tests {
 		notif.on_connection_handler_event(
 			peer,
 			conn,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
 		);
 		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. })));
 
@@ -4308,7 +4838,7 @@ mod tests {
 				peer_id: peer,
 				connection_id: ConnectionId::new_unchecked(1337),
 				endpoint: &connected.clone(),
-				handler: NotifsHandler::new(peer, connected, vec![]),
+				handler: NotifsHandler::new(peer, connected, vec![], None),
 				remaining_established: 0usize,
 			},
 		));
@@ -4318,7 +4848,7 @@ mod tests {
 	#[should_panic]
 	#[cfg(debug_assertions)]
 	fn inject_non_existent_connection_closed_for_disabled_peer() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let set_id = SetId::from(0);
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
@@ -4343,7 +4873,7 @@ mod tests {
 				peer_id: peer,
 				connection_id: ConnectionId::new_unchecked(1337),
 				endpoint: &connected.clone(),
-				handler: NotifsHandler::new(peer, connected, vec![]),
+				handler: NotifsHandler::new(peer, connected, vec![], None),
 				remaining_established: 0usize,
 			},
 		));
@@ -4353,7 +4883,7 @@ mod tests {
 	#[should_panic]
 	#[cfg(debug_assertions)]
 	fn inject_non_existent_connection_closed_for_disabled_pending_enable() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let set_id = SetId::from(0);
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
@@ -4394,7 +4924,7 @@ mod tests {
 				peer_id: peer,
 				connection_id: ConnectionId::new_unchecked(1337),
 				endpoint: &connected.clone(),
-				handler: NotifsHandler::new(peer, connected, vec![]),
+				handler: NotifsHandler::new(peer, connected, vec![], None),
 				remaining_established: 0usize,
 			},
 		));
@@ -4404,7 +4934,7 @@ mod tests {
 	#[should_panic]
 	#[cfg(debug_assertions)]
 	fn inject_connection_closed_for_incoming_peer_state_mismatch() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
 		let set_id = SetId::from(0);
@@ -4428,7 +4958,10 @@ mod tests {
 		notif.on_connection_handler_event(
 			peer,
 			conn,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
 		);
 		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. })));
 		notif.incoming[0].alive = false;
@@ -4438,7 +4971,7 @@ mod tests {
 				peer_id: peer,
 				connection_id: conn,
 				endpoint: &connected.clone(),
-				handler: NotifsHandler::new(peer, connected, vec![]),
+				handler: NotifsHandler::new(peer, connected, vec![], None),
 				remaining_established: 0usize,
 			},
 		));
@@ -4448,7 +4981,7 @@ mod tests {
 	#[should_panic]
 	#[cfg(debug_assertions)]
 	fn inject_connection_closed_for_enabled_state_mismatch() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
 		let set_id = SetId::from(0);
@@ -4472,7 +5005,10 @@ mod tests {
 		notif.on_connection_handler_event(
 			peer,
 			conn,
-			NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+			NotifsHandlerOut::OpenDesiredByRemote {
+				protocol_index: 0,
+				handshake: vec![1, 3, 3, 7],
+			},
 		);
 		assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. })));
 
@@ -4485,7 +5021,7 @@ mod tests {
 				peer_id: peer,
 				connection_id: ConnectionId::new_unchecked(1337),
 				endpoint: &connected.clone(),
-				handler: NotifsHandler::new(peer, connected, vec![]),
+				handler: NotifsHandler::new(peer, connected, vec![], None),
 				remaining_established: 0usize,
 			},
 		));
@@ -4495,7 +5031,7 @@ mod tests {
 	#[should_panic]
 	#[cfg(debug_assertions)]
 	fn inject_connection_closed_for_backoff_peer() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let set_id = SetId::from(0);
 		let peer = PeerId::random();
 		let conn = ConnectionId::new_unchecked(0);
@@ -4528,7 +5064,7 @@ mod tests {
 				peer_id: peer,
 				connection_id: conn,
 				endpoint: &connected.clone(),
-				handler: NotifsHandler::new(peer, connected.clone(), vec![]),
+				handler: NotifsHandler::new(peer, connected.clone(), vec![], None),
 				remaining_established: 0usize,
 			},
 		));
@@ -4539,7 +5075,7 @@ mod tests {
 				peer_id: peer,
 				connection_id: conn,
 				endpoint: &connected.clone(),
-				handler: NotifsHandler::new(peer, connected, vec![]),
+				handler: NotifsHandler::new(peer, connected, vec![], None),
 				remaining_established: 0usize,
 			},
 		));
@@ -4549,7 +5085,7 @@ mod tests {
 	#[should_panic]
 	#[cfg(debug_assertions)]
 	fn open_result_ok_non_existent_peer() {
-		let (mut notif, _controller) = development_notifs();
+		let (mut notif, _controller, _notif_service) = development_notifs();
 		let conn = ConnectionId::new_unchecked(0);
 		let connected = ConnectedPoint::Listener {
 			local_addr: Multiaddr::empty(),
diff --git a/substrate/client/network/src/protocol/notifications/handler.rs b/substrate/client/network/src/protocol/notifications/handler.rs
index cffdec7d71e..28662be29fe 100644
--- a/substrate/client/network/src/protocol/notifications/handler.rs
+++ b/substrate/client/network/src/protocol/notifications/handler.rs
@@ -58,9 +58,12 @@
 //! [`NotifsHandlerIn::Open`] has gotten an answer.
 
 use crate::{
-	protocol::notifications::upgrade::{
-		NotificationsIn, NotificationsInSubstream, NotificationsOut, NotificationsOutSubstream,
-		UpgradeCollec,
+	protocol::notifications::{
+		service::metrics,
+		upgrade::{
+			NotificationsIn, NotificationsInSubstream, NotificationsOut, NotificationsOutSubstream,
+			UpgradeCollec,
+		},
 	},
 	types::ProtocolName,
 };
@@ -92,7 +95,7 @@ use std::{
 
 /// Number of pending notifications in asynchronous contexts.
 /// See [`NotificationsSink::reserve_notification`] for context.
-const ASYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 8;
+pub(crate) const ASYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 8;
 
 /// Number of pending notifications in synchronous contexts.
 const SYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 2048;
@@ -126,11 +129,19 @@ pub struct NotifsHandler {
 	events_queue: VecDeque<
 		ConnectionHandlerEvent<NotificationsOut, usize, NotifsHandlerOut, NotifsHandlerError>,
 	>,
+
+	/// Metrics.
+	metrics: Option<Arc<metrics::Metrics>>,
 }
 
 impl NotifsHandler {
 	/// Creates new [`NotifsHandler`].
-	pub fn new(peer_id: PeerId, endpoint: ConnectedPoint, protocols: Vec<ProtocolConfig>) -> Self {
+	pub fn new(
+		peer_id: PeerId,
+		endpoint: ConnectedPoint,
+		protocols: Vec<ProtocolConfig>,
+		metrics: Option<metrics::Metrics>,
+	) -> Self {
 		Self {
 			protocols: protocols
 				.into_iter()
@@ -148,6 +159,7 @@ impl NotifsHandler {
 			endpoint,
 			when_connection_open: Instant::now(),
 			events_queue: VecDeque::with_capacity(16),
+			metrics: metrics.map(Arc::new),
 		}
 	}
 }
@@ -303,6 +315,8 @@ pub enum NotifsHandlerOut {
 	OpenDesiredByRemote {
 		/// Index of the protocol in the list of protocols passed at initialization.
 		protocol_index: usize,
+		/// Received handshake.
+		handshake: Vec<u8>,
 	},
 
 	/// The remote would like the substreams to be closed. Send a [`NotifsHandlerIn::Close`] in
@@ -331,6 +345,36 @@ pub enum NotifsHandlerOut {
 #[derive(Debug, Clone)]
 pub struct NotificationsSink {
 	inner: Arc<NotificationsSinkInner>,
+	metrics: Option<Arc<metrics::Metrics>>,
+}
+
+impl NotificationsSink {
+	/// Create new [`NotificationsSink`].
+	/// NOTE: only used for testing but must be `pub` as other crates in `client/network` use this.
+	pub fn new(
+		peer_id: PeerId,
+	) -> (Self, mpsc::Receiver<NotificationsSinkMessage>, mpsc::Receiver<NotificationsSinkMessage>)
+	{
+		let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE);
+		let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE);
+		(
+			NotificationsSink {
+				inner: Arc::new(NotificationsSinkInner {
+					peer_id,
+					async_channel: FuturesMutex::new(async_tx),
+					sync_channel: Mutex::new(Some(sync_tx)),
+				}),
+				metrics: None,
+			},
+			async_rx,
+			sync_rx,
+		)
+	}
+
+	/// Get reference to metrics.
+	pub fn metrics(&self) -> &Option<Arc<metrics::Metrics>> {
+		&self.metrics
+	}
 }
 
 #[derive(Debug)]
@@ -350,8 +394,8 @@ struct NotificationsSinkInner {
 
 /// Message emitted through the [`NotificationsSink`] and processed by the background task
 /// dedicated to the peer.
-#[derive(Debug)]
-enum NotificationsSinkMessage {
+#[derive(Debug, PartialEq, Eq)]
+pub enum NotificationsSinkMessage {
 	/// Message emitted by [`NotificationsSink::reserve_notification`] and
 	/// [`NotificationsSink::write_notification_now`].
 	Notification { message: Vec<u8> },
@@ -379,8 +423,8 @@ impl NotificationsSink {
 		let mut lock = self.inner.sync_channel.lock();
 
 		if let Some(tx) = lock.as_mut() {
-			let result =
-				tx.try_send(NotificationsSinkMessage::Notification { message: message.into() });
+			let message = message.into();
+			let result = tx.try_send(NotificationsSinkMessage::Notification { message });
 
 			if result.is_err() {
 				// Cloning the `mpsc::Sender` guarantees the allocation of an extra spot in the
@@ -476,7 +520,10 @@ impl ConnectionHandler for NotifsHandler {
 				match protocol_info.state {
 					State::Closed { pending_opening } => {
 						self.events_queue.push_back(ConnectionHandlerEvent::Custom(
-							NotifsHandlerOut::OpenDesiredByRemote { protocol_index },
+							NotifsHandlerOut::OpenDesiredByRemote {
+								protocol_index,
+								handshake: in_substream_open.handshake,
+							},
 						));
 
 						protocol_info.state = State::OpenDesiredByRemote {
@@ -531,6 +578,7 @@ impl ConnectionHandler for NotifsHandler {
 								async_channel: FuturesMutex::new(async_tx),
 								sync_channel: Mutex::new(Some(sync_tx)),
 							}),
+							metrics: self.metrics.clone(),
 						};
 
 						self.protocols[protocol_index].state = State::Open {
@@ -881,6 +929,7 @@ pub mod tests {
 					async_channel: FuturesMutex::new(async_tx),
 					sync_channel: Mutex::new(Some(sync_tx)),
 				}),
+				metrics: None,
 			};
 			let (in_substream, out_substream) = MockSubstream::new();
 
@@ -1040,6 +1089,7 @@ pub mod tests {
 			},
 			peer_id: PeerId::random(),
 			events_queue: VecDeque::new(),
+			metrics: None,
 		}
 	}
 
@@ -1545,6 +1595,7 @@ pub mod tests {
 				async_channel: FuturesMutex::new(async_tx),
 				sync_channel: Mutex::new(Some(sync_tx)),
 			}),
+			metrics: None,
 		};
 
 		handler.protocols[0].state = State::Open {
@@ -1597,7 +1648,7 @@ pub mod tests {
 			assert!(std::matches!(
 				handler.poll(cx),
 				Poll::Ready(ConnectionHandlerEvent::Custom(
-					NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 },
+					NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0, .. },
 				))
 			));
 			assert!(std::matches!(
diff --git a/substrate/client/network/src/protocol/notifications/service/metrics.rs b/substrate/client/network/src/protocol/notifications/service/metrics.rs
new file mode 100644
index 00000000000..2a57d57c175
--- /dev/null
+++ b/substrate/client/network/src/protocol/notifications/service/metrics.rs
@@ -0,0 +1,130 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use crate::types::ProtocolName;
+
+use prometheus_endpoint::{
+	self as prometheus, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry,
+	U64,
+};
+
+use std::sync::Arc;
+
+/// Notification metrics.
+#[derive(Debug, Clone)]
+pub struct Metrics {
+	// Total number of opened substreams.
+	pub notifications_streams_opened_total: CounterVec<U64>,
+
+	/// Total number of closed substreams.
+	pub notifications_streams_closed_total: CounterVec<U64>,
+
+	/// In/outbound notification sizes.
+	pub notifications_sizes: HistogramVec,
+}
+
+impl Metrics {
+	fn register(registry: &Registry) -> Result<Self, PrometheusError> {
+		Ok(Self {
+			notifications_sizes: prometheus::register(
+				HistogramVec::new(
+					HistogramOpts {
+						common_opts: Opts::new(
+							"substrate_sub_libp2p_notifications_sizes",
+							"Sizes of the notifications send to and received from all nodes",
+						),
+						buckets: prometheus::exponential_buckets(64.0, 4.0, 8)
+							.expect("parameters are always valid values; qed"),
+					},
+					&["direction", "protocol"],
+				)?,
+				registry,
+			)?,
+			notifications_streams_closed_total: prometheus::register(
+				CounterVec::new(
+					Opts::new(
+						"substrate_sub_libp2p_notifications_streams_closed_total",
+						"Total number of notification substreams that have been closed",
+					),
+					&["protocol"],
+				)?,
+				registry,
+			)?,
+			notifications_streams_opened_total: prometheus::register(
+				CounterVec::new(
+					Opts::new(
+						"substrate_sub_libp2p_notifications_streams_opened_total",
+						"Total number of notification substreams that have been opened",
+					),
+					&["protocol"],
+				)?,
+				registry,
+			)?,
+		})
+	}
+}
+
+/// Register metrics.
+pub fn register(registry: &Registry) -> Result<Metrics, PrometheusError> {
+	Metrics::register(registry)
+}
+
+/// Register an opened substream with Prometheus.
+pub fn register_substream_opened(metrics: &Option<Metrics>, protocol: &ProtocolName) {
+	if let Some(metrics) = metrics {
+		metrics.notifications_streams_opened_total.with_label_values(&[&protocol]).inc();
+	}
+}
+
+/// Register a closed substream with Prometheus.
+pub fn register_substream_closed(metrics: &Option<Metrics>, protocol: &ProtocolName) {
+	if let Some(metrics) = metrics {
+		metrics
+			.notifications_streams_closed_total
+			.with_label_values(&[&protocol[..]])
+			.inc();
+	}
+}
+
+/// Register a sent notification with Prometheus.
+pub fn register_notification_sent(
+	metrics: &Option<Arc<Metrics>>,
+	protocol: &ProtocolName,
+	size: usize,
+) {
+	if let Some(metrics) = metrics {
+		metrics
+			.notifications_sizes
+			.with_label_values(&["out", protocol])
+			.observe(size as f64);
+	}
+}
+
+/// Register a received notification with Prometheus.
+pub fn register_notification_received(
+	metrics: &Option<Metrics>,
+	protocol: &ProtocolName,
+	size: usize,
+) {
+	if let Some(metrics) = metrics {
+		metrics
+			.notifications_sizes
+			.with_label_values(&["in", protocol])
+			.observe(size as f64);
+	}
+}
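+
+// A minimal sketch (for illustration only) of wiring the helpers above to a registry, assuming
+// a `Registry` is available and using a hypothetical protocol name:
+//
+// let registry = Registry::new();
+// let metrics = register(&registry).ok();
+// let protocol: ProtocolName = "/example/1".into();
+// register_substream_opened(&metrics, &protocol);
+// register_notification_received(&metrics, &protocol, 128);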
diff --git a/substrate/client/network/src/protocol/notifications/service/mod.rs b/substrate/client/network/src/protocol/notifications/service/mod.rs
new file mode 100644
index 00000000000..62e6d88a3d5
--- /dev/null
+++ b/substrate/client/network/src/protocol/notifications/service/mod.rs
@@ -0,0 +1,634 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Notification service implementation.
+
+use crate::{
+	error,
+	protocol::notifications::handler::NotificationsSink,
+	service::traits::{
+		Direction, MessageSink, NotificationEvent, NotificationService, ValidationResult,
+	},
+	types::ProtocolName,
+};
+
+use futures::{
+	stream::{FuturesUnordered, Stream},
+	StreamExt,
+};
+use libp2p::PeerId;
+use parking_lot::Mutex;
+use tokio::sync::{mpsc, oneshot};
+use tokio_stream::wrappers::ReceiverStream;
+
+use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender};
+
+use std::{collections::HashMap, fmt::Debug, sync::Arc};
+
+pub(crate) mod metrics;
+
+#[cfg(test)]
+mod tests;
+
+/// Logging target for the file.
+const LOG_TARGET: &str = "sub-libp2p";
+
+/// Default command queue size.
+const COMMAND_QUEUE_SIZE: usize = 64;
+
+/// Type representing subscribers of a notification protocol.
+type Subscribers = Arc<Mutex<Vec<TracingUnboundedSender<InnerNotificationEvent>>>>;
+
+/// Type representing a distributable message sink.
+/// A detached message sink must carry the protocol name for registering metrics.
+///
+/// See documentation for [`PeerContext`] for more details.
+type NotificationSink = Arc<Mutex<(NotificationsSink, ProtocolName)>>;
+
+#[async_trait::async_trait]
+impl MessageSink for NotificationSink {
+	/// Send synchronous `notification` to the peer associated with this [`MessageSink`].
+	fn send_sync_notification(&self, notification: Vec<u8>) {
+		let sink = self.lock();
+
+		metrics::register_notification_sent(&sink.0.metrics(), &sink.1, notification.len());
+		sink.0.send_sync_notification(notification);
+	}
+
+	/// Send an asynchronous `notification` to the peer associated with this [`MessageSink`],
+	/// allowing the sender to exercise backpressure.
+	///
+	/// Returns an error if the connection to the peer is closed.
+	async fn send_async_notification(&self, notification: Vec<u8>) -> Result<(), error::Error> {
+		// The notification sink must be cloned because the lock cannot be held across `.await`.
+		// This makes the implementation less efficient, but not prohibitively so, as the same
+		// approach is also used by `NetworkService` when sending notifications.
+		let notification_len = notification.len();
+		let sink = self.lock().clone();
+		let permit = sink
+			.0
+			.reserve_notification()
+			.await
+			.map_err(|_| error::Error::ConnectionClosed)?;
+
+		permit.send(notification).map_err(|_| error::Error::ChannelClosed).map(|res| {
+			metrics::register_notification_sent(&sink.0.metrics(), &sink.1, notification_len);
+			res
+		})
+	}
+}
+
+/// Inner notification event to deal with `NotificationsSinks` without exposing that
+/// implementation detail to [`NotificationService`] consumers.
+#[derive(Debug)]
+enum InnerNotificationEvent {
+	/// Validate inbound substream.
+	ValidateInboundSubstream {
+		/// Peer ID.
+		peer: PeerId,
+
+		/// Received handshake.
+		handshake: Vec<u8>,
+
+		/// `oneshot::Sender` for sending the validation result back to `Notifications`.
+		result_tx: oneshot::Sender<ValidationResult>,
+	},
+
+	/// Notification substream open to `peer`.
+	NotificationStreamOpened {
+		/// Peer ID.
+		peer: PeerId,
+
+		/// Direction of the substream.
+		direction: Direction,
+
+		/// Received handshake.
+		handshake: Vec<u8>,
+
+		/// Negotiated fallback.
+		negotiated_fallback: Option<ProtocolName>,
+
+		/// Notification sink.
+		sink: NotificationsSink,
+	},
+
+	/// Substream was closed.
+	NotificationStreamClosed {
+		/// Peer ID.
+		peer: PeerId,
+	},
+
+	/// Notification was received from the substream.
+	NotificationReceived {
+		/// Peer ID.
+		peer: PeerId,
+
+		/// Received notification.
+		notification: Vec<u8>,
+	},
+
+	/// Notification sink has been replaced.
+	NotificationSinkReplaced {
+		/// Peer ID.
+		peer: PeerId,
+
+		/// Notification sink.
+		sink: NotificationsSink,
+	},
+}
+
+/// Notification commands.
+///
+/// Sent by the installed protocols to `Notifications` to open/close/modify substreams.
+#[derive(Debug)]
+pub enum NotificationCommand {
+	/// Instruct `Notifications` to open a substream to peer.
+	#[allow(unused)]
+	OpenSubstream(PeerId),
+
+	/// Instruct `Notifications` to close the substream to peer.
+	#[allow(unused)]
+	CloseSubstream(PeerId),
+
+	/// Set handshake for the notifications protocol.
+	SetHandshake(Vec<u8>),
+}
+
+/// Context assigned to each peer.
+///
+/// Contains `NotificationsSink` used by [`NotificationService`] to send notifications
+/// and an additional, distributable `NotificationsSink` which the protocol may acquire
+/// if it wishes to send notifications through `NotificationsSink` directly.
+///
+/// The distributable `NotificationsSink` is wrapped in an `Arc<Mutex<>>` to allow
+/// `NotificationService` to swap the underlying sink in case it's replaced.
+#[derive(Debug, Clone)]
+struct PeerContext {
+	/// Sink for sending notifications.
+	sink: NotificationsSink,
+
+	/// Distributable notification sink.
+	shared_sink: NotificationSink,
+}
+
+/// Handle that is passed on to the notifications protocol.
+#[derive(Debug)]
+pub struct NotificationHandle {
+	/// Protocol name.
+	protocol: ProtocolName,
+
+	/// TX channel for sending commands to `Notifications`.
+	tx: mpsc::Sender<NotificationCommand>,
+
+	/// RX channel for receiving events from `Notifications`.
+	rx: TracingUnboundedReceiver<InnerNotificationEvent>,
+
+	/// All subscribers of `NotificationEvent`s.
+	subscribers: Subscribers,
+
+	/// Connected peers.
+	peers: HashMap<PeerId, PeerContext>,
+}
+
+impl NotificationHandle {
+	/// Create new [`NotificationHandle`].
+	fn new(
+		protocol: ProtocolName,
+		tx: mpsc::Sender<NotificationCommand>,
+		rx: TracingUnboundedReceiver<InnerNotificationEvent>,
+		subscribers: Arc<Mutex<Vec<TracingUnboundedSender<InnerNotificationEvent>>>>,
+	) -> Self {
+		Self { protocol, tx, rx, subscribers, peers: HashMap::new() }
+	}
+}
+
+#[async_trait::async_trait]
+impl NotificationService for NotificationHandle {
+	/// Instruct `Notifications` to open a new substream for `peer`.
+	async fn open_substream(&mut self, _peer: PeerId) -> Result<(), ()> {
+		todo!("support for opening substreams not implemented yet");
+	}
+
+	/// Instruct `Notifications` to close substream for `peer`.
+	async fn close_substream(&mut self, _peer: PeerId) -> Result<(), ()> {
+		todo!("support for closing substreams not implemented yet, call `NetworkService::disconnect_peer()` instead");
+	}
+
+	/// Send synchronous `notification` to `peer`.
+	fn send_sync_notification(&self, peer: &PeerId, notification: Vec<u8>) {
+		if let Some(info) = self.peers.get(&peer) {
+			metrics::register_notification_sent(
+				&info.sink.metrics(),
+				&self.protocol,
+				notification.len(),
+			);
+
+			let _ = info.sink.send_sync_notification(notification);
+		}
+	}
+
+	/// Send asynchronous `notification` to `peer`, allowing sender to exercise backpressure.
+	async fn send_async_notification(
+		&self,
+		peer: &PeerId,
+		notification: Vec<u8>,
+	) -> Result<(), error::Error> {
+		let notification_len = notification.len();
+		let sink = &self.peers.get(&peer).ok_or_else(|| error::Error::PeerDoesntExist(*peer))?.sink;
+
+		sink.reserve_notification()
+			.await
+			.map_err(|_| error::Error::ConnectionClosed)?
+			.send(notification)
+			.map_err(|_| error::Error::ChannelClosed)
+			.map(|res| {
+				metrics::register_notification_sent(
+					&sink.metrics(),
+					&self.protocol,
+					notification_len,
+				);
+				res
+			})
+	}
+
+	/// Set handshake for the notification protocol replacing the old handshake.
+	async fn set_handshake(&mut self, handshake: Vec<u8>) -> Result<(), ()> {
+		log::trace!(target: LOG_TARGET, "{}: set handshake to {handshake:?}", self.protocol);
+
+		self.tx.send(NotificationCommand::SetHandshake(handshake)).await.map_err(|_| ())
+	}
+
+	/// Non-blocking variant of `set_handshake()` that attempts to update the handshake
+	/// and returns an error if the channel is blocked.
+	///
+	/// Technically the function can return an error if the channel to `Notifications` is closed
+	/// but that doesn't happen under normal operation.
+	fn try_set_handshake(&mut self, handshake: Vec<u8>) -> Result<(), ()> {
+		self.tx.try_send(NotificationCommand::SetHandshake(handshake)).map_err(|_| ())
+	}
+
+	/// Get next event from the `Notifications` event stream.
+	async fn next_event(&mut self) -> Option<NotificationEvent> {
+		loop {
+			match self.rx.next().await? {
+				InnerNotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx } =>
+					return Some(NotificationEvent::ValidateInboundSubstream {
+						peer,
+						handshake,
+						result_tx,
+					}),
+				InnerNotificationEvent::NotificationStreamOpened {
+					peer,
+					handshake,
+					negotiated_fallback,
+					direction,
+					sink,
+				} => {
+					self.peers.insert(
+						peer,
+						PeerContext {
+							sink: sink.clone(),
+							shared_sink: Arc::new(Mutex::new((sink, self.protocol.clone()))),
+						},
+					);
+					return Some(NotificationEvent::NotificationStreamOpened {
+						peer,
+						handshake,
+						direction,
+						negotiated_fallback,
+					})
+				},
+				InnerNotificationEvent::NotificationStreamClosed { peer } => {
+					self.peers.remove(&peer);
+					return Some(NotificationEvent::NotificationStreamClosed { peer })
+				},
+				InnerNotificationEvent::NotificationReceived { peer, notification } =>
+					return Some(NotificationEvent::NotificationReceived { peer, notification }),
+				InnerNotificationEvent::NotificationSinkReplaced { peer, sink } => {
+					match self.peers.get_mut(&peer) {
+						None => log::error!(
+							target: LOG_TARGET,
+							"{}: notification sink replaced for {peer} but peer does not exist",
+							self.protocol
+						),
+						Some(context) => {
+							context.sink = sink.clone();
+							*context.shared_sink.lock() = (sink.clone(), self.protocol.clone());
+						},
+					}
+				},
+			}
+		}
+	}
+
+	/// Clone [`NotificationService`].
+	fn clone(&mut self) -> Result<Box<dyn NotificationService>, ()> {
+		let mut subscribers = self.subscribers.lock();
+		let (event_tx, event_rx) = tracing_unbounded("mpsc-notification-to-protocol", 100_000);
+		subscribers.push(event_tx);
+
+		Ok(Box::new(NotificationHandle {
+			protocol: self.protocol.clone(),
+			tx: self.tx.clone(),
+			rx: event_rx,
+			peers: self.peers.clone(),
+			subscribers: self.subscribers.clone(),
+		}))
+	}
+
+	/// Get protocol name.
+	fn protocol(&self) -> &ProtocolName {
+		&self.protocol
+	}
+
+	/// Get message sink of the peer.
+	fn message_sink(&self, peer: &PeerId) -> Option<Box<dyn MessageSink>> {
+		match self.peers.get(peer) {
+			Some(context) => Some(Box::new(context.shared_sink.clone())),
+			None => None,
+		}
+	}
+}
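+
+// A minimal sketch (for illustration only) of detaching a standalone sender through
+// `message_sink()`, assuming `service` is a `NotificationService` and `peer` is connected:
+//
+// if let Some(sink) = service.message_sink(&peer) {
+// 	sink.send_sync_notification(b"ping".to_vec());
+// }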
+
+/// Channel pair which allows `Notifications` to interact with a protocol.
+#[derive(Debug)]
+pub struct ProtocolHandlePair {
+	/// Protocol name.
+	protocol: ProtocolName,
+
+	/// Subscribers of the notification protocol events.
+	subscribers: Subscribers,
+
+	/// Receiver for notification commands received from the protocol implementation.
+	rx: mpsc::Receiver<NotificationCommand>,
+}
+
+impl ProtocolHandlePair {
+	/// Create new [`ProtocolHandlePair`].
+	fn new(
+		protocol: ProtocolName,
+		subscribers: Subscribers,
+		rx: mpsc::Receiver<NotificationCommand>,
+	) -> Self {
+		Self { protocol, subscribers, rx }
+	}
+
+	/// Consume `self` and split the [`ProtocolHandlePair`] into a handle for sending events
+	/// to the protocol and a stream of commands received from the protocol.
+	pub(crate) fn split(
+		self,
+	) -> (ProtocolHandle, Box<dyn Stream<Item = NotificationCommand> + Send + Unpin>) {
+		(
+			ProtocolHandle::new(self.protocol, self.subscribers),
+			Box::new(ReceiverStream::new(self.rx)),
+		)
+	}
+}
+
+/// Handle that is passed on to `Notifications` and allows it to directly communicate
+/// with the protocol.
+#[derive(Debug, Clone)]
+pub(crate) struct ProtocolHandle {
+	/// Protocol name.
+	protocol: ProtocolName,
+
+	/// Subscribers of the notification protocol.
+	subscribers: Subscribers,
+
+	/// Number of connected peers.
+	num_peers: usize,
+
+	/// Delegate validation to `Peerset`.
+	delegate_to_peerset: bool,
+
+	/// Prometheus metrics.
+	metrics: Option<metrics::Metrics>,
+}
+
+pub(crate) enum ValidationCallResult {
+	WaitForValidation(oneshot::Receiver<ValidationResult>),
+	Delegated,
+}
+
+impl ProtocolHandle {
+	/// Create new [`ProtocolHandle`].
+	fn new(protocol: ProtocolName, subscribers: Subscribers) -> Self {
+		Self { protocol, subscribers, num_peers: 0usize, metrics: None, delegate_to_peerset: false }
+	}
+
+	/// Set metrics.
+	pub fn set_metrics(&mut self, metrics: Option<metrics::Metrics>) {
+		self.metrics = metrics;
+	}
+
+	/// Delegate validation to `Peerset`.
+	///
+	/// Protocols that do not perform any validation of their own and rely solely on `Peerset`
+	/// can disable protocol-side validation entirely by delegating it to `Peerset`.
+	pub fn delegate_to_peerset(&mut self, delegate: bool) {
+		self.delegate_to_peerset = delegate;
+	}
+
+	/// Report to the protocol that a substream has been opened and it must be validated by the
+	/// protocol.
+	///
+	/// Return `oneshot::Receiver` which allows `Notifications` to poll for the validation result
+	/// from protocol.
+	pub fn report_incoming_substream(
+		&self,
+		peer: PeerId,
+		handshake: Vec<u8>,
+	) -> Result<ValidationCallResult, ()> {
+		let subscribers = self.subscribers.lock();
+
+		log::trace!(
+			target: LOG_TARGET,
+			"{}: report incoming substream for {peer}, handshake {handshake:?}",
+			self.protocol
+		);
+
+		if self.delegate_to_peerset {
+			return Ok(ValidationCallResult::Delegated)
+		}
+
+		// if there is only one subscriber, `Notifications` can wait directly on the
+		// `oneshot::channel()`'s RX half without indirection
+		if subscribers.len() == 1 {
+			let (result_tx, rx) = oneshot::channel();
+			return subscribers[0]
+				.unbounded_send(InnerNotificationEvent::ValidateInboundSubstream {
+					peer,
+					handshake,
+					result_tx,
+				})
+				.map(|_| ValidationCallResult::WaitForValidation(rx))
+				.map_err(|_| ())
+		}
+
+		// if there are multiple subscribers, create a task which waits for all of the
+		// validations to finish and returns the combined result to `Notifications`
+		let mut results: FuturesUnordered<_> = subscribers
+			.iter()
+			.filter_map(|subscriber| {
+				let (result_tx, rx) = oneshot::channel();
+
+				subscriber
+					.unbounded_send(InnerNotificationEvent::ValidateInboundSubstream {
+						peer,
+						handshake: handshake.clone(),
+						result_tx,
+					})
+					.is_ok()
+					.then_some(rx)
+			})
+			.collect();
+
+		let (tx, rx) = oneshot::channel();
+		tokio::spawn(async move {
+			while let Some(event) = results.next().await {
+				match event {
+					Err(_) | Ok(ValidationResult::Reject) =>
+						return tx.send(ValidationResult::Reject),
+					Ok(ValidationResult::Accept) => {},
+				}
+			}
+
+			return tx.send(ValidationResult::Accept)
+		});
+
+		Ok(ValidationCallResult::WaitForValidation(rx))
+	}
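+
+	// A minimal sketch (for illustration only) of how a caller might consume the result
+	// returned above, with `handle` being a `ProtocolHandle`:
+	//
+	// match handle.report_incoming_substream(peer, handshake)? {
+	// 	ValidationCallResult::Delegated => { /* `Peerset` decides on its own */ },
+	// 	ValidationCallResult::WaitForValidation(rx) => {
+	// 		// await `rx` for the protocol's `ValidationResult::Accept`/`Reject`
+	// 	},
+	// }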
+
+	/// Report to the protocol that a substream has been opened and that it can now use the handle
+	/// to send notifications to the remote peer.
+	pub fn report_substream_opened(
+		&mut self,
+		peer: PeerId,
+		direction: Direction,
+		handshake: Vec<u8>,
+		negotiated_fallback: Option<ProtocolName>,
+		sink: NotificationsSink,
+	) -> Result<(), ()> {
+		metrics::register_substream_opened(&self.metrics, &self.protocol);
+
+		let mut subscribers = self.subscribers.lock();
+		log::trace!(target: LOG_TARGET, "{}: substream opened for {peer:?}", self.protocol);
+
+		subscribers.retain(|subscriber| {
+			subscriber
+				.unbounded_send(InnerNotificationEvent::NotificationStreamOpened {
+					peer,
+					direction,
+					handshake: handshake.clone(),
+					negotiated_fallback: negotiated_fallback.clone(),
+					sink: sink.clone(),
+				})
+				.is_ok()
+		});
+		self.num_peers += 1;
+
+		Ok(())
+	}
+
+	/// Substream was closed.
+	pub fn report_substream_closed(&mut self, peer: PeerId) -> Result<(), ()> {
+		metrics::register_substream_closed(&self.metrics, &self.protocol);
+
+		let mut subscribers = self.subscribers.lock();
+		log::trace!(target: LOG_TARGET, "{}: substream closed for {peer:?}", self.protocol);
+
+		subscribers.retain(|subscriber| {
+			subscriber
+				.unbounded_send(InnerNotificationEvent::NotificationStreamClosed { peer })
+				.is_ok()
+		});
+		self.num_peers -= 1;
+
+		Ok(())
+	}
+
+	/// Notification was received from the substream.
+	pub fn report_notification_received(
+		&mut self,
+		peer: PeerId,
+		notification: Vec<u8>,
+	) -> Result<(), ()> {
+		metrics::register_notification_received(&self.metrics, &self.protocol, notification.len());
+
+		let mut subscribers = self.subscribers.lock();
+		log::trace!(target: LOG_TARGET, "{}: notification received from {peer:?}", self.protocol);
+
+		subscribers.retain(|subscriber| {
+			subscriber
+				.unbounded_send(InnerNotificationEvent::NotificationReceived {
+					peer,
+					notification: notification.clone(),
+				})
+				.is_ok()
+		});
+
+		Ok(())
+	}
+
+	/// Notification sink was replaced.
+	pub fn report_notification_sink_replaced(
+		&mut self,
+		peer: PeerId,
+		sink: NotificationsSink,
+	) -> Result<(), ()> {
+		let mut subscribers = self.subscribers.lock();
+
+		log::trace!(
+			target: LOG_TARGET,
+			"{}: notification sink replaced for {peer:?}",
+			self.protocol
+		);
+
+		subscribers.retain(|subscriber| {
+			subscriber
+				.unbounded_send(InnerNotificationEvent::NotificationSinkReplaced {
+					peer,
+					sink: sink.clone(),
+				})
+				.is_ok()
+		});
+
+		Ok(())
+	}
+
+	/// Get the number of connected peers.
+	pub fn num_peers(&self) -> usize {
+		self.num_peers
+	}
+}
+
+/// Create new (protocol, notification) handle pair.
+///
+/// Handle pair allows `Notifications` and the protocol to communicate with each other directly.
+pub fn notification_service(
+	protocol: ProtocolName,
+) -> (ProtocolHandlePair, Box<dyn NotificationService>) {
+	let (cmd_tx, cmd_rx) = mpsc::channel(COMMAND_QUEUE_SIZE);
+	let (event_tx, event_rx) = tracing_unbounded("mpsc-notification-to-protocol", 100_000);
+	let subscribers = Arc::new(Mutex::new(vec![event_tx]));
+
+	(
+		ProtocolHandlePair::new(protocol.clone(), subscribers.clone(), cmd_rx),
+		Box::new(NotificationHandle::new(protocol.clone(), cmd_tx, event_rx, subscribers)),
+	)
+}
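+
+// A minimal sketch (for illustration only) of how a protocol might drive the returned
+// `NotificationService`, assuming a hypothetical protocol name "/example/1":
+//
+// let (protocol_handle_pair, mut service) = notification_service("/example/1".into());
+// // `protocol_handle_pair` is handed over to `Notifications` when the protocol is installed.
+// while let Some(event) = service.next_event().await {
+// 	match event {
+// 		NotificationEvent::ValidateInboundSubstream { result_tx, .. } => {
+// 			let _ = result_tx.send(ValidationResult::Accept);
+// 		},
+// 		NotificationEvent::NotificationReceived { peer, notification } => {
+// 			// handle `notification` received from `peer`
+// 		},
+// 		_ => {},
+// 	}
+// }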
diff --git a/substrate/client/network/src/protocol/notifications/service/tests.rs b/substrate/client/network/src/protocol/notifications/service/tests.rs
new file mode 100644
index 00000000000..02ba9e1711c
--- /dev/null
+++ b/substrate/client/network/src/protocol/notifications/service/tests.rs
@@ -0,0 +1,839 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use super::*;
+use crate::protocol::notifications::handler::{
+	NotificationsSinkMessage, ASYNC_NOTIFICATIONS_BUFFER_SIZE,
+};
+
+use std::future::Future;
+
+#[tokio::test]
+async fn validate_and_accept_substream() {
+	let (proto, mut notif) = notification_service("/proto/1".into());
+	let (handle, _stream) = proto.split();
+
+	let peer_id = PeerId::random();
+	let ValidationCallResult::WaitForValidation(result_rx) =
+		handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap()
+	else {
+		panic!("peerset not enabled");
+	};
+
+	if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
+		notif.next_event().await
+	{
+		assert_eq!(peer_id, peer);
+		assert_eq!(handshake, vec![1, 3, 3, 7]);
+		let _ = result_tx.send(ValidationResult::Accept).unwrap();
+	} else {
+		panic!("invalid event received");
+	}
+
+	assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept);
+}
+
+#[tokio::test]
+async fn substream_opened() {
+	let (proto, mut notif) = notification_service("/proto/1".into());
+	let (sink, _, _) = NotificationsSink::new(PeerId::random());
+	let (mut handle, _stream) = proto.split();
+
+	let peer_id = PeerId::random();
+	handle
+		.report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink)
+		.unwrap();
+
+	if let Some(NotificationEvent::NotificationStreamOpened {
+		peer,
+		negotiated_fallback,
+		handshake,
+		direction,
+	}) = notif.next_event().await
+	{
+		assert_eq!(peer_id, peer);
+		assert_eq!(negotiated_fallback, None);
+		assert_eq!(handshake, vec![1, 3, 3, 7]);
+		assert_eq!(direction, Direction::Inbound);
+	} else {
+		panic!("invalid event received");
+	}
+}
+
+#[tokio::test]
+async fn send_sync_notification() {
+	let (proto, mut notif) = notification_service("/proto/1".into());
+	let (sink, _, mut sync_rx) = NotificationsSink::new(PeerId::random());
+	let (mut handle, _stream) = proto.split();
+	let peer_id = PeerId::random();
+
+	// validate inbound substream
+	let ValidationCallResult::WaitForValidation(result_rx) =
+		handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap()
+	else {
+		panic!("peerset not enabled");
+	};
+
+	if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
+		notif.next_event().await
+	{
+		assert_eq!(peer_id, peer);
+		assert_eq!(handshake, vec![1, 3, 3, 7]);
+		let _ = result_tx.send(ValidationResult::Accept).unwrap();
+	} else {
+		panic!("invalid event received");
+	}
+	assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept);
+
+	// report that a substream has been opened
+	handle
+		.report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink)
+		.unwrap();
+
+	if let Some(NotificationEvent::NotificationStreamOpened {
+		peer,
+		negotiated_fallback,
+		handshake,
+		direction,
+	}) = notif.next_event().await
+	{
+		assert_eq!(peer_id, peer);
+		assert_eq!(negotiated_fallback, None);
+		assert_eq!(handshake, vec![1, 3, 3, 7]);
+		assert_eq!(direction, Direction::Inbound);
+	} else {
+		panic!("invalid event received");
+	}
+
+	notif.send_sync_notification(&peer_id, vec![1, 3, 3, 8]);
+	assert_eq!(
+		sync_rx.next().await,
+		Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 8] })
+	);
+}
+
+#[tokio::test]
+async fn send_async_notification() {
+	let (proto, mut notif) = notification_service("/proto/1".into());
+	let (sink, mut async_rx, _) = NotificationsSink::new(PeerId::random());
+	let (mut handle, _stream) = proto.split();
+	let peer_id = PeerId::random();
+
+	// validate inbound substream
+	let ValidationCallResult::WaitForValidation(result_rx) =
+		handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap()
+	else {
+		panic!("peerset not enabled");
+	};
+
+	if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
+		notif.next_event().await
+	{
+		assert_eq!(peer_id, peer);
+		assert_eq!(handshake, vec![1, 3, 3, 7]);
+		let _ = result_tx.send(ValidationResult::Accept).unwrap();
+	} else {
+		panic!("invalid event received");
+	}
+	assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept);
+
+	// report that a substream has been opened
+	handle
+		.report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink)
+		.unwrap();
+
+	if let Some(NotificationEvent::NotificationStreamOpened {
+		peer,
+		negotiated_fallback,
+		handshake,
+		direction,
+	}) = notif.next_event().await
+	{
+		assert_eq!(peer_id, peer);
+		assert_eq!(negotiated_fallback, None);
+		assert_eq!(handshake, vec![1, 3, 3, 7]);
+		assert_eq!(direction, Direction::Inbound);
+	} else {
+		panic!("invalid event received");
+	}
+
+	notif.send_async_notification(&peer_id, vec![1, 3, 3, 9]).await.unwrap();
+	assert_eq!(
+		async_rx.next().await,
+		Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 9] })
+	);
+}
+
+#[tokio::test]
+async fn send_sync_notification_to_non_existent_peer() {
+	let (proto, notif) = notification_service("/proto/1".into());
+	let (_sink, _, _sync_rx) = NotificationsSink::new(PeerId::random());
+	let (_handle, _stream) = proto.split();
+	let peer = PeerId::random();
+
+	// as per the original implementation, the call doesn't fail
+	notif.send_sync_notification(&peer, vec![1, 3, 3, 7])
+}
+
+#[tokio::test]
+async fn send_async_notification_to_non_existent_peer() {
+	let (proto, notif) = notification_service("/proto/1".into());
+	let (_sink, _, _sync_rx) = NotificationsSink::new(PeerId::random());
+	let (_handle, _stream) = proto.split();
+	let peer = PeerId::random();
+
+	if let Err(error::Error::PeerDoesntExist(peer_id)) =
+		notif.send_async_notification(&peer, vec![1, 3, 3, 7]).await
+	{
+		assert_eq!(peer, peer_id);
+	} else {
+		panic!("invalid error received from `send_async_notification()`");
+	}
+}
+
+#[tokio::test]
+async fn receive_notification() {
+	let (proto, mut notif) = notification_service("/proto/1".into());
+	let (sink, _, _sync_rx) = NotificationsSink::new(PeerId::random());
+	let (mut handle, _stream) = proto.split();
+	let peer_id = PeerId::random();
+
+	// validate inbound substream
+	let ValidationCallResult::WaitForValidation(result_rx) =
+		handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap()
+	else {
+		panic!("peerset not enabled");
+	};
+
+	if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
+		notif.next_event().await
+	{
+		assert_eq!(peer_id, peer);
+		assert_eq!(handshake, vec![1, 3, 3, 7]);
+		let _ = result_tx.send(ValidationResult::Accept).unwrap();
+	} else {
+		panic!("invalid event received");
+	}
+	assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept);
+
+	// report that a substream has been opened
+	handle
+		.report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink)
+		.unwrap();
+
+	if let Some(NotificationEvent::NotificationStreamOpened {
+		peer,
+		negotiated_fallback,
+		handshake,
+		direction,
+	}) = notif.next_event().await
+	{
+		assert_eq!(peer_id, peer);
+		assert_eq!(negotiated_fallback, None);
+		assert_eq!(handshake, vec![1, 3, 3, 7]);
+		assert_eq!(direction, Direction::Inbound);
+	} else {
+		panic!("invalid event received");
+	}
+
+	// notification is received
+	handle.report_notification_received(peer_id, vec![1, 3, 3, 8]).unwrap();
+
+	if let Some(NotificationEvent::NotificationReceived { peer, notification }) =
+		notif.next_event().await
+	{
+		assert_eq!(peer_id, peer);
+		assert_eq!(notification, vec![1, 3, 3, 8]);
+	} else {
+		panic!("invalid event received");
+	}
+}
+
+#[tokio::test]
+async fn backpressure_works() {
+	let (proto, mut notif) = notification_service("/proto/1".into());
+	let (sink, mut async_rx, _) = NotificationsSink::new(PeerId::random());
+	let (mut handle, _stream) = proto.split();
+	let peer_id = PeerId::random();
+
+	// validate inbound substream
+	let ValidationCallResult::WaitForValidation(result_rx) =
+		handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap()
+	else {
+		panic!("peerset not enabled");
+	};
+
+	if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
+		notif.next_event().await
+	{
+		assert_eq!(peer_id, peer);
+		assert_eq!(handshake, vec![1, 3, 3, 7]);
+		let _ = result_tx.send(ValidationResult::Accept).unwrap();
+	} else {
+		panic!("invalid event received");
+	}
+	assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept);
+
+	// report that a substream has been opened
+	handle
+		.report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink)
+		.unwrap();
+
+	if let Some(NotificationEvent::NotificationStreamOpened {
+		peer,
+		negotiated_fallback,
+		handshake,
+		direction,
+	}) = notif.next_event().await
+	{
+		assert_eq!(peer_id, peer);
+		assert_eq!(negotiated_fallback, None);
+		assert_eq!(handshake, vec![1, 3, 3, 7]);
+		assert_eq!(direction, Direction::Inbound);
+	} else {
+		panic!("invalid event received");
+	}
+
+	// fill the message buffer with messages
+	for i in 0..=ASYNC_NOTIFICATIONS_BUFFER_SIZE {
+		assert!(futures::poll!(notif.send_async_notification(&peer_id, vec![1, 3, 3, i as u8]))
+			.is_ready());
+	}
+
+	// try to send one more message and verify that the call blocks
+	assert!(futures::poll!(notif.send_async_notification(&peer_id, vec![1, 3, 3, 9])).is_pending());
+
+	// release one slot from the buffer for new message
+	assert_eq!(
+		async_rx.next().await,
+		Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 0] })
+	);
+
+	// verify that a message can be sent
+	assert!(futures::poll!(notif.send_async_notification(&peer_id, vec![1, 3, 3, 9])).is_ready());
+}
+
+#[tokio::test]
+async fn peer_disconnects_then_sync_notification_is_sent() {
+	let (proto, mut notif) = notification_service("/proto/1".into());
+	let (sink, _, sync_rx) = NotificationsSink::new(PeerId::random());
+	let (mut handle, _stream) = proto.split();
+	let peer_id = PeerId::random();
+
+	// validate inbound substream
+	let ValidationCallResult::WaitForValidation(result_rx) =
+		handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap()
+	else {
+		panic!("peerset not enabled");
+	};
+
+	if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
+		notif.next_event().await
+	{
+		assert_eq!(peer_id, peer);
+		assert_eq!(handshake, vec![1, 3, 3, 7]);
+		let _ = result_tx.send(ValidationResult::Accept).unwrap();
+	} else {
+		panic!("invalid event received");
+	}
+	assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept);
+
+	// report that a substream has been opened
+	handle
+		.report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink)
+		.unwrap();
+
+	if let Some(NotificationEvent::NotificationStreamOpened {
+		peer,
+		negotiated_fallback,
+		handshake,
+		direction,
+	}) = notif.next_event().await
+	{
+		assert_eq!(peer_id, peer);
+		assert_eq!(negotiated_fallback, None);
+		assert_eq!(handshake, vec![1, 3, 3, 7]);
+		assert_eq!(direction, Direction::Inbound);
+	} else {
+		panic!("invalid event received");
+	}
+
+	// report that a substream has been closed but don't poll `notif` to receive this
+	// information
+	handle.report_substream_closed(peer_id).unwrap();
+	drop(sync_rx);
+
+	// as per documentation, error is not reported but the notification is silently dropped
+	notif.send_sync_notification(&peer_id, vec![1, 3, 3, 7]);
+}
+
+#[tokio::test]
+async fn peer_disconnects_then_async_notification_is_sent() {
+	let (proto, mut notif) = notification_service("/proto/1".into());
+	let (sink, async_rx, _) = NotificationsSink::new(PeerId::random());
+	let (mut handle, _stream) = proto.split();
+	let peer_id = PeerId::random();
+
+	// validate inbound substream
+	let ValidationCallResult::WaitForValidation(result_rx) =
+		handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap()
+	else {
+		panic!("peerset not enabled");
+	};
+
+	if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
+		notif.next_event().await
+	{
+		assert_eq!(peer_id, peer);
+		assert_eq!(handshake, vec![1, 3, 3, 7]);
+		let _ = result_tx.send(ValidationResult::Accept).unwrap();
+	} else {
+		panic!("invalid event received");
+	}
+	assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept);
+
+	// report that a substream has been opened
+	handle
+		.report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink)
+		.unwrap();
+
+	if let Some(NotificationEvent::NotificationStreamOpened {
+		peer,
+		negotiated_fallback,
+		handshake,
+		direction,
+	}) = notif.next_event().await
+	{
+		assert_eq!(peer_id, peer);
+		assert_eq!(negotiated_fallback, None);
+		assert_eq!(handshake, vec![1, 3, 3, 7]);
+		assert_eq!(direction, Direction::Inbound);
+	} else {
+		panic!("invalid event received");
+	}
+
+	// report that a substream has been closed but don't poll `notif` to receive this
+	// information
+	handle.report_substream_closed(peer_id).unwrap();
+	drop(async_rx);
+
+	// as per documentation, the call reports an error because the connection is closed
+	if let Err(error::Error::ConnectionClosed) =
+		notif.send_async_notification(&peer_id, vec![1, 3, 3, 7]).await
+	{
+	} else {
+		panic!("invalid state after calling `send_async_notification()` on closed connection")
+	}
+}
+
+#[tokio::test]
+async fn cloned_service_opening_substream_works() {
+	let (proto, mut notif1) = notification_service("/proto/1".into());
+	let (_sink, _async_rx, _) = NotificationsSink::new(PeerId::random());
+	let (handle, _stream) = proto.split();
+	let mut notif2 = notif1.clone().unwrap();
+	let peer_id = PeerId::random();
+
+	// validate inbound substream
+	let ValidationCallResult::WaitForValidation(mut result_rx) =
+		handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap()
+	else {
+		panic!("peerset not enabled");
+	};
+
+	// verify that `notif1` gets the event
+	if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
+		notif1.next_event().await
+	{
+		assert_eq!(peer_id, peer);
+		assert_eq!(handshake, vec![1, 3, 3, 7]);
+		let _ = result_tx.send(ValidationResult::Accept).unwrap();
+	} else {
+		panic!("invalid event received");
+	}
+
+	// verify that because only one listener has sent its result so far, the result is
+	// still pending
+	assert!(result_rx.try_recv().is_err());
+
+	// verify that `notif2` also gets the event
+	if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
+		notif2.next_event().await
+	{
+		assert_eq!(peer_id, peer);
+		assert_eq!(handshake, vec![1, 3, 3, 7]);
+		result_tx.send(ValidationResult::Accept).unwrap();
+	} else {
+		panic!("invalid event received");
+	}
+
+	assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept);
+}
+
+#[tokio::test]
+async fn cloned_service_one_service_rejects_substream() {
+	let (proto, mut notif1) = notification_service("/proto/1".into());
+	let (_sink, _async_rx, _) = NotificationsSink::new(PeerId::random());
+	let (handle, _stream) = proto.split();
+	let mut notif2 = notif1.clone().unwrap();
+	let mut notif3 = notif2.clone().unwrap();
+	let peer_id = PeerId::random();
+
+	// validate inbound substream
+	let ValidationCallResult::WaitForValidation(mut result_rx) =
+		handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap()
+	else {
+		panic!("peerset not enabled");
+	};
+
+	for notif in vec![&mut notif1, &mut notif2] {
+		if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
+			notif.next_event().await
+		{
+			assert_eq!(peer_id, peer);
+			assert_eq!(handshake, vec![1, 3, 3, 7]);
+			let _ = result_tx.send(ValidationResult::Accept).unwrap();
+		} else {
+			panic!("invalid event received");
+		}
+	}
+
+	// `notif3` has not yet sent its validation result
+	assert!(result_rx.try_recv().is_err());
+
+	if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
+		notif3.next_event().await
+	{
+		assert_eq!(peer_id, peer);
+		assert_eq!(handshake, vec![1, 3, 3, 7]);
+		let _ = result_tx.send(ValidationResult::Reject).unwrap();
+	} else {
+		panic!("invalid event received");
+	}
+	assert_eq!(result_rx.await.unwrap(), ValidationResult::Reject);
+}
+
+#[tokio::test]
+async fn cloned_service_opening_substream_sending_and_receiving_notifications_work() {
+	let (proto, mut notif1) = notification_service("/proto/1".into());
+	let (sink, _, mut sync_rx) = NotificationsSink::new(PeerId::random());
+	let (mut handle, _stream) = proto.split();
+	let mut notif2 = notif1.clone().unwrap();
+	let mut notif3 = notif1.clone().unwrap();
+	let peer_id = PeerId::random();
+
+	// validate inbound substream
+	let ValidationCallResult::WaitForValidation(result_rx) =
+		handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap()
+	else {
+		panic!("peerset not enabled");
+	};
+
+	for notif in vec![&mut notif1, &mut notif2, &mut notif3] {
+		// accept the inbound substream for all services
+		if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
+			notif.next_event().await
+		{
+			assert_eq!(peer_id, peer);
+			assert_eq!(handshake, vec![1, 3, 3, 7]);
+			let _ = result_tx.send(ValidationResult::Accept).unwrap();
+		} else {
+			panic!("invalid event received");
+		}
+	}
+	assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept);
+
+	// report that the notification stream has been opened
+	handle
+		.report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink)
+		.unwrap();
+
+	for notif in vec![&mut notif1, &mut notif2, &mut notif3] {
+		if let Some(NotificationEvent::NotificationStreamOpened {
+			peer,
+			negotiated_fallback,
+			handshake,
+			direction,
+		}) = notif.next_event().await
+		{
+			assert_eq!(peer_id, peer);
+			assert_eq!(negotiated_fallback, None);
+			assert_eq!(handshake, vec![1, 3, 3, 7]);
+			assert_eq!(direction, Direction::Inbound);
+		} else {
+			panic!("invalid event received");
+		}
+	}
+	// receive a notification from peer and verify all services receive it
+	handle.report_notification_received(peer_id, vec![1, 3, 3, 8]).unwrap();
+
+	for notif in vec![&mut notif1, &mut notif2, &mut notif3] {
+		if let Some(NotificationEvent::NotificationReceived { peer, notification }) =
+			notif.next_event().await
+		{
+			assert_eq!(peer_id, peer);
+			assert_eq!(notification, vec![1, 3, 3, 8]);
+		} else {
+			panic!("invalid event received");
+		}
+	}
+
+	for (i, notif) in vec![&mut notif1, &mut notif2, &mut notif3].iter().enumerate() {
+		// send notification from each service and verify peer receives it
+		notif.send_sync_notification(&peer_id, vec![1, 3, 3, i as u8]);
+		assert_eq!(
+			sync_rx.next().await,
+			Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, i as u8] })
+		);
+	}
+
+	// close the substream for peer and verify all services receive the event
+	handle.report_substream_closed(peer_id).unwrap();
+
+	for notif in vec![&mut notif1, &mut notif2, &mut notif3] {
+		if let Some(NotificationEvent::NotificationStreamClosed { peer }) = notif.next_event().await
+		{
+			assert_eq!(peer_id, peer);
+		} else {
+			panic!("invalid event received");
+		}
+	}
+}
+
+#[tokio::test]
+async fn sending_notifications_using_notifications_sink_works() {
+	let (proto, mut notif) = notification_service("/proto/1".into());
+	let (sink, mut async_rx, mut sync_rx) = NotificationsSink::new(PeerId::random());
+	let (mut handle, _stream) = proto.split();
+	let peer_id = PeerId::random();
+
+	// validate inbound substream
+	let ValidationCallResult::WaitForValidation(result_rx) =
+		handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap()
+	else {
+		panic!("peerset not enabled");
+	};
+
+	if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
+		notif.next_event().await
+	{
+		assert_eq!(peer_id, peer);
+		assert_eq!(handshake, vec![1, 3, 3, 7]);
+		let _ = result_tx.send(ValidationResult::Accept).unwrap();
+	} else {
+		panic!("invalid event received");
+	}
+	assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept);
+
+	// report that a substream has been opened
+	handle
+		.report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink)
+		.unwrap();
+
+	if let Some(NotificationEvent::NotificationStreamOpened {
+		peer,
+		negotiated_fallback,
+		handshake,
+		direction,
+	}) = notif.next_event().await
+	{
+		assert_eq!(peer_id, peer);
+		assert_eq!(negotiated_fallback, None);
+		assert_eq!(handshake, vec![1, 3, 3, 7]);
+		assert_eq!(direction, Direction::Inbound);
+	} else {
+		panic!("invalid event received");
+	}
+
+	// get a copy of the notification sink and send a synchronous notification using it.
+	let sink = notif.message_sink(&peer_id).unwrap();
+	sink.send_sync_notification(vec![1, 3, 3, 6]);
+
+	// send an asynchronous notification using the acquired notifications sink.
+	let _ = sink.send_async_notification(vec![1, 3, 3, 7]).await.unwrap();
+
+	assert_eq!(
+		sync_rx.next().await,
+		Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 6] }),
+	);
+	assert_eq!(
+		async_rx.next().await,
+		Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 7] }),
+	);
+
+	// send notifications using the stored notification sink as well.
+	notif.send_sync_notification(&peer_id, vec![1, 3, 3, 8]);
+	notif.send_async_notification(&peer_id, vec![1, 3, 3, 9]).await.unwrap();
+
+	assert_eq!(
+		sync_rx.next().await,
+		Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 8] }),
+	);
+	assert_eq!(
+		async_rx.next().await,
+		Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 9] }),
+	);
+}
+
+#[test]
+fn try_to_get_notifications_sink_for_non_existent_peer() {
+	let (_proto, notif) = notification_service("/proto/1".into());
+	assert!(notif.message_sink(&PeerId::random()).is_none());
+}
+
+#[tokio::test]
+async fn notification_sink_replaced() {
+	let (proto, mut notif) = notification_service("/proto/1".into());
+	let (sink, mut async_rx, mut sync_rx) = NotificationsSink::new(PeerId::random());
+	let (mut handle, _stream) = proto.split();
+	let peer_id = PeerId::random();
+
+	// validate inbound substream
+	let ValidationCallResult::WaitForValidation(result_rx) =
+		handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap()
+	else {
+		panic!("peerset not enabled");
+	};
+
+	if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
+		notif.next_event().await
+	{
+		assert_eq!(peer_id, peer);
+		assert_eq!(handshake, vec![1, 3, 3, 7]);
+		let _ = result_tx.send(ValidationResult::Accept).unwrap();
+	} else {
+		panic!("invalid event received");
+	}
+	assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept);
+
+	// report that a substream has been opened
+	handle
+		.report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink)
+		.unwrap();
+
+	if let Some(NotificationEvent::NotificationStreamOpened {
+		peer,
+		negotiated_fallback,
+		handshake,
+		direction,
+	}) = notif.next_event().await
+	{
+		assert_eq!(peer_id, peer);
+		assert_eq!(negotiated_fallback, None);
+		assert_eq!(handshake, vec![1, 3, 3, 7]);
+		assert_eq!(direction, Direction::Inbound);
+	} else {
+		panic!("invalid event received");
+	}
+
+	// get a copy of the notification sink and send a synchronous notification using it.
+	let sink = notif.message_sink(&peer_id).unwrap();
+	sink.send_sync_notification(vec![1, 3, 3, 6]);
+
+	// send an asynchronous notification using the acquired notifications sink.
+	let _ = sink.send_async_notification(vec![1, 3, 3, 7]).await.unwrap();
+
+	assert_eq!(
+		sync_rx.next().await,
+		Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 6] }),
+	);
+	assert_eq!(
+		async_rx.next().await,
+		Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 7] }),
+	);
+
+	// send notifications using the stored notification sink as well.
+	notif.send_sync_notification(&peer_id, vec![1, 3, 3, 8]);
+	notif.send_async_notification(&peer_id, vec![1, 3, 3, 9]).await.unwrap();
+
+	assert_eq!(
+		sync_rx.next().await,
+		Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 8] }),
+	);
+	assert_eq!(
+		async_rx.next().await,
+		Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 9] }),
+	);
+
+	// the initial connection was closed and `Notifications` switched to a secondary connection,
+	// emitting `CustomProtocolReplaced` which informs the local `NotificationService` that
+	// the notification sink was replaced.
+	let (new_sink, mut new_async_rx, mut new_sync_rx) = NotificationsSink::new(PeerId::random());
+	handle.report_notification_sink_replaced(peer_id, new_sink).unwrap();
+
+	// drop the old sinks and poll `notif` once to register the sink replacement
+	drop(sync_rx);
+	drop(async_rx);
+
+	futures::future::poll_fn(|cx| {
+		let _ = std::pin::Pin::new(&mut notif.next_event()).poll(cx);
+		std::task::Poll::Ready(())
+	})
+	.await;
+
+	// verify that using the `NotificationService` API automatically results in using the correct
+	// sink
+	notif.send_sync_notification(&peer_id, vec![1, 3, 3, 8]);
+	notif.send_async_notification(&peer_id, vec![1, 3, 3, 9]).await.unwrap();
+
+	assert_eq!(
+		new_sync_rx.next().await,
+		Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 8] }),
+	);
+	assert_eq!(
+		new_async_rx.next().await,
+		Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 9] }),
+	);
+
+	// now send two notifications using the acquired message sink and verify that
+	// it's also updated
+	sink.send_sync_notification(vec![1, 3, 3, 6]);
+
+	// send an asynchronous notification using the acquired notifications sink.
+	let _ = sink.send_async_notification(vec![1, 3, 3, 7]).await.unwrap();
+
+	assert_eq!(
+		new_sync_rx.next().await,
+		Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 6] }),
+	);
+	assert_eq!(
+		new_async_rx.next().await,
+		Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 7] }),
+	);
+}
+
+#[tokio::test]
+async fn set_handshake() {
+	let (proto, mut notif) = notification_service("/proto/1".into());
+	let (_handle, mut stream) = proto.split();
+
+	assert!(notif.try_set_handshake(vec![1, 3, 3, 7]).is_ok());
+
+	match stream.next().await {
+		Some(NotificationCommand::SetHandshake(handshake)) => {
+			assert_eq!(handshake, vec![1, 3, 3, 7]);
+		},
+		_ => panic!("invalid event received"),
+	}
+
+	for _ in 0..COMMAND_QUEUE_SIZE {
+		assert!(notif.try_set_handshake(vec![1, 3, 3, 7]).is_ok());
+	}
+
+	assert!(notif.try_set_handshake(vec![1, 3, 3, 7]).is_err());
+}
diff --git a/substrate/client/network/src/protocol/notifications/tests.rs b/substrate/client/network/src/protocol/notifications/tests.rs
index d57c24144f5..92d269f89c3 100644
--- a/substrate/client/network/src/protocol/notifications/tests.rs
+++ b/substrate/client/network/src/protocol/notifications/tests.rs
@@ -22,6 +22,7 @@ use crate::{
 	peer_store::PeerStore,
 	protocol::notifications::{Notifications, NotificationsOut, ProtocolConfig},
 	protocol_controller::{ProtoSetConfig, ProtocolController, SetId},
+	service::traits::{NotificationEvent, ValidationResult},
 };
 
 use futures::{future::BoxFuture, prelude::*};
@@ -70,6 +71,8 @@ fn build_nodes() -> (Swarm<CustomProtoWithAddr>, Swarm<CustomProtoWithAddr>) {
 			.timeout(Duration::from_secs(20))
 			.boxed();
 
+		let (protocol_handle_pair, mut notif_service) =
+			crate::protocol::notifications::service::notification_service("/foo".into());
 		let peer_store = PeerStore::new(if index == 0 {
 			keypairs.iter().skip(1).map(|keypair| keypair.public().to_peer_id()).collect()
 		} else {
@@ -91,16 +94,22 @@ fn build_nodes() -> (Swarm<CustomProtoWithAddr>, Swarm<CustomProtoWithAddr>) {
 			Box::new(peer_store.handle()),
 		);
 
+		let (notif_handle, command_stream) = protocol_handle_pair.split();
 		let behaviour = CustomProtoWithAddr {
 			inner: Notifications::new(
 				vec![controller_handle],
 				from_controller,
-				iter::once(ProtocolConfig {
-					name: "/foo".into(),
-					fallback_names: Vec::new(),
-					handshake: Vec::new(),
-					max_notification_size: 1024 * 1024,
-				}),
+				&None,
+				iter::once((
+					ProtocolConfig {
+						name: "/foo".into(),
+						fallback_names: Vec::new(),
+						handshake: Vec::new(),
+						max_notification_size: 1024 * 1024,
+					},
+					notif_handle,
+					command_stream,
+				)),
 			),
 			peer_store_future: peer_store.run().boxed(),
 			protocol_controller_future: controller.run().boxed(),
@@ -118,6 +127,16 @@ fn build_nodes() -> (Swarm<CustomProtoWithAddr>, Swarm<CustomProtoWithAddr>) {
 		};
 
 		let runtime = tokio::runtime::Runtime::new().unwrap();
+		runtime.spawn(async move {
+			loop {
+				if let NotificationEvent::ValidateInboundSubstream { result_tx, .. } =
+					notif_service.next_event().await.unwrap()
+				{
+					result_tx.send(ValidationResult::Accept).unwrap();
+				}
+			}
+		});
+
 		let mut swarm = SwarmBuilder::with_executor(
 			transport,
 			behaviour,
diff --git a/substrate/client/network/src/protocol_controller.rs b/substrate/client/network/src/protocol_controller.rs
index 3a305011ded..4c8f119baa2 100644
--- a/substrate/client/network/src/protocol_controller.rs
+++ b/substrate/client/network/src/protocol_controller.rs
@@ -847,6 +847,7 @@ mod tests {
 	use super::*;
 	use crate::{peer_store::PeerStoreProvider, ReputationChange};
 	use libp2p::PeerId;
+	use sc_network_common::role::ObservedRole;
 	use sc_utils::mpsc::{tracing_unbounded, TryRecvError};
 	use std::collections::HashSet;
 
@@ -858,8 +859,10 @@ mod tests {
 			fn is_banned(&self, peer_id: &PeerId) -> bool;
 			fn register_protocol(&self, protocol_handle: ProtocolHandle);
 			fn report_disconnect(&mut self, peer_id: PeerId);
+			fn set_peer_role(&mut self, peer_id: &PeerId, role: ObservedRole);
 			fn report_peer(&mut self, peer_id: PeerId, change: ReputationChange);
 			fn peer_reputation(&self, peer_id: &PeerId) -> i32;
+			fn peer_role(&self, peer_id: &PeerId) -> Option<ObservedRole>;
 			fn outgoing_candidates<'a>(&self, count: usize, ignored: HashSet<&'a PeerId>) -> Vec<PeerId>;
 		}
 	}
diff --git a/substrate/client/network/src/service.rs b/substrate/client/network/src/service.rs
index c1df48ad785..43a3ab09115 100644
--- a/substrate/client/network/src/service.rs
+++ b/substrate/client/network/src/service.rs
@@ -54,6 +54,7 @@ use crate::{
 	ReputationChange,
 };
 
+use codec::DecodeAll;
 use either::Either;
 use futures::{channel::oneshot, prelude::*};
 #[allow(deprecated)]
@@ -71,10 +72,13 @@ use libp2p::{
 	Multiaddr, PeerId,
 };
 use log::{debug, error, info, trace, warn};
-use metrics::{Histogram, HistogramVec, MetricSources, Metrics};
+use metrics::{Histogram, MetricSources, Metrics};
 use parking_lot::Mutex;
 
-use sc_network_common::ExHashT;
+use sc_network_common::{
+	role::{ObservedRole, Roles},
+	ExHashT,
+};
 use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender};
 use sp_runtime::traits::Block as BlockT;
 
@@ -118,12 +122,6 @@ pub struct NetworkService<B: BlockT + 'static, H: ExHashT> {
 	bandwidth: Arc<transport::BandwidthSinks>,
 	/// Channel that sends messages to the actual worker.
 	to_worker: TracingUnboundedSender<ServiceToWorkerMsg>,
-	/// For each peer and protocol combination, an object that allows sending notifications to
-	/// that peer. Updated by the [`NetworkWorker`].
-	peers_notifications_sinks: Arc<Mutex<HashMap<(PeerId, ProtocolName), NotificationsSink>>>,
-	/// Field extracted from the [`Metrics`] struct and necessary to report the
-	/// notifications-related metrics.
-	notifications_sizes_metric: Option<HistogramVec>,
 	/// Protocol name -> `SetId` mapping for notification protocols. The map never changes after
 	/// initialization.
 	notification_protocol_ids: HashMap<ProtocolName, SetId>,
@@ -132,6 +130,8 @@ pub struct NetworkService<B: BlockT + 'static, H: ExHashT> {
 	protocol_handles: Vec<protocol_controller::ProtocolHandle>,
 	/// Shortcut to sync protocol handle (`protocol_handles[0]`).
 	sync_protocol_handle: protocol_controller::ProtocolHandle,
+	/// Handle to `PeerStore`.
+	peer_store_handle: PeerStoreHandle,
 	/// Marker to pin the `H` generic. Serves no purpose except to not break backwards
 	/// compatibility.
 	_marker: PhantomData<H>,
@@ -199,7 +199,7 @@ where
 		)?;
 		for notification_protocol in &notification_protocols {
 			ensure_addresses_consistent_with_transport(
-				notification_protocol.set_config.reserved_nodes.iter().map(|x| &x.multiaddr),
+				notification_protocol.set_config().reserved_nodes.iter().map(|x| &x.multiaddr),
 				&network_config.transport,
 			)?;
 		}
@@ -241,7 +241,7 @@ where
 					.map(|cfg| usize::try_from(cfg.max_response_size).unwrap_or(usize::MAX));
 				let notifs_max = notification_protocols
 					.iter()
-					.map(|cfg| usize::try_from(cfg.max_notification_size).unwrap_or(usize::MAX));
+					.map(|cfg| usize::try_from(cfg.max_notification_size()).unwrap_or(usize::MAX));
 
 				// A "default" max is added to cover all the other protocols: ping, identify,
 				// kademlia, block announces, and transactions.
@@ -273,7 +273,7 @@ where
 
 		// We must prepend a hardcoded default peer set to notification protocols.
 		let all_peer_sets_iter = iter::once(&network_config.default_peers_set)
-			.chain(notification_protocols.iter().map(|protocol| &protocol.set_config));
+			.chain(notification_protocols.iter().map(|protocol| protocol.set_config()));
 
 		let (protocol_handles, protocol_controllers): (Vec<_>, Vec<_>) = all_peer_sets_iter
 			.enumerate()
@@ -312,21 +312,9 @@ where
 			iter::once(&params.block_announce_config)
 				.chain(notification_protocols.iter())
 				.enumerate()
-				.map(|(index, protocol)| {
-					(protocol.notifications_protocol.clone(), SetId::from(index))
-				})
+				.map(|(index, protocol)| (protocol.protocol_name().clone(), SetId::from(index)))
 				.collect();
 
-		let protocol = Protocol::new(
-			From::from(&params.role),
-			notification_protocols.clone(),
-			params.block_announce_config,
-			params.peer_store.clone(),
-			protocol_handles.clone(),
-			from_protocol_controllers,
-			params.tx,
-		)?;
-
 		let known_addresses = {
 			// Collect all reserved nodes and bootnodes addresses.
 			let mut addresses: Vec<_> = network_config
@@ -336,7 +324,7 @@ where
 				.map(|reserved| (reserved.peer_id, reserved.multiaddr.clone()))
 				.chain(notification_protocols.iter().flat_map(|protocol| {
 					protocol
-						.set_config
+						.set_config()
 						.reserved_nodes
 						.iter()
 						.map(|reserved| (reserved.peer_id, reserved.multiaddr.clone()))
@@ -389,6 +377,16 @@ where
 		let num_connected = Arc::new(AtomicUsize::new(0));
 		let external_addresses = Arc::new(Mutex::new(HashSet::new()));
 
+		let (protocol, notif_protocol_handles) = Protocol::new(
+			From::from(&params.role),
+			&params.metrics_registry,
+			notification_protocols,
+			params.block_announce_config,
+			params.peer_store.clone(),
+			protocol_handles.clone(),
+			from_protocol_controllers,
+		)?;
+
 		// Build the swarm.
 		let (mut swarm, bandwidth): (Swarm<Behaviour<B>>, _) = {
 			let user_agent =
@@ -508,7 +506,6 @@ where
 		}
 
 		let listen_addresses = Arc::new(Mutex::new(HashSet::new()));
-		let peers_notifications_sinks = Arc::new(Mutex::new(HashMap::new()));
 
 		let service = Arc::new(NetworkService {
 			bandwidth,
@@ -518,13 +515,10 @@ where
 			local_peer_id,
 			local_identity,
 			to_worker,
-			peers_notifications_sinks: peers_notifications_sinks.clone(),
-			notifications_sizes_metric: metrics
-				.as_ref()
-				.map(|metrics| metrics.notifications_sizes.clone()),
 			notification_protocol_ids,
 			protocol_handles,
 			sync_protocol_handle,
+			peer_store_handle: params.peer_store.clone(),
 			_marker: PhantomData,
 			_block: Default::default(),
 		});
@@ -539,8 +533,8 @@ where
 			metrics,
 			boot_node_ids,
 			reported_invalid_boot_nodes: Default::default(),
-			peers_notifications_sinks,
 			peer_store_handle: params.peer_store,
+			notif_protocol_handles,
 			_marker: Default::default(),
 			_block: Default::default(),
 		})
@@ -567,7 +561,7 @@ where
 
 	/// Returns the number of peers we're connected to.
 	pub fn num_connected_peers(&self) -> usize {
-		self.network_service.behaviour().user_protocol().num_connected_peers()
+		self.network_service.behaviour().user_protocol().num_sync_peers()
 	}
 
 	/// Adds an address for a node.
@@ -991,6 +985,16 @@ where
 	fn sync_num_connected(&self) -> usize {
 		self.num_connected.load(Ordering::Relaxed)
 	}
+
+	fn peer_role(&self, peer_id: PeerId, handshake: Vec<u8>) -> Option<ObservedRole> {
+		match Roles::decode_all(&mut &handshake[..]) {
+			Ok(role) => Some(role.into()),
+			Err(_) => {
+				log::debug!(target: "sub-libp2p", "handshake doesn't contain peer role: {handshake:?}");
+				self.peer_store_handle.peer_role(&peer_id)
+			},
+		}
+	}
 }
 
 impl<B, H> NetworkEventStream for NetworkService<B, H>
@@ -1010,68 +1014,20 @@ where
 	B: BlockT + 'static,
 	H: ExHashT,
 {
-	fn write_notification(&self, target: PeerId, protocol: ProtocolName, message: Vec<u8>) {
-		// We clone the `NotificationsSink` in order to be able to unlock the network-wide
-		// `peers_notifications_sinks` mutex as soon as possible.
-		let sink = {
-			let peers_notifications_sinks = self.peers_notifications_sinks.lock();
-			if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) {
-				sink.clone()
-			} else {
-				// Notification silently discarded, as documented.
-				debug!(
-					target: "sub-libp2p",
-					"Attempted to send notification on missing or closed substream: {}, {:?}",
-					target, protocol,
-				);
-				return
-			}
-		};
-
-		if let Some(notifications_sizes_metric) = self.notifications_sizes_metric.as_ref() {
-			notifications_sizes_metric
-				.with_label_values(&["out", &protocol])
-				.observe(message.len() as f64);
-		}
-
-		// Sending is communicated to the `NotificationsSink`.
-		trace!(
-			target: "sub-libp2p",
-			"External API => Notification({:?}, {:?}, {} bytes)",
-			target, protocol, message.len()
-		);
-		trace!(target: "sub-libp2p", "Handler({:?}) <= Sync notification", target);
-		sink.send_sync_notification(message);
+	fn write_notification(&self, _target: PeerId, _protocol: ProtocolName, _message: Vec<u8>) {
+		unimplemented!();
 	}
 
 	fn notification_sender(
 		&self,
-		target: PeerId,
-		protocol: ProtocolName,
+		_target: PeerId,
+		_protocol: ProtocolName,
 	) -> Result<Box<dyn NotificationSenderT>, NotificationSenderError> {
-		// We clone the `NotificationsSink` in order to be able to unlock the network-wide
-		// `peers_notifications_sinks` mutex as soon as possible.
-		let sink = {
-			let peers_notifications_sinks = self.peers_notifications_sinks.lock();
-			if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) {
-				sink.clone()
-			} else {
-				return Err(NotificationSenderError::Closed)
-			}
-		};
-
-		let notification_size_metric = self
-			.notifications_sizes_metric
-			.as_ref()
-			.map(|histogram| histogram.with_label_values(&["out", &protocol]));
-
-		Ok(Box::new(NotificationSender { sink, protocol_name: protocol, notification_size_metric }))
+		unimplemented!();
 	}
 
-	fn set_notification_handshake(&self, protocol: ProtocolName, handshake: Vec<u8>) {
-		let _ = self
-			.to_worker
-			.unbounded_send(ServiceToWorkerMsg::SetNotificationHandshake(protocol, handshake));
+	fn set_notification_handshake(&self, _protocol: ProtocolName, _handshake: Vec<u8>) {
+		unimplemented!();
 	}
 }
 
@@ -1209,7 +1165,6 @@ enum ServiceToWorkerMsg {
 		pending_response: oneshot::Sender<Result<NetworkState, RequestFailure>>,
 	},
 	DisconnectPeer(PeerId, ProtocolName),
-	SetNotificationHandshake(ProtocolName, Vec<u8>),
 }
 
 /// Main network worker. Must be polled in order for the network to advance.
@@ -1239,11 +1194,10 @@ where
 	boot_node_ids: Arc<HashMap<PeerId, Vec<Multiaddr>>>,
 	/// Boot nodes that we already have reported as invalid.
 	reported_invalid_boot_nodes: HashSet<PeerId>,
-	/// For each peer and protocol combination, an object that allows sending notifications to
-	/// that peer. Shared with the [`NetworkService`].
-	peers_notifications_sinks: Arc<Mutex<HashMap<(PeerId, ProtocolName), NotificationsSink>>>,
 	/// Peer reputation store handle.
 	peer_store_handle: PeerStoreHandle,
+	/// Notification protocol handles.
+	notif_protocol_handles: Vec<protocol::ProtocolHandle>,
 	/// Marker to pin the `H` generic. Serves no purpose except to not break backwards
 	/// compatibility.
 	_marker: PhantomData<H>,
@@ -1282,8 +1236,7 @@ where
 		};
 
 		// Update the `num_connected` count shared with the `NetworkService`.
-		let num_connected_peers =
-			self.network_service.behaviour_mut().user_protocol_mut().num_connected_peers();
+		let num_connected_peers = self.network_service.behaviour().user_protocol().num_sync_peers();
 		self.num_connected.store(num_connected_peers, Ordering::Relaxed);
 
 		if let Some(metrics) = self.metrics.as_ref() {
@@ -1353,11 +1306,6 @@ where
 				.behaviour_mut()
 				.user_protocol_mut()
 				.disconnect_peer(&who, protocol_name),
-			ServiceToWorkerMsg::SetNotificationHandshake(protocol, handshake) => self
-				.network_service
-				.behaviour_mut()
-				.user_protocol_mut()
-				.set_notification_handshake(protocol, handshake),
 		}
 	}
 
@@ -1472,47 +1420,27 @@ where
 			},
 			SwarmEvent::Behaviour(BehaviourOut::NotificationStreamOpened {
 				remote,
-				protocol,
+				set_id,
+				direction,
 				negotiated_fallback,
 				notifications_sink,
-				role,
 				received_handshake,
 			}) => {
-				if let Some(metrics) = self.metrics.as_ref() {
-					metrics
-						.notifications_streams_opened_total
-						.with_label_values(&[&protocol])
-						.inc();
-				}
-				{
-					let mut peers_notifications_sinks = self.peers_notifications_sinks.lock();
-					let _previous_value = peers_notifications_sinks
-						.insert((remote, protocol.clone()), notifications_sink);
-					debug_assert!(_previous_value.is_none());
-				}
-				self.event_streams.send(Event::NotificationStreamOpened {
+				let _ = self.notif_protocol_handles[usize::from(set_id)].report_substream_opened(
 					remote,
-					protocol,
-					negotiated_fallback,
-					role,
+					direction,
 					received_handshake,
-				});
+					negotiated_fallback,
+					notifications_sink,
+				);
 			},
 			SwarmEvent::Behaviour(BehaviourOut::NotificationStreamReplaced {
 				remote,
-				protocol,
+				set_id,
 				notifications_sink,
 			}) => {
-				let mut peers_notifications_sinks = self.peers_notifications_sinks.lock();
-				if let Some(s) = peers_notifications_sinks.get_mut(&(remote, protocol)) {
-					*s = notifications_sink;
-				} else {
-					error!(
-						target: "sub-libp2p",
-						"NotificationStreamReplaced for non-existing substream"
-					);
-					debug_assert!(false);
-				}
+				let _ = self.notif_protocol_handles[usize::from(set_id)]
+					.report_notification_sink_replaced(remote, notifications_sink);
 
 				// TODO: Notifications might have been lost as a result of the previous
 				// connection being dropped, and as a result it would be preferable to notify
@@ -1535,31 +1463,17 @@ where
 				// role,
 				// });
 			},
-			SwarmEvent::Behaviour(BehaviourOut::NotificationStreamClosed { remote, protocol }) => {
-				if let Some(metrics) = self.metrics.as_ref() {
-					metrics
-						.notifications_streams_closed_total
-						.with_label_values(&[&protocol[..]])
-						.inc();
-				}
-				self.event_streams
-					.send(Event::NotificationStreamClosed { remote, protocol: protocol.clone() });
-				{
-					let mut peers_notifications_sinks = self.peers_notifications_sinks.lock();
-					let _previous_value = peers_notifications_sinks.remove(&(remote, protocol));
-					debug_assert!(_previous_value.is_some());
-				}
+			SwarmEvent::Behaviour(BehaviourOut::NotificationStreamClosed { remote, set_id }) => {
+				let _ = self.notif_protocol_handles[usize::from(set_id)]
+					.report_substream_closed(remote);
 			},
-			SwarmEvent::Behaviour(BehaviourOut::NotificationsReceived { remote, messages }) => {
-				if let Some(metrics) = self.metrics.as_ref() {
-					for (protocol, message) in &messages {
-						metrics
-							.notifications_sizes
-							.with_label_values(&["in", protocol])
-							.observe(message.len() as f64);
-					}
-				}
-				self.event_streams.send(Event::NotificationsReceived { remote, messages });
+			SwarmEvent::Behaviour(BehaviourOut::NotificationsReceived {
+				remote,
+				set_id,
+				notification,
+			}) => {
+				let _ = self.notif_protocol_handles[usize::from(set_id)]
+					.report_notification_received(remote, notification);
 			},
 			SwarmEvent::Behaviour(BehaviourOut::Dht(event, duration)) => {
 				if let Some(metrics) = self.metrics.as_ref() {
diff --git a/substrate/client/network/src/service/metrics.rs b/substrate/client/network/src/service/metrics.rs
index 13bc4b4e7af..c349fd98c76 100644
--- a/substrate/client/network/src/service/metrics.rs
+++ b/substrate/client/network/src/service/metrics.rs
@@ -61,9 +61,6 @@ pub struct Metrics {
 	pub kbuckets_num_nodes: GaugeVec<U64>,
 	pub listeners_local_addresses: Gauge<U64>,
 	pub listeners_errors_total: Counter<U64>,
-	pub notifications_sizes: HistogramVec,
-	pub notifications_streams_closed_total: CounterVec<U64>,
-	pub notifications_streams_opened_total: CounterVec<U64>,
 	pub peerset_num_discovered: Gauge<U64>,
 	pub pending_connections: Gauge<U64>,
 	pub pending_connections_errors_total: CounterVec<U64>,
@@ -153,31 +150,6 @@ impl Metrics {
 				"substrate_sub_libp2p_listeners_errors_total",
 				"Total number of non-fatal errors reported by a listener"
 			)?, registry)?,
-			notifications_sizes: prometheus::register(HistogramVec::new(
-				HistogramOpts {
-					common_opts: Opts::new(
-						"substrate_sub_libp2p_notifications_sizes",
-						"Sizes of the notifications send to and received from all nodes"
-					),
-					buckets: prometheus::exponential_buckets(64.0, 4.0, 8)
-						.expect("parameters are always valid values; qed"),
-				},
-				&["direction", "protocol"]
-			)?, registry)?,
-			notifications_streams_closed_total: prometheus::register(CounterVec::new(
-				Opts::new(
-					"substrate_sub_libp2p_notifications_streams_closed_total",
-					"Total number of notification substreams that have been closed"
-				),
-				&["protocol"]
-			)?, registry)?,
-			notifications_streams_opened_total: prometheus::register(CounterVec::new(
-				Opts::new(
-					"substrate_sub_libp2p_notifications_streams_opened_total",
-					"Total number of notification substreams that have been opened"
-				),
-				&["protocol"]
-			)?, registry)?,
 			peerset_num_discovered: prometheus::register(Gauge::new(
 				"substrate_sub_libp2p_peerset_num_discovered",
 				"Number of nodes stored in the peerset manager",
diff --git a/substrate/client/network/src/service/signature.rs b/substrate/client/network/src/service/signature.rs
index 024f60e4c46..5b2ba6be8cf 100644
--- a/substrate/client/network/src/service/signature.rs
+++ b/substrate/client/network/src/service/signature.rs
@@ -18,6 +18,8 @@
 //
 // If you read this, you are very thorough, congratulations.
 
+//! Signature-related code
+
 use libp2p::{
 	identity::{Keypair, PublicKey},
 	PeerId,
diff --git a/substrate/client/network/src/service/traits.rs b/substrate/client/network/src/service/traits.rs
index bed325ede4a..f66e810be11 100644
--- a/substrate/client/network/src/service/traits.rs
+++ b/substrate/client/network/src/service/traits.rs
@@ -18,8 +18,11 @@
 //
 // If you read this, you are very thorough, congratulations.
 
+//! Traits defined by `sc-network`.
+
 use crate::{
 	config::MultiaddrWithPeerId,
+	error,
 	event::Event,
 	request_responses::{IfDisconnected, RequestFailure},
 	service::signature::Signature,
@@ -30,7 +33,9 @@ use crate::{
 use futures::{channel::oneshot, Stream};
 use libp2p::{Multiaddr, PeerId};
 
-use std::{collections::HashSet, future::Future, pin::Pin, sync::Arc};
+use sc_network_common::role::ObservedRole;
+
+use std::{collections::HashSet, fmt::Debug, future::Future, pin::Pin, sync::Arc};
 
 pub use libp2p::{identity::SigningError, kad::record::Key as KademliaKey};
 
@@ -221,6 +226,14 @@ pub trait NetworkPeers {
 
 	/// Returns the number of peers in the sync peer set we're connected to.
 	fn sync_num_connected(&self) -> usize;
+
+	/// Attempt to get peer role.
+	///
+	/// Right now the peer role is decoded from the received handshake for all protocols
+	/// (`/block-announces/1` carries other information as well). If the handshake cannot be
+	/// decoded into a role, the role is queried from `PeerStore`, and if it is not stored
+	/// there either, `None` is returned and the peer should be discarded.
+	fn peer_role(&self, peer_id: PeerId, handshake: Vec<u8>) -> Option<ObservedRole>;
 }
 
 // Manual implementation to avoid extra boxing here
@@ -296,6 +309,10 @@ where
 	fn sync_num_connected(&self) -> usize {
 		T::sync_num_connected(self)
 	}
+
+	fn peer_role(&self, peer_id: PeerId, handshake: Vec<u8>) -> Option<ObservedRole> {
+		T::peer_role(self, peer_id, handshake)
+	}
 }
 
 /// Provides access to network-level event stream.
@@ -611,3 +628,189 @@ where
 		T::new_best_block_imported(self, hash, number)
 	}
 }
+
+/// Substream acceptance result.
+#[derive(Debug, PartialEq, Eq)]
+pub enum ValidationResult {
+	/// Accept inbound substream.
+	Accept,
+
+	/// Reject inbound substream.
+	Reject,
+}
+
+/// Substream direction.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub enum Direction {
+	/// Substream opened by the remote node.
+	Inbound,
+
+	/// Substream opened by the local node.
+	Outbound,
+}
+
+impl Direction {
+	/// Is the direction inbound.
+	pub fn is_inbound(&self) -> bool {
+		std::matches!(self, Direction::Inbound)
+	}
+}
+
+/// Events received by the protocol from `Notifications`.
+#[derive(Debug)]
+pub enum NotificationEvent {
+	/// Validate inbound substream.
+	ValidateInboundSubstream {
+		/// Peer ID.
+		peer: PeerId,
+
+		/// Received handshake.
+		handshake: Vec<u8>,
+
+		/// `oneshot::Sender` for sending the validation result back to `Notifications`.
+		result_tx: tokio::sync::oneshot::Sender<ValidationResult>,
+	},
+
+	/// The substream to the remote identified by `PeerId` was opened after a successful
+	/// handshake and is now ready for sending and receiving notifications.
+	NotificationStreamOpened {
+		/// Peer ID.
+		peer: PeerId,
+
+		/// Is the substream inbound or outbound.
+		direction: Direction,
+
+		/// Received handshake.
+		handshake: Vec<u8>,
+
+		/// Negotiated fallback.
+		negotiated_fallback: Option<ProtocolName>,
+	},
+
+	/// Substream was closed.
+	NotificationStreamClosed {
+		/// Peer Id.
+		peer: PeerId,
+	},
+
+	/// Notification was received from the substream.
+	NotificationReceived {
+		/// Peer ID.
+		peer: PeerId,
+
+		/// Received notification.
+		notification: Vec<u8>,
+	},
+}
+
+/// Notification service
+///
+/// Defines behaviors that both the protocol implementations and `Notifications` can expect from
+/// each other.
+///
+/// `Notifications` can send two different kinds of information to protocol:
+///  * substream-related information
+///  * notification-related information
+///
+/// When an unvalidated, inbound substream is received by `Notifications`, it sends the inbound
+/// stream information (peer ID, handshake) to the protocol for validation. The protocol must then
+/// verify that the handshake is valid (and, in the future, that it has a slot it can allocate for
+/// the peer) and report back a `ValidationResult`, which is either `Accept` or `Reject`.
+///
+/// After the validation result has been received by `Notifications`, it prepares the
+/// substream for communication by initializing the necessary sinks and emits
+/// `NotificationStreamOpened` which informs the protocol that the remote peer is ready to receive
+/// notifications.
+///
+/// Two different flavors of sending options are provided:
+///  * synchronous sending ([`NotificationService::send_sync_notification()`])
+///  * asynchronous sending ([`NotificationService::send_async_notification()`])
+///
+/// The former is used by protocols that are not ready to exercise backpressure and the latter by
+/// protocols that can.
+///
+/// Both the local and the remote peer can close the substream at any time. The local peer can do
+/// so by calling [`NotificationService::close_substream()`], which instructs `Notifications` to
+/// close the substream. The remote peer closing the substream is indicated to the local peer by a
+/// [`NotificationEvent::NotificationStreamClosed`] event.
+///
+/// In case the protocol must update its handshake while it's operating (such as updating the best
+/// block information), it can do so by calling [`NotificationService::set_handshake()`]
+/// which instructs `Notifications` to update the handshake it stored during protocol
+/// initialization.
+///
+/// All peer events are multiplexed on the same incoming event stream from `Notifications` and thus
+/// each event carries a `PeerId` so the protocol knows whose information to update when receiving
+/// an event.
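+///
+/// # Example
+///
+/// A minimal event-loop sketch, for illustration only: `notif` stands for the protocol's
+/// `Box<dyn NotificationService>` obtained during protocol initialization and is not defined
+/// here.
+///
+/// ```ignore
+/// while let Some(event) = notif.next_event().await {
+///     match event {
+///         NotificationEvent::ValidateInboundSubstream { result_tx, .. } => {
+///             // a real protocol would inspect the handshake and its own peer limits here
+///             let _ = result_tx.send(ValidationResult::Accept);
+///         },
+///         NotificationEvent::NotificationStreamOpened { peer, .. } => {
+///             // the substream is ready; send a notification, exercising backpressure
+///             let _ = notif.send_async_notification(&peer, vec![1, 2, 3]).await;
+///         },
+///         NotificationEvent::NotificationReceived { peer, notification } => {
+///             // decode and handle `notification` received from `peer`
+///             let _ = (peer, notification);
+///         },
+///         NotificationEvent::NotificationStreamClosed { peer } => {
+///             // remove `peer` from local protocol state
+///             let _ = peer;
+///         },
+///     }
+/// }
+/// ```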
+#[async_trait::async_trait]
+pub trait NotificationService: Debug + Send {
+	/// Instruct `Notifications` to open a new substream for `peer`.
+	///
+	//
+	// `dial_if_disconnected` would inform `Notifications` whether to dial
+	// the peer if there is currently no active connection to it.
+	//
+	// NOTE: not offered by the current implementation
+	async fn open_substream(&mut self, peer: PeerId) -> Result<(), ()>;
+
+	/// Instruct `Notifications` to close substream for `peer`.
+	//
+	// NOTE: not offered by the current implementation
+	async fn close_substream(&mut self, peer: PeerId) -> Result<(), ()>;
+
+	/// Send synchronous `notification` to `peer`.
+	fn send_sync_notification(&self, peer: &PeerId, notification: Vec<u8>);
+
+	/// Send asynchronous `notification` to `peer`, allowing sender to exercise backpressure.
+	///
+	/// Returns an error if the peer doesn't exist.
+	async fn send_async_notification(
+		&self,
+		peer: &PeerId,
+		notification: Vec<u8>,
+	) -> Result<(), error::Error>;
+
+	/// Set handshake for the notification protocol replacing the old handshake.
+	async fn set_handshake(&mut self, handshake: Vec<u8>) -> Result<(), ()>;
+
+	/// Non-blocking variant of `set_handshake()` that attempts to update the handshake
+	/// and returns an error if the channel is blocked.
+	///
+	/// Technically the function can return an error if the channel to `Notifications` is closed
+	/// but that doesn't happen under normal operation.
+	fn try_set_handshake(&mut self, handshake: Vec<u8>) -> Result<(), ()>;
+
+	/// Get next event from the `Notifications` event stream.
+	async fn next_event(&mut self) -> Option<NotificationEvent>;
+
+	/// Make a copy of the object so it can be shared between protocol components
+	/// that wish to have access to the same underlying notification protocol.
+	fn clone(&mut self) -> Result<Box<dyn NotificationService>, ()>;
+
+	/// Get protocol name of the `NotificationService`.
+	fn protocol(&self) -> &ProtocolName;
+
+	/// Get message sink of the peer.
+	fn message_sink(&self, peer: &PeerId) -> Option<Box<dyn MessageSink>>;
+}
+
+/// Message sink for peers.
+///
+/// If a protocol cannot use [`NotificationService`] to send notifications to peers, e.g. because
+/// the notifications must be sent from another task, the protocol may acquire a [`MessageSink`]
+/// object for each peer by calling [`NotificationService::message_sink()`]. The returned object
+/// allows the protocol to send notifications to the remote peer.
+///
+/// Use of this API is discouraged as it is not as performant as sending notifications through
+/// [`NotificationService`], due to the synchronization required to keep the underlying
+/// notification sink up to date with possible sink replacement events.
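+///
+/// # Example
+///
+/// A small sketch, for illustration only: `notif` stands for the protocol's
+/// `Box<dyn NotificationService>`, `peer` for an already connected peer, and a tokio runtime is
+/// assumed for spawning the task.
+///
+/// ```ignore
+/// // acquire a sink for `peer` and move it to a separate sending task
+/// let sink = notif.message_sink(&peer).expect("peer to be connected");
+/// tokio::spawn(async move {
+///     // fire-and-forget notification
+///     sink.send_sync_notification(vec![1, 2, 3]);
+///     // backpressured notification; fails if the substream was closed
+///     let _ = sink.send_async_notification(vec![4, 5, 6]).await;
+/// });
+/// ```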
+#[async_trait::async_trait]
+pub trait MessageSink: Send + Sync {
+	/// Send synchronous `notification` to the peer associated with this [`MessageSink`].
+	fn send_sync_notification(&self, notification: Vec<u8>);
+
+	/// Send an asynchronous `notification` to the peer associated with this [`MessageSink`],
+	/// allowing sender to exercise backpressure.
+	///
+	/// Returns an error if the peer does not exist.
+	async fn send_async_notification(&self, notification: Vec<u8>) -> Result<(), error::Error>;
+}
diff --git a/substrate/client/network/statement/src/lib.rs b/substrate/client/network/statement/src/lib.rs
index 69d4faa13ef..5187e681d83 100644
--- a/substrate/client/network/statement/src/lib.rs
+++ b/substrate/client/network/statement/src/lib.rs
@@ -21,12 +21,13 @@
 //! Usage:
 //!
 //! - Use [`StatementHandlerPrototype::new`] to create a prototype.
-//! - Pass the return value of [`StatementHandlerPrototype::set_config`] to the network
-//! configuration as an extra peers set.
+//! - Pass the `NonDefaultSetConfig` returned from [`StatementHandlerPrototype::new`] to the network
+//!   configuration as an extra peers set.
 //! - Use [`StatementHandlerPrototype::build`] then [`StatementHandler::run`] to obtain a
 //! `Future` that processes statements.
 
 use crate::config::*;
+
 use codec::{Decode, Encode};
 use futures::{channel::oneshot, prelude::*, stream::FuturesUnordered, FutureExt};
 use libp2p::{multiaddr, PeerId};
@@ -34,7 +35,7 @@ use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64};
 use sc_network::{
 	config::{NonDefaultSetConfig, NonReservedPeerMode, SetConfig},
 	error,
-	event::Event,
+	service::traits::{NotificationEvent, NotificationService, ValidationResult},
 	types::ProtocolName,
 	utils::{interval, LruHashSet},
 	NetworkEventStream, NetworkNotification, NetworkPeers,
@@ -101,35 +102,35 @@ impl Metrics {
 /// Prototype for a [`StatementHandler`].
 pub struct StatementHandlerPrototype {
 	protocol_name: ProtocolName,
+	notification_service: Box<dyn NotificationService>,
 }
 
 impl StatementHandlerPrototype {
 	/// Create a new instance.
-	pub fn new<Hash: AsRef<[u8]>>(genesis_hash: Hash, fork_id: Option<&str>) -> Self {
+	pub fn new<Hash: AsRef<[u8]>>(
+		genesis_hash: Hash,
+		fork_id: Option<&str>,
+	) -> (Self, NonDefaultSetConfig) {
 		let genesis_hash = genesis_hash.as_ref();
 		let protocol_name = if let Some(fork_id) = fork_id {
 			format!("/{}/{}/statement/1", array_bytes::bytes2hex("", genesis_hash), fork_id)
 		} else {
 			format!("/{}/statement/1", array_bytes::bytes2hex("", genesis_hash))
 		};
-
-		Self { protocol_name: protocol_name.into() }
-	}
-
-	/// Returns the configuration of the set to put in the network configuration.
-	pub fn set_config(&self) -> NonDefaultSetConfig {
-		NonDefaultSetConfig {
-			notifications_protocol: self.protocol_name.clone(),
-			fallback_names: Vec::new(),
-			max_notification_size: MAX_STATEMENT_SIZE,
-			handshake: None,
-			set_config: SetConfig {
+		let (config, notification_service) = NonDefaultSetConfig::new(
+			protocol_name.clone().into(),
+			Vec::new(),
+			MAX_STATEMENT_SIZE,
+			None,
+			SetConfig {
 				in_peers: 0,
 				out_peers: 0,
 				reserved_nodes: Vec::new(),
 				non_reserved_mode: NonReservedPeerMode::Deny,
 			},
-		}
+		);
+
+		(Self { protocol_name: protocol_name.into(), notification_service }, config)
 	}
 
 	/// Turns the prototype into the actual handler.
@@ -147,7 +148,6 @@ impl StatementHandlerPrototype {
 		metrics_registry: Option<&Registry>,
 		executor: impl Fn(Pin<Box<dyn Future<Output = ()> + Send>>) + Send,
 	) -> error::Result<StatementHandler<N, S>> {
-		let net_event_stream = network.event_stream("statement-handler-net");
 		let sync_event_stream = sync.event_stream("statement-handler-sync");
 		let (queue_sender, mut queue_receiver) = async_channel::bounded(100_000);
 
@@ -176,6 +176,7 @@ impl StatementHandlerPrototype {
 
 		let handler = StatementHandler {
 			protocol_name: self.protocol_name,
+			notification_service: self.notification_service,
 			propagate_timeout: (Box::pin(interval(PROPAGATE_TIMEOUT))
 				as Pin<Box<dyn Stream<Item = ()> + Send>>)
 				.fuse(),
@@ -183,7 +184,6 @@ impl StatementHandlerPrototype {
 			pending_statements_peers: HashMap::new(),
 			network,
 			sync,
-			net_event_stream: net_event_stream.fuse(),
 			sync_event_stream: sync_event_stream.fuse(),
 			peers: HashMap::new(),
 			statement_store,
@@ -219,10 +219,10 @@ pub struct StatementHandler<
 	network: N,
 	/// Syncing service.
 	sync: S,
-	/// Stream of networking events.
-	net_event_stream: stream::Fuse<Pin<Box<dyn Stream<Item = Event> + Send>>>,
 	/// Receiver for syncing-related events.
 	sync_event_stream: stream::Fuse<Pin<Box<dyn Stream<Item = SyncEvent> + Send>>>,
+	/// Notification service.
+	notification_service: Box<dyn NotificationService>,
 	// All connected peers
 	peers: HashMap<PeerId, Peer>,
 	statement_store: Arc<dyn StatementStore>,
@@ -261,14 +261,6 @@ where
 						log::warn!(target: LOG_TARGET, "Inconsistent state, no peers for pending statement!");
 					}
 				},
-				network_event = self.net_event_stream.next() => {
-					if let Some(network_event) = network_event {
-						self.handle_network_event(network_event).await;
-					} else {
-						// Networking has seemingly closed. Closing as well.
-						return;
-					}
-				},
 				sync_event = self.sync_event_stream.next() => {
 					if let Some(sync_event) = sync_event {
 						self.handle_sync_event(sync_event);
@@ -277,6 +269,14 @@ where
 						return;
 					}
 				}
+				event = self.notification_service.next_event().fuse() => {
+					if let Some(event) = event {
+						self.handle_notification_event(event)
+					} else {
+						// `Notifications` has seemingly closed. Closing as well.
+						return
+					}
+				}
 			}
 		}
 	}
@@ -306,14 +306,24 @@ where
 		}
 	}
 
-	async fn handle_network_event(&mut self, event: Event) {
+	fn handle_notification_event(&mut self, event: NotificationEvent) {
 		match event {
-			Event::Dht(_) => {},
-			Event::NotificationStreamOpened { remote, protocol, role, .. }
-				if protocol == self.protocol_name =>
-			{
+			NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx, .. } => {
+				// only accept peers whose role can be determined
+				let result = self
+					.network
+					.peer_role(peer, handshake)
+					.map_or(ValidationResult::Reject, |_| ValidationResult::Accept);
+				let _ = result_tx.send(result);
+			},
+			NotificationEvent::NotificationStreamOpened { peer, handshake, .. } => {
+				let Some(role) = self.network.peer_role(peer, handshake) else {
+					log::debug!(target: LOG_TARGET, "role for {peer} couldn't be determined");
+					return
+				};
+
 				let _was_in = self.peers.insert(
-					remote,
+					peer,
 					Peer {
 						known_statements: LruHashSet::new(
 							NonZeroUsize::new(MAX_KNOWN_STATEMENTS).expect("Constant is nonzero"),
@@ -323,39 +333,26 @@ where
 				);
 				debug_assert!(_was_in.is_none());
 			},
-			Event::NotificationStreamClosed { remote, protocol }
-				if protocol == self.protocol_name =>
-			{
-				let _peer = self.peers.remove(&remote);
+			NotificationEvent::NotificationStreamClosed { peer } => {
+				let _peer = self.peers.remove(&peer);
 				debug_assert!(_peer.is_some());
 			},
+			NotificationEvent::NotificationReceived { peer, notification } => {
+				// Accept statements only when node is not major syncing
+				if self.sync.is_major_syncing() {
+					log::trace!(
+						target: LOG_TARGET,
+						"{peer}: Ignoring statements while major syncing or offline"
+					);
+					return
+				}
 
-			Event::NotificationsReceived { remote, messages } => {
-				for (protocol, message) in messages {
-					if protocol != self.protocol_name {
-						continue
-					}
-					// Accept statements only when node is not major syncing
-					if self.sync.is_major_syncing() {
-						log::trace!(
-							target: LOG_TARGET,
-							"{remote}: Ignoring statements while major syncing or offline"
-						);
-						continue
-					}
-					if let Ok(statements) = <Statements as Decode>::decode(&mut message.as_ref()) {
-						self.on_statements(remote, statements);
-					} else {
-						log::debug!(
-							target: LOG_TARGET,
-							"Failed to decode statement list from {remote}"
-						);
-					}
+				if let Ok(statements) = <Statements as Decode>::decode(&mut notification.as_ref()) {
+					self.on_statements(peer, statements);
+				} else {
+					log::debug!(target: LOG_TARGET, "Failed to decode statement list from {peer}");
 				}
 			},
-
-			// Not our concern.
-			Event::NotificationStreamOpened { .. } | Event::NotificationStreamClosed { .. } => {},
 		}
 	}
 
diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs
index 2cb8eab22f7..d7b024cd801 100644
--- a/substrate/client/network/sync/src/engine.rs
+++ b/substrate/client/network/sync/src/engine.rs
@@ -38,7 +38,7 @@ use crate::{
 	warp::{EncodedProof, WarpProofRequest, WarpSyncParams},
 };
 
-use codec::{Decode, Encode};
+use codec::{Decode, DecodeAll, Encode};
 use futures::{
 	channel::oneshot,
 	future::{BoxFuture, Fuse},
@@ -61,9 +61,12 @@ use sc_network::{
 		FullNetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake,
 		ProtocolId, SetConfig,
 	},
+	peer_store::{PeerStoreHandle, PeerStoreProvider},
 	request_responses::{IfDisconnected, RequestFailure},
+	service::traits::{Direction, NotificationEvent, ValidationResult},
+	types::ProtocolName,
 	utils::LruHashSet,
-	NotificationsSink, ProtocolName, ReputationChange,
+	NotificationService, ReputationChange,
 };
 use sc_network_common::{
 	role::Roles,
@@ -88,15 +91,15 @@ use std::{
 	time::{Duration, Instant},
 };
 
-/// Log target for this file.
-const LOG_TARGET: &'static str = "sync";
-
 /// Interval at which we perform time based maintenance
 const TICK_TIMEOUT: std::time::Duration = std::time::Duration::from_millis(1100);
 
 /// Maximum number of known block hashes to keep for a peer.
 const MAX_KNOWN_BLOCKS: usize = 1024; // ~32kb per peer + LruHashSet overhead
 
+/// Logging target for the file.
+const LOG_TARGET: &str = "sync";
+
 /// If the block announces stream to peer has been inactive for 30 seconds meaning local node
 /// has not sent or received block announcements to/from the peer, report the node for inactivity,
 /// disconnect it and attempt to establish connection to some other peer.
@@ -226,8 +229,6 @@ pub struct Peer<B: BlockT> {
 	pub info: ExtendedPeerInfo<B>,
 	/// Holds a set of blocks known to this peer.
 	pub known_blocks: LruHashSet<B::Hash>,
-	/// Notification sink.
-	sink: NotificationsSink,
 	/// Is the peer inbound.
 	inbound: bool,
 }
@@ -252,9 +253,6 @@ pub struct SyncingEngine<B: BlockT, Client> {
 	/// Channel for receiving service commands
 	service_rx: TracingUnboundedReceiver<ToServiceCommand<B>>,
 
-	/// Channel for receiving inbound connections from `Protocol`.
-	sync_events_rx: sc_utils::mpsc::TracingUnboundedReceiver<sc_network::SyncEvent<B>>,
-
 	/// Assigned roles.
 	roles: Roles,
 
@@ -312,12 +310,18 @@ pub struct SyncingEngine<B: BlockT, Client> {
 	/// Prometheus metrics.
 	metrics: Option<Metrics>,
 
+	/// Handle that is used to communicate with `sc_network::Notifications`.
+	notification_service: Box<dyn NotificationService>,
+
 	/// When the syncing was started.
 	///
 	/// Stored as an `Option<Instant>` so once the initial wait has passed, `SyncingEngine`
 	/// can reset the peer timers and continue with the normal eviction process.
 	syncing_started: Option<Instant>,
 
+	/// Handle to `PeerStore`.
+	peer_store_handle: PeerStoreHandle,
+
 	/// Instant when the last notification was sent or received.
 	last_notification_io: Instant,
 
@@ -362,7 +366,7 @@ where
 		block_downloader: Arc<dyn BlockDownloader<B>>,
 		state_request_protocol_name: ProtocolName,
 		warp_sync_protocol_name: Option<ProtocolName>,
-		sync_events_rx: sc_utils::mpsc::TracingUnboundedReceiver<sc_network::SyncEvent<B>>,
+		peer_store_handle: PeerStoreHandle,
 	) -> Result<(Self, SyncingService<B>, NonDefaultSetConfig), ClientError> {
 		let mode = net_config.network_config.sync_mode;
 		let max_parallel_downloads = net_config.network_config.max_parallel_downloads;
@@ -387,7 +391,7 @@ where
 			}
 			for config in net_config.notification_protocols() {
 				let peer_ids = config
-					.set_config
+					.set_config()
 					.reserved_nodes
 					.iter()
 					.map(|info| info.peer_id)
@@ -438,7 +442,7 @@ where
 		let warp_sync_target_block_header_rx_fused = warp_sync_target_block_header_rx
 			.map_or(futures::future::pending().boxed().fuse(), |rx| rx.boxed().fuse());
 
-		let block_announce_config = Self::get_block_announce_proto_config(
+		let (block_announce_config, notification_service) = Self::get_block_announce_proto_config(
 			protocol_id,
 			fork_id,
 			roles,
@@ -450,7 +454,6 @@ where
 				.flatten()
 				.expect("Genesis block exists; qed"),
 		);
-		let block_announce_protocol_name = block_announce_config.notifications_protocol.clone();
 
 		let chain_sync = ChainSync::new(
 			mode,
@@ -460,6 +463,7 @@ where
 			warp_sync_config,
 		)?;
 
+		let block_announce_protocol_name = block_announce_config.protocol_name().clone();
 		let (tx, service_rx) = tracing_unbounded("mpsc_chain_sync", 100_000);
 		let num_connected = Arc::new(AtomicUsize::new(0));
 		let is_major_syncing = Arc::new(AtomicBool::new(false));
@@ -496,7 +500,6 @@ where
 				num_connected: num_connected.clone(),
 				is_major_syncing: is_major_syncing.clone(),
 				service_rx,
-				sync_events_rx,
 				genesis_hash,
 				important_peers,
 				default_peers_set_no_slot_connected_peers: HashSet::new(),
@@ -508,8 +511,10 @@ where
 				num_in_peers: 0usize,
 				max_in_peers,
 				event_streams: Vec::new(),
+				notification_service,
 				tick_timeout,
 				syncing_started: None,
+				peer_store_handle,
 				last_notification_io: Instant::now(),
 				metrics: if let Some(r) = metrics_registry {
 					match Metrics::register(r, is_major_syncing.clone()) {
@@ -673,23 +678,11 @@ where
 				};
 
 				self.last_notification_io = Instant::now();
-				peer.sink.send_sync_notification(message.encode());
+				let _ = self.notification_service.send_sync_notification(peer_id, message.encode());
 			}
 		}
 	}
 
-	/// Inform sync about new best imported block.
-	pub fn new_best_block_imported(&mut self, hash: B::Hash, number: NumberFor<B>) {
-		log::debug!(target: LOG_TARGET, "New best block imported {hash:?}/#{number}");
-
-		self.chain_sync.update_chain_info(&hash, number);
-		self.network_service.set_notification_handshake(
-			self.block_announce_protocol_name.clone(),
-			BlockAnnouncesHandshake::<B>::build(self.roles, number, hash, self.genesis_hash)
-				.encode(),
-		)
-	}
-
 	pub async fn run(mut self) {
 		self.syncing_started = Some(Instant::now());
 
@@ -698,8 +691,10 @@ where
 				_ = self.tick_timeout.tick() => self.perform_periodic_actions(),
 				command = self.service_rx.select_next_some() =>
 					self.process_service_command(command),
-				sync_event = self.sync_events_rx.select_next_some() =>
-					self.process_sync_event(sync_event),
+				notification_event = self.notification_service.next_event() => match notification_event {
+					Some(event) => self.process_notification_event(event),
+					None => return,
+				},
 				warp_target_block_header = &mut self.warp_sync_target_block_header_rx_fused =>
 					self.pass_warp_sync_target_block_header(warp_target_block_header),
 				response_event = self.pending_responses.select_next_some() =>
@@ -853,8 +848,20 @@ where
 				}
 			},
 			ToServiceCommand::AnnounceBlock(hash, data) => self.announce_block(hash, data),
-			ToServiceCommand::NewBestBlockImported(hash, number) =>
-				self.new_best_block_imported(hash, number),
+			ToServiceCommand::NewBestBlockImported(hash, number) => {
+				log::debug!(target: "sync", "New best block imported {:?}/#{}", hash, number);
+
+				self.chain_sync.update_chain_info(&hash, number);
+				let _ = self.notification_service.try_set_handshake(
+					BlockAnnouncesHandshake::<B>::build(
+						self.roles,
+						number,
+						hash,
+						self.genesis_hash,
+					)
+					.encode(),
+				);
+			},
 			ToServiceCommand::Status(tx) => {
 				let mut status = self.chain_sync.status();
 				status.num_connected_peers = self.peers.len() as u32;
@@ -894,56 +901,60 @@ where
 		}
 	}
 
-	fn process_sync_event(&mut self, event: sc_network::SyncEvent<B>) {
+	fn process_notification_event(&mut self, event: NotificationEvent) {
 		match event {
-			sc_network::SyncEvent::NotificationStreamOpened {
-				remote,
-				received_handshake,
-				sink,
-				inbound,
-				tx,
-			} => match self.on_sync_peer_connected(remote, &received_handshake, sink, inbound) {
-				Ok(()) => {
-					let _ = tx.send(true);
-				},
-				Err(()) => {
-					log::debug!(
-						target: LOG_TARGET,
-						"Failed to register peer {remote:?}: {received_handshake:?}",
-					);
-					let _ = tx.send(false);
-				},
-			},
-			sc_network::SyncEvent::NotificationStreamClosed { remote } => {
-				if self.on_sync_peer_disconnected(remote).is_err() {
-					log::trace!(
-						target: LOG_TARGET,
-						"Disconnected peer which had earlier been refused by on_sync_peer_connected {}",
-						remote
-					);
-				}
+			NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx } => {
+				let validation_result = self
+					.validate_connection(&peer, handshake, Direction::Inbound)
+					.map_or(ValidationResult::Reject, |_| ValidationResult::Accept);
+
+				let _ = result_tx.send(validation_result);
 			},
-			sc_network::SyncEvent::NotificationsReceived { remote, messages } => {
-				for message in messages {
-					if self.peers.contains_key(&remote) {
-						if let Ok(announce) = BlockAnnounce::decode(&mut message.as_ref()) {
-							self.last_notification_io = Instant::now();
-							self.push_block_announce_validation(remote, announce);
-						} else {
-							log::warn!(target: "sub-libp2p", "Failed to decode block announce");
+			NotificationEvent::NotificationStreamOpened { peer, handshake, direction, .. } => {
+				log::debug!(
+					target: LOG_TARGET,
+					"Substream opened for {peer}, handshake {handshake:?}"
+				);
+
+				match self.validate_connection(&peer, handshake, direction) {
+					Ok(handshake) => {
+						if self.on_sync_peer_connected(peer, &handshake, direction).is_err() {
+							log::debug!(target: LOG_TARGET, "Failed to register peer {peer}");
+							self.network_service
+								.disconnect_peer(peer, self.block_announce_protocol_name.clone());
 						}
-					} else {
-						log::trace!(
-							target: LOG_TARGET,
-							"Received sync for peer earlier refused by sync layer: {remote}",
-						);
-					}
+					},
+					Err(wrong_genesis) => {
+						log::debug!(target: LOG_TARGET, "`SyncingEngine` rejected {peer}");
+
+						if wrong_genesis {
+							self.peer_store_handle.report_peer(peer, rep::GENESIS_MISMATCH);
+						}
+
+						self.network_service
+							.disconnect_peer(peer, self.block_announce_protocol_name.clone());
+					},
 				}
 			},
-			sc_network::SyncEvent::NotificationSinkReplaced { remote, sink } => {
-				if let Some(peer) = self.peers.get_mut(&remote) {
-					peer.sink = sink;
+			NotificationEvent::NotificationStreamClosed { peer } => {
+				self.on_sync_peer_disconnected(peer);
+			},
+			NotificationEvent::NotificationReceived { peer, notification } => {
+				if !self.peers.contains_key(&peer) {
+					log::error!(
+						target: LOG_TARGET,
+						"received notification from {peer} who had been earlier refused by `SyncingEngine`",
+					);
+					return
 				}
+
+				let Ok(announce) = BlockAnnounce::decode(&mut notification.as_ref()) else {
+					log::warn!(target: LOG_TARGET, "failed to decode block announce");
+					return
+				};
+
+				self.last_notification_io = Instant::now();
+				self.push_block_announce_validation(peer, announce);
 			},
 		}
 	}
@@ -965,129 +976,167 @@ where
 	/// Called by peer when it is disconnecting.
 	///
-	/// Returns a result if the handshake of this peer was indeed accepted.
+	/// If the peer was previously accepted, its state is removed and `ChainSync` is notified.
-	fn on_sync_peer_disconnected(&mut self, peer_id: PeerId) -> Result<(), ()> {
-		if let Some(info) = self.peers.remove(&peer_id) {
-			if self.important_peers.contains(&peer_id) {
-				log::warn!(target: LOG_TARGET, "Reserved peer {peer_id} disconnected");
-			} else {
-				log::debug!(target: LOG_TARGET, "{peer_id} disconnected");
-			}
-
-			if !self.default_peers_set_no_slot_connected_peers.remove(&peer_id) &&
-				info.inbound && info.info.roles.is_full()
-			{
-				match self.num_in_peers.checked_sub(1) {
-					Some(value) => {
-						self.num_in_peers = value;
-					},
-					None => {
-						log::error!(
-							target: LOG_TARGET,
-							"trying to disconnect an inbound node which is not counted as inbound"
-						);
-						debug_assert!(false);
-					},
-				}
-			}
-
-			self.chain_sync.peer_disconnected(&peer_id);
+	fn on_sync_peer_disconnected(&mut self, peer_id: PeerId) {
+		let Some(info) = self.peers.remove(&peer_id) else {
+			log::debug!(target: LOG_TARGET, "{peer_id} does not exist in `SyncingEngine`");
+			return
+		};
 
-			self.pending_responses.remove(&peer_id);
-			self.event_streams.retain(|stream| {
-				stream.unbounded_send(SyncEvent::PeerDisconnected(peer_id)).is_ok()
-			});
-			Ok(())
+		if self.important_peers.contains(&peer_id) {
+			log::warn!(target: LOG_TARGET, "Reserved peer {peer_id} disconnected");
 		} else {
-			Err(())
+			log::debug!(target: LOG_TARGET, "{peer_id} disconnected");
 		}
-	}
 
-	/// Called on the first connection between two peers on the default set, after their exchange
-	/// of handshake.
-	///
-	/// Returns `Ok` if the handshake is accepted and the peer added to the list of peers we sync
-	/// from.
-	fn on_sync_peer_connected(
-		&mut self,
-		peer_id: PeerId,
-		status: &BlockAnnouncesHandshake<B>,
-		sink: NotificationsSink,
-		inbound: bool,
-	) -> Result<(), ()> {
-		log::trace!(target: LOG_TARGET, "New peer {peer_id} {status:?}");
-
-		if self.peers.contains_key(&peer_id) {
-			log::error!(
-				target: LOG_TARGET,
-				"Called on_sync_peer_connected with already connected peer {peer_id}",
-			);
-			debug_assert!(false);
-			return Err(())
+		if !self.default_peers_set_no_slot_connected_peers.remove(&peer_id) &&
+			info.inbound && info.info.roles.is_full()
+		{
+			match self.num_in_peers.checked_sub(1) {
+				Some(value) => {
+					self.num_in_peers = value;
+				},
+				None => {
+					log::error!(
+						target: LOG_TARGET,
+						"trying to disconnect an inbound node which is not counted as inbound"
+					);
+					debug_assert!(false);
+				},
+			}
 		}
 
-		if status.genesis_hash != self.genesis_hash {
-			self.network_service.report_peer(peer_id, rep::GENESIS_MISMATCH);
+		self.chain_sync.peer_disconnected(&peer_id);
+		self.pending_responses.remove(&peer_id);
+		self.event_streams
+			.retain(|stream| stream.unbounded_send(SyncEvent::PeerDisconnected(peer_id)).is_ok());
+	}
 
+	/// Validate received handshake.
+	fn validate_handshake(
+		&mut self,
+		peer_id: &PeerId,
+		handshake: Vec<u8>,
+	) -> Result<BlockAnnouncesHandshake<B>, bool> {
+		log::trace!(target: LOG_TARGET, "Validate handshake for {peer_id}");
+
+		let handshake = <BlockAnnouncesHandshake<B> as DecodeAll>::decode_all(&mut &handshake[..])
+			.map_err(|error| {
+				log::debug!(target: LOG_TARGET, "Failed to decode handshake for {peer_id}: {error:?}");
+				false
+			})?;
+
+		if handshake.genesis_hash != self.genesis_hash {
 			if self.important_peers.contains(&peer_id) {
 				log::error!(
 					target: LOG_TARGET,
-					"Reserved peer id `{}` is on a different chain (our genesis: {} theirs: {})",
-					peer_id,
+					"Reserved peer id `{peer_id}` is on a different chain (our genesis: {} theirs: {})",
 					self.genesis_hash,
-					status.genesis_hash,
+					handshake.genesis_hash,
 				);
 			} else if self.boot_node_ids.contains(&peer_id) {
 				log::error!(
 					target: LOG_TARGET,
-					"Bootnode with peer id `{}` is on a different chain (our genesis: {} theirs: {})",
-					peer_id,
+					"Bootnode with peer id `{peer_id}` is on a different chain (our genesis: {} theirs: {})",
 					self.genesis_hash,
-					status.genesis_hash,
+					handshake.genesis_hash,
 				);
 			} else {
 				log::debug!(
 					target: LOG_TARGET,
 					"Peer is on different chain (our genesis: {} theirs: {})",
-					self.genesis_hash, status.genesis_hash
+					self.genesis_hash,
+					handshake.genesis_hash
 				);
 			}
 
-			return Err(())
+			return Err(true)
 		}
 
-		let no_slot_peer = self.default_peers_set_no_slot_peers.contains(&peer_id);
-		let this_peer_reserved_slot: usize = if no_slot_peer { 1 } else { 0 };
+		Ok(handshake)
+	}
 
-		// make sure to accept no more than `--in-peers` many full nodes
-		if !no_slot_peer &&
-			status.roles.is_full() &&
-			inbound && self.num_in_peers == self.max_in_peers
-		{
-			log::debug!(
+	/// Validate connection.
+	// NOTE Returning `Err(bool)` is a really ugly hack to work around the issue
+	// that `ProtocolController` thinks the peer is connected when in fact it can
+	// still be under validation. If the peer has a different genesis than the
+	// local node, the validation fails but the peer cannot be reported in
+	// `validate_connection()` as that is also called by
+	// `ValidateInboundSubstream` which means that the peer is still being
+	// validated and banning the peer when handling that event would
+	// result in the peer getting dropped twice.
+	//
+	// The proper way to fix this is to integrate `ProtocolController` more
+	// tightly with `NotificationService` or add an additional API call for
+	// banning pre-accepted peers (which is not desirable).
+	fn validate_connection(
+		&mut self,
+		peer_id: &PeerId,
+		handshake: Vec<u8>,
+		direction: Direction,
+	) -> Result<BlockAnnouncesHandshake<B>, bool> {
+		log::trace!(target: LOG_TARGET, "New peer {peer_id} {handshake:?}");
+
+		let handshake = self.validate_handshake(peer_id, handshake)?;
+
+		if self.peers.contains_key(&peer_id) {
+			log::error!(
 				target: LOG_TARGET,
-				"All inbound slots have been consumed, rejecting {peer_id}",
+				"Called `validate_connection()` with already connected peer {peer_id}",
 			);
-			return Err(())
+			debug_assert!(false);
+			return Err(false)
 		}
 
-		if status.roles.is_full() &&
+		let no_slot_peer = self.default_peers_set_no_slot_peers.contains(&peer_id);
+		let this_peer_reserved_slot: usize = if no_slot_peer { 1 } else { 0 };
+
+		if handshake.roles.is_full() &&
 			self.chain_sync.num_peers() >=
 				self.default_peers_set_num_full +
 					self.default_peers_set_no_slot_connected_peers.len() +
 					this_peer_reserved_slot
 		{
 			log::debug!(target: LOG_TARGET, "Too many full nodes, rejecting {peer_id}");
-			return Err(())
+			return Err(false)
 		}
 
-		if status.roles.is_light() &&
+		// make sure to accept no more than `--in-peers` many full nodes
+		if !no_slot_peer &&
+			handshake.roles.is_full() &&
+			direction.is_inbound() &&
+			self.num_in_peers == self.max_in_peers
+		{
+			log::debug!(target: LOG_TARGET, "All inbound slots have been consumed, rejecting {peer_id}");
+			return Err(false)
+		}
+
+		// make sure that not all slots are occupied by light peers
+		//
+		// `ChainSync` only accepts full peers whereas `SyncingEngine` accepts both full and light
+		// peers. Verify that there is a slot in `SyncingEngine` for the inbound light peer.
+		if handshake.roles.is_light() &&
 			(self.peers.len() - self.chain_sync.num_peers()) >= self.default_peers_set_num_light
 		{
-			// Make sure that not all slots are occupied by light clients.
 			log::debug!(target: LOG_TARGET, "Too many light nodes, rejecting {peer_id}");
-			return Err(())
+			return Err(false)
 		}
 
+		Ok(handshake)
+	}
+
+	/// Called on the first connection between two peers on the default set, after their
+	/// handshake exchange.
+	///
+	/// Returns `Ok` if the handshake is accepted and the peer is added to the list of peers we
+	/// sync from.
+	fn on_sync_peer_connected(
+		&mut self,
+		peer_id: PeerId,
+		status: &BlockAnnouncesHandshake<B>,
+		direction: Direction,
+	) -> Result<(), ()> {
+		log::trace!(target: LOG_TARGET, "New peer {peer_id} {status:?}");
+
 		let peer = Peer {
 			info: ExtendedPeerInfo {
 				roles: status.roles,
@@ -1097,8 +1146,7 @@ where
 			known_blocks: LruHashSet::new(
 				NonZeroUsize::new(MAX_KNOWN_BLOCKS).expect("Constant is nonzero"),
 			),
-			sink,
-			inbound,
+			inbound: direction.is_inbound(),
 		};
 
 		self.chain_sync.new_peer(peer_id, peer.info.best_hash, peer.info.best_number);
@@ -1106,10 +1154,11 @@ where
 		log::debug!(target: LOG_TARGET, "Connected {peer_id}");
 
 		self.peers.insert(peer_id, peer);
+		self.peer_store_handle.set_peer_role(&peer_id, status.roles.into());
 
-		if no_slot_peer {
+		if self.default_peers_set_no_slot_peers.contains(&peer_id) {
 			self.default_peers_set_no_slot_connected_peers.insert(peer_id);
-		} else if inbound && status.roles.is_full() {
+		} else if direction.is_inbound() && status.roles.is_full() {
 			self.num_in_peers += 1;
 		}
 
@@ -1333,7 +1382,7 @@ where
 		best_number: NumberFor<B>,
 		best_hash: B::Hash,
 		genesis_hash: B::Hash,
-	) -> NonDefaultSetConfig {
+	) -> (NonDefaultSetConfig, Box<dyn NotificationService>) {
 		let block_announces_protocol = {
 			let genesis_hash = genesis_hash.as_ref();
 			if let Some(ref fork_id) = fork_id {
@@ -1347,14 +1396,11 @@ where
 			}
 		};
 
-		NonDefaultSetConfig {
-			notifications_protocol: block_announces_protocol.into(),
-			fallback_names: iter::once(
-				format!("/{}/block-announces/1", protocol_id.as_ref()).into(),
-			)
-			.collect(),
-			max_notification_size: MAX_BLOCK_ANNOUNCE_SIZE,
-			handshake: Some(NotificationHandshake::new(BlockAnnouncesHandshake::<B>::build(
+		NonDefaultSetConfig::new(
+			block_announces_protocol.into(),
+			iter::once(format!("/{}/block-announces/1", protocol_id.as_ref()).into()).collect(),
+			MAX_BLOCK_ANNOUNCE_SIZE,
+			Some(NotificationHandshake::new(BlockAnnouncesHandshake::<B>::build(
 				roles,
 				best_number,
 				best_hash,
@@ -1362,13 +1408,13 @@ where
 			))),
 			// NOTE: `set_config` will be ignored by `protocol.rs` as the block announcement
 			// protocol is still hardcoded into the peerset.
-			set_config: SetConfig {
+			SetConfig {
 				in_peers: 0,
 				out_peers: 0,
 				reserved_nodes: Vec::new(),
 				non_reserved_mode: NonReservedPeerMode::Deny,
 			},
-		}
+		)
 	}
 
 	/// Import blocks.
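The other half of the change visible above is how a protocol is constructed: `NonDefaultSetConfig::new` now returns the set config together with the `NotificationService` handle, and the caller keeps the handle for the protocol while the config goes into the network configuration. Below is a minimal sketch of that registration step, assuming the constructor signature used in the hunks above; the helper function and protocol name are placeholders, not part of the patch.

use sc_network::{
	config::{FullNetworkConfiguration, NonDefaultSetConfig, SetConfig},
	NotificationService,
};

/// Hypothetical helper: register a custom notification protocol and return the
/// handle the protocol later uses to send and receive notifications.
fn register_example_protocol(
	net_config: &mut FullNetworkConfiguration,
) -> Box<dyn NotificationService> {
	let (config, notification_service) = NonDefaultSetConfig::new(
		"/example/notifications/1".into(), // protocol name (placeholder)
		Vec::new(),                        // no fallback names
		1024 * 1024,                       // max notification size
		None,                              // no custom handshake
		SetConfig::default(),              // default peer-slot configuration
	);
	net_config.add_notification_protocol(config);
	notification_service
}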
diff --git a/substrate/client/network/sync/src/service/mock.rs b/substrate/client/network/sync/src/service/mock.rs
index 885eb1f8da5..47986a71d01 100644
--- a/substrate/client/network/sync/src/service/mock.rs
+++ b/substrate/client/network/sync/src/service/mock.rs
@@ -27,6 +27,7 @@ use sc_network::{
 	NetworkNotification, NetworkPeers, NetworkRequest, NetworkSyncForkRequest,
 	NotificationSenderError, NotificationSenderT, ReputationChange,
 };
+use sc_network_common::role::ObservedRole;
 use sp_runtime::traits::{Block as BlockT, NumberFor};
 
 use std::collections::HashSet;
@@ -105,6 +106,7 @@ mockall::mock! {
 			peers: Vec<PeerId>
 		) -> Result<(), String>;
 		fn sync_num_connected(&self) -> usize;
+		fn peer_role(&self, peer_id: PeerId, handshake: Vec<u8>) -> Option<ObservedRole>;
 	}
 
 	#[async_trait::async_trait]
diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs
index cfc3cb7af3f..71f13b74a53 100644
--- a/substrate/client/network/test/src/lib.rs
+++ b/substrate/client/network/test/src/lib.rs
@@ -58,7 +58,7 @@ use sc_network::{
 	request_responses::ProtocolConfig as RequestResponseConfig,
 	types::ProtocolName,
 	Multiaddr, NetworkBlock, NetworkService, NetworkStateInfo, NetworkSyncForkRequest,
-	NetworkWorker,
+	NetworkWorker, NotificationService,
 };
 use sc_network_common::role::Roles;
 use sc_network_light::light_client_requests::handler::LightClientRequestHandler;
@@ -239,6 +239,7 @@ pub struct Peer<D, BlockImport> {
 	imported_blocks_stream: Pin<Box<dyn Stream<Item = BlockImportNotification<Block>> + Send>>,
 	finality_notification_stream: Pin<Box<dyn Stream<Item = FinalityNotification<Block>> + Send>>,
 	listen_addr: Multiaddr,
+	notification_services: HashMap<ProtocolName, Box<dyn NotificationService>>,
 }
 
 impl<D, B> Peer<D, B>
@@ -263,8 +264,8 @@ where
 	}
 
 	/// Returns the number of peers we're connected to.
-	pub fn num_peers(&self) -> usize {
-		self.network.num_connected_peers()
+	pub async fn num_peers(&self) -> usize {
+		self.sync_service.status().await.unwrap().num_connected_peers as usize
 	}
 
 	/// Returns the number of downloaded blocks.
@@ -502,10 +503,19 @@ where
 		self.network.service()
 	}
 
+	/// Get `SyncingService`.
 	pub fn sync_service(&self) -> &Arc<SyncingService<Block>> {
 		&self.sync_service
 	}
 
+	/// Take the notification service handle for an enabled protocol.
+	pub fn take_notification_service(
+		&mut self,
+		protocol: &ProtocolName,
+	) -> Option<Box<dyn NotificationService>> {
+		self.notification_services.remove(protocol)
+	}
+
 	/// Get a reference to the network worker.
 	pub fn network(&self) -> &NetworkWorker<Block, <Block as BlockT>::Hash> {
 		&self.network
@@ -778,6 +788,23 @@ pub trait TestNetFactory: Default + Sized + Send {
 		network_config.transport = TransportConfig::MemoryOnly;
 		network_config.listen_addresses = vec![listen_addr.clone()];
 		network_config.allow_non_globals_in_dht = true;
+
+		let (notif_configs, notif_handles): (Vec<_>, Vec<_>) = config
+			.notifications_protocols
+			.into_iter()
+			.map(|p| {
+				let (config, handle) = NonDefaultSetConfig::new(
+					p.clone(),
+					Vec::new(),
+					1024 * 1024,
+					None,
+					Default::default(),
+				);
+
+				(config, (p, handle))
+			})
+			.unzip();
+
 		if let Some(connect_to) = config.connect_to_peers {
 			let addrs = connect_to
 				.iter()
@@ -849,11 +876,16 @@ pub trait TestNetFactory: Default + Sized + Send {
 			protocol_config
 		};
 
+		let peer_store = PeerStore::new(
+			network_config.boot_nodes.iter().map(|bootnode| bootnode.peer_id).collect(),
+		);
+		let peer_store_handle = peer_store.handle();
+		self.spawn_task(peer_store.run().boxed());
+
 		let block_announce_validator = config
 			.block_announce_validator
 			.unwrap_or_else(|| Box::new(DefaultBlockAnnounceValidator));
 
-		let (tx, rx) = sc_utils::mpsc::tracing_unbounded("mpsc_syncing_engine_protocol", 100_000);
 		let (engine, sync_service, block_announce_config) =
 			sc_network_sync::engine::SyncingEngine::new(
 				Roles::from(if config.is_authority { &Role::Authority } else { &Role::Full }),
@@ -869,7 +901,7 @@ pub trait TestNetFactory: Default + Sized + Send {
 				block_relay_params.downloader,
 				state_request_protocol_config.name.clone(),
 				Some(warp_protocol_config.name.clone()),
-				rx,
+				peer_store_handle.clone(),
 			)
 			.unwrap();
 		let sync_service_import_queue = Box::new(sync_service.clone());
@@ -887,22 +919,10 @@ pub trait TestNetFactory: Default + Sized + Send {
 			full_net_config.add_request_response_protocol(config);
 		}
 
-		for protocol in config.notifications_protocols {
-			full_net_config.add_notification_protocol(NonDefaultSetConfig {
-				notifications_protocol: protocol,
-				fallback_names: Vec::new(),
-				max_notification_size: 1024 * 1024,
-				handshake: None,
-				set_config: Default::default(),
-			});
+		for config in notif_configs {
+			full_net_config.add_notification_protocol(config);
 		}
 
-		let peer_store = PeerStore::new(
-			network_config.boot_nodes.iter().map(|bootnode| bootnode.peer_id).collect(),
-		);
-		let peer_store_handle = peer_store.handle();
-		self.spawn_task(peer_store.run().boxed());
-
 		let genesis_hash =
 			client.hash(Zero::zero()).ok().flatten().expect("Genesis block exists; qed");
 		let network = NetworkWorker::new(sc_network::config::Params {
@@ -917,7 +937,6 @@ pub trait TestNetFactory: Default + Sized + Send {
 			fork_id,
 			metrics_registry: None,
 			block_announce_config,
-			tx,
 		})
 		.unwrap();
 
@@ -953,6 +972,7 @@ pub trait TestNetFactory: Default + Sized + Send {
 				backend: Some(backend),
 				imported_blocks_stream,
 				finality_notification_stream,
+				notification_services: HashMap::from_iter(notif_handles.into_iter()),
 				block_import,
 				verifier,
 				network,
@@ -967,20 +987,6 @@ pub trait TestNetFactory: Default + Sized + Send {
 		tokio::spawn(f);
 	}
 
-	/// Polls the testnet until all peers are connected to each other.
-	///
-	/// Must be executed in a task context.
-	fn poll_until_connected(&mut self, cx: &mut FutureContext) -> Poll<()> {
-		self.poll(cx);
-
-		let num_peers = self.peers().len();
-		if self.peers().iter().all(|p| p.num_peers() == num_peers - 1) {
-			return Poll::Ready(())
-		}
-
-		Poll::Pending
-	}
-
 	async fn is_in_sync(&mut self) -> bool {
 		let mut highest = None;
 		let peers = self.peers_mut();
@@ -1058,10 +1064,27 @@ pub trait TestNetFactory: Default + Sized + Send {
 	}
 
 	/// Run the network until all peers are connected to each other.
-	///
-	/// Calls `poll_until_connected` repeatedly with the runtime passed as parameter.
 	async fn run_until_connected(&mut self) {
-		futures::future::poll_fn::<(), _>(|cx| self.poll_until_connected(cx)).await;
+		let num_peers = self.peers().len();
+		let sync_services =
+			self.peers().iter().map(|info| info.sync_service.clone()).collect::<Vec<_>>();
+
+		'outer: loop {
+			for sync_service in &sync_services {
+				if sync_service.status().await.unwrap().num_connected_peers as usize !=
+					num_peers - 1
+				{
+					futures::future::poll_fn::<(), _>(|cx| {
+						self.poll(cx);
+						Poll::Ready(())
+					})
+					.await;
+					continue 'outer
+				}
+			}
+
+			break
+		}
 	}
 
 	/// Polls the testnet. Processes all the pending actions.
diff --git a/substrate/client/network/test/src/service.rs b/substrate/client/network/test/src/service.rs
index 62d7f9f9d1b..800c0d4369c 100644
--- a/substrate/client/network/test/src/service.rs
+++ b/substrate/client/network/test/src/service.rs
@@ -24,8 +24,9 @@ use sc_network::{
 	config::{self, FullNetworkConfiguration, MultiaddrWithPeerId, ProtocolId, TransportConfig},
 	event::Event,
 	peer_store::PeerStore,
-	NetworkEventStream, NetworkNotification, NetworkPeers, NetworkService, NetworkStateInfo,
-	NetworkWorker,
+	service::traits::{NotificationEvent, ValidationResult},
+	NetworkEventStream, NetworkPeers, NetworkService, NetworkStateInfo, NetworkWorker,
+	NotificationService,
 };
 use sc_network_common::role::Roles;
 use sc_network_light::light_client_requests::handler::LightClientRequestHandler;
@@ -116,7 +117,7 @@ impl TestNetworkBuilder {
 		self
 	}
 
-	pub fn build(mut self) -> TestNetwork {
+	pub fn build(mut self) -> (TestNetwork, Option<Box<dyn NotificationService>>) {
 		let client = self.client.as_mut().map_or(
 			Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0),
 			|v| v.clone(),
@@ -183,7 +184,12 @@ impl TestNetworkBuilder {
 			protocol_config
 		};
 
-		let (tx, rx) = sc_utils::mpsc::tracing_unbounded("mpsc_syncing_engine_protocol", 100_000);
+		let peer_store = PeerStore::new(
+			network_config.boot_nodes.iter().map(|bootnode| bootnode.peer_id).collect(),
+		);
+		let peer_store_handle = peer_store.handle();
+		tokio::spawn(peer_store.run().boxed());
+
 		let (engine, chain_sync_service, block_announce_config) = SyncingEngine::new(
 			Roles::from(&config::Role::Full),
 			client.clone(),
@@ -198,24 +204,27 @@ impl TestNetworkBuilder {
 			block_relay_params.downloader,
 			state_request_protocol_config.name.clone(),
 			None,
-			rx,
+			peer_store_handle.clone(),
 		)
 		.unwrap();
 		let mut link = self.link.unwrap_or(Box::new(chain_sync_service.clone()));
 
-		if !self.notification_protocols.is_empty() {
+		let handle = if !self.notification_protocols.is_empty() {
 			for config in self.notification_protocols {
 				full_net_config.add_notification_protocol(config);
 			}
+			None
 		} else {
-			full_net_config.add_notification_protocol(config::NonDefaultSetConfig {
-				notifications_protocol: PROTOCOL_NAME.into(),
-				fallback_names: Vec::new(),
-				max_notification_size: 1024 * 1024,
-				handshake: None,
-				set_config: self.set_config.unwrap_or_default(),
-			});
-		}
+			let (config, handle) = config::NonDefaultSetConfig::new(
+				PROTOCOL_NAME.into(),
+				Vec::new(),
+				1024 * 1024,
+				None,
+				self.set_config.unwrap_or_default(),
+			);
+			full_net_config.add_notification_protocol(config);
+			Some(handle)
+		};
 
 		for config in [
 			block_relay_params.request_response_config,
@@ -225,12 +234,6 @@ impl TestNetworkBuilder {
 			full_net_config.add_request_response_protocol(config);
 		}
 
-		let peer_store = PeerStore::new(
-			network_config.boot_nodes.iter().map(|bootnode| bootnode.peer_id).collect(),
-		);
-		let peer_store_handle = peer_store.handle();
-		tokio::spawn(peer_store.run().boxed());
-
 		let genesis_hash =
 			client.hash(Zero::zero()).ok().flatten().expect("Genesis block exists; qed");
 		let worker = NetworkWorker::<
@@ -248,7 +251,6 @@ impl TestNetworkBuilder {
 			protocol_id,
 			fork_id,
 			metrics_registry: None,
-			tx,
 		})
 		.unwrap();
 
@@ -268,7 +270,7 @@ impl TestNetworkBuilder {
 		});
 		tokio::spawn(engine.run());
 
-		TestNetwork::new(worker)
+		(TestNetwork::new(worker), handle)
 	}
 }
 
@@ -276,18 +278,18 @@ impl TestNetworkBuilder {
 /// The nodes are connected together and have the `PROTOCOL_NAME` protocol registered.
 fn build_nodes_one_proto() -> (
 	Arc<TestNetworkService>,
-	impl Stream<Item = Event>,
+	Option<Box<dyn NotificationService>>,
 	Arc<TestNetworkService>,
-	impl Stream<Item = Event>,
+	Option<Box<dyn NotificationService>>,
 ) {
 	let listen_addr = config::build_multiaddr![Memory(rand::random::<u64>())];
 
-	let (node1, events_stream1) = TestNetworkBuilder::new()
+	let (network1, handle1) = TestNetworkBuilder::new()
 		.with_listen_addresses(vec![listen_addr.clone()])
-		.build()
-		.start_network();
+		.build();
+	let (node1, _) = network1.start_network();
 
-	let (node2, events_stream2) = TestNetworkBuilder::new()
+	let (network2, handle2) = TestNetworkBuilder::new()
 		.with_set_config(config::SetConfig {
 			reserved_nodes: vec![MultiaddrWithPeerId {
 				multiaddr: listen_addr,
@@ -295,10 +297,11 @@ fn build_nodes_one_proto() -> (
 			}],
 			..Default::default()
 		})
-		.build()
-		.start_network();
+		.build();
+
+	let (node2, _) = network2.start_network();
 
-	(node1, events_stream1, node2, events_stream2)
+	(node1, handle1, node2, handle2)
 }
 
 #[tokio::test]
@@ -306,22 +309,15 @@ async fn notifications_state_consistent() {
 	// Runs two nodes and ensures that events are propagated out of the API in a consistent
 	// correct order, which means no notification received on a closed substream.
 
-	let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto();
+	let (node1, handle1, node2, handle2) = build_nodes_one_proto();
+	let (mut handle1, mut handle2) = (handle1.unwrap(), handle2.unwrap());
 
 	// Write some initial notifications that shouldn't get through.
 	for _ in 0..(rand::random::<u8>() % 5) {
-		node1.write_notification(
-			node2.local_peer_id(),
-			PROTOCOL_NAME.into(),
-			b"hello world".to_vec(),
-		);
+		let _ = handle1.send_sync_notification(&node2.local_peer_id(), b"hello world".to_vec());
 	}
 	for _ in 0..(rand::random::<u8>() % 5) {
-		node2.write_notification(
-			node1.local_peer_id(),
-			PROTOCOL_NAME.into(),
-			b"hello world".to_vec(),
-		);
+		let _ = handle2.send_sync_notification(&node1.local_peer_id(), b"hello world".to_vec());
 	}
 
 	// True if we have an active substream from node1 to node2.
@@ -343,18 +339,10 @@ async fn notifications_state_consistent() {
 		// Start by sending a notification from node1 to node2 and vice-versa. Part of the
 		// test consists in ensuring that notifications get ignored if the stream isn't open.
 		if rand::random::<u8>() % 5 >= 3 {
-			node1.write_notification(
-				node2.local_peer_id(),
-				PROTOCOL_NAME.into(),
-				b"hello world".to_vec(),
-			);
+			let _ = handle1.send_sync_notification(&node2.local_peer_id(), b"hello world".to_vec());
 		}
 		if rand::random::<u8>() % 5 >= 3 {
-			node2.write_notification(
-				node1.local_peer_id(),
-				PROTOCOL_NAME.into(),
-				b"hello world".to_vec(),
-			);
+			let _ = handle2.send_sync_notification(&node1.local_peer_id(), b"hello world".to_vec());
 		}
 
 		// Also randomly disconnect the two nodes from time to time.
@@ -367,8 +355,8 @@ async fn notifications_state_consistent() {
 
-		// Grab next event from either `events_stream1` or `events_stream2`.
+		// Grab the next event from either `handle1` or `handle2`.
 		let next_event = {
-			let next1 = events_stream1.next();
-			let next2 = events_stream2.next();
+			let next1 = handle1.next_event();
+			let next2 = handle2.next_event();
 			// We also await on a small timer, otherwise it is possible for the test to wait
 			// forever while nothing at all happens on the network.
 			let continue_test = futures_timer::Delay::new(Duration::from_millis(20));
@@ -383,58 +371,55 @@ async fn notifications_state_consistent() {
 		};
 
 		match next_event {
-			future::Either::Left(Event::NotificationStreamOpened { remote, protocol, .. }) =>
-				if protocol == PROTOCOL_NAME.into() {
-					something_happened = true;
-					assert!(!node1_to_node2_open);
-					node1_to_node2_open = true;
-					assert_eq!(remote, node2.local_peer_id());
-				},
-			future::Either::Right(Event::NotificationStreamOpened { remote, protocol, .. }) =>
-				if protocol == PROTOCOL_NAME.into() {
-					something_happened = true;
-					assert!(!node2_to_node1_open);
-					node2_to_node1_open = true;
-					assert_eq!(remote, node1.local_peer_id());
-				},
-			future::Either::Left(Event::NotificationStreamClosed { remote, protocol, .. }) =>
-				if protocol == PROTOCOL_NAME.into() {
-					assert!(node1_to_node2_open);
-					node1_to_node2_open = false;
-					assert_eq!(remote, node2.local_peer_id());
-				},
-			future::Either::Right(Event::NotificationStreamClosed { remote, protocol, .. }) =>
-				if protocol == PROTOCOL_NAME.into() {
-					assert!(node2_to_node1_open);
-					node2_to_node1_open = false;
-					assert_eq!(remote, node1.local_peer_id());
-				},
-			future::Either::Left(Event::NotificationsReceived { remote, .. }) => {
+			future::Either::Left(NotificationEvent::ValidateInboundSubstream {
+				result_tx, ..
+			}) => {
+				result_tx.send(ValidationResult::Accept).unwrap();
+			},
+			future::Either::Right(NotificationEvent::ValidateInboundSubstream {
+				result_tx,
+				..
+			}) => {
+				result_tx.send(ValidationResult::Accept).unwrap();
+			},
+			future::Either::Left(NotificationEvent::NotificationStreamOpened { peer, .. }) => {
+				something_happened = true;
+				assert!(!node1_to_node2_open);
+				node1_to_node2_open = true;
+				assert_eq!(peer, node2.local_peer_id());
+			},
+			future::Either::Right(NotificationEvent::NotificationStreamOpened { peer, .. }) => {
+				something_happened = true;
+				assert!(!node2_to_node1_open);
+				node2_to_node1_open = true;
+				assert_eq!(peer, node1.local_peer_id());
+			},
+			future::Either::Left(NotificationEvent::NotificationStreamClosed { peer, .. }) => {
 				assert!(node1_to_node2_open);
-				assert_eq!(remote, node2.local_peer_id());
+				node1_to_node2_open = false;
+				assert_eq!(peer, node2.local_peer_id());
+			},
+			future::Either::Right(NotificationEvent::NotificationStreamClosed { peer, .. }) => {
+				assert!(node2_to_node1_open);
+				node2_to_node1_open = false;
+				assert_eq!(peer, node1.local_peer_id());
+			},
+			future::Either::Left(NotificationEvent::NotificationReceived { peer, .. }) => {
+				assert!(node1_to_node2_open);
+				assert_eq!(peer, node2.local_peer_id());
 				if rand::random::<u8>() % 5 >= 4 {
-					node1.write_notification(
-						node2.local_peer_id(),
-						PROTOCOL_NAME.into(),
-						b"hello world".to_vec(),
-					);
+					let _ = handle1
+						.send_sync_notification(&node2.local_peer_id(), b"hello world".to_vec());
 				}
 			},
-			future::Either::Right(Event::NotificationsReceived { remote, .. }) => {
+			future::Either::Right(NotificationEvent::NotificationReceived { peer, .. }) => {
 				assert!(node2_to_node1_open);
-				assert_eq!(remote, node1.local_peer_id());
+				assert_eq!(peer, node1.local_peer_id());
 				if rand::random::<u8>() % 5 >= 4 {
-					node2.write_notification(
-						node1.local_peer_id(),
-						PROTOCOL_NAME.into(),
-						b"hello world".to_vec(),
-					);
+					let _ = handle2
+						.send_sync_notification(&node1.local_peer_id(), b"hello world".to_vec());
 				}
 			},
-
-			// Add new events here.
-			future::Either::Left(Event::Dht(_)) => {},
-			future::Either::Right(Event::Dht(_)) => {},
 		};
 	}
 }
@@ -444,20 +429,29 @@ async fn lots_of_incoming_peers_works() {
 	sp_tracing::try_init_simple();
 	let listen_addr = config::build_multiaddr![Memory(rand::random::<u64>())];
 
-	let (main_node, _) = TestNetworkBuilder::new()
+	let (main_node, handle1) = TestNetworkBuilder::new()
 		.with_listen_addresses(vec![listen_addr.clone()])
 		.with_set_config(config::SetConfig { in_peers: u32::MAX, ..Default::default() })
-		.build()
-		.start_network();
+		.build();
+	let mut handle1 = handle1.unwrap();
+	let (main_node, _) = main_node.start_network();
 
 	let main_node_peer_id = main_node.local_peer_id();
 
+	tokio::spawn(async move {
+		while let Some(event) = handle1.next_event().await {
+			if let NotificationEvent::ValidateInboundSubstream { result_tx, .. } = event {
+				result_tx.send(ValidationResult::Accept).unwrap();
+			}
+		}
+	});
+
 	// We spawn background tasks and push them in this `Vec`. They will all be waited upon before
 	// this test ends.
 	let mut background_tasks_to_wait = Vec::new();
 
 	for _ in 0..32 {
-		let (_dialing_node, event_stream) = TestNetworkBuilder::new()
+		let (dialing_node, handle) = TestNetworkBuilder::new()
 			.with_set_config(config::SetConfig {
 				reserved_nodes: vec![MultiaddrWithPeerId {
 					multiaddr: listen_addr.clone(),
@@ -465,8 +459,9 @@ async fn lots_of_incoming_peers_works() {
 				}],
 				..Default::default()
 			})
-			.build()
-			.start_network();
+			.build();
+		let mut handle = handle.unwrap();
+		let (_, _) = dialing_node.start_network();
 
 		background_tasks_to_wait.push(tokio::spawn(async move {
 			// Create a dummy timer that will "never" fire, and that will be overwritten when we
@@ -474,34 +469,23 @@ async fn lots_of_incoming_peers_works() {
 			// make the code below way more complicated.
 			let mut timer = futures_timer::Delay::new(Duration::from_secs(3600 * 24 * 7)).fuse();
 
-			let mut event_stream = event_stream.fuse();
-			let mut sync_protocol_name = None;
 			loop {
 				futures::select! {
 					_ = timer => {
 						// Test succeeds when timer fires.
 						return;
 					}
-					ev = event_stream.next() => {
-						match ev.unwrap() {
-							Event::NotificationStreamOpened { protocol, remote, .. } => {
-								if let None = sync_protocol_name {
-									sync_protocol_name = Some(protocol.clone());
-								}
-
-								assert_eq!(remote, main_node_peer_id);
-								// Test succeeds after 5 seconds. This timer is here in order to
-								// detect a potential problem after opening.
-								timer = futures_timer::Delay::new(Duration::from_secs(5)).fuse();
-							}
-							Event::NotificationStreamClosed { protocol, .. } => {
-								if Some(protocol) != sync_protocol_name {
-									// Test failed.
-									panic!();
-								}
-							}
-							_ => {}
+					ev = handle.next_event().fuse() => match ev.unwrap() {
+						NotificationEvent::ValidateInboundSubstream { result_tx, .. } => {
+							result_tx.send(ValidationResult::Accept).unwrap();
 						}
+						NotificationEvent::NotificationStreamOpened { peer, .. } => {
+							assert_eq!(peer, main_node_peer_id);
+							// Test succeeds after 5 seconds. This timer is here in order to
+							// detect a potential problem after opening.
+							timer = futures_timer::Delay::new(Duration::from_secs(5)).fuse();
+						}
+						_ => {}
 					}
 				}
 			}
@@ -518,33 +502,27 @@ async fn notifications_back_pressure() {
 
 	const TOTAL_NOTIFS: usize = 10_000;
 
-	let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto();
+	let (_node1, handle1, node2, handle2) = build_nodes_one_proto();
+	let (mut handle1, mut handle2) = (handle1.unwrap(), handle2.unwrap());
 	let node2_id = node2.local_peer_id();
 
 	let receiver = tokio::spawn(async move {
 		let mut received_notifications = 0;
-		let mut sync_protocol_name = None;
 
 		while received_notifications < TOTAL_NOTIFS {
-			match events_stream2.next().await.unwrap() {
-				Event::NotificationStreamOpened { protocol, .. } => {
-					if let None = sync_protocol_name {
-						sync_protocol_name = Some(protocol);
-					}
+			match handle2.next_event().await.unwrap() {
+				NotificationEvent::ValidateInboundSubstream { result_tx, .. } => {
+					result_tx.send(ValidationResult::Accept).unwrap();
 				},
-				Event::NotificationStreamClosed { protocol, .. } => {
-					if Some(&protocol) != sync_protocol_name.as_ref() {
-						panic!()
-					}
+				NotificationEvent::NotificationReceived { notification, .. } => {
+					assert_eq!(
+						notification,
+						format!("hello #{}", received_notifications).into_bytes()
+					);
+					received_notifications += 1;
 				},
-				Event::NotificationsReceived { messages, .. } =>
-					for message in messages {
-						assert_eq!(message.0, PROTOCOL_NAME.into());
-						assert_eq!(message.1, format!("hello #{}", received_notifications));
-						received_notifications += 1;
-					},
 				_ => {},
-			};
+			}
 
 			if rand::random::<u8>() < 2 {
 				tokio::time::sleep(Duration::from_millis(rand::random::<u64>() % 750)).await;
@@ -554,20 +532,20 @@ async fn notifications_back_pressure() {
 
 	// Wait for the `NotificationStreamOpened`.
 	loop {
-		match events_stream1.next().await.unwrap() {
-			Event::NotificationStreamOpened { .. } => break,
+		match handle1.next_event().await.unwrap() {
+			NotificationEvent::ValidateInboundSubstream { result_tx, .. } => {
+				result_tx.send(ValidationResult::Accept).unwrap();
+			},
+			NotificationEvent::NotificationStreamOpened { .. } => break,
 			_ => {},
 		};
 	}
 
 	// Sending!
 	for num in 0..TOTAL_NOTIFS {
-		let notif = node1.notification_sender(node2_id, PROTOCOL_NAME.into()).unwrap();
-		notif
-			.ready()
+		handle1
+			.send_async_notification(&node2_id, format!("hello #{}", num).into_bytes())
 			.await
-			.unwrap()
-			.send(format!("hello #{}", num).into_bytes())
 			.unwrap();
 	}
 
@@ -576,28 +554,31 @@ async fn notifications_back_pressure() {
 
 #[tokio::test]
 async fn fallback_name_working() {
+	sp_tracing::try_init_simple();
 	// Node 1 supports the protocols "new" and "old". Node 2 only supports "old". Checks whether
 	// they can connect.
 	const NEW_PROTOCOL_NAME: &str = "/new-shiny-protocol-that-isnt-PROTOCOL_NAME";
 
 	let listen_addr = config::build_multiaddr![Memory(rand::random::<u64>())];
-	let (node1, mut events_stream1) = TestNetworkBuilder::new()
-		.with_notification_protocol(config::NonDefaultSetConfig {
-			notifications_protocol: NEW_PROTOCOL_NAME.into(),
-			fallback_names: vec![PROTOCOL_NAME.into()],
-			max_notification_size: 1024 * 1024,
-			handshake: None,
-			set_config: Default::default(),
-		})
+	let (config, mut handle1) = config::NonDefaultSetConfig::new(
+		NEW_PROTOCOL_NAME.into(),
+		vec![PROTOCOL_NAME.into()],
+		1024 * 1024,
+		None,
+		Default::default(),
+	);
+	let (network1, _) = TestNetworkBuilder::new()
+		.with_notification_protocol(config)
 		.with_config(config::NetworkConfiguration {
 			listen_addresses: vec![listen_addr.clone()],
 			transport: TransportConfig::MemoryOnly,
 			..config::NetworkConfiguration::new_local()
 		})
-		.build()
-		.start_network();
+		.build();
 
-	let (_, mut events_stream2) = TestNetworkBuilder::new()
+	let (node1, _) = network1.start_network();
+
+	let (network2, handle2) = TestNetworkBuilder::new()
 		.with_set_config(config::SetConfig {
 			reserved_nodes: vec![MultiaddrWithPeerId {
 				multiaddr: listen_addr,
@@ -605,34 +586,38 @@ async fn fallback_name_working() {
 			}],
 			..Default::default()
 		})
-		.build()
-		.start_network();
+		.build();
+	let mut handle2 = handle2.unwrap();
+	let _ = network2.start_network();
 
 	let receiver = tokio::spawn(async move {
 		// Wait for the `NotificationStreamOpened`.
 		loop {
-			match events_stream2.next().await.unwrap() {
-				Event::NotificationStreamOpened { protocol, negotiated_fallback, .. } => {
-					assert_eq!(protocol, PROTOCOL_NAME.into());
+			match handle2.next_event().await.unwrap() {
+				NotificationEvent::ValidateInboundSubstream { result_tx, .. } => {
+					result_tx.send(ValidationResult::Accept).unwrap();
+				},
+				NotificationEvent::NotificationStreamOpened { negotiated_fallback, .. } => {
 					assert_eq!(negotiated_fallback, None);
 					break
 				},
 				_ => {},
-			};
+			}
 		}
 	});
 
 	// Wait for the `NotificationStreamOpened`.
 	loop {
-		match events_stream1.next().await.unwrap() {
-			Event::NotificationStreamOpened { protocol, negotiated_fallback, .. }
-				if protocol == NEW_PROTOCOL_NAME.into() =>
-			{
+		match handle1.next_event().await.unwrap() {
+			NotificationEvent::ValidateInboundSubstream { result_tx, .. } => {
+				result_tx.send(ValidationResult::Accept).unwrap();
+			},
+			NotificationEvent::NotificationStreamOpened { negotiated_fallback, .. } => {
 				assert_eq!(negotiated_fallback, Some(PROTOCOL_NAME.into()));
 				break
 			},
 			_ => {},
-		};
+		}
 	}
 
 	receiver.await.unwrap();
@@ -655,6 +640,7 @@ async fn ensure_listen_addresses_consistent_with_transport_memory() {
 			)
 		})
 		.build()
+		.0
 		.start_network();
 }
 
@@ -674,6 +660,7 @@ async fn ensure_listen_addresses_consistent_with_transport_not_memory() {
 			)
 		})
 		.build()
+		.0
 		.start_network();
 }
 
@@ -699,6 +686,7 @@ async fn ensure_boot_node_addresses_consistent_with_transport_memory() {
 			)
 		})
 		.build()
+		.0
 		.start_network();
 }
 
@@ -723,6 +711,7 @@ async fn ensure_boot_node_addresses_consistent_with_transport_not_memory() {
 			)
 		})
 		.build()
+		.0
 		.start_network();
 }
 
@@ -751,6 +740,7 @@ async fn ensure_reserved_node_addresses_consistent_with_transport_memory() {
 			)
 		})
 		.build()
+		.0
 		.start_network();
 }
 
@@ -778,6 +768,7 @@ async fn ensure_reserved_node_addresses_consistent_with_transport_not_memory() {
 			)
 		})
 		.build()
+		.0
 		.start_network();
 }
 
@@ -800,6 +791,7 @@ async fn ensure_public_addresses_consistent_with_transport_memory() {
 			)
 		})
 		.build()
+		.0
 		.start_network();
 }
 
@@ -821,5 +813,6 @@ async fn ensure_public_addresses_consistent_with_transport_not_memory() {
 			)
 		})
 		.build()
+		.0
 		.start_network();
 }
diff --git a/substrate/client/network/test/src/sync.rs b/substrate/client/network/test/src/sync.rs
index 389177b4aaf..f2be662ada1 100644
--- a/substrate/client/network/test/src/sync.rs
+++ b/substrate/client/network/test/src/sync.rs
@@ -44,16 +44,16 @@ async fn sync_peers_works() {
 	sp_tracing::try_init_simple();
 	let mut net = TestNet::new(3);
 
-	futures::future::poll_fn::<(), _>(|cx| {
-		net.poll(cx);
-		for peer in 0..3 {
-			if net.peer(peer).num_peers() != 2 {
-				return Poll::Pending
-			}
-		}
-		Poll::Ready(())
-	})
-	.await;
+	while net.peer(0).num_peers().await != 2 ||
+		net.peer(1).num_peers().await != 2 ||
+		net.peer(2).num_peers().await != 2
+	{
+		futures::future::poll_fn::<(), _>(|cx| {
+			net.poll(cx);
+			Poll::Ready(())
+		})
+		.await;
+	}
 }
 
 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@@ -412,15 +412,13 @@ async fn can_sync_small_non_best_forks() {
 	assert!(net.peer(1).client().header(small_hash).unwrap().is_none());
 
 	// poll until the two nodes connect, otherwise announcing the block will not work
-	futures::future::poll_fn::<(), _>(|cx| {
-		net.poll(cx);
-		if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 {
-			Poll::Pending
-		} else {
+	while net.peer(0).num_peers().await == 0 || net.peer(1).num_peers().await == 0 {
+		futures::future::poll_fn::<(), _>(|cx| {
+			net.poll(cx);
 			Poll::Ready(())
-		}
-	})
-	.await;
+		})
+		.await;
+	}
 
 	// synchronization: 0 synced to longer chain and 1 didn't sync to small chain.
 
@@ -465,6 +463,7 @@ async fn can_sync_forks_ahead_of_the_best_chain() {
 	net.peer(1).push_blocks(1, false);
 
 	net.run_until_connected().await;
+
 	// Peer 0 is on 2-block fork which is announced with is_best=false
 	let fork_hash = net
 		.peer(0)
@@ -516,15 +515,13 @@ async fn can_sync_explicit_forks() {
 	assert!(net.peer(1).client().header(small_hash).unwrap().is_none());
 
 	// poll until the two nodes connect, otherwise announcing the block will not work
-	futures::future::poll_fn::<(), _>(|cx| {
-		net.poll(cx);
-		if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 {
-			Poll::Pending
-		} else {
+	while net.peer(0).num_peers().await == 0 || net.peer(1).num_peers().await == 0 {
+		futures::future::poll_fn::<(), _>(|cx| {
+			net.poll(cx);
 			Poll::Ready(())
-		}
-	})
-	.await;
+		})
+		.await;
+	}
 
 	// synchronization: 0 synced to longer chain and 1 didn't sync to small chain.
 
@@ -613,15 +610,14 @@ async fn full_sync_requires_block_body() {
 
 	net.peer(0).push_headers(1);
 	// Wait for nodes to connect
-	futures::future::poll_fn::<(), _>(|cx| {
-		net.poll(cx);
-		if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 {
-			Poll::Pending
-		} else {
+	while net.peer(0).num_peers().await == 0 || net.peer(1).num_peers().await == 0 {
+		futures::future::poll_fn::<(), _>(|cx| {
+			net.poll(cx);
 			Poll::Ready(())
-		}
-	})
-	.await;
+		})
+		.await;
+	}
+
 	net.run_until_idle().await;
 	assert_eq!(net.peer(1).client.info().best_number, 0);
 }
@@ -917,18 +913,16 @@ async fn block_announce_data_is_propagated() {
 	});
 
 	// Wait until peer 1 is connected to both nodes.
-	futures::future::poll_fn::<(), _>(|cx| {
-		net.poll(cx);
-		if net.peer(1).num_peers() == 2 &&
-			net.peer(0).num_peers() == 1 &&
-			net.peer(2).num_peers() == 1
-		{
+	while net.peer(1).num_peers().await != 2 ||
+		net.peer(0).num_peers().await != 1 ||
+		net.peer(2).num_peers().await != 1
+	{
+		futures::future::poll_fn::<(), _>(|cx| {
+			net.poll(cx);
 			Poll::Ready(())
-		} else {
-			Poll::Pending
-		}
-	})
-	.await;
+		})
+		.await;
+	}
 
 	let block_hash = net
 		.peer(0)
@@ -1010,7 +1004,7 @@ async fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() {
 		tokio::time::sleep(tokio::time::Duration::from_secs(10)).await;
 		net.peer(0).push_blocks(1, false);
 		net.run_until_sync().await;
-		assert_eq!(1, net.peer(0).num_peers());
+		assert_eq!(1, net.peer(0).num_peers().await);
 	}
 
 	let hashof10 = hashes[9];
diff --git a/substrate/client/network/transactions/src/lib.rs b/substrate/client/network/transactions/src/lib.rs
index 1b97d4b96c9..9758ea4c4fc 100644
--- a/substrate/client/network/transactions/src/lib.rs
+++ b/substrate/client/network/transactions/src/lib.rs
@@ -21,8 +21,8 @@
 //! Usage:
 //!
 //! - Use [`TransactionsHandlerPrototype::new`] to create a prototype.
-//! - Pass the return value of [`TransactionsHandlerPrototype::set_config`] to the network
-//! configuration as an extra peers set.
+//! - Pass the `NonDefaultSetConfig` returned from [`TransactionsHandlerPrototype::new`] to the
+//!   network configuration as an extra peers set.
 //! - Use [`TransactionsHandlerPrototype::build`] then [`TransactionsHandler::run`] to obtain a
 //! `Future` that processes transactions.
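A schematic of the three usage steps listed above, assuming the `new` signature introduced later in this diff; the helper function, the argument names and the `sp_core::H256` genesis hash type are placeholders, and the `build`/`run` step is only indicated in comments because its full parameter list is not shown here.

use sc_network::config::{FullNetworkConfiguration, ProtocolId};
use sc_network_transactions::TransactionsHandlerPrototype;
use sp_core::H256;

/// Hypothetical wiring of the usage steps above; a real node service does the same
/// with its own protocol id, genesis hash and network configuration.
fn register_transactions_protocol(
	net_config: &mut FullNetworkConfiguration,
	protocol_id: ProtocolId,
	genesis_hash: H256,
	fork_id: Option<&str>,
) -> TransactionsHandlerPrototype {
	// Step 1: create the prototype; it now also returns the peers-set config.
	let (prototype, transactions_config) =
		TransactionsHandlerPrototype::new(protocol_id, genesis_hash, fork_id);

	// Step 2: register the returned `NonDefaultSetConfig` as an extra peers set.
	net_config.add_notification_protocol(transactions_config);

	// Step 3, later in the service: `prototype.build(..)` gives the handler and its
	// controller, and `TransactionsHandler::run` is spawned as a background task.
	prototype
}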
 
@@ -37,7 +37,7 @@ use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64};
 use sc_network::{
 	config::{NonDefaultSetConfig, NonReservedPeerMode, ProtocolId, SetConfig},
 	error,
-	event::Event,
+	service::traits::{NotificationEvent, NotificationService, ValidationResult},
 	types::ProtocolName,
 	utils::{interval, LruHashSet},
 	NetworkEventStream, NetworkNotification, NetworkPeers,
@@ -115,8 +115,11 @@ impl<H: ExHashT> Future for PendingTransaction<H> {
 
 /// Prototype for a [`TransactionsHandler`].
 pub struct TransactionsHandlerPrototype {
+	/// Name of the transaction protocol.
 	protocol_name: ProtocolName,
-	fallback_protocol_names: Vec<ProtocolName>,
+
+	/// Handle that is used to communicate with `sc_network::Notifications`.
+	notification_service: Box<dyn NotificationService>,
 }
 
 impl TransactionsHandlerPrototype {
@@ -125,35 +128,28 @@ impl TransactionsHandlerPrototype {
 		protocol_id: ProtocolId,
 		genesis_hash: Hash,
 		fork_id: Option<&str>,
-	) -> Self {
+	) -> (Self, NonDefaultSetConfig) {
 		let genesis_hash = genesis_hash.as_ref();
-		let protocol_name = if let Some(fork_id) = fork_id {
+		let protocol_name: ProtocolName = if let Some(fork_id) = fork_id {
 			format!("/{}/{}/transactions/1", array_bytes::bytes2hex("", genesis_hash), fork_id)
 		} else {
 			format!("/{}/transactions/1", array_bytes::bytes2hex("", genesis_hash))
-		};
-		let legacy_protocol_name = format!("/{}/transactions/1", protocol_id.as_ref());
-
-		Self {
-			protocol_name: protocol_name.into(),
-			fallback_protocol_names: iter::once(legacy_protocol_name.into()).collect(),
 		}
-	}
-
-	/// Returns the configuration of the set to put in the network configuration.
-	pub fn set_config(&self) -> NonDefaultSetConfig {
-		NonDefaultSetConfig {
-			notifications_protocol: self.protocol_name.clone(),
-			fallback_names: self.fallback_protocol_names.clone(),
-			max_notification_size: MAX_TRANSACTIONS_SIZE,
-			handshake: None,
-			set_config: SetConfig {
+		.into();
+		let (config, notification_service) = NonDefaultSetConfig::new(
+			protocol_name.clone(),
+			vec![format!("/{}/transactions/1", protocol_id.as_ref()).into()],
+			MAX_TRANSACTIONS_SIZE,
+			None,
+			SetConfig {
 				in_peers: 0,
 				out_peers: 0,
 				reserved_nodes: Vec::new(),
 				non_reserved_mode: NonReservedPeerMode::Deny,
 			},
-		}
+		);
+
+		(Self { protocol_name, notification_service }, config)
 	}
 
 	/// Turns the prototype into the actual handler. Returns a controller that allows controlling
@@ -173,12 +169,12 @@ impl TransactionsHandlerPrototype {
 		transaction_pool: Arc<dyn TransactionPool<H, B>>,
 		metrics_registry: Option<&Registry>,
 	) -> error::Result<(TransactionsHandler<B, H, N, S>, TransactionsHandlerController<H>)> {
-		let net_event_stream = network.event_stream("transactions-handler-net");
 		let sync_event_stream = sync.event_stream("transactions-handler-sync");
 		let (to_handler, from_controller) = tracing_unbounded("mpsc_transactions_handler", 100_000);
 
 		let handler = TransactionsHandler {
 			protocol_name: self.protocol_name,
+			notification_service: self.notification_service,
 			propagate_timeout: (Box::pin(interval(PROPAGATE_TIMEOUT))
 				as Pin<Box<dyn Stream<Item = ()> + Send>>)
 				.fuse(),
@@ -186,7 +182,6 @@ impl TransactionsHandlerPrototype {
 			pending_transactions_peers: HashMap::new(),
 			network,
 			sync,
-			net_event_stream: net_event_stream.fuse(),
 			sync_event_stream: sync_event_stream.fuse(),
 			peers: HashMap::new(),
 			transaction_pool,
@@ -253,8 +248,6 @@ pub struct TransactionsHandler<
 	network: N,
 	/// Syncing service.
 	sync: S,
-	/// Stream of networking events.
-	net_event_stream: stream::Fuse<Pin<Box<dyn Stream<Item = Event> + Send>>>,
 	/// Receiver for syncing-related events.
 	sync_event_stream: stream::Fuse<Pin<Box<dyn Stream<Item = SyncEvent> + Send>>>,
 	// All connected peers
@@ -263,6 +256,8 @@ pub struct TransactionsHandler<
 	from_controller: TracingUnboundedReceiver<ToHandler<H>>,
 	/// Prometheus metrics.
 	metrics: Option<Metrics>,
+	/// Handle that is used to communicate with `sc_network::Notifications`.
+	notification_service: Box<dyn NotificationService>,
 }
 
 /// Peer information
@@ -295,14 +290,6 @@ where
 						warn!(target: "sub-libp2p", "Inconsistent state, no peers for pending transaction!");
 					}
 				},
-				network_event = self.net_event_stream.next() => {
-					if let Some(network_event) = network_event {
-						self.handle_network_event(network_event).await;
-					} else {
-						// Networking has seemingly closed. Closing as well.
-						return;
-					}
-				},
 				sync_event = self.sync_event_stream.next() => {
 					if let Some(sync_event) = sync_event {
 						self.handle_sync_event(sync_event);
@@ -317,10 +304,61 @@ where
 						ToHandler::PropagateTransactions => self.propagate_transactions(),
 					}
 				},
+				event = self.notification_service.next_event().fuse() => {
+					if let Some(event) = event {
+						self.handle_notification_event(event)
+					} else {
+						// `Notifications` has seemingly closed. Closing as well.
+						return
+					}
+				}
 			}
 		}
 	}
 
+	fn handle_notification_event(&mut self, event: NotificationEvent) {
+		match event {
+			NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx, .. } => {
+				// only accept peers whose role can be determined
+				let result = self
+					.network
+					.peer_role(peer, handshake)
+					.map_or(ValidationResult::Reject, |_| ValidationResult::Accept);
+				let _ = result_tx.send(result);
+			},
+			NotificationEvent::NotificationStreamOpened { peer, handshake, .. } => {
+				let Some(role) = self.network.peer_role(peer, handshake) else {
+					log::debug!(target: "sub-libp2p", "role for {peer} couldn't be determined");
+					return
+				};
+
+				let _was_in = self.peers.insert(
+					peer,
+					Peer {
+						known_transactions: LruHashSet::new(
+							NonZeroUsize::new(MAX_KNOWN_TRANSACTIONS).expect("Constant is nonzero"),
+						),
+						role,
+					},
+				);
+				debug_assert!(_was_in.is_none());
+			},
+			NotificationEvent::NotificationStreamClosed { peer } => {
+				let _peer = self.peers.remove(&peer);
+				debug_assert!(_peer.is_some());
+			},
+			NotificationEvent::NotificationReceived { peer, notification } => {
+				if let Ok(m) =
+					<Transactions<B::Extrinsic> as Decode>::decode(&mut notification.as_ref())
+				{
+					self.on_transactions(peer, m);
+				} else {
+					warn!(target: "sub-libp2p", "Failed to decode transactions list");
+				}
+			},
+		}
+	}
+
 	fn handle_sync_event(&mut self, event: SyncEvent) {
 		match event {
 			SyncEvent::PeerConnected(remote) => {
@@ -346,51 +384,6 @@ where
 		}
 	}
 
-	async fn handle_network_event(&mut self, event: Event) {
-		match event {
-			Event::Dht(_) => {},
-			Event::NotificationStreamOpened { remote, protocol, role, .. }
-				if protocol == self.protocol_name =>
-			{
-				let _was_in = self.peers.insert(
-					remote,
-					Peer {
-						known_transactions: LruHashSet::new(
-							NonZeroUsize::new(MAX_KNOWN_TRANSACTIONS).expect("Constant is nonzero"),
-						),
-						role,
-					},
-				);
-				debug_assert!(_was_in.is_none());
-			},
-			Event::NotificationStreamClosed { remote, protocol }
-				if protocol == self.protocol_name =>
-			{
-				let _peer = self.peers.remove(&remote);
-				debug_assert!(_peer.is_some());
-			},
-
-			Event::NotificationsReceived { remote, messages } => {
-				for (protocol, message) in messages {
-					if protocol != self.protocol_name {
-						continue
-					}
-
-					if let Ok(m) =
-						<Transactions<B::Extrinsic> as Decode>::decode(&mut message.as_ref())
-					{
-						self.on_transactions(remote, m);
-					} else {
-						warn!(target: "sub-libp2p", "Failed to decode transactions list");
-					}
-				}
-			},
-
-			// Not our concern.
-			Event::NotificationStreamOpened { .. } | Event::NotificationStreamClosed { .. } => {},
-		}
-	}
-
 	/// Called when peer sends us new transactions
 	fn on_transactions(&mut self, who: PeerId, transactions: Transactions<B::Extrinsic>) {
 		// Accept transactions only when node is not major syncing
@@ -482,8 +475,7 @@ where
 					propagated_to.entry(hash).or_default().push(who.to_base58());
 				}
 				trace!(target: "sync", "Sending {} transactions to {}", to_send.len(), who);
-				self.network
-					.write_notification(*who, self.protocol_name.clone(), to_send.encode());
+				let _ = self.notification_service.send_sync_notification(who, to_send.encode());
 			}
 		}
 
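The select loop and `handle_notification_event` above are the template every notification protocol now follows: the protocol owns its `NotificationService`, polls `next_event()`, answers inbound-substream validation requests and sends notifications directly instead of routing them through `NetworkWorker`. A minimal sketch of that shape is given below; `MyProtocol` and the exact import paths are assumptions for illustration, while the trait methods and the `NotificationEvent`/`ValidationResult` types are the ones used in this patch.

    // Illustrative sketch only: `MyProtocol` is hypothetical and the import path
    // may differ slightly from the re-exports provided by `sc-network`.
    use sc_network::service::traits::{NotificationEvent, NotificationService, ValidationResult};

    struct MyProtocol {
        notification_service: Box<dyn NotificationService>,
    }

    impl MyProtocol {
        async fn run(mut self) {
            while let Some(event) = self.notification_service.next_event().await {
                match event {
                    // Accept or reject the inbound substream before the local
                    // handshake is sent back to the remote peer.
                    NotificationEvent::ValidateInboundSubstream { result_tx, .. } => {
                        let _ = result_tx.send(ValidationResult::Accept);
                    },
                    NotificationEvent::NotificationStreamOpened { peer, .. } => {
                        // The substream is fully open; notifications can now be sent.
                        let _ = self
                            .notification_service
                            .send_sync_notification(&peer, b"hello".to_vec());
                    },
                    NotificationEvent::NotificationStreamClosed { peer } => {
                        // Drop any per-peer state kept for `peer`.
                        let _ = peer;
                    },
                    NotificationEvent::NotificationReceived { peer, notification } => {
                        // Decode and handle the protocol-specific payload.
                        let _ = (peer, notification);
                    },
                }
            }
        }
    }
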
diff --git a/substrate/client/offchain/src/api.rs b/substrate/client/offchain/src/api.rs
index c7df5784d32..2901bab2f26 100644
--- a/substrate/client/offchain/src/api.rs
+++ b/substrate/client/offchain/src/api.rs
@@ -223,7 +223,7 @@ mod tests {
 	use sc_client_db::offchain::LocalStorage;
 	use sc_network::{
 		config::MultiaddrWithPeerId, types::ProtocolName, NetworkPeers, NetworkStateInfo,
-		ReputationChange,
+		ObservedRole, ReputationChange,
 	};
 	use sp_core::offchain::{storage::OffchainDb, DbExternalities, Externalities, StorageKind};
 	use std::time::SystemTime;
@@ -294,6 +294,10 @@ mod tests {
 		fn sync_num_connected(&self) -> usize {
 			unimplemented!();
 		}
+
+		fn peer_role(&self, _peer_id: PeerId, _handshake: Vec<u8>) -> Option<ObservedRole> {
+			None
+		}
 	}
 
 	impl NetworkStateInfo for TestNetwork {
diff --git a/substrate/client/offchain/src/lib.rs b/substrate/client/offchain/src/lib.rs
index 756ab77ff94..8bcfa66a5af 100644
--- a/substrate/client/offchain/src/lib.rs
+++ b/substrate/client/offchain/src/lib.rs
@@ -330,7 +330,9 @@ mod tests {
 	use libp2p::{Multiaddr, PeerId};
 	use sc_block_builder::BlockBuilderBuilder;
 	use sc_client_api::Backend as _;
-	use sc_network::{config::MultiaddrWithPeerId, types::ProtocolName, ReputationChange};
+	use sc_network::{
+		config::MultiaddrWithPeerId, types::ProtocolName, ObservedRole, ReputationChange,
+	};
 	use sc_transaction_pool::BasicPool;
 	use sc_transaction_pool_api::{InPoolTransaction, TransactionPool};
 	use sp_consensus::BlockOrigin;
@@ -423,6 +425,10 @@ mod tests {
 		fn sync_num_connected(&self) -> usize {
 			unimplemented!();
 		}
+
+		fn peer_role(&self, _peer_id: PeerId, _handshake: Vec<u8>) -> Option<ObservedRole> {
+			None
+		}
 	}
 
 	#[test]
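The test doubles above only stub the new `NetworkPeers::peer_role` method and return `None`. For orientation, one plausible way to derive a role from a handshake is sketched below, assuming the handshake begins with the SCALE-encoded `Roles` bitflags; only the `peer_role(PeerId, Vec<u8>) -> Option<ObservedRole>` signature is taken from this patch, and a real implementation may also fall back to other sources (such as previously stored peer information) when decoding fails.

    // Hypothetical helper: derives an `ObservedRole` from a notification handshake,
    // assuming the handshake starts with the SCALE-encoded `Roles` bitflags.
    use codec::DecodeAll;
    use sc_network::ObservedRole;
    use sc_network_common::role::Roles;

    fn role_from_handshake(handshake: Vec<u8>) -> Option<ObservedRole> {
        let roles = Roles::decode_all(&mut &handshake[..]).ok()?;
        Some(if roles.contains(Roles::AUTHORITY) {
            ObservedRole::Authority
        } else if roles.contains(Roles::FULL) {
            ObservedRole::Full
        } else {
            ObservedRole::Light
        })
    }
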
diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs
index 25f998385ba..d078f44f198 100644
--- a/substrate/client/service/src/builder.rs
+++ b/substrate/client/service/src/builder.rs
@@ -753,6 +753,11 @@ where
 	}
 
 	let protocol_id = config.protocol_id();
+	let genesis_hash = client
+		.block_hash(0u32.into())
+		.ok()
+		.flatten()
+		.expect("Genesis block exists; qed");
 
 	let block_announce_validator = if let Some(f) = block_announce_validator_builder {
 		f(client.clone())
@@ -802,11 +807,7 @@ where
 			// Allow both outgoing and incoming requests.
 			let (handler, protocol_config) = WarpSyncRequestHandler::new(
 				protocol_id.clone(),
-				client
-					.block_hash(0u32.into())
-					.ok()
-					.flatten()
-					.expect("Genesis block exists; qed"),
+				genesis_hash,
 				config.chain_spec.fork_id(),
 				warp_with_provider.clone(),
 			);
@@ -845,17 +846,13 @@ where
 	}
 
-	// create transactions protocol and add it to the list of supported protocols of
-	// `network_params`
+	// create transactions protocol and add it to the list of supported protocols of `net_config`
-	let transactions_handler_proto = sc_network_transactions::TransactionsHandlerPrototype::new(
-		protocol_id.clone(),
-		client
-			.block_hash(0u32.into())
-			.ok()
-			.flatten()
-			.expect("Genesis block exists; qed"),
-		config.chain_spec.fork_id(),
-	);
-	net_config.add_notification_protocol(transactions_handler_proto.set_config());
+	let (transactions_handler_proto, transactions_config) =
+		sc_network_transactions::TransactionsHandlerPrototype::new(
+			protocol_id.clone(),
+			genesis_hash,
+			config.chain_spec.fork_id(),
+		);
+	net_config.add_notification_protocol(transactions_config);
 
 	// Create `PeerStore` and initialize it with bootnode peer ids.
 	let peer_store = PeerStore::new(
@@ -869,7 +866,6 @@ where
 	let peer_store_handle = peer_store.handle();
 	spawn_handle.spawn("peer-store", Some("networking"), peer_store.run());
 
-	let (tx, rx) = sc_utils::mpsc::tracing_unbounded("mpsc_syncing_engine_protocol", 100_000);
 	let (engine, sync_service, block_announce_config) = SyncingEngine::new(
 		Roles::from(&config.role),
 		client.clone(),
@@ -884,7 +880,7 @@ where
 		block_downloader,
 		state_request_protocol_name,
 		warp_request_protocol_name,
-		rx,
+		peer_store_handle.clone(),
 	)?;
 	let sync_service_import_queue = sync_service.clone();
 	let sync_service = Arc::new(sync_service);
@@ -905,7 +901,6 @@ where
 		fork_id: config.chain_spec.fork_id().map(ToOwned::to_owned),
 		metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()),
 		block_announce_config,
-		tx,
 	};
 
 	let has_bootnodes = !network_params.network_config.network_config.boot_nodes.is_empty();
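From the service builder's perspective, every notification protocol is now wired the same way: build the set config, which also yields the protocol's `NotificationService` half, and hand the config half to the network configuration. The sketch below condenses that pattern for a hypothetical protocol; the protocol name, peer limits and the `FullNetworkConfiguration` parameter are illustrative choices, not values from this patch.

    use sc_network::{
        config::{FullNetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, SetConfig},
        service::traits::NotificationService,
    };

    fn register_example_protocol(
        net_config: &mut FullNetworkConfiguration,
    ) -> Box<dyn NotificationService> {
        let (config, notification_service) = NonDefaultSetConfig::new(
            "/example/notifications/1".into(), // main protocol name
            Vec::new(),                        // no fallback protocol names
            1024 * 1024,                       // maximum notification size
            None,                              // use the default handshake
            SetConfig {
                in_peers: 25,
                out_peers: 25,
                reserved_nodes: Vec::new(),
                non_reserved_mode: NonReservedPeerMode::Accept,
            },
        );
        // The config half goes to the network, the service half stays with the protocol.
        net_config.add_notification_protocol(config);
        notification_service
    }
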
diff --git a/substrate/frame/contracts/fixtures/data/account_reentrance_count_call.wat b/substrate/frame/contracts/fixtures/data/account_reentrance_count_call.wat
index ab678906648..e6d6ba8bb81 100644
--- a/substrate/frame/contracts/fixtures/data/account_reentrance_count_call.wat
+++ b/substrate/frame/contracts/fixtures/data/account_reentrance_count_call.wat
@@ -14,7 +14,7 @@
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
diff --git a/substrate/frame/contracts/fixtures/data/add_remove_delegate_dependency.wat b/substrate/frame/contracts/fixtures/data/add_remove_delegate_dependency.wat
index ef456b6d620..dac7736244d 100644
--- a/substrate/frame/contracts/fixtures/data/add_remove_delegate_dependency.wat
+++ b/substrate/frame/contracts/fixtures/data/add_remove_delegate_dependency.wat
@@ -16,7 +16,7 @@
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
@@ -44,8 +44,8 @@
 		;; [0..4) - size of the call
 		;; [4..8) - action to perform
 		;; [8..42) - code hash of the callee
-		(set_local $action (i32.load (i32.const 4)))
-		(set_local $code_hash_ptr (i32.const 8))
+		(local.set $action (i32.load (i32.const 4)))
+		(local.set $code_hash_ptr (i32.const 8))
 
 		;; Assert input size == 36 (4 for action + 32 for code_hash).
 		(call $assert
@@ -56,25 +56,25 @@
 		)
 
 		;; Call add_delegate_dependency when action == 1.
-		(if (i32.eq (get_local $action) (i32.const 1))
+		(if (i32.eq (local.get $action) (i32.const 1))
 		    (then
-				(call $add_delegate_dependency (get_local $code_hash_ptr))
+				(call $add_delegate_dependency (local.get $code_hash_ptr))
 			)
 			(else)
 		)
 
 		;; Call remove_delegate_dependency when action == 2.
-		(if (i32.eq (get_local $action) (i32.const 2))
+		(if (i32.eq (local.get $action) (i32.const 2))
 		    (then
 				(call $remove_delegate_dependency
-					(get_local $code_hash_ptr)
+					(local.get $code_hash_ptr)
 				)
 			)
 			(else)
 		)
 
 		;; Call terminate when action == 3.
-		(if (i32.eq (get_local $action) (i32.const 3))
+		(if (i32.eq (local.get $action) (i32.const 3))
 		    (then
 				(call $terminate
 					(i32.const 100)	;; Pointer to beneficiary address
diff --git a/substrate/frame/contracts/fixtures/data/balance.wat b/substrate/frame/contracts/fixtures/data/balance.wat
index d86d5c4b1c6..d7970c92e41 100644
--- a/substrate/frame/contracts/fixtures/data/balance.wat
+++ b/substrate/frame/contracts/fixtures/data/balance.wat
@@ -12,7 +12,7 @@
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
diff --git a/substrate/frame/contracts/fixtures/data/call.wat b/substrate/frame/contracts/fixtures/data/call.wat
index 4558b2c6409..43b32049c88 100644
--- a/substrate/frame/contracts/fixtures/data/call.wat
+++ b/substrate/frame/contracts/fixtures/data/call.wat
@@ -7,7 +7,7 @@
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
diff --git a/substrate/frame/contracts/fixtures/data/call_runtime_and_call.wat b/substrate/frame/contracts/fixtures/data/call_runtime_and_call.wat
index 3320922d9e2..5d76e19a74c 100644
--- a/substrate/frame/contracts/fixtures/data/call_runtime_and_call.wat
+++ b/substrate/frame/contracts/fixtures/data/call_runtime_and_call.wat
@@ -7,7 +7,7 @@
 
 	(func $assert (param i32)
 		(block $ok
-			(br_if $ok (get_local 0))
+			(br_if $ok (local.get 0))
 			(unreachable)
 		)
 	)
diff --git a/substrate/frame/contracts/fixtures/data/caller_contract.wat b/substrate/frame/contracts/fixtures/data/caller_contract.wat
index 929171b9a26..43eb8ccfd54 100644
--- a/substrate/frame/contracts/fixtures/data/caller_contract.wat
+++ b/substrate/frame/contracts/fixtures/data/caller_contract.wat
@@ -10,7 +10,7 @@
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
@@ -37,10 +37,10 @@
 		)
 
 		;; Read current balance into local variable.
-		(set_local $sp (i32.const 1024))
+		(local.set $sp (i32.const 1024))
 
 		;; Fail to deploy the contract since it returns a non-zero exit status.
-		(set_local $exit_code
+		(local.set $exit_code
 			(call $seal_instantiate
 				(i32.const 24)	;; Pointer to the code hash.
 				(i64.const 0)	;; How much ref_time weight to devote for the execution. 0 = all.
@@ -60,11 +60,11 @@
 
 		;; Check non-zero exit status.
 		(call $assert
-			(i32.eq (get_local $exit_code) (i32.const 2)) ;; ReturnCode::CalleeReverted
+			(i32.eq (local.get $exit_code) (i32.const 2)) ;; ReturnCode::CalleeReverted
 		)
 
 		;; Fail to deploy the contract due to insufficient ref_time weight.
-		(set_local $exit_code
+		(local.set $exit_code
 			(call $seal_instantiate
 				(i32.const 24)	;; Pointer to the code hash.
 				(i64.const 1)	;; Supply too little ref_time weight
@@ -85,11 +85,11 @@
 
 		;; Check for special trap exit status.
 		(call $assert
-			(i32.eq (get_local $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped
+			(i32.eq (local.get $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped
 		)
 
 		;; Fail to deploy the contract due to insufficient ref_time weight.
-		(set_local $exit_code
+		(local.set $exit_code
 			(call $seal_instantiate
 				(i32.const 24)	;; Pointer to the code hash.
 				(i64.const 0)	;; How much ref_time weight to devote for the execution. 0 = all.
@@ -110,17 +110,17 @@
 
 		;; Check for special trap exit status.
 		(call $assert
-			(i32.eq (get_local $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped
+			(i32.eq (local.get $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped
 		)
 
 		;; Length of the output buffer
 		(i32.store
-			(i32.sub (get_local $sp) (i32.const 4))
+			(i32.sub (local.get $sp) (i32.const 4))
 			(i32.const 256)
 		)
 
 		;; Deploy the contract successfully.
-		(set_local $exit_code
+		(local.set $exit_code
 			(call $seal_instantiate
 				(i32.const 24)	;; Pointer to the code hash.
 				(i64.const 0)	;; How much ref_time weight to devote for the execution. 0 = all.
@@ -130,7 +130,7 @@
 				(i32.const 8)	;; Pointer to input data buffer address
 				(i32.const 8)	;; Length of input data buffer
 				(i32.const 16)	;; Pointer to the address output buffer
-				(i32.sub (get_local $sp) (i32.const 4))	;; Pointer to the address buffer length
+				(i32.sub (local.get $sp) (i32.const 4))	;; Pointer to the address buffer length
 				(i32.const 4294967295)	;; u32 max sentinel value: do not copy output
 				(i32.const 0)	;; Length is ignored in this case
 				(i32.const 0)	;; salt_ptr
@@ -141,28 +141,28 @@
 
 		;; Check for success exit status.
 		(call $assert
-			(i32.eq (get_local $exit_code) (i32.const 0)) ;; ReturnCode::Success
+			(i32.eq (local.get $exit_code) (i32.const 0)) ;; ReturnCode::Success
 		)
 
 		;; Check that address has the expected length
 		(call $assert
-			(i32.eq (i32.load (i32.sub (get_local $sp) (i32.const 4))) (i32.const 32))
+			(i32.eq (i32.load (i32.sub (local.get $sp) (i32.const 4))) (i32.const 32))
 		)
 
 		;; Zero out destination buffer of output
 		(i32.store
-			(i32.sub (get_local $sp) (i32.const 4))
+			(i32.sub (local.get $sp) (i32.const 4))
 			(i32.const 0)
 		)
 
 		;; Length of the output buffer
 		(i32.store
-			(i32.sub (get_local $sp) (i32.const 8))
+			(i32.sub (local.get $sp) (i32.const 8))
 			(i32.const 4)
 		)
 
 		;; Call the new contract and expect it to return failing exit code.
-		(set_local $exit_code
+		(local.set $exit_code
 			(call $seal_call
 				(i32.const 0)	;; Set no flag
 				(i32.const 16)	;; Pointer to "callee" address.
@@ -172,29 +172,29 @@
 				(i32.const 0)	;; Pointer to the buffer with value to transfer
 				(i32.const 9)	;; Pointer to input data buffer address
 				(i32.const 7)	;; Length of input data buffer
-				(i32.sub (get_local $sp) (i32.const 4))	;; Ptr to output buffer
-				(i32.sub (get_local $sp) (i32.const 8))	;; Ptr to output buffer len
+				(i32.sub (local.get $sp) (i32.const 4))	;; Ptr to output buffer
+				(i32.sub (local.get $sp) (i32.const 8))	;; Ptr to output buffer len
 			)
 		)
 
 		;; Check non-zero exit status.
 		(call $assert
-			(i32.eq (get_local $exit_code) (i32.const 2)) ;; ReturnCode::CalleeReverted
+			(i32.eq (local.get $exit_code) (i32.const 2)) ;; ReturnCode::CalleeReverted
 		)
 
 		;; Check that output buffer contains the expected return data.
 		(call $assert
-			(i32.eq (i32.load (i32.sub (get_local $sp) (i32.const 8))) (i32.const 3))
+			(i32.eq (i32.load (i32.sub (local.get $sp) (i32.const 8))) (i32.const 3))
 		)
 		(call $assert
 			(i32.eq
-				(i32.load (i32.sub (get_local $sp) (i32.const 4)))
+				(i32.load (i32.sub (local.get $sp) (i32.const 4)))
 				(i32.const 0x00776655)
 			)
 		)
 
 		;; Fail to call the contract due to insufficient ref_time weight.
-		(set_local $exit_code
+		(local.set $exit_code
 			(call $seal_call
 				(i32.const 0)	;; Set no flag
 				(i32.const 16)	;; Pointer to "callee" address.
@@ -211,11 +211,11 @@
 
 		;; Check for special trap exit status.
 		(call $assert
-			(i32.eq (get_local $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped
+			(i32.eq (local.get $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped
 		)
 
 		;; Fail to call the contract due to insufficient proof_size weight.
-		(set_local $exit_code
+		(local.set $exit_code
 			(call $seal_call
 				(i32.const 0)	;; Set no flag
 				(i32.const 16)	;; Pointer to "callee" address.
@@ -232,23 +232,23 @@
 
 		;; Check for special trap exit status.
 		(call $assert
-			(i32.eq (get_local $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped
+			(i32.eq (local.get $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped
 		)
 
 		;; Zero out destination buffer of output
 		(i32.store
-			(i32.sub (get_local $sp) (i32.const 4))
+			(i32.sub (local.get $sp) (i32.const 4))
 			(i32.const 0)
 		)
 
 		;; Length of the output buffer
 		(i32.store
-			(i32.sub (get_local $sp) (i32.const 8))
+			(i32.sub (local.get $sp) (i32.const 8))
 			(i32.const 4)
 		)
 
 		;; Call the contract successfully.
-		(set_local $exit_code
+		(local.set $exit_code
 			(call $seal_call
 				(i32.const 0)	;; Set no flag
 				(i32.const 16)	;; Pointer to "callee" address.
@@ -258,23 +258,23 @@
 				(i32.const 0)	;; Pointer to the buffer with value to transfer
 				(i32.const 8)	;; Pointer to input data buffer address
 				(i32.const 8)	;; Length of input data buffer
-				(i32.sub (get_local $sp) (i32.const 4))	;; Ptr to output buffer
-				(i32.sub (get_local $sp) (i32.const 8))	;; Ptr to output buffer len
+				(i32.sub (local.get $sp) (i32.const 4))	;; Ptr to output buffer
+				(i32.sub (local.get $sp) (i32.const 8))	;; Ptr to output buffer len
 			)
 		)
 
 		;; Check for success exit status.
 		(call $assert
-			(i32.eq (get_local $exit_code) (i32.const 0)) ;; ReturnCode::Success
+			(i32.eq (local.get $exit_code) (i32.const 0)) ;; ReturnCode::Success
 		)
 
 		;; Check that the output buffer contains the expected return data.
 		(call $assert
-			(i32.eq (i32.load (i32.sub (get_local $sp) (i32.const 8))) (i32.const 4))
+			(i32.eq (i32.load (i32.sub (local.get $sp) (i32.const 8))) (i32.const 4))
 		)
 		(call $assert
 			(i32.eq
-				(i32.load (i32.sub (get_local $sp) (i32.const 4)))
+				(i32.load (i32.sub (local.get $sp) (i32.const 4)))
 				(i32.const 0x77665544)
 			)
 		)
diff --git a/substrate/frame/contracts/fixtures/data/chain_extension.wat b/substrate/frame/contracts/fixtures/data/chain_extension.wat
index 670f8e70172..c24ca286ff8 100644
--- a/substrate/frame/contracts/fixtures/data/chain_extension.wat
+++ b/substrate/frame/contracts/fixtures/data/chain_extension.wat
@@ -9,7 +9,7 @@
 
 	(func $assert (param i32)
 		(block $ok
-			(br_if $ok (get_local 0))
+			(br_if $ok (local.get 0))
 			(unreachable)
 		)
 	)
diff --git a/substrate/frame/contracts/fixtures/data/chain_extension_temp_storage.wat b/substrate/frame/contracts/fixtures/data/chain_extension_temp_storage.wat
index b481abb5bc7..504646df1b0 100644
--- a/substrate/frame/contracts/fixtures/data/chain_extension_temp_storage.wat
+++ b/substrate/frame/contracts/fixtures/data/chain_extension_temp_storage.wat
@@ -11,7 +11,7 @@
 
 	(func $assert (param i32)
 		(block $ok
-			(br_if $ok (get_local 0))
+			(br_if $ok (local.get 0))
 			(unreachable)
 		)
 	)
diff --git a/substrate/frame/contracts/fixtures/data/create_storage_and_call.wat b/substrate/frame/contracts/fixtures/data/create_storage_and_call.wat
index 5592e7e96a9..2bff53b638f 100644
--- a/substrate/frame/contracts/fixtures/data/create_storage_and_call.wat
+++ b/substrate/frame/contracts/fixtures/data/create_storage_and_call.wat
@@ -8,7 +8,7 @@
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
diff --git a/substrate/frame/contracts/fixtures/data/create_storage_and_instantiate.wat b/substrate/frame/contracts/fixtures/data/create_storage_and_instantiate.wat
index cd720247843..00c9a657f39 100644
--- a/substrate/frame/contracts/fixtures/data/create_storage_and_instantiate.wat
+++ b/substrate/frame/contracts/fixtures/data/create_storage_and_instantiate.wat
@@ -14,7 +14,7 @@
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
diff --git a/substrate/frame/contracts/fixtures/data/crypto_hashes.wat b/substrate/frame/contracts/fixtures/data/crypto_hashes.wat
index c2b4d6b81ed..9d86b02f419 100644
--- a/substrate/frame/contracts/fixtures/data/crypto_hashes.wat
+++ b/substrate/frame/contracts/fixtures/data/crypto_hashes.wat
@@ -59,8 +59,10 @@
 		(call $seal_input (local.get $input_ptr) (local.get $input_len_ptr))
 		(local.set $chosen_hash_fn (i32.load8_u (local.get $input_ptr)))
 		(if (i32.gt_u (local.get $chosen_hash_fn) (i32.const 7))
-			;; We check that the chosen hash fn  identifier is within bounds: [0,7]
-			(unreachable)
+			(then
+				;; We check that the chosen hash fn  identifier is within bounds: [0,7]
+				(unreachable)
+			)
 		)
 		(local.set $input_ptr (i32.add (local.get $input_ptr) (i32.const 1)))
 		(local.set $input_len (i32.sub (i32.load (local.get $input_len_ptr)) (i32.const 1)))
diff --git a/substrate/frame/contracts/fixtures/data/debug_message_invalid_utf8.wat b/substrate/frame/contracts/fixtures/data/debug_message_invalid_utf8.wat
index e8c447b42fc..dae0de88418 100644
--- a/substrate/frame/contracts/fixtures/data/debug_message_invalid_utf8.wat
+++ b/substrate/frame/contracts/fixtures/data/debug_message_invalid_utf8.wat
@@ -8,7 +8,7 @@
 	(func $assert_eq (param i32 i32)
 		(block $ok
 			(br_if $ok
-				(i32.eq (get_local 0) (get_local 1))
+				(i32.eq (local.get 0) (local.get 1))
 			)
 			(unreachable)
 		)
diff --git a/substrate/frame/contracts/fixtures/data/debug_message_logging_disabled.wat b/substrate/frame/contracts/fixtures/data/debug_message_logging_disabled.wat
index fc6ee72df8b..e9ce20ba42b 100644
--- a/substrate/frame/contracts/fixtures/data/debug_message_logging_disabled.wat
+++ b/substrate/frame/contracts/fixtures/data/debug_message_logging_disabled.wat
@@ -8,7 +8,7 @@
 	(func $assert_eq (param i32 i32)
 		(block $ok
 			(br_if $ok
-				(i32.eq (get_local 0) (get_local 1))
+				(i32.eq (local.get 0) (local.get 1))
 			)
 			(unreachable)
 		)
diff --git a/substrate/frame/contracts/fixtures/data/debug_message_works.wat b/substrate/frame/contracts/fixtures/data/debug_message_works.wat
index 61933c23296..44a7b6db1be 100644
--- a/substrate/frame/contracts/fixtures/data/debug_message_works.wat
+++ b/substrate/frame/contracts/fixtures/data/debug_message_works.wat
@@ -8,7 +8,7 @@
 	(func $assert_eq (param i32 i32)
 		(block $ok
 			(br_if $ok
-				(i32.eq (get_local 0) (get_local 1))
+				(i32.eq (local.get 0) (local.get 1))
 			)
 			(unreachable)
 		)
diff --git a/substrate/frame/contracts/fixtures/data/delegate_call.wat b/substrate/frame/contracts/fixtures/data/delegate_call.wat
index 7fe422af455..b8d4f0d47f0 100644
--- a/substrate/frame/contracts/fixtures/data/delegate_call.wat
+++ b/substrate/frame/contracts/fixtures/data/delegate_call.wat
@@ -24,7 +24,7 @@
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
@@ -70,7 +70,7 @@
 		)
 
 		;; Call deployed library contract code.
-		(set_local $exit_code
+		(local.set $exit_code
 			(call $seal_delegate_call
 				(i32.const 0)	;; Set no call flags
 				(i32.const 64)	;; Pointer to "callee" code_hash.
@@ -83,7 +83,7 @@
 
 		;; Check for success exit status.
 		(call $assert
-			(i32.eq (get_local $exit_code) (i32.const 0)) ;; ReturnCode::Success
+			(i32.eq (local.get $exit_code) (i32.const 0)) ;; ReturnCode::Success
 		)
 
 		(call $assert
diff --git a/substrate/frame/contracts/fixtures/data/delegate_call_lib.wat b/substrate/frame/contracts/fixtures/data/delegate_call_lib.wat
index 340b9699f87..62eea32800a 100644
--- a/substrate/frame/contracts/fixtures/data/delegate_call_lib.wat
+++ b/substrate/frame/contracts/fixtures/data/delegate_call_lib.wat
@@ -20,7 +20,7 @@
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
diff --git a/substrate/frame/contracts/fixtures/data/delegate_call_simple.wat b/substrate/frame/contracts/fixtures/data/delegate_call_simple.wat
index 24ae5a13e33..ba0a8fcc8ae 100644
--- a/substrate/frame/contracts/fixtures/data/delegate_call_simple.wat
+++ b/substrate/frame/contracts/fixtures/data/delegate_call_simple.wat
@@ -12,7 +12,7 @@
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
diff --git a/substrate/frame/contracts/fixtures/data/destroy_and_transfer.wat b/substrate/frame/contracts/fixtures/data/destroy_and_transfer.wat
index 25554795552..2afd3b2fbac 100644
--- a/substrate/frame/contracts/fixtures/data/destroy_and_transfer.wat
+++ b/substrate/frame/contracts/fixtures/data/destroy_and_transfer.wat
@@ -33,7 +33,7 @@
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
diff --git a/substrate/frame/contracts/fixtures/data/drain.wat b/substrate/frame/contracts/fixtures/data/drain.wat
index cb8ff0aed61..18a21cca803 100644
--- a/substrate/frame/contracts/fixtures/data/drain.wat
+++ b/substrate/frame/contracts/fixtures/data/drain.wat
@@ -19,7 +19,7 @@
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
diff --git a/substrate/frame/contracts/fixtures/data/ecdsa_recover.wat b/substrate/frame/contracts/fixtures/data/ecdsa_recover.wat
index d694b3215e8..4910e706069 100644
--- a/substrate/frame/contracts/fixtures/data/ecdsa_recover.wat
+++ b/substrate/frame/contracts/fixtures/data/ecdsa_recover.wat
@@ -12,7 +12,7 @@
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
diff --git a/substrate/frame/contracts/fixtures/data/event_size.wat b/substrate/frame/contracts/fixtures/data/event_size.wat
index 4bd6158d72f..1c1f34b24d7 100644
--- a/substrate/frame/contracts/fixtures/data/event_size.wat
+++ b/substrate/frame/contracts/fixtures/data/event_size.wat
@@ -9,7 +9,7 @@
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
diff --git a/substrate/frame/contracts/fixtures/data/multi_store.wat b/substrate/frame/contracts/fixtures/data/multi_store.wat
index 2592baf6183..c334ed54c4e 100644
--- a/substrate/frame/contracts/fixtures/data/multi_store.wat
+++ b/substrate/frame/contracts/fixtures/data/multi_store.wat
@@ -19,7 +19,7 @@
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
diff --git a/substrate/frame/contracts/fixtures/data/reentrance_count_call.wat b/substrate/frame/contracts/fixtures/data/reentrance_count_call.wat
index c6b529e2aff..44db8d041b1 100644
--- a/substrate/frame/contracts/fixtures/data/reentrance_count_call.wat
+++ b/substrate/frame/contracts/fixtures/data/reentrance_count_call.wat
@@ -20,7 +20,7 @@
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
@@ -36,11 +36,11 @@
 		(call $seal_input (i32.const 32) (i32.const 36))
 
 		;; reading manually passed reentrant count
-		(set_local $expected_reentrance_count (i32.load (i32.const 32)))
+		(local.set $expected_reentrance_count (i32.load (i32.const 32)))
 
 		;; reentrance count is calculated correctly
 		(call $assert
-			(i32.eq (call $reentrance_count) (get_local $expected_reentrance_count))
+			(i32.eq (call $reentrance_count) (local.get $expected_reentrance_count))
 		)
 
 		;; re-enter 5 times in a row and assert that the reentrant counter works as expected
@@ -52,7 +52,7 @@
 				(i32.store (i32.const 32) (i32.add (i32.load (i32.const 32)) (i32.const 1)))
 
 				;; Call to itself
-				(set_local $seal_call_exit_code
+				(local.set $seal_call_exit_code
 					(call $seal_call
 						(i32.const 8)	;; Allow reentrancy flag set
 						(i32.const 0)	;; Pointer to "callee" address
@@ -66,7 +66,7 @@
 				)
 
 				(call $assert
-					(i32.eq (get_local $seal_call_exit_code) (i32.const 0))
+					(i32.eq (local.get $seal_call_exit_code) (i32.const 0))
 				)
 			)
 		)
diff --git a/substrate/frame/contracts/fixtures/data/reentrance_count_delegated_call.wat b/substrate/frame/contracts/fixtures/data/reentrance_count_delegated_call.wat
index b8219a8462e..49e0193bcdb 100644
--- a/substrate/frame/contracts/fixtures/data/reentrance_count_delegated_call.wat
+++ b/substrate/frame/contracts/fixtures/data/reentrance_count_delegated_call.wat
@@ -17,7 +17,7 @@
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
@@ -30,7 +30,7 @@
 		(call $seal_input (i32.const 0) (i32.const 36))
 
 		;; reading passed callstack height
-		(set_local $callstack_height (i32.load (i32.const 32)))
+		(local.set $callstack_height (i32.load (i32.const 32)))
 
 		;; incrementing callstack height
 		(i32.store (i32.const 32) (i32.add (i32.load (i32.const 32)) (i32.const 1)))
@@ -40,12 +40,12 @@
 			(i32.eq (call $reentrance_count) (i32.const 0))
 		)
 
-		(i32.eq (get_local $callstack_height) (i32.const 5))
+		(i32.eq (local.get $callstack_height) (i32.const 5))
 		(if
 			(then) ;; exit recursion case
 			(else
 				;; Call to itself
-				(set_local $delegate_call_exit_code
+				(local.set $delegate_call_exit_code
 					(call $seal_delegate_call
 						(i32.const 0)	;; Set no call flags
 						(i32.const 0)	;; Pointer to "callee" code_hash.
@@ -57,13 +57,13 @@
 				)
 
 				(call $assert
-					(i32.eq (get_local $delegate_call_exit_code) (i32.const 0))
+					(i32.eq (local.get $delegate_call_exit_code) (i32.const 0))
 				)
 			)
 		)
 
 		(call $assert
-			(i32.le_s (get_local $callstack_height) (i32.const 5))
+			(i32.le_s (local.get $callstack_height) (i32.const 5))
 		)
 	)
 
diff --git a/substrate/frame/contracts/fixtures/data/self_destruct.wat b/substrate/frame/contracts/fixtures/data/self_destruct.wat
index b8a37306e20..00c3895fdde 100644
--- a/substrate/frame/contracts/fixtures/data/self_destruct.wat
+++ b/substrate/frame/contracts/fixtures/data/self_destruct.wat
@@ -26,7 +26,7 @@
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
diff --git a/substrate/frame/contracts/fixtures/data/self_destructing_constructor.wat b/substrate/frame/contracts/fixtures/data/self_destructing_constructor.wat
index 85fce511e21..628f283a19f 100644
--- a/substrate/frame/contracts/fixtures/data/self_destructing_constructor.wat
+++ b/substrate/frame/contracts/fixtures/data/self_destructing_constructor.wat
@@ -5,7 +5,7 @@
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
diff --git a/substrate/frame/contracts/fixtures/data/set_code_hash.wat b/substrate/frame/contracts/fixtures/data/set_code_hash.wat
index b4df1b13318..c0a9557b4d0 100644
--- a/substrate/frame/contracts/fixtures/data/set_code_hash.wat
+++ b/substrate/frame/contracts/fixtures/data/set_code_hash.wat
@@ -16,7 +16,7 @@
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
@@ -27,11 +27,11 @@
 
 		(call $seal_input (i32.const 0) (i32.const 32))
 
-		(set_local $exit_code
+		(local.set $exit_code
 			(call $seal_set_code_hash (i32.const 0)) ;; Pointer to the input data.
 		)
 		(call $assert
-			(i32.eq (get_local $exit_code) (i32.const 0)) ;; ReturnCode::Success
+			(i32.eq (local.get $exit_code) (i32.const 0)) ;; ReturnCode::Success
 		)
 
 		;; we return 1 after setting new code_hash
diff --git a/substrate/frame/contracts/fixtures/data/storage_size.wat b/substrate/frame/contracts/fixtures/data/storage_size.wat
index 293a656d4f6..728bb4fcf3c 100644
--- a/substrate/frame/contracts/fixtures/data/storage_size.wat
+++ b/substrate/frame/contracts/fixtures/data/storage_size.wat
@@ -20,7 +20,7 @@
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
diff --git a/substrate/frame/contracts/fixtures/data/store_call.wat b/substrate/frame/contracts/fixtures/data/store_call.wat
index 9e090d31801..746b7a48b55 100644
--- a/substrate/frame/contracts/fixtures/data/store_call.wat
+++ b/substrate/frame/contracts/fixtures/data/store_call.wat
@@ -15,7 +15,7 @@
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
diff --git a/substrate/frame/contracts/fixtures/data/store_deploy.wat b/substrate/frame/contracts/fixtures/data/store_deploy.wat
index cc428e9623b..7f115cba977 100644
--- a/substrate/frame/contracts/fixtures/data/store_deploy.wat
+++ b/substrate/frame/contracts/fixtures/data/store_deploy.wat
@@ -15,7 +15,7 @@
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
diff --git a/substrate/frame/contracts/fixtures/data/xcm_execute.wat b/substrate/frame/contracts/fixtures/data/xcm_execute.wat
index b3459996a2e..72ef14ed82c 100644
--- a/substrate/frame/contracts/fixtures/data/xcm_execute.wat
+++ b/substrate/frame/contracts/fixtures/data/xcm_execute.wat
@@ -12,7 +12,7 @@
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
diff --git a/substrate/frame/contracts/fixtures/data/xcm_send.wat b/substrate/frame/contracts/fixtures/data/xcm_send.wat
index 9eec6388de9..fe29ddf0f14 100644
--- a/substrate/frame/contracts/fixtures/data/xcm_send.wat
+++ b/substrate/frame/contracts/fixtures/data/xcm_send.wat
@@ -12,7 +12,7 @@
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs
index 77e94b16777..f73655e9920 100644
--- a/substrate/frame/contracts/src/wasm/mod.rs
+++ b/substrate/frame/contracts/src/wasm/mod.rs
@@ -1506,7 +1506,7 @@ mod tests {
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
@@ -1531,7 +1531,7 @@ mod tests {
 		)
 
 		;; Find out the size of the buffer
-		(set_local $buf_size
+		(local.set $buf_size
 			(i32.load (i32.const 32))
 		)
 
@@ -1539,7 +1539,7 @@ mod tests {
 		(call $seal_return
 			(i32.const 0)
 			(i32.const 36)
-			(get_local $buf_size)
+			(local.get $buf_size)
 		)
 
 		;; env:seal_return doesn't return, so this is effectively unreachable.
@@ -1575,7 +1575,7 @@ mod tests {
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
@@ -1633,7 +1633,7 @@ mod tests {
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
@@ -1680,7 +1680,7 @@ mod tests {
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
@@ -1726,7 +1726,7 @@ mod tests {
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
@@ -1773,7 +1773,7 @@ mod tests {
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
@@ -1836,7 +1836,7 @@ mod tests {
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
@@ -1925,7 +1925,7 @@ mod tests {
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
@@ -1966,7 +1966,7 @@ mod tests {
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
@@ -2013,7 +2013,7 @@ mod tests {
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
@@ -2067,7 +2067,7 @@ mod tests {
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
@@ -2137,7 +2137,7 @@ mod tests {
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
@@ -2327,7 +2327,7 @@ mod tests {
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
@@ -2995,7 +2995,7 @@ mod tests {
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
@@ -3047,7 +3047,7 @@ mod tests {
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
@@ -3162,18 +3162,18 @@ mod tests {
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
 	)
 	(func (export "call")
 		(local $exit_code i32)
-		(set_local $exit_code
+		(local.set $exit_code
 			(call $seal_set_code_hash (i32.const 0))
 		)
 		(call $assert
-			(i32.eq (get_local $exit_code) (i32.const 0)) ;; ReturnCode::Success
+			(i32.eq (local.get $exit_code) (i32.const 0)) ;; ReturnCode::Success
 		)
 	)
 
@@ -3202,18 +3202,18 @@ mod tests {
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
 	)
 	(func (export "call")
 		(local $return_val i32)
-		(set_local $return_val
+		(local.set $return_val
 			(call $reentrance_count)
 		)
 		(call $assert
-			(i32.eq (get_local $return_val) (i32.const 12))
+			(i32.eq (local.get $return_val) (i32.const 12))
 		)
 	)
 
@@ -3234,18 +3234,18 @@ mod tests {
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
 	)
 	(func (export "call")
 		(local $return_val i32)
-		(set_local $return_val
+		(local.set $return_val
 			(call $account_reentrance_count (i32.const 0))
 		)
 		(call $assert
-			(i32.eq (get_local $return_val) (i32.const 12))
+			(i32.eq (local.get $return_val) (i32.const 12))
 		)
 	)
 
@@ -3267,7 +3267,7 @@ mod tests {
 	(func $assert (param i32)
 		(block $ok
 			(br_if $ok
-				(get_local 0)
+				(local.get 0)
 			)
 			(unreachable)
 		)
-- 
GitLab