diff --git a/Cargo.lock b/Cargo.lock
index 69b14ca30309e288439f10494dfba9237ffcf20c..2145a870e75c9568df5079ba9a6b5943c5d53674 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -165,7 +165,7 @@ dependencies = [
  "hex-literal",
  "itoa",
  "proptest",
- "rand",
+ "rand 0.8.5",
  "ruint",
  "serde",
  "tiny-keccak",
@@ -690,7 +690,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c"
 dependencies = [
  "num-traits",
- "rand",
+ "rand 0.8.5",
 ]
 
 [[package]]
@@ -700,7 +700,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185"
 dependencies = [
  "num-traits",
- "rand",
+ "rand 0.8.5",
  "rayon",
 ]
 
@@ -1880,7 +1880,7 @@ dependencies = [
  "bp-parachains",
  "bp-polkadot-core",
  "bp-runtime",
- "ed25519-dalek",
+ "ed25519-dalek 2.1.0",
  "finality-grandpa",
  "parity-scale-codec",
  "sp-application-crypto",
@@ -2551,6 +2551,19 @@ dependencies = [
  "unsigned-varint",
 ]
 
+[[package]]
+name = "cid"
+version = "0.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fd94671561e36e4e7de75f753f577edafb0e7c05d6e4547229fdf7938fbcd2c3"
+dependencies = [
+ "core2",
+ "multibase",
+ "multihash 0.18.1",
+ "serde",
+ "unsigned-varint",
+]
+
 [[package]]
 name = "cipher"
 version = "0.2.5"
@@ -2858,6 +2871,16 @@ dependencies = [
  "windows-sys 0.48.0",
 ]
 
+[[package]]
+name = "combine"
+version = "4.6.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "35ed6e9d84f0b51a7f52daf1c7d71dd136fd7a3f41a8462b8cdb8c78d920fad4"
+dependencies = [
+ "bytes",
+ "memchr",
+]
+
 [[package]]
 name = "comfy-table"
 version = "7.1.0"
@@ -3347,6 +3370,21 @@ dependencies = [
  "wasmtime-types",
 ]
 
+[[package]]
+name = "crc"
+version = "3.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c2b432c56615136f8dba245fed7ec3d5518c500a31108661067e61e72fe7e6bc"
+dependencies = [
+ "crc-catalog",
+]
+
+[[package]]
+name = "crc-catalog"
+version = "2.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5"
+
 [[package]]
 name = "crc32fast"
 version = "1.3.2"
@@ -3769,7 +3807,7 @@ dependencies = [
  "polkadot-overseer",
  "polkadot-primitives",
  "portpicker",
- "rand",
+ "rand 0.8.5",
  "sc-cli",
  "sc-client-api",
  "sc-consensus",
@@ -3879,7 +3917,7 @@ dependencies = [
  "polkadot-parachain-primitives",
  "polkadot-runtime-common",
  "polkadot-runtime-parachains",
- "rand",
+ "rand 0.8.5",
  "sc-client-api",
  "scale-info",
  "sp-core",
@@ -4201,7 +4239,7 @@ dependencies = [
  "parity-scale-codec",
  "pin-project",
  "polkadot-overseer",
- "rand",
+ "rand 0.8.5",
  "sc-client-api",
  "sc-rpc-api",
  "sc-service",
@@ -4342,7 +4380,7 @@ dependencies = [
  "polkadot-service",
  "polkadot-test-service",
  "portpicker",
- "rand",
+ "rand 0.8.5",
  "rococo-parachain-runtime",
  "sc-basic-authorship",
  "sc-block-builder",
@@ -4825,10 +4863,19 @@ dependencies = [
  "elliptic-curve",
  "rfc6979",
  "serdect",
- "signature",
+ "signature 2.1.0",
  "spki",
 ]
 
+[[package]]
+name = "ed25519"
+version = "1.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7"
+dependencies = [
+ "signature 1.6.4",
+]
+
 [[package]]
 name = "ed25519"
 version = "2.2.2"
@@ -4836,7 +4883,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "60f6d271ca33075c88028be6f04d502853d63a5ece419d269c15315d4fc1cf1d"
 dependencies = [
  "pkcs8",
- "signature",
+ "signature 2.1.0",
+]
+
+[[package]]
+name = "ed25519-dalek"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d"
+dependencies = [
+ "curve25519-dalek 3.2.0",
+ "ed25519 1.5.3",
+ "rand 0.7.3",
+ "serde",
+ "sha2 0.9.9",
+ "zeroize",
 ]
 
 [[package]]
@@ -4846,7 +4907,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1f628eaec48bfd21b865dc2950cfa014450c01d2fa2b69a86c2fd5844ec523c0"
 dependencies = [
  "curve25519-dalek 4.1.2",
- "ed25519",
+ "ed25519 2.2.2",
  "rand_core 0.6.4",
  "serde",
  "sha2 0.10.7",
@@ -4875,7 +4936,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7d9ce6874da5d4415896cd45ffbc4d1cfc0c4f9c079427bd870742c30f2f65a9"
 dependencies = [
  "curve25519-dalek 4.1.2",
- "ed25519",
+ "ed25519 2.2.2",
  "hashbrown 0.14.3",
  "hex",
  "rand_core 0.6.4",
@@ -4968,6 +5029,18 @@ dependencies = [
  "syn 1.0.109",
 ]
 
+[[package]]
+name = "enum-as-inner"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a"
+dependencies = [
+ "heck 0.4.1",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.53",
+]
+
 [[package]]
 name = "enumflags2"
 version = "0.7.7"
@@ -5362,7 +5435,7 @@ dependencies = [
  "num-traits",
  "parity-scale-codec",
  "parking_lot 0.12.1",
- "rand",
+ "rand 0.8.5",
  "scale-info",
 ]
 
@@ -5385,7 +5458,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534"
 dependencies = [
  "byteorder",
- "rand",
+ "rand 0.8.5",
  "rustc-hex",
  "static_assertions",
 ]
@@ -5422,6 +5495,21 @@ version = "1.0.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
 
+[[package]]
+name = "foreign-types"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
+dependencies = [
+ "foreign-types-shared",
+]
+
+[[package]]
+name = "foreign-types-shared"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
+
 [[package]]
 name = "fork-tree"
 version = "12.0.0"
@@ -5500,7 +5588,7 @@ dependencies = [
  "linked-hash-map",
  "log",
  "parity-scale-codec",
- "rand",
+ "rand 0.8.5",
  "rand_pcg",
  "sc-block-builder",
  "sc-chain-spec",
@@ -5568,7 +5656,7 @@ dependencies = [
  "frame-support",
  "frame-system",
  "parity-scale-codec",
- "rand",
+ "rand 0.8.5",
  "scale-info",
  "sp-arithmetic",
  "sp-core",
@@ -5588,7 +5676,7 @@ dependencies = [
  "frame-support",
  "honggfuzz",
  "parity-scale-codec",
- "rand",
+ "rand 0.8.5",
  "scale-info",
  "sp-arithmetic",
  "sp-npos-elections",
@@ -6110,7 +6198,7 @@ version = "0.0.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6ea1015b5a70616b688dc230cfe50c8af89d972cb132d5a622814d29773b10b9"
 dependencies = [
- "rand",
+ "rand 0.8.5",
  "rand_core 0.6.4",
 ]
 
@@ -6221,7 +6309,7 @@ dependencies = [
  "nonzero_ext",
  "parking_lot 0.12.1",
  "quanta",
- "rand",
+ "rand 0.8.5",
  "smallvec",
 ]
 
@@ -6897,7 +6985,7 @@ dependencies = [
  "jsonrpsee-types",
  "parking_lot 0.12.1",
  "pin-project",
- "rand",
+ "rand 0.8.5",
  "rustc-hash",
  "serde",
  "serde_json",
@@ -7355,7 +7443,7 @@ dependencies = [
  "parking_lot 0.12.1",
  "pin-project",
  "quick-protobuf",
- "rand",
+ "rand 0.8.5",
  "rw-stream-sink",
  "smallvec",
  "thiserror",
@@ -7374,7 +7462,7 @@ dependencies = [
  "log",
  "parking_lot 0.12.1",
  "smallvec",
- "trust-dns-resolver",
+ "trust-dns-resolver 0.22.0",
 ]
 
 [[package]]
@@ -7406,12 +7494,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "276bb57e7af15d8f100d3c11cbdd32c6752b7eef4ba7a18ecf464972c07abcce"
 dependencies = [
  "bs58 0.4.0",
- "ed25519-dalek",
+ "ed25519-dalek 2.1.0",
  "log",
  "multiaddr",
  "multihash 0.17.0",
  "quick-protobuf",
- "rand",
+ "rand 0.8.5",
  "sha2 0.10.7",
  "thiserror",
  "zeroize",
@@ -7436,7 +7524,7 @@ dependencies = [
  "libp2p-swarm",
  "log",
  "quick-protobuf",
- "rand",
+ "rand 0.8.5",
  "sha2 0.10.7",
  "smallvec",
  "thiserror",
@@ -7458,11 +7546,11 @@ dependencies = [
  "libp2p-identity",
  "libp2p-swarm",
  "log",
- "rand",
+ "rand 0.8.5",
  "smallvec",
  "socket2 0.4.9",
  "tokio",
- "trust-dns-proto",
+ "trust-dns-proto 0.22.0",
  "void",
 ]
 
@@ -7494,7 +7582,7 @@ dependencies = [
  "log",
  "once_cell",
  "quick-protobuf",
- "rand",
+ "rand 0.8.5",
  "sha2 0.10.7",
  "snow",
  "static_assertions",
@@ -7516,7 +7604,7 @@ dependencies = [
  "libp2p-core",
  "libp2p-swarm",
  "log",
- "rand",
+ "rand 0.8.5",
  "void",
 ]
 
@@ -7536,7 +7624,7 @@ dependencies = [
  "log",
  "parking_lot 0.12.1",
  "quinn-proto",
- "rand",
+ "rand 0.8.5",
  "rustls 0.20.8",
  "thiserror",
  "tokio",
@@ -7554,7 +7642,7 @@ dependencies = [
  "libp2p-core",
  "libp2p-identity",
  "libp2p-swarm",
- "rand",
+ "rand 0.8.5",
  "smallvec",
 ]
 
@@ -7573,7 +7661,7 @@ dependencies = [
  "libp2p-identity",
  "libp2p-swarm-derive",
  "log",
- "rand",
+ "rand 0.8.5",
  "smallvec",
  "tokio",
  "void",
@@ -7621,7 +7709,7 @@ dependencies = [
  "rustls 0.20.8",
  "thiserror",
  "webpki",
- "x509-parser",
+ "x509-parser 0.14.0",
  "yasna",
 ]
 
@@ -7699,7 +7787,7 @@ dependencies = [
  "libsecp256k1-core",
  "libsecp256k1-gen-ecmult",
  "libsecp256k1-gen-genmult",
- "rand",
+ "rand 0.8.5",
  "serde",
  "sha2 0.9.9",
  "typenum",
@@ -7826,6 +7914,60 @@ dependencies = [
  "paste",
 ]
 
+[[package]]
+name = "litep2p"
+version = "0.3.0"
+source = "git+https://github.com/paritytech/litep2p?branch=master#b142c9eb611fb2fe78d2830266a3675b37299ceb"
+dependencies = [
+ "async-trait",
+ "bs58 0.4.0",
+ "bytes",
+ "cid 0.10.1",
+ "ed25519-dalek 1.0.1",
+ "futures",
+ "futures-timer",
+ "hex-literal",
+ "indexmap 2.2.3",
+ "libc",
+ "mockall",
+ "multiaddr",
+ "multihash 0.17.0",
+ "network-interface",
+ "nohash-hasher",
+ "parking_lot 0.12.1",
+ "pin-project",
+ "prost 0.11.9",
+ "prost-build",
+ "quinn",
+ "rand 0.8.5",
+ "rcgen",
+ "ring 0.16.20",
+ "rustls 0.20.8",
+ "serde",
+ "sha2 0.10.7",
+ "simple-dns",
+ "smallvec",
+ "snow",
+ "socket2 0.5.6",
+ "static_assertions",
+ "str0m",
+ "thiserror",
+ "tokio",
+ "tokio-stream",
+ "tokio-tungstenite 0.20.1",
+ "tokio-util",
+ "tracing",
+ "trust-dns-resolver 0.23.2",
+ "uint",
+ "unsigned-varint",
+ "url",
+ "webpki",
+ "x25519-dalek 2.0.0",
+ "x509-parser 0.15.1",
+ "yasna",
+ "zeroize",
+]
+
 [[package]]
 name = "lock_api"
 version = "0.4.10"
@@ -8099,7 +8241,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "69672161530e8aeca1d1400fbf3f1a1747ff60ea604265a4e906c2442df20532"
 dependencies = [
  "futures",
- "rand",
+ "rand 0.8.5",
  "thrift",
 ]
 
@@ -8205,7 +8347,7 @@ dependencies = [
  "lioness",
  "log",
  "parking_lot 0.12.1",
- "rand",
+ "rand 0.8.5",
  "rand_chacha 0.3.1",
  "rand_distr",
  "subtle 2.5.0",
@@ -8331,10 +8473,14 @@ version = "0.18.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "cfd8a792c1694c6da4f68db0a9d707c72bd260994da179e6030a5dcee00bb815"
 dependencies = [
+ "blake2b_simd",
+ "blake2s_simd",
+ "blake3",
  "core2",
  "digest 0.10.7",
  "multihash-derive 0.8.0",
  "sha2 0.10.7",
+ "sha3",
  "unsigned-varint",
 ]
 
@@ -8461,7 +8607,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7bddcd3bf5144b6392de80e04c347cd7fab2508f6df16a85fc496ecd5cec39bc"
 dependencies = [
  "clap 3.2.25",
- "rand",
+ "rand 0.8.5",
 ]
 
 [[package]]
@@ -8536,6 +8682,18 @@ dependencies = [
  "tokio",
 ]
 
+[[package]]
+name = "network-interface"
+version = "1.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ae72fd9dbd7f55dda80c00d66acc3b2130436fcba9ea89118fc508eaae48dfb0"
+dependencies = [
+ "cc",
+ "libc",
+ "thiserror",
+ "winapi",
+]
+
 [[package]]
 name = "nix"
 version = "0.24.3"
@@ -8602,7 +8760,7 @@ dependencies = [
  "node-primitives",
  "node-testing",
  "parity-db",
- "rand",
+ "rand 0.8.5",
  "sc-basic-authorship",
  "sc-client-api",
  "sc-transaction-pool",
@@ -8924,12 +9082,60 @@ version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5"
 
+[[package]]
+name = "openssl"
+version = "0.10.64"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f"
+dependencies = [
+ "bitflags 2.4.0",
+ "cfg-if",
+ "foreign-types",
+ "libc",
+ "once_cell",
+ "openssl-macros",
+ "openssl-sys",
+]
+
+[[package]]
+name = "openssl-macros"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.53",
+]
+
 [[package]]
 name = "openssl-probe"
 version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
 
+[[package]]
+name = "openssl-src"
+version = "300.2.3+3.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5cff92b6f71555b61bb9315f7c64da3ca43d87531622120fea0195fc761b4843"
+dependencies = [
+ "cc",
+]
+
+[[package]]
+name = "openssl-sys"
+version = "0.9.102"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c597637d56fbc83893a35eb0dd04b2b8e7a50c91e64e9493e398b5df4fb45fa2"
+dependencies = [
+ "cc",
+ "libc",
+ "openssl-src",
+ "pkg-config",
+ "vcpkg",
+]
+
 [[package]]
 name = "option-ext"
 version = "0.2.0"
@@ -9232,7 +9438,7 @@ dependencies = [
  "frame-election-provider-support",
  "honggfuzz",
  "pallet-bags-list",
- "rand",
+ "rand 0.8.5",
 ]
 
 [[package]]
@@ -9480,7 +9686,7 @@ dependencies = [
  "pallet-session",
  "pallet-timestamp",
  "parity-scale-codec",
- "rand",
+ "rand 0.8.5",
  "scale-info",
  "sp-consensus-aura",
  "sp-core",
@@ -9549,7 +9755,7 @@ dependencies = [
  "parity-scale-codec",
  "paste",
  "pretty_assertions",
- "rand",
+ "rand 0.8.5",
  "rand_pcg",
  "scale-info",
  "serde",
@@ -9767,7 +9973,7 @@ dependencies = [
  "pallet-election-provider-support-benchmarking",
  "parity-scale-codec",
  "parking_lot 0.12.1",
- "rand",
+ "rand 0.8.5",
  "scale-info",
  "sp-arithmetic",
  "sp-core",
@@ -10140,7 +10346,7 @@ dependencies = [
  "frame-system",
  "log",
  "parity-scale-codec",
- "rand",
+ "rand 0.8.5",
  "rand_distr",
  "scale-info",
  "serde",
@@ -10373,7 +10579,7 @@ dependencies = [
  "honggfuzz",
  "log",
  "pallet-nomination-pools",
- "rand",
+ "rand 0.8.5",
  "sp-io",
  "sp-runtime",
  "sp-tracing 16.0.0",
@@ -10788,7 +10994,7 @@ dependencies = [
  "pallet-staking-reward-curve",
  "pallet-timestamp",
  "parity-scale-codec",
- "rand",
+ "rand 0.8.5",
  "scale-info",
  "sp-core",
  "sp-io",
@@ -11454,7 +11660,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4e69bf016dc406eff7d53a7d3f7cf1c2e72c82b9088aac1118591e36dd2cd3e9"
 dependencies = [
  "bitcoin_hashes 0.13.0",
- "rand",
+ "rand 0.8.5",
  "rand_core 0.6.4",
  "serde",
  "unicode-normalization",
@@ -11481,7 +11687,7 @@ dependencies = [
  "lz4",
  "memmap2 0.5.10",
  "parking_lot 0.12.1",
- "rand",
+ "rand 0.8.5",
  "siphasher",
  "snap",
 ]
@@ -12119,7 +12325,7 @@ dependencies = [
  "polkadot-node-subsystem-util",
  "polkadot-primitives",
  "polkadot-primitives-test-helpers",
- "rand",
+ "rand 0.8.5",
  "rand_chacha 0.3.1",
  "rand_core 0.6.4",
  "schnorrkel 0.11.4",
@@ -12145,7 +12351,7 @@ dependencies = [
  "polkadot-node-subsystem-test-helpers",
  "polkadot-node-subsystem-util",
  "polkadot-primitives",
- "rand",
+ "rand 0.8.5",
  "rand_chacha 0.3.1",
  "sp-application-crypto",
  "sp-authority-discovery",
@@ -12174,7 +12380,7 @@ dependencies = [
  "polkadot-primitives",
  "polkadot-primitives-test-helpers",
  "polkadot-subsystem-bench",
- "rand",
+ "rand 0.8.5",
  "sc-network",
  "schnellru",
  "sp-core",
@@ -12206,7 +12412,7 @@ dependencies = [
  "polkadot-primitives",
  "polkadot-primitives-test-helpers",
  "polkadot-subsystem-bench",
- "rand",
+ "rand 0.8.5",
  "sc-network",
  "schnellru",
  "sp-application-crypto",
@@ -12351,7 +12557,7 @@ dependencies = [
  "polkadot-node-subsystem-util",
  "polkadot-primitives",
  "quickcheck",
- "rand",
+ "rand 0.8.5",
  "rand_chacha 0.3.1",
  "sc-network",
  "sc-network-common",
@@ -12443,7 +12649,7 @@ dependencies = [
  "polkadot-overseer",
  "polkadot-primitives",
  "polkadot-primitives-test-helpers",
- "rand",
+ "rand 0.8.5",
  "rand_chacha 0.3.1",
  "rand_core 0.6.4",
  "sc-keystore",
@@ -12721,7 +12927,7 @@ dependencies = [
  "polkadot-parachain-primitives",
  "polkadot-primitives",
  "procfs",
- "rand",
+ "rand 0.8.5",
  "rococo-runtime",
  "rusty-fork",
  "sc-sysinfo",
@@ -12859,6 +13065,7 @@ dependencies = [
  "polkadot-node-primitives",
  "polkadot-primitives",
  "sc-network",
+ "sc-network-types",
  "sp-core",
  "thiserror",
  "tokio",
@@ -12905,10 +13112,12 @@ dependencies = [
  "polkadot-node-jaeger",
  "polkadot-node-primitives",
  "polkadot-primitives",
- "rand",
+ "rand 0.8.5",
  "rand_chacha 0.3.1",
  "sc-authority-discovery",
  "sc-network",
+ "sc-network-types",
+ "sp-runtime",
  "strum 0.26.2",
  "thiserror",
  "tracing-gum",
@@ -12983,6 +13192,7 @@ dependencies = [
  "polkadot-statement-table",
  "sc-client-api",
  "sc-network",
+ "sc-network-types",
  "sc-transaction-pool-api",
  "smallvec",
  "sp-api",
@@ -13026,7 +13236,7 @@ dependencies = [
  "polkadot-primitives",
  "polkadot-primitives-test-helpers",
  "prioritized-metered-channel",
- "rand",
+ "rand 0.8.5",
  "sc-client-api",
  "schnellru",
  "sp-application-crypto",
@@ -13207,7 +13417,7 @@ name = "polkadot-primitives-test-helpers"
 version = "1.0.0"
 dependencies = [
  "polkadot-primitives",
- "rand",
+ "rand 0.8.5",
  "sp-application-crypto",
  "sp-core",
  "sp-keyring",
@@ -13346,7 +13556,7 @@ dependencies = [
  "polkadot-primitives",
  "polkadot-primitives-test-helpers",
  "polkadot-runtime-metrics",
- "rand",
+ "rand 0.8.5",
  "rand_chacha 0.3.1",
  "rstest",
  "rustc-hex",
@@ -13681,12 +13891,13 @@ dependencies = [
  "prometheus",
  "pyroscope",
  "pyroscope_pprofrs",
- "rand",
+ "rand 0.8.5",
  "rand_chacha 0.3.1",
  "rand_core 0.6.4",
  "rand_distr",
  "sc-keystore",
  "sc-network",
+ "sc-network-types",
  "sc-service",
  "schnorrkel 0.11.4",
  "serde",
@@ -13759,7 +13970,7 @@ dependencies = [
  "polkadot-node-subsystem-types",
  "polkadot-node-subsystem-util",
  "polkadot-primitives",
- "rand",
+ "rand 0.8.5",
  "sp-core",
  "sp-keystore",
  "substrate-build-script-utils",
@@ -13852,7 +14063,7 @@ dependencies = [
  "polkadot-runtime-parachains",
  "polkadot-service",
  "polkadot-test-runtime",
- "rand",
+ "rand 0.8.5",
  "sc-authority-discovery",
  "sc-chain-spec",
  "sc-cli",
@@ -14051,7 +14262,7 @@ version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "be97d76faf1bfab666e1375477b23fde79eccf0276e9b63b92a39d676a889ba9"
 dependencies = [
- "rand",
+ "rand 0.8.5",
 ]
 
 [[package]]
@@ -14338,7 +14549,7 @@ dependencies = [
  "bitflags 2.4.0",
  "lazy_static",
  "num-traits",
- "rand",
+ "rand 0.8.5",
  "rand_chacha 0.3.1",
  "rand_xorshift",
  "regex-syntax 0.8.2",
@@ -14515,7 +14726,7 @@ checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6"
 dependencies = [
  "env_logger 0.8.4",
  "log",
- "rand",
+ "rand 0.8.5",
 ]
 
 [[package]]
@@ -14529,6 +14740,24 @@ dependencies = [
  "pin-project-lite 0.1.12",
 ]
 
+[[package]]
+name = "quinn"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2e8b432585672228923edbbf64b8b12c14e1112f62e88737655b4a083dbcd78e"
+dependencies = [
+ "bytes",
+ "pin-project-lite 0.2.12",
+ "quinn-proto",
+ "quinn-udp",
+ "rustc-hash",
+ "rustls 0.20.8",
+ "thiserror",
+ "tokio",
+ "tracing",
+ "webpki",
+]
+
 [[package]]
 name = "quinn-proto"
 version = "0.9.5"
@@ -14536,7 +14765,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c956be1b23f4261676aed05a0046e204e8a6836e50203902683a718af0797989"
 dependencies = [
  "bytes",
- "rand",
+ "rand 0.8.5",
  "ring 0.16.20",
  "rustc-hash",
  "rustls 0.20.8",
@@ -14547,6 +14776,19 @@ dependencies = [
  "webpki",
 ]
 
+[[package]]
+name = "quinn-udp"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "641538578b21f5e5c8ea733b736895576d0fe329bb883b937db6f4d163dbaaf4"
+dependencies = [
+ "libc",
+ "quinn-proto",
+ "socket2 0.4.9",
+ "tracing",
+ "windows-sys 0.42.0",
+]
+
 [[package]]
 name = "quote"
 version = "1.0.35"
@@ -14562,6 +14804,19 @@ version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09"
 
+[[package]]
+name = "rand"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
+dependencies = [
+ "getrandom 0.1.16",
+ "libc",
+ "rand_chacha 0.2.2",
+ "rand_core 0.5.1",
+ "rand_hc",
+]
+
 [[package]]
 name = "rand"
 version = "0.8.5"
@@ -14618,7 +14873,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31"
 dependencies = [
  "num-traits",
- "rand",
+ "rand 0.8.5",
+]
+
+[[package]]
+name = "rand_hc"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
+dependencies = [
+ "rand_core 0.5.1",
 ]
 
 [[package]]
@@ -15294,7 +15558,7 @@ dependencies = [
  "parity-scale-codec",
  "primitive-types",
  "proptest",
- "rand",
+ "rand 0.8.5",
  "rlp",
  "ruint-macro",
  "serde",
@@ -15606,15 +15870,16 @@ dependencies = [
  "libp2p",
  "linked_hash_set",
  "log",
- "multihash 0.18.1",
+ "multihash 0.17.0",
  "multihash-codetable",
  "parity-scale-codec",
  "prost 0.12.3",
  "prost-build",
  "quickcheck",
- "rand",
+ "rand 0.8.5",
  "sc-client-api",
  "sc-network",
+ "sc-network-types",
  "sp-api",
  "sp-authority-discovery",
  "sp-blockchain",
@@ -15724,7 +15989,7 @@ dependencies = [
  "names",
  "parity-bip39",
  "parity-scale-codec",
- "rand",
+ "rand 0.8.5",
  "regex",
  "rpassword",
  "sc-client-api",
@@ -15797,7 +16062,7 @@ dependencies = [
  "parity-scale-codec",
  "parking_lot 0.12.1",
  "quickcheck",
- "rand",
+ "rand 0.8.5",
  "sc-client-api",
  "sc-state-db",
  "schnellru",
@@ -15820,11 +16085,11 @@ dependencies = [
  "async-trait",
  "futures",
  "futures-timer",
- "libp2p-identity",
  "log",
  "mockall",
  "parking_lot 0.12.1",
  "sc-client-api",
+ "sc-network-types",
  "sc-utils",
  "serde",
  "sp-api",
@@ -15965,6 +16230,7 @@ dependencies = [
  "sc-network-gossip",
  "sc-network-sync",
  "sc-network-test",
+ "sc-network-types",
  "sc-utils",
  "serde",
  "sp-api",
@@ -16038,7 +16304,7 @@ dependencies = [
  "log",
  "parity-scale-codec",
  "parking_lot 0.12.1",
- "rand",
+ "rand 0.8.5",
  "sc-block-builder",
  "sc-chain-spec",
  "sc-client-api",
@@ -16048,6 +16314,7 @@ dependencies = [
  "sc-network-gossip",
  "sc-network-sync",
  "sc-network-test",
+ "sc-network-types",
  "sc-telemetry",
  "sc-transaction-pool-api",
  "sc-utils",
@@ -16306,7 +16573,6 @@ dependencies = [
  "bytes",
  "futures",
  "futures-timer",
- "libp2p-identity",
  "log",
  "mixnet",
  "multiaddr",
@@ -16314,6 +16580,7 @@ dependencies = [
  "parking_lot 0.12.1",
  "sc-client-api",
  "sc-network",
+ "sc-network-types",
  "sc-transaction-pool-api",
  "sp-api",
  "sp-consensus",
@@ -16334,6 +16601,7 @@ dependencies = [
  "async-trait",
  "asynchronous-codec",
  "bytes",
+ "cid 0.9.0",
  "either",
  "fnv",
  "futures",
@@ -16341,25 +16609,34 @@ dependencies = [
  "ip_network",
  "libp2p",
  "linked_hash_set",
+ "litep2p",
  "log",
  "mockall",
  "multistream-select",
+ "once_cell",
  "parity-scale-codec",
  "parking_lot 0.12.1",
  "partial_sort",
  "pin-project",
- "rand",
+ "prost 0.11.9",
+ "prost-build",
+ "rand 0.8.5",
+ "sc-block-builder",
  "sc-client-api",
  "sc-network-common",
  "sc-network-light",
  "sc-network-sync",
+ "sc-network-types",
  "sc-utils",
+ "schnellru",
  "serde",
  "serde_json",
  "smallvec",
  "sp-arithmetic",
  "sp-blockchain",
+ "sp-consensus",
  "sp-core",
+ "sp-crypto-hashing",
  "sp-runtime",
  "sp-test-primitives",
  "sp-tracing 16.0.0",
@@ -16373,36 +16650,11 @@ dependencies = [
  "tokio-test",
  "tokio-util",
  "unsigned-varint",
+ "void",
  "wasm-timer",
  "zeroize",
 ]
 
-[[package]]
-name = "sc-network-bitswap"
-version = "0.33.0"
-dependencies = [
- "async-channel",
- "cid",
- "futures",
- "libp2p-identity",
- "log",
- "prost 0.12.3",
- "prost-build",
- "sc-block-builder",
- "sc-client-api",
- "sc-consensus",
- "sc-network",
- "sp-blockchain",
- "sp-consensus",
- "sp-crypto-hashing",
- "sp-runtime",
- "substrate-test-runtime",
- "substrate-test-runtime-client",
- "thiserror",
- "tokio",
- "unsigned-varint",
-]
-
 [[package]]
 name = "sc-network-common"
 version = "0.33.0"
@@ -16414,6 +16666,7 @@ dependencies = [
  "parity-scale-codec",
  "prost-build",
  "sc-consensus",
+ "sc-network-types",
  "sp-consensus",
  "sp-consensus-grandpa",
  "sp-runtime",
@@ -16435,6 +16688,7 @@ dependencies = [
  "sc-network",
  "sc-network-common",
  "sc-network-sync",
+ "sc-network-types",
  "schnellru",
  "sp-runtime",
  "substrate-prometheus-endpoint",
@@ -16450,13 +16704,13 @@ dependencies = [
  "array-bytes 6.1.0",
  "async-channel",
  "futures",
- "libp2p-identity",
  "log",
  "parity-scale-codec",
  "prost 0.12.3",
  "prost-build",
  "sc-client-api",
  "sc-network",
+ "sc-network-types",
  "sp-blockchain",
  "sp-core",
  "sp-runtime",
@@ -16476,7 +16730,9 @@ dependencies = [
  "sc-network",
  "sc-network-common",
  "sc-network-sync",
+ "sc-network-types",
  "sp-consensus",
+ "sp-runtime",
  "sp-statement-store",
  "substrate-prometheus-endpoint",
 ]
@@ -16503,6 +16759,7 @@ dependencies = [
  "sc-consensus",
  "sc-network",
  "sc-network-common",
+ "sc-network-types",
  "sc-utils",
  "schnellru",
  "smallvec",
@@ -16531,7 +16788,7 @@ dependencies = [
  "libp2p",
  "log",
  "parking_lot 0.12.1",
- "rand",
+ "rand 0.8.5",
  "sc-block-builder",
  "sc-client-api",
  "sc-consensus",
@@ -16539,6 +16796,7 @@ dependencies = [
  "sc-network-common",
  "sc-network-light",
  "sc-network-sync",
+ "sc-network-types",
  "sc-service",
  "sc-utils",
  "sp-blockchain",
@@ -16563,17 +16821,32 @@ dependencies = [
  "sc-network",
  "sc-network-common",
  "sc-network-sync",
+ "sc-network-types",
  "sc-utils",
  "sp-consensus",
  "sp-runtime",
  "substrate-prometheus-endpoint",
 ]
 
+[[package]]
+name = "sc-network-types"
+version = "0.10.0-dev"
+dependencies = [
+ "bs58 0.4.0",
+ "libp2p-identity",
+ "litep2p",
+ "multiaddr",
+ "multihash 0.17.0",
+ "rand 0.8.5",
+ "thiserror",
+]
+
 [[package]]
 name = "sc-offchain"
 version = "29.0.0"
 dependencies = [
  "array-bytes 6.1.0",
+ "async-trait",
  "bytes",
  "fnv",
  "futures",
@@ -16587,12 +16860,13 @@ dependencies = [
  "once_cell",
  "parity-scale-codec",
  "parking_lot 0.12.1",
- "rand",
+ "rand 0.8.5",
  "sc-block-builder",
  "sc-client-api",
  "sc-client-db",
  "sc-network",
  "sc-network-common",
+ "sc-network-types",
  "sc-transaction-pool",
  "sc-transaction-pool-api",
  "sc-utils",
@@ -16710,7 +16984,7 @@ dependencies = [
  "parity-scale-codec",
  "parking_lot 0.12.1",
  "pretty_assertions",
- "rand",
+ "rand 0.8.5",
  "sc-block-builder",
  "sc-chain-spec",
  "sc-client-api",
@@ -16764,7 +17038,7 @@ dependencies = [
  "parity-scale-codec",
  "parking_lot 0.12.1",
  "pin-project",
- "rand",
+ "rand 0.8.5",
  "sc-chain-spec",
  "sc-client-api",
  "sc-client-db",
@@ -16773,11 +17047,11 @@ dependencies = [
  "sc-informant",
  "sc-keystore",
  "sc-network",
- "sc-network-bitswap",
  "sc-network-common",
  "sc-network-light",
  "sc-network-sync",
  "sc-network-transactions",
+ "sc-network-types",
  "sc-rpc",
  "sc-rpc-server",
  "sc-rpc-spec-v2",
@@ -16919,7 +17193,7 @@ dependencies = [
  "futures",
  "libc",
  "log",
- "rand",
+ "rand 0.8.5",
  "rand_pcg",
  "regex",
  "sc-telemetry",
@@ -16942,7 +17216,8 @@ dependencies = [
  "log",
  "parking_lot 0.12.1",
  "pin-project",
- "rand",
+ "rand 0.8.5",
+ "sc-network",
  "sc-utils",
  "serde",
  "serde_json",
@@ -17188,6 +17463,21 @@ dependencies = [
  "untrusted 0.7.1",
 ]
 
+[[package]]
+name = "sctp-proto"
+version = "0.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f64cef148d3295c730c3cb340b0b252a4d570b1c7d4bf0808f88540b0a888bc"
+dependencies = [
+ "bytes",
+ "crc",
+ "fxhash",
+ "log",
+ "rand 0.8.5",
+ "slab",
+ "thiserror",
+]
+
 [[package]]
 name = "sec1"
 version = "0.7.3"
@@ -17514,6 +17804,7 @@ dependencies = [
  "cfg-if",
  "cpufeatures",
  "digest 0.10.7",
+ "sha1-asm",
 ]
 
 [[package]]
@@ -17527,6 +17818,15 @@ dependencies = [
  "digest 0.10.7",
 ]
 
+[[package]]
+name = "sha1-asm"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2ba6947745e7f86be3b8af00b7355857085dbdf8901393c89514510eb61f4e21"
+dependencies = [
+ "cc",
+]
+
 [[package]]
 name = "sha2"
 version = "0.9.9"
@@ -17632,6 +17932,12 @@ dependencies = [
  "libc",
 ]
 
+[[package]]
+name = "signature"
+version = "1.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c"
+
 [[package]]
 name = "signature"
 version = "2.1.0"
@@ -17655,6 +17961,15 @@ dependencies = [
  "wide",
 ]
 
+[[package]]
+name = "simple-dns"
+version = "0.5.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cae9a3fcdadafb6d97f4c0e007e4247b114ee0f119f650c3cbf3a8b3a1479694"
+dependencies = [
+ "bitflags 2.4.0",
+]
+
 [[package]]
 name = "simple-mermaid"
 version = "0.1.1"
@@ -17770,7 +18085,7 @@ dependencies = [
  "pbkdf2",
  "pin-project",
  "poly1305 0.8.0",
- "rand",
+ "rand 0.8.5",
  "rand_chacha 0.3.1",
  "ruzstd",
  "schnorrkel 0.10.2",
@@ -17813,7 +18128,7 @@ dependencies = [
  "no-std-net",
  "parking_lot 0.12.1",
  "pin-project",
- "rand",
+ "rand 0.8.5",
  "rand_chacha 0.3.1",
  "serde",
  "serde_json",
@@ -17914,7 +18229,7 @@ dependencies = [
  "hex-literal",
  "parity-bytes",
  "parity-scale-codec",
- "rand",
+ "rand 0.8.5",
  "rlp",
  "rustc-hex",
  "scale-info",
@@ -17937,7 +18252,7 @@ dependencies = [
  "hex",
  "lazy_static",
  "parity-scale-codec",
- "rand",
+ "rand 0.8.5",
  "scale-info",
  "snowbridge-amcl",
  "zeroize",
@@ -17985,7 +18300,7 @@ dependencies = [
  "log",
  "pallet-timestamp",
  "parity-scale-codec",
- "rand",
+ "rand 0.8.5",
  "rlp",
  "scale-info",
  "serde",
@@ -18279,7 +18594,7 @@ dependencies = [
  "http",
  "httparse",
  "log",
- "rand",
+ "rand 0.8.5",
  "sha-1 0.9.8",
 ]
 
@@ -18456,7 +18771,7 @@ dependencies = [
  "num-traits",
  "parity-scale-codec",
  "primitive-types",
- "rand",
+ "rand 0.8.5",
  "scale-info",
  "serde",
  "sp-crypto-hashing",
@@ -18676,7 +18991,7 @@ dependencies = [
  "parking_lot 0.12.1",
  "paste",
  "primitive-types",
- "rand",
+ "rand 0.8.5",
  "regex",
  "scale-info",
  "schnorrkel 0.11.4",
@@ -18861,7 +19176,7 @@ name = "sp-io"
 version = "30.0.0"
 dependencies = [
  "bytes",
- "ed25519-dalek",
+ "ed25519-dalek 2.1.0",
  "libsecp256k1",
  "log",
  "parity-scale-codec",
@@ -18896,7 +19211,7 @@ version = "0.34.0"
 dependencies = [
  "parity-scale-codec",
  "parking_lot 0.12.1",
- "rand",
+ "rand 0.8.5",
  "rand_chacha 0.2.2",
  "sp-core",
  "sp-externalities 0.25.0",
@@ -18951,7 +19266,7 @@ name = "sp-npos-elections"
 version = "26.0.0"
 dependencies = [
  "parity-scale-codec",
- "rand",
+ "rand 0.8.5",
  "scale-info",
  "serde",
  "sp-arithmetic",
@@ -18966,7 +19281,7 @@ version = "2.0.0-alpha.5"
 dependencies = [
  "clap 4.5.3",
  "honggfuzz",
- "rand",
+ "rand 0.8.5",
  "sp-npos-elections",
  "sp-runtime",
 ]
@@ -19010,7 +19325,7 @@ dependencies = [
  "log",
  "parity-scale-codec",
  "paste",
- "rand",
+ "rand 0.8.5",
  "scale-info",
  "serde",
  "serde_json",
@@ -19168,7 +19483,7 @@ dependencies = [
  "parity-scale-codec",
  "parking_lot 0.12.1",
  "pretty_assertions",
- "rand",
+ "rand 0.8.5",
  "smallvec",
  "sp-core",
  "sp-externalities 0.25.0",
@@ -19186,10 +19501,10 @@ version = "10.0.0"
 dependencies = [
  "aes-gcm 0.10.3",
  "curve25519-dalek 4.1.2",
- "ed25519-dalek",
+ "ed25519-dalek 2.1.0",
  "hkdf",
  "parity-scale-codec",
- "rand",
+ "rand 0.8.5",
  "scale-info",
  "sha2 0.10.7",
  "sp-api",
@@ -19315,7 +19630,7 @@ dependencies = [
  "nohash-hasher",
  "parity-scale-codec",
  "parking_lot 0.12.1",
- "rand",
+ "rand 0.8.5",
  "scale-info",
  "schnellru",
  "sp-core",
@@ -19520,7 +19835,7 @@ dependencies = [
  "pallet-treasury",
  "parity-scale-codec",
  "platforms",
- "rand",
+ "rand 0.8.5",
  "regex",
  "sc-authority-discovery",
  "sc-basic-authorship",
@@ -19734,6 +20049,26 @@ dependencies = [
  "syn 1.0.109",
 ]
 
+[[package]]
+name = "str0m"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee48572247f422dcbe68630c973f8296fbd5157119cd36a3223e48bf83d47727"
+dependencies = [
+ "combine",
+ "crc",
+ "hmac 0.12.1",
+ "once_cell",
+ "openssl",
+ "openssl-sys",
+ "rand 0.8.5",
+ "sctp-proto",
+ "serde",
+ "sha-1 0.10.1",
+ "thiserror",
+ "tracing",
+]
+
 [[package]]
 name = "strobe-rs"
 version = "0.8.1"
@@ -20671,7 +21006,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7f57eb36ecbe0fc510036adff84824dd3c24bb781e21bfa67b69d556aa85214f"
 dependencies = [
  "pin-project",
- "rand",
+ "rand 0.8.5",
  "tokio",
 ]
 
@@ -20730,7 +21065,22 @@ dependencies = [
  "futures-util",
  "log",
  "tokio",
- "tungstenite",
+ "tungstenite 0.17.3",
+]
+
+[[package]]
+name = "tokio-tungstenite"
+version = "0.20.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c"
+dependencies = [
+ "futures-util",
+ "log",
+ "rustls 0.21.6",
+ "rustls-native-certs 0.6.3",
+ "tokio",
+ "tokio-rustls 0.24.1",
+ "tungstenite 0.20.1",
 ]
 
 [[package]]
@@ -21044,14 +21394,14 @@ dependencies = [
  "async-trait",
  "cfg-if",
  "data-encoding",
- "enum-as-inner",
+ "enum-as-inner 0.5.1",
  "futures-channel",
  "futures-io",
  "futures-util",
  "idna 0.2.3",
  "ipnet",
  "lazy_static",
- "rand",
+ "rand 0.8.5",
  "smallvec",
  "socket2 0.4.9",
  "thiserror",
@@ -21061,6 +21411,31 @@ dependencies = [
  "url",
 ]
 
+[[package]]
+name = "trust-dns-proto"
+version = "0.23.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3119112651c157f4488931a01e586aa459736e9d6046d3bd9105ffb69352d374"
+dependencies = [
+ "async-trait",
+ "cfg-if",
+ "data-encoding",
+ "enum-as-inner 0.6.0",
+ "futures-channel",
+ "futures-io",
+ "futures-util",
+ "idna 0.4.0",
+ "ipnet",
+ "once_cell",
+ "rand 0.8.5",
+ "smallvec",
+ "thiserror",
+ "tinyvec",
+ "tokio",
+ "tracing",
+ "url",
+]
+
 [[package]]
 name = "trust-dns-resolver"
 version = "0.22.0"
@@ -21078,7 +21453,28 @@ dependencies = [
  "thiserror",
  "tokio",
  "tracing",
- "trust-dns-proto",
+ "trust-dns-proto 0.22.0",
+]
+
+[[package]]
+name = "trust-dns-resolver"
+version = "0.23.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "10a3e6c3aff1718b3c73e395d1f35202ba2ffa847c6a62eea0db8fb4cfe30be6"
+dependencies = [
+ "cfg-if",
+ "futures-util",
+ "ipconfig",
+ "lru-cache",
+ "once_cell",
+ "parking_lot 0.12.1",
+ "rand 0.8.5",
+ "resolv-conf",
+ "smallvec",
+ "thiserror",
+ "tokio",
+ "tracing",
+ "trust-dns-proto 0.23.2",
 ]
 
 [[package]]
@@ -21162,13 +21558,33 @@ dependencies = [
  "http",
  "httparse",
  "log",
- "rand",
+ "rand 0.8.5",
  "sha-1 0.10.1",
  "thiserror",
  "url",
  "utf-8",
 ]
 
+[[package]]
+name = "tungstenite"
+version = "0.20.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9e3dac10fd62eaf6617d3a904ae222845979aec67c615d1c842b4002c7666fb9"
+dependencies = [
+ "byteorder",
+ "bytes",
+ "data-encoding",
+ "http",
+ "httparse",
+ "log",
+ "rand 0.8.5",
+ "rustls 0.21.6",
+ "sha1",
+ "thiserror",
+ "url",
+ "utf-8",
+]
+
 [[package]]
 name = "twox-hash"
 version = "1.6.3"
@@ -21177,7 +21593,7 @@ checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675"
 dependencies = [
  "cfg-if",
  "digest 0.10.7",
- "rand",
+ "rand 0.8.5",
  "static_assertions",
 ]
 
@@ -21272,14 +21688,15 @@ checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861"
 
 [[package]]
 name = "unsigned-varint"
-version = "0.7.1"
+version = "0.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d86a8dc7f45e4c1b0d30e43038c38f274e77af056aa5f74b93c2cf9eb3c1c836"
+checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105"
 dependencies = [
  "asynchronous-codec",
  "bytes",
  "futures-io",
  "futures-util",
+ "tokio-util",
 ]
 
 [[package]]
@@ -21398,7 +21815,7 @@ dependencies = [
  "arrayref",
  "constcat",
  "digest 0.10.7",
- "rand",
+ "rand 0.8.5",
  "rand_chacha 0.3.1",
  "rand_core 0.6.4",
  "sha2 0.10.7",
@@ -21843,7 +22260,7 @@ dependencies = [
  "memfd",
  "memoffset 0.8.0",
  "paste",
- "rand",
+ "rand 0.8.5",
  "rustix 0.36.15",
  "wasmtime-asm-macros",
  "wasmtime-environ",
@@ -22156,6 +22573,21 @@ dependencies = [
  "windows-targets 0.48.5",
 ]
 
+[[package]]
+name = "windows-sys"
+version = "0.42.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7"
+dependencies = [
+ "windows_aarch64_gnullvm 0.42.2",
+ "windows_aarch64_msvc 0.42.2",
+ "windows_i686_gnu 0.42.2",
+ "windows_i686_msvc 0.42.2",
+ "windows_x86_64_gnu 0.42.2",
+ "windows_x86_64_gnullvm 0.42.2",
+ "windows_x86_64_msvc 0.42.2",
+]
+
 [[package]]
 name = "windows-sys"
 version = "0.45.0"
@@ -22453,6 +22885,23 @@ dependencies = [
  "time",
 ]
 
+[[package]]
+name = "x509-parser"
+version = "0.15.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7069fba5b66b9193bd2c5d3d4ff12b839118f6bcbef5328efafafb5395cf63da"
+dependencies = [
+ "asn1-rs",
+ "data-encoding",
+ "der-parser",
+ "lazy_static",
+ "nom",
+ "oid-registry",
+ "rusticata-macros",
+ "thiserror",
+ "time",
+]
+
 [[package]]
 name = "xattr"
 version = "1.0.1"
@@ -22627,7 +23076,7 @@ dependencies = [
  "log",
  "nohash-hasher",
  "parking_lot 0.12.1",
- "rand",
+ "rand 0.8.5",
  "static_assertions",
 ]
 
@@ -22698,7 +23147,7 @@ dependencies = [
  "serde_json",
  "thiserror",
  "tokio",
- "tokio-tungstenite",
+ "tokio-tungstenite 0.17.2",
  "tracing-gum",
  "url",
 ]
diff --git a/Cargo.toml b/Cargo.toml
index d7ca9da71d54c9b9f979a6e69a05eeb058bee8ea..52431a9cefeaf3ec73803d289e8fbed43525fd70 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -260,13 +260,13 @@ members = [
 	"substrate/client/mixnet",
 	"substrate/client/network",
 	"substrate/client/network-gossip",
-	"substrate/client/network/bitswap",
 	"substrate/client/network/common",
 	"substrate/client/network/light",
 	"substrate/client/network/statement",
 	"substrate/client/network/sync",
 	"substrate/client/network/test",
 	"substrate/client/network/transactions",
+	"substrate/client/network/types",
 	"substrate/client/offchain",
 	"substrate/client/proposer-metrics",
 	"substrate/client/rpc",
diff --git a/cumulus/client/relay-chain-minimal-node/src/lib.rs b/cumulus/client/relay-chain-minimal-node/src/lib.rs
index 6aea043713d873b77ecadc8b7efc7c7eb8c0039b..b84427c3a75a55e30d9867cbe8d45522a2d2f8c0 100644
--- a/cumulus/client/relay-chain-minimal-node/src/lib.rs
+++ b/cumulus/client/relay-chain-minimal-node/src/lib.rs
@@ -27,12 +27,16 @@ use polkadot_node_network_protocol::{
 	},
 };
 
+use polkadot_core_primitives::{Block as RelayBlock, Hash as RelayHash};
 use polkadot_node_subsystem_util::metrics::prometheus::Registry;
 use polkadot_primitives::CollatorPair;
 use polkadot_service::{overseer::OverseerGenArgs, IsParachainNode};
 
 use sc_authority_discovery::Service as AuthorityDiscoveryService;
-use sc_network::{config::FullNetworkConfiguration, Event, NetworkEventStream, NetworkService};
+use sc_network::{
+	config::FullNetworkConfiguration, service::traits::NetworkService, Event, NetworkBackend,
+	NetworkEventStream,
+};
 use sc_service::{config::PrometheusConfig, Configuration, TaskManager};
 use sp_runtime::{app_crypto::Pair, traits::Block as BlockT};
 
@@ -51,7 +55,7 @@ fn build_authority_discovery_service<Block: BlockT>(
 	task_manager: &TaskManager,
 	client: Arc<BlockChainRpcClient>,
 	config: &Configuration,
-	network: Arc<NetworkService<Block, <Block as BlockT>::Hash>>,
+	network: Arc<dyn NetworkService>,
 	prometheus_registry: Option<Registry>,
 ) -> AuthorityDiscoveryService {
 	let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht;
@@ -72,7 +76,7 @@ fn build_authority_discovery_service<Block: BlockT>(
 			..Default::default()
 		},
 		client,
-		network,
+		Arc::new(network.clone()),
 		Box::pin(dht_event_stream),
 		authority_discovery_role,
 		prometheus_registry,
@@ -92,12 +96,22 @@ async fn build_interface(
 	client: RelayChainRpcClient,
 ) -> RelayChainResult<(Arc<(dyn RelayChainInterface + 'static)>, Option<CollatorPair>)> {
 	let collator_pair = CollatorPair::generate().0;
-	let collator_node = new_minimal_relay_chain(
-		polkadot_config,
-		collator_pair.clone(),
-		Arc::new(BlockChainRpcClient::new(client.clone())),
-	)
-	.await?;
+	let collator_node = match polkadot_config.network.network_backend {
+		sc_network::config::NetworkBackendType::Libp2p =>
+			new_minimal_relay_chain::<RelayBlock, sc_network::NetworkWorker<RelayBlock, RelayHash>>(
+				polkadot_config,
+				collator_pair.clone(),
+				Arc::new(BlockChainRpcClient::new(client.clone())),
+			)
+			.await?,
+		sc_network::config::NetworkBackendType::Litep2p =>
+			new_minimal_relay_chain::<RelayBlock, sc_network::Litep2pNetworkBackend>(
+				polkadot_config,
+				collator_pair.clone(),
+				Arc::new(BlockChainRpcClient::new(client.clone())),
+			)
+			.await?,
+	};
 	task_manager.add_child(collator_node.task_manager);
 	Ok((
 		Arc::new(RelayChainRpcInterface::new(client, collator_node.overseer_handle)),
@@ -143,6 +157,7 @@ pub async fn build_minimal_relay_chain_node_light_client(
 
 	build_interface(polkadot_config, task_manager, client).await
 }
+
 /// Builds a minimal relay chain node. Chain data is fetched
 /// via [`BlockChainRpcClient`] and fed into the overseer and its subsystems.
 ///
@@ -155,13 +170,18 @@ pub async fn build_minimal_relay_chain_node_light_client(
 /// - NetworkBridgeTx
 /// - RuntimeApi
 #[sc_tracing::logging::prefix_logs_with("Relaychain")]
-async fn new_minimal_relay_chain(
+async fn new_minimal_relay_chain<Block: BlockT, Network: NetworkBackend<RelayBlock, RelayHash>>(
 	config: Configuration,
 	collator_pair: CollatorPair,
 	relay_chain_rpc_client: Arc<BlockChainRpcClient>,
 ) -> Result<NewMinimalNode, RelayChainError> {
 	let role = config.role.clone();
-	let mut net_config = sc_network::config::FullNetworkConfiguration::new(&config.network);
+	let mut net_config =
+		sc_network::config::FullNetworkConfiguration::<_, _, Network>::new(&config.network);
+	let metrics = Network::register_notification_metrics(
+		config.prometheus_config.as_ref().map(|cfg| &cfg.registry),
+	);
+	let peer_store_handle = net_config.peer_store_handle();
 
 	let prometheus_registry = config.prometheus_registry();
 	let task_manager = TaskManager::new(config.tokio_handle.clone(), prometheus_registry)?;
@@ -178,13 +198,18 @@ async fn new_minimal_relay_chain(
 	let peerset_protocol_names =
 		PeerSetProtocolNames::new(genesis_hash, config.chain_spec.fork_id());
 	let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No };
-	let notification_services = peer_sets_info(is_authority, &peerset_protocol_names)
-		.into_iter()
-		.map(|(config, (peerset, service))| {
-			net_config.add_notification_protocol(config);
-			(peerset, service)
-		})
-		.collect::<std::collections::HashMap<PeerSet, Box<dyn sc_network::NotificationService>>>();
+	let notification_services = peer_sets_info::<_, Network>(
+		is_authority,
+		&peerset_protocol_names,
+		metrics.clone(),
+		Arc::clone(&peer_store_handle),
+	)
+	.into_iter()
+	.map(|(config, (peerset, service))| {
+		net_config.add_notification_protocol(config);
+		(peerset, service)
+	})
+	.collect::<std::collections::HashMap<PeerSet, Box<dyn sc_network::NotificationService>>>();
 
 	let request_protocol_names = ReqProtocolNames::new(genesis_hash, config.chain_spec.fork_id());
 	let (collation_req_v1_receiver, collation_req_v2_receiver, available_data_req_receiver) =
@@ -194,16 +219,17 @@ async fn new_minimal_relay_chain(
 		.chain_get_header(None)
 		.await?
 		.ok_or_else(|| RelayChainError::RpcCallError("Unable to fetch best header".to_string()))?;
-	let (network, network_starter, sync_service) = build_collator_network(
+	let (network, network_starter, sync_service) = build_collator_network::<Network>(
 		&config,
 		net_config,
 		task_manager.spawn_handle(),
 		genesis_hash,
 		best_header,
+		metrics,
 	)
 	.map_err(|e| RelayChainError::Application(Box::new(e) as Box<_>))?;
 
-	let authority_discovery_service = build_authority_discovery_service(
+	let authority_discovery_service = build_authority_discovery_service::<Block>(
 		&task_manager,
 		relay_chain_rpc_client.clone(),
 		&config,
@@ -236,24 +262,28 @@ async fn new_minimal_relay_chain(
 	Ok(NewMinimalNode { task_manager, overseer_handle })
 }
 
-fn build_request_response_protocol_receivers(
+fn build_request_response_protocol_receivers<
+	Block: BlockT,
+	Network: NetworkBackend<Block, <Block as BlockT>::Hash>,
+>(
 	request_protocol_names: &ReqProtocolNames,
-	config: &mut FullNetworkConfiguration,
+	config: &mut FullNetworkConfiguration<Block, <Block as BlockT>::Hash, Network>,
 ) -> (
 	IncomingRequestReceiver<v1::CollationFetchingRequest>,
 	IncomingRequestReceiver<v2::CollationFetchingRequest>,
 	IncomingRequestReceiver<v1::AvailableDataFetchingRequest>,
 ) {
 	let (collation_req_v1_receiver, cfg) =
-		IncomingRequest::get_config_receiver(request_protocol_names);
+		IncomingRequest::get_config_receiver::<_, Network>(request_protocol_names);
 	config.add_request_response_protocol(cfg);
 	let (collation_req_v2_receiver, cfg) =
-		IncomingRequest::get_config_receiver(request_protocol_names);
+		IncomingRequest::get_config_receiver::<_, Network>(request_protocol_names);
 	config.add_request_response_protocol(cfg);
 	let (available_data_req_receiver, cfg) =
-		IncomingRequest::get_config_receiver(request_protocol_names);
+		IncomingRequest::get_config_receiver::<_, Network>(request_protocol_names);
 	config.add_request_response_protocol(cfg);
-	let cfg = Protocol::ChunkFetchingV1.get_outbound_only_config(request_protocol_names);
+	let cfg =
+		Protocol::ChunkFetchingV1.get_outbound_only_config::<_, Network>(request_protocol_names);
 	config.add_request_response_protocol(cfg);
 	(collation_req_v1_receiver, collation_req_v2_receiver, available_data_req_receiver)
 }
diff --git a/cumulus/client/relay-chain-minimal-node/src/network.rs b/cumulus/client/relay-chain-minimal-node/src/network.rs
index 7286fab7907cb6d475b81a4dfd97c5d001180873..025ac7a81a21c6a23622cac37882994309c54e85 100644
--- a/cumulus/client/relay-chain-minimal-node/src/network.rs
+++ b/cumulus/client/relay-chain-minimal-node/src/network.rs
@@ -15,64 +15,56 @@
 // along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
 
 use polkadot_core_primitives::{Block, Hash, Header};
-use sp_runtime::traits::{Block as BlockT, NumberFor};
+use sp_runtime::traits::NumberFor;
 
 use sc_network::{
 	config::{
-		NetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake,
-		ProtocolId, SetConfig,
+		NetworkConfiguration, NonReservedPeerMode, NotificationHandshake, PeerStore, ProtocolId,
+		SetConfig,
 	},
-	peer_store::PeerStore,
-	NetworkService,
+	peer_store::PeerStoreProvider,
+	service::traits::NetworkService,
+	NotificationMetrics,
 };
 
-use sc_network::{config::FullNetworkConfiguration, NotificationService};
+use sc_network::{config::FullNetworkConfiguration, NetworkBackend, NotificationService};
 use sc_network_common::{role::Roles, sync::message::BlockAnnouncesHandshake};
 use sc_service::{error::Error, Configuration, NetworkStarter, SpawnTaskHandle};
 
 use std::{iter, sync::Arc};
 
 /// Build the network service, the network status sinks and an RPC sender.
-pub(crate) fn build_collator_network(
+pub(crate) fn build_collator_network<Network: NetworkBackend<Block, Hash>>(
 	config: &Configuration,
-	mut full_network_config: FullNetworkConfiguration,
+	mut network_config: FullNetworkConfiguration<Block, Hash, Network>,
 	spawn_handle: SpawnTaskHandle,
 	genesis_hash: Hash,
 	best_header: Header,
+	notification_metrics: NotificationMetrics,
 ) -> Result<
-	(
-		Arc<NetworkService<Block, Hash>>,
-		NetworkStarter,
-		Arc<dyn sp_consensus::SyncOracle + Send + Sync>,
-	),
+	(Arc<dyn NetworkService>, NetworkStarter, Arc<dyn sp_consensus::SyncOracle + Send + Sync>),
 	Error,
 > {
 	let protocol_id = config.protocol_id();
-	let (block_announce_config, _notification_service) = get_block_announce_proto_config::<Block>(
+	let (block_announce_config, _notification_service) = get_block_announce_proto_config::<Network>(
 		protocol_id.clone(),
 		&None,
 		Roles::from(&config.role),
 		best_header.number,
 		best_header.hash(),
 		genesis_hash,
+		notification_metrics.clone(),
+		network_config.peer_store_handle(),
 	);
 
 	// Since this node has no syncing, we do not want light-clients to connect to it.
 	// Here we set any potential light-client slots to 0.
-	adjust_network_config_light_in_peers(&mut full_network_config.network_config);
-
-	let peer_store = PeerStore::new(
-		full_network_config
-			.network_config
-			.boot_nodes
-			.iter()
-			.map(|bootnode| bootnode.peer_id)
-			.collect(),
-	);
-	let peer_store_handle = peer_store.handle();
+	adjust_network_config_light_in_peers(&mut network_config.network_config);
+
+	let peer_store = network_config.take_peer_store();
 	spawn_handle.spawn("peer-store", Some("networking"), peer_store.run());
 
-	let network_params = sc_network::config::Params::<Block> {
+	let network_params = sc_network::config::Params::<Block, Hash, Network> {
 		role: config.role.clone(),
 		executor: {
 			let spawn_handle = Clone::clone(&spawn_handle);
@@ -81,16 +73,17 @@ pub(crate) fn build_collator_network(
 			})
 		},
 		fork_id: None,
-		network_config: full_network_config,
-		peer_store: peer_store_handle,
+		network_config,
 		genesis_hash,
 		protocol_id,
 		metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()),
 		block_announce_config,
+		bitswap_config: None,
+		notification_metrics,
 	};
 
-	let network_worker = sc_network::NetworkWorker::new(network_params)?;
-	let network_service = network_worker.service().clone();
+	let network_worker = Network::new(network_params)?;
+	let network_service = network_worker.network_service();
 
 	let (network_start_tx, network_start_rx) = futures::channel::oneshot::channel();
 
@@ -143,14 +136,16 @@ impl sp_consensus::SyncOracle for SyncOracle {
 	}
 }
 
-fn get_block_announce_proto_config<B: BlockT>(
+fn get_block_announce_proto_config<Network: NetworkBackend<Block, Hash>>(
 	protocol_id: ProtocolId,
 	fork_id: &Option<String>,
 	roles: Roles,
-	best_number: NumberFor<B>,
-	best_hash: B::Hash,
-	genesis_hash: B::Hash,
-) -> (NonDefaultSetConfig, Box<dyn NotificationService>) {
+	best_number: NumberFor<Block>,
+	best_hash: Hash,
+	genesis_hash: Hash,
+	metrics: NotificationMetrics,
+	peer_store_handle: Arc<dyn PeerStoreProvider>,
+) -> (Network::NotificationProtocolConfig, Box<dyn NotificationService>) {
 	let block_announces_protocol = {
 		let genesis_hash = genesis_hash.as_ref();
 		if let Some(ref fork_id) = fork_id {
@@ -160,11 +155,11 @@ fn get_block_announce_proto_config<B: BlockT>(
 		}
 	};
 
-	NonDefaultSetConfig::new(
+	Network::notification_config(
 		block_announces_protocol.into(),
 		iter::once(format!("/{}/block-announces/1", protocol_id.as_ref()).into()).collect(),
 		1024 * 1024,
-		Some(NotificationHandshake::new(BlockAnnouncesHandshake::<B>::build(
+		Some(NotificationHandshake::new(BlockAnnouncesHandshake::<Block>::build(
 			roles,
 			best_number,
 			best_hash,
@@ -178,5 +173,7 @@ fn get_block_announce_proto_config<B: BlockT>(
 			reserved_nodes: Vec::new(),
 			non_reserved_mode: NonReservedPeerMode::Deny,
 		},
+		metrics,
+		peer_store_handle,
 	)
 }
diff --git a/cumulus/client/service/src/lib.rs b/cumulus/client/service/src/lib.rs
index 91e884d6f7ecec86906b5b06ead50a054aaf2577..9b5f0bec53875c98dc563b86ecea1d4864a554da 100644
--- a/cumulus/client/service/src/lib.rs
+++ b/cumulus/client/service/src/lib.rs
@@ -40,7 +40,7 @@ use sc_consensus::{
 	import_queue::{ImportQueue, ImportQueueService},
 	BlockImport,
 };
-use sc_network::{config::SyncMode, NetworkService};
+use sc_network::{config::SyncMode, service::traits::NetworkService, NetworkBackend};
 use sc_network_sync::SyncingService;
 use sc_network_transactions::TransactionsHandlerController;
 use sc_service::{Configuration, NetworkStarter, SpawnTaskHandle, TaskManager, WarpSyncParams};
@@ -406,13 +406,15 @@ pub struct BuildNetworkParams<
 		+ HeaderBackend<Block>
 		+ BlockIdTo<Block>
 		+ 'static,
+	Network: NetworkBackend<Block, <Block as BlockT>::Hash>,
 	RCInterface,
 	IQ,
 > where
 	Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block>,
 {
 	pub parachain_config: &'a Configuration,
-	pub net_config: sc_network::config::FullNetworkConfiguration,
+	pub net_config:
+		sc_network::config::FullNetworkConfiguration<Block, <Block as BlockT>::Hash, Network>,
 	pub client: Arc<Client>,
 	pub transaction_pool: Arc<sc_transaction_pool::FullPool<Block, Client>>,
 	pub para_id: ParaId,
@@ -423,7 +425,7 @@ pub struct BuildNetworkParams<
 }
 
 /// Build the network service, the network status sinks and an RPC sender.
-pub async fn build_network<'a, Block, Client, RCInterface, IQ>(
+pub async fn build_network<'a, Block, Client, RCInterface, IQ, Network>(
 	BuildNetworkParams {
 		parachain_config,
 		net_config,
@@ -434,9 +436,9 @@ pub async fn build_network<'a, Block, Client, RCInterface, IQ>(
 		relay_chain_interface,
 		import_queue,
 		sybil_resistance_level,
-	}: BuildNetworkParams<'a, Block, Client, RCInterface, IQ>,
+	}: BuildNetworkParams<'a, Block, Client, Network, RCInterface, IQ>,
 ) -> sc_service::error::Result<(
-	Arc<NetworkService<Block, Block::Hash>>,
+	Arc<dyn NetworkService>,
 	TracingUnboundedSender<sc_rpc::system::Request<Block>>,
 	TransactionsHandlerController<Block::Hash>,
 	NetworkStarter,
@@ -461,6 +463,7 @@ where
 	for<'b> &'b Client: BlockImport<Block>,
 	RCInterface: RelayChainInterface + Clone + 'static,
 	IQ: ImportQueue<Block> + 'static,
+	Network: NetworkBackend<Block, <Block as BlockT>::Hash>,
 {
 	let warp_sync_params = match parachain_config.network.sync_mode {
 		SyncMode::Warp => {
@@ -485,6 +488,9 @@ where
 			Box::new(block_announce_validator) as Box<_>
 		},
 	};
+	let metrics = Network::register_notification_metrics(
+		parachain_config.prometheus_config.as_ref().map(|cfg| &cfg.registry),
+	);
 
 	sc_service::build_network(sc_service::BuildNetworkParams {
 		config: parachain_config,
@@ -496,6 +502,7 @@ where
 		block_announce_validator_builder: Some(Box::new(move |_| block_announce_validator)),
 		warp_sync_params,
 		block_relay: None,
+		metrics,
 	})
 }
 
diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs
index c0bd7acfebf82a71d16ce938d4cfb2da346ab32a..a897c7c534da70ba70ba2f69b8123b4aa378de23 100644
--- a/cumulus/polkadot-parachain/src/command.rs
+++ b/cumulus/polkadot-parachain/src/command.rs
@@ -21,7 +21,7 @@ use crate::{
 	fake_runtime_api::{
 		asset_hub_polkadot_aura::RuntimeApi as AssetHubPolkadotRuntimeApi, aura::RuntimeApi,
 	},
-	service::{new_partial, Block},
+	service::{new_partial, Block, Hash},
 };
 use cumulus_client_service::storage_proof_size::HostFunctions as ReclaimHostFunctions;
 use cumulus_primitives_core::ParaId;
@@ -498,7 +498,6 @@ macro_rules! construct_async_run {
 
 /// Parse command line arguments into service configuration.
 pub fn run() -> Result<()> {
-	use Runtime::*;
 	let cli = Cli::from_args();
 
 	match &cli.subcommand {
@@ -670,151 +669,236 @@ pub fn run() -> Result<()> {
 				info!("Parachain Account: {}", parachain_account);
 				info!("Is collating: {}", if config.role.is_authority() { "yes" } else { "no" });
 
-				match config.chain_spec.runtime()? {
-					AssetHubPolkadot => crate::service::start_asset_hub_node::<
-						AssetHubPolkadotRuntimeApi,
-						AssetHubPolkadotAuraId,
-					>(config, polkadot_config, collator_options, id, hwbench)
-					.await
-					.map(|r| r.0)
-					.map_err(Into::into),
-
-					AssetHubKusama =>
-						crate::service::start_asset_hub_node::<
-							RuntimeApi,
-							AuraId,
-						>(config, polkadot_config, collator_options, id, hwbench)
-						.await
-						.map(|r| r.0)
-						.map_err(Into::into),
-
-				    AssetHubRococo | AssetHubWestend =>
-						crate::service::start_asset_hub_lookahead_node::<
-						RuntimeApi,
-							AuraId,
-						>(config, polkadot_config, collator_options, id, hwbench)
-						.await
-						.map(|r| r.0)
-						.map_err(Into::into),
-
-					CollectivesPolkadot =>
-						crate::service::start_generic_aura_node(config, polkadot_config, collator_options, id, hwbench)
-						.await
-						.map(|r| r.0)
-						.map_err(Into::into),
-
-					CollectivesWestend =>
-						crate::service::start_generic_aura_lookahead_node(config, polkadot_config, collator_options, id, hwbench)
-						.await
-						.map(|r| r.0)
-						.map_err(Into::into),
-
-					Seedling | Shell =>
-						crate::service::start_shell_node(
+				match polkadot_config.network.network_backend {
+					sc_network::config::NetworkBackendType::Libp2p =>
+						start_node::<sc_network::NetworkWorker<_, _>>(
 							config,
 							polkadot_config,
 							collator_options,
 							id,
 							hwbench,
 						)
-						.await
-						.map(|r| r.0)
-						.map_err(Into::into),
-
-					ContractsRococo => crate::service::start_contracts_rococo_node(
-						config,
-						polkadot_config,
-						collator_options,
-						id,
-						hwbench,
-					)
-					.await
-					.map(|r| r.0)
-					.map_err(Into::into),
-
-					BridgeHub(bridge_hub_runtime_type) => match bridge_hub_runtime_type {
-						chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot |
-						chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotLocal =>
-							crate::service::start_generic_aura_node(config, polkadot_config, collator_options, id, hwbench)
-								.await
-								.map(|r| r.0),
-						chain_spec::bridge_hubs::BridgeHubRuntimeType::Kusama |
-						chain_spec::bridge_hubs::BridgeHubRuntimeType::KusamaLocal =>
-							crate::service::start_generic_aura_node(config, polkadot_config, collator_options, id, hwbench)
-							.await
-							.map(|r| r.0),
-						chain_spec::bridge_hubs::BridgeHubRuntimeType::Westend |
-						chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendLocal |
-						chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendDevelopment =>
-							crate::service::start_generic_aura_lookahead_node(config, polkadot_config, collator_options, id, hwbench)
-							.await
-							.map(|r| r.0),
-						chain_spec::bridge_hubs::BridgeHubRuntimeType::Rococo |
-						chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoLocal |
-						chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoDevelopment =>
-							crate::service::start_generic_aura_lookahead_node(config, polkadot_config, collator_options, id, hwbench)
-							.await
-							.map(|r| r.0),
-					}
-					.map_err(Into::into),
-
-					Coretime(coretime_runtime_type) => match coretime_runtime_type {
-						chain_spec::coretime::CoretimeRuntimeType::Kusama |
-						chain_spec::coretime::CoretimeRuntimeType::KusamaLocal |
-						chain_spec::coretime::CoretimeRuntimeType::Polkadot |
-						chain_spec::coretime::CoretimeRuntimeType::PolkadotLocal |
-						chain_spec::coretime::CoretimeRuntimeType::Rococo |
-						chain_spec::coretime::CoretimeRuntimeType::RococoLocal |
-						chain_spec::coretime::CoretimeRuntimeType::RococoDevelopment |
-						chain_spec::coretime::CoretimeRuntimeType::Westend |
-						chain_spec::coretime::CoretimeRuntimeType::WestendLocal |
-						chain_spec::coretime::CoretimeRuntimeType::WestendDevelopment =>
-							crate::service::start_generic_aura_lookahead_node(config, polkadot_config, collator_options, id, hwbench)
-							.await
-							.map(|r| r.0),
-					}
-					.map_err(Into::into),
-
-					Penpal(_) | Default =>
-						crate::service::start_rococo_parachain_node(
+						.await,
+					sc_network::config::NetworkBackendType::Litep2p =>
+						start_node::<sc_network::Litep2pNetworkBackend>(
 							config,
 							polkadot_config,
 							collator_options,
 							id,
 							hwbench,
 						)
-						.await
-						.map(|r| r.0)
-						.map_err(Into::into),
-
-					Glutton | GluttonWestend =>
-						crate::service::start_basic_lookahead_node(config, polkadot_config, collator_options, id, hwbench)
-						.await
-						.map(|r| r.0)
-						.map_err(Into::into),
-
-					People(people_runtime_type) => match people_runtime_type {
-						chain_spec::people::PeopleRuntimeType::Kusama |
-						chain_spec::people::PeopleRuntimeType::KusamaLocal |
-						chain_spec::people::PeopleRuntimeType::Polkadot |
-						chain_spec::people::PeopleRuntimeType::PolkadotLocal |
-						chain_spec::people::PeopleRuntimeType::Rococo |
-						chain_spec::people::PeopleRuntimeType::RococoLocal |
-						chain_spec::people::PeopleRuntimeType::RococoDevelopment |
-						chain_spec::people::PeopleRuntimeType::Westend |
-						chain_spec::people::PeopleRuntimeType::WestendLocal |
-						chain_spec::people::PeopleRuntimeType::WestendDevelopment =>
-							crate::service::start_generic_aura_lookahead_node(config, polkadot_config, collator_options, id, hwbench)
-							.await
-							.map(|r| r.0),
-					}
-					.map_err(Into::into),
+						.await,
 				}
 			})
 		},
 	}
 }
 
+async fn start_node<Network: sc_network::NetworkBackend<Block, Hash>>(
+	config: sc_service::Configuration,
+	polkadot_config: sc_service::Configuration,
+	collator_options: cumulus_client_cli::CollatorOptions,
+	id: ParaId,
+	hwbench: Option<sc_sysinfo::HwBench>,
+) -> Result<sc_service::TaskManager> {
+	match config.chain_spec.runtime()? {
+		Runtime::AssetHubPolkadot => crate::service::start_asset_hub_node::<
+			AssetHubPolkadotRuntimeApi,
+			AssetHubPolkadotAuraId,
+			Network,
+		>(config, polkadot_config, collator_options, id, hwbench)
+		.await
+		.map(|r| r.0)
+		.map_err(Into::into),
+
+		Runtime::AssetHubKusama => crate::service::start_asset_hub_node::<
+			RuntimeApi,
+			AuraId,
+			Network,
+		>(config, polkadot_config, collator_options, id, hwbench)
+		.await
+		.map(|r| r.0)
+		.map_err(Into::into),
+
+		Runtime::AssetHubRococo | Runtime::AssetHubWestend =>
+			crate::service::start_asset_hub_lookahead_node::<RuntimeApi, AuraId, Network>(
+				config,
+				polkadot_config,
+				collator_options,
+				id,
+				hwbench,
+			)
+			.await
+			.map(|r| r.0)
+			.map_err(Into::into),
+
+		Runtime::CollectivesPolkadot => crate::service::start_generic_aura_node::<Network>(
+			config,
+			polkadot_config,
+			collator_options,
+			id,
+			hwbench,
+		)
+		.await
+		.map(|r| r.0)
+		.map_err(Into::into),
+
+		Runtime::CollectivesWestend =>
+			crate::service::start_generic_aura_lookahead_node::<Network>(
+				config,
+				polkadot_config,
+				collator_options,
+				id,
+				hwbench,
+			)
+			.await
+			.map(|r| r.0)
+			.map_err(Into::into),
+
+		Runtime::Seedling | Runtime::Shell => crate::service::start_shell_node::<Network>(
+			config,
+			polkadot_config,
+			collator_options,
+			id,
+			hwbench,
+		)
+		.await
+		.map(|r| r.0)
+		.map_err(Into::into),
+
+		Runtime::ContractsRococo => crate::service::start_contracts_rococo_node::<Network>(
+			config,
+			polkadot_config,
+			collator_options,
+			id,
+			hwbench,
+		)
+		.await
+		.map(|r| r.0)
+		.map_err(Into::into),
+
+		Runtime::BridgeHub(bridge_hub_runtime_type) => match bridge_hub_runtime_type {
+			chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot |
+			chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotLocal =>
+				crate::service::start_generic_aura_node::<Network>(
+					config,
+					polkadot_config,
+					collator_options,
+					id,
+					hwbench,
+				)
+				.await
+				.map(|r| r.0),
+			chain_spec::bridge_hubs::BridgeHubRuntimeType::Kusama |
+			chain_spec::bridge_hubs::BridgeHubRuntimeType::KusamaLocal =>
+				crate::service::start_generic_aura_node::<Network>(
+					config,
+					polkadot_config,
+					collator_options,
+					id,
+					hwbench,
+				)
+				.await
+				.map(|r| r.0),
+			chain_spec::bridge_hubs::BridgeHubRuntimeType::Westend |
+			chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendLocal |
+			chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendDevelopment =>
+				crate::service::start_generic_aura_lookahead_node::<Network>(
+					config,
+					polkadot_config,
+					collator_options,
+					id,
+					hwbench,
+				)
+				.await
+				.map(|r| r.0),
+			chain_spec::bridge_hubs::BridgeHubRuntimeType::Rococo |
+			chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoLocal |
+			chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoDevelopment =>
+				crate::service::start_generic_aura_lookahead_node::<Network>(
+					config,
+					polkadot_config,
+					collator_options,
+					id,
+					hwbench,
+				)
+				.await
+				.map(|r| r.0),
+		}
+		.map_err(Into::into),
+
+		Runtime::Coretime(coretime_runtime_type) => match coretime_runtime_type {
+			chain_spec::coretime::CoretimeRuntimeType::Kusama |
+			chain_spec::coretime::CoretimeRuntimeType::KusamaLocal |
+			chain_spec::coretime::CoretimeRuntimeType::Polkadot |
+			chain_spec::coretime::CoretimeRuntimeType::PolkadotLocal |
+			chain_spec::coretime::CoretimeRuntimeType::Rococo |
+			chain_spec::coretime::CoretimeRuntimeType::RococoLocal |
+			chain_spec::coretime::CoretimeRuntimeType::RococoDevelopment |
+			chain_spec::coretime::CoretimeRuntimeType::Westend |
+			chain_spec::coretime::CoretimeRuntimeType::WestendLocal |
+			chain_spec::coretime::CoretimeRuntimeType::WestendDevelopment =>
+				crate::service::start_generic_aura_lookahead_node::<Network>(
+					config,
+					polkadot_config,
+					collator_options,
+					id,
+					hwbench,
+				)
+				.await
+				.map(|r| r.0),
+		}
+		.map_err(Into::into),
+
+		Runtime::Penpal(_) | Runtime::Default =>
+			crate::service::start_rococo_parachain_node::<Network>(
+				config,
+				polkadot_config,
+				collator_options,
+				id,
+				hwbench,
+			)
+			.await
+			.map(|r| r.0)
+			.map_err(Into::into),
+
+		Runtime::Glutton | Runtime::GluttonWestend =>
+			crate::service::start_basic_lookahead_node::<Network>(
+				config,
+				polkadot_config,
+				collator_options,
+				id,
+				hwbench,
+			)
+			.await
+			.map(|r| r.0)
+			.map_err(Into::into),
+
+		Runtime::People(people_runtime_type) => match people_runtime_type {
+			chain_spec::people::PeopleRuntimeType::Kusama |
+			chain_spec::people::PeopleRuntimeType::KusamaLocal |
+			chain_spec::people::PeopleRuntimeType::Polkadot |
+			chain_spec::people::PeopleRuntimeType::PolkadotLocal |
+			chain_spec::people::PeopleRuntimeType::Rococo |
+			chain_spec::people::PeopleRuntimeType::RococoLocal |
+			chain_spec::people::PeopleRuntimeType::RococoDevelopment |
+			chain_spec::people::PeopleRuntimeType::Westend |
+			chain_spec::people::PeopleRuntimeType::WestendLocal |
+			chain_spec::people::PeopleRuntimeType::WestendDevelopment =>
+				crate::service::start_generic_aura_lookahead_node::<Network>(
+					config,
+					polkadot_config,
+					collator_options,
+					id,
+					hwbench,
+				)
+				.await
+				.map(|r| r.0),
+		}
+		.map_err(Into::into),
+	}
+}
+
 impl DefaultConfigurationValues for RelayChainCli {
 	fn p2p_listen_port() -> u16 {
 		30334
diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs
index e9bb5947522b2b092e7a7b05729d795bc1d85721..63dd7f9df62e8936f9c33e9e0dc934a5bed5b807 100644
--- a/cumulus/polkadot-parachain/src/service.rs
+++ b/cumulus/polkadot-parachain/src/service.rs
@@ -51,7 +51,7 @@ use sc_consensus::{
 	BlockImportParams, ImportQueue,
 };
 use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY};
-use sc_network::{config::FullNetworkConfiguration, NetworkBlock};
+use sc_network::{config::FullNetworkConfiguration, service::traits::NetworkBackend, NetworkBlock};
 use sc_network_sync::SyncingService;
 use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager};
 use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle};
@@ -191,7 +191,7 @@ where
 ///
 /// This is the actual implementation that is abstract over the executor and the runtime api.
 #[sc_tracing::logging::prefix_logs_with("Parachain")]
-async fn start_node_impl<RuntimeApi, RB, BIQ, SC>(
+async fn start_node_impl<RuntimeApi, RB, BIQ, SC, Net>(
 	parachain_config: Configuration,
 	polkadot_config: Configuration,
 	collator_options: CollatorOptions,
@@ -244,6 +244,7 @@ where
 		Arc<dyn Fn(Hash, Option<Vec<u8>>) + Send + Sync>,
 		Arc<ParachainBackend>,
 	) -> Result<(), sc_service::Error>,
+	Net: NetworkBackend<Block, Hash>,
 {
 	let parachain_config = prepare_node_config(parachain_config);
 
@@ -269,7 +270,7 @@ where
 	let prometheus_registry = parachain_config.prometheus_registry().cloned();
 	let transaction_pool = params.transaction_pool.clone();
 	let import_queue_service = params.import_queue.service();
-	let net_config = FullNetworkConfiguration::new(&parachain_config.network);
+	let net_config = FullNetworkConfiguration::<_, _, Net>::new(&parachain_config.network);
 
 	let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) =
 		build_network(BuildNetworkParams {
@@ -423,14 +424,14 @@ pub fn build_aura_import_queue(
 }
 
 /// Start a rococo parachain node.
-pub async fn start_rococo_parachain_node(
+pub async fn start_rococo_parachain_node<Net: NetworkBackend<Block, Hash>>(
 	parachain_config: Configuration,
 	polkadot_config: Configuration,
 	collator_options: CollatorOptions,
 	para_id: ParaId,
 	hwbench: Option<sc_sysinfo::HwBench>,
 ) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient<FakeRuntimeApi>>)> {
-	start_node_impl::<FakeRuntimeApi, _, _, _>(
+	start_node_impl::<FakeRuntimeApi, _, _, _, Net>(
 		parachain_config,
 		polkadot_config,
 		collator_options,
@@ -492,14 +493,14 @@ fn build_contracts_rpc_extensions(
 }
 
 /// Start a polkadot-shell parachain node.
-pub async fn start_shell_node(
+pub async fn start_shell_node<Net: NetworkBackend<Block, Hash>>(
 	parachain_config: Configuration,
 	polkadot_config: Configuration,
 	collator_options: CollatorOptions,
 	para_id: ParaId,
 	hwbench: Option<sc_sysinfo::HwBench>,
 ) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient<FakeRuntimeApi>>)> {
-	start_node_impl::<FakeRuntimeApi, _, _, _>(
+	start_node_impl::<FakeRuntimeApi, _, _, _, Net>(
 		parachain_config,
 		polkadot_config,
 		collator_options,
@@ -687,14 +688,14 @@ where
 }
 
 /// Start an aura powered parachain node. Some system chains use this.
-pub async fn start_generic_aura_node(
+pub async fn start_generic_aura_node<Net: NetworkBackend<Block, Hash>>(
 	parachain_config: Configuration,
 	polkadot_config: Configuration,
 	collator_options: CollatorOptions,
 	para_id: ParaId,
 	hwbench: Option<sc_sysinfo::HwBench>,
 ) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient<FakeRuntimeApi>>)> {
-	start_node_impl::<FakeRuntimeApi, _, _, _>(
+	start_node_impl::<FakeRuntimeApi, _, _, _, Net>(
 		parachain_config,
 		polkadot_config,
 		collator_options,
@@ -768,14 +769,14 @@ pub async fn start_generic_aura_node(
 /// Uses the lookahead collator to support async backing.
 ///
 /// Start an aura powered parachain node. Some system chains use this.
-pub async fn start_generic_aura_lookahead_node(
+pub async fn start_generic_aura_lookahead_node<Net: NetworkBackend<Block, Hash>>(
 	parachain_config: Configuration,
 	polkadot_config: Configuration,
 	collator_options: CollatorOptions,
 	para_id: ParaId,
 	hwbench: Option<sc_sysinfo::HwBench>,
 ) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient<FakeRuntimeApi>>)> {
-	start_node_impl::<FakeRuntimeApi, _, _, _>(
+	start_node_impl::<FakeRuntimeApi, _, _, _, Net>(
 		parachain_config,
 		polkadot_config,
 		collator_options,
@@ -792,7 +793,7 @@ pub async fn start_generic_aura_lookahead_node(
 /// Start a shell node which should later transition into an Aura powered parachain node. Asset Hub
 /// uses this because at genesis, Asset Hub was on the `shell` runtime which didn't have Aura and
 /// needs to sync and upgrade before it can run `AuraApi` functions.
-pub async fn start_asset_hub_node<RuntimeApi, AuraId: AppCrypto + Send + Codec + Sync>(
+pub async fn start_asset_hub_node<RuntimeApi, AuraId: AppCrypto + Send + Codec + Sync, Net>(
 	parachain_config: Configuration,
 	polkadot_config: Configuration,
 	collator_options: CollatorOptions,
@@ -813,8 +814,9 @@ where
 		+ frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>,
 	<<AuraId as AppCrypto>::Pair as Pair>::Signature:
 		TryFrom<Vec<u8>> + std::hash::Hash + sp_runtime::traits::Member + Codec,
+	Net: NetworkBackend<Block, Hash>,
 {
-	start_node_impl::<RuntimeApi, _, _, _>(
+	start_node_impl::<RuntimeApi, _, _, _, Net>(
 		parachain_config,
 		polkadot_config,
 		collator_options,
@@ -940,7 +942,11 @@ where
 ///
 /// Uses the lookahead collator to support async backing.
 #[sc_tracing::logging::prefix_logs_with("Parachain")]
-pub async fn start_asset_hub_lookahead_node<RuntimeApi, AuraId: AppCrypto + Send + Codec + Sync>(
+pub async fn start_asset_hub_lookahead_node<
+	RuntimeApi,
+	AuraId: AppCrypto + Send + Codec + Sync,
+	Net,
+>(
 	parachain_config: Configuration,
 	polkadot_config: Configuration,
 	collator_options: CollatorOptions,
@@ -962,8 +968,9 @@ where
 		+ cumulus_primitives_aura::AuraUnincludedSegmentApi<Block>,
 	<<AuraId as AppCrypto>::Pair as Pair>::Signature:
 		TryFrom<Vec<u8>> + std::hash::Hash + sp_runtime::traits::Member + Codec,
+	Net: NetworkBackend<Block, Hash>,
 {
-	start_node_impl::<RuntimeApi, _, _, _>(
+	start_node_impl::<RuntimeApi, _, _, _, Net>(
 		parachain_config,
 		polkadot_config,
 		collator_options,
@@ -1213,14 +1220,14 @@ fn start_lookahead_aura_consensus(
 /// Start an aura powered parachain node which uses the lookahead collator to support async backing.
 /// This node is basic in the sense that its runtime api doesn't include common contents such as
 /// transaction payment. Used for aura glutton.
-pub async fn start_basic_lookahead_node(
+pub async fn start_basic_lookahead_node<Net: NetworkBackend<Block, Hash>>(
 	parachain_config: Configuration,
 	polkadot_config: Configuration,
 	collator_options: CollatorOptions,
 	para_id: ParaId,
 	hwbench: Option<sc_sysinfo::HwBench>,
 ) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient<FakeRuntimeApi>>)> {
-	start_node_impl::<FakeRuntimeApi, _, _, _>(
+	start_node_impl::<FakeRuntimeApi, _, _, _, Net>(
 		parachain_config,
 		polkadot_config,
 		collator_options,
@@ -1235,14 +1242,14 @@ pub async fn start_basic_lookahead_node(
 }
 
 /// Start a parachain node for Rococo Contracts.
-pub async fn start_contracts_rococo_node(
+pub async fn start_contracts_rococo_node<Net: NetworkBackend<Block, Hash>>(
 	parachain_config: Configuration,
 	polkadot_config: Configuration,
 	collator_options: CollatorOptions,
 	para_id: ParaId,
 	hwbench: Option<sc_sysinfo::HwBench>,
 ) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient<FakeRuntimeApi>>)> {
-	start_node_impl::<FakeRuntimeApi, _, _, _>(
+	start_node_impl::<FakeRuntimeApi, _, _, _, Net>(
 		parachain_config,
 		polkadot_config,
 		collator_options,
diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs
index 3af3901d175e5a113f137741a5de2ca11544c238..bdc2a98972e8eec583ef57031a1b4f381aad8c1c 100644
--- a/cumulus/test/service/src/lib.rs
+++ b/cumulus/test/service/src/lib.rs
@@ -62,7 +62,9 @@ use polkadot_service::ProvideRuntimeApi;
 use sc_consensus::ImportQueue;
 use sc_network::{
 	config::{FullNetworkConfiguration, TransportConfig},
-	multiaddr, NetworkBlock, NetworkService, NetworkStateInfo,
+	multiaddr,
+	service::traits::NetworkService,
+	NetworkBackend, NetworkBlock, NetworkStateInfo,
 };
 use sc_service::{
 	config::{
@@ -74,7 +76,7 @@ use sc_service::{
 };
 use sp_arithmetic::traits::SaturatedConversion;
 use sp_blockchain::HeaderBackend;
-use sp_core::{Pair, H256};
+use sp_core::Pair;
 use sp_keyring::Sr25519Keyring;
 use sp_runtime::{codec::Encode, generic};
 use sp_state_machine::BasicExternalities;
@@ -304,7 +306,7 @@ async fn build_relay_chain_interface(
 ///
 /// This is the actual implementation that is abstract over the executor and the runtime api.
 #[sc_tracing::logging::prefix_logs_with(parachain_config.network.node_name.as_str())]
-pub async fn start_node_impl<RB>(
+pub async fn start_node_impl<RB, Net: NetworkBackend<Block, Hash>>(
 	parachain_config: Configuration,
 	collator_key: Option<CollatorPair>,
 	relay_chain_config: Configuration,
@@ -318,7 +320,7 @@ pub async fn start_node_impl<RB>(
 ) -> sc_service::error::Result<(
 	TaskManager,
 	Arc<Client>,
-	Arc<NetworkService<Block, H256>>,
+	Arc<dyn NetworkService>,
 	RpcHandlers,
 	TransactionPool,
 	Arc<Backend>,
@@ -348,7 +350,7 @@ where
 	.map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?;
 
 	let import_queue_service = params.import_queue.service();
-	let net_config = FullNetworkConfiguration::new(&parachain_config.network);
+	let net_config = FullNetworkConfiguration::<Block, Hash, Net>::new(&parachain_config.network);
 
 	let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) =
 		build_network(BuildNetworkParams {
@@ -494,7 +496,7 @@ pub struct TestNode {
 	/// Client's instance.
 	pub client: Arc<Client>,
 	/// Node's network.
-	pub network: Arc<NetworkService<Block, H256>>,
+	pub network: Arc<dyn NetworkService>,
 	/// The `MultiaddrWithPeerId` to this node. This is useful if you want to pass it as "boot
 	/// node" to other nodes.
 	pub addr: MultiaddrWithPeerId,
@@ -702,21 +704,38 @@ impl TestNodeBuilder {
 
 		let multiaddr = parachain_config.network.listen_addresses[0].clone();
 		let (task_manager, client, network, rpc_handlers, transaction_pool, backend) =
-			start_node_impl(
-				parachain_config,
-				self.collator_key,
-				relay_chain_config,
-				self.para_id,
-				self.wrap_announce_block,
-				false,
-				|_| Ok(jsonrpsee::RpcModule::new(())),
-				self.consensus,
-				collator_options,
-				self.record_proof_during_import,
-			)
-			.await
-			.expect("could not create Cumulus test service");
-
+			match relay_chain_config.network.network_backend {
+				sc_network::config::NetworkBackendType::Libp2p =>
+					start_node_impl::<_, sc_network::NetworkWorker<_, _>>(
+						parachain_config,
+						self.collator_key,
+						relay_chain_config,
+						self.para_id,
+						self.wrap_announce_block,
+						false,
+						|_| Ok(jsonrpsee::RpcModule::new(())),
+						self.consensus,
+						collator_options,
+						self.record_proof_during_import,
+					)
+					.await
+					.expect("could not create Cumulus test service"),
+				sc_network::config::NetworkBackendType::Litep2p =>
+					start_node_impl::<_, sc_network::Litep2pNetworkBackend>(
+						parachain_config,
+						self.collator_key,
+						relay_chain_config,
+						self.para_id,
+						self.wrap_announce_block,
+						false,
+						|_| Ok(jsonrpsee::RpcModule::new(())),
+						self.consensus,
+						collator_options,
+						self.record_proof_during_import,
+					)
+					.await
+					.expect("could not create Cumulus test service"),
+			};
 		let peer_id = network.local_peer_id();
 		let addr = MultiaddrWithPeerId { multiaddr, peer_id };
 
diff --git a/cumulus/test/service/src/main.rs b/cumulus/test/service/src/main.rs
index 69a71a15389a58002e3ad3ac26495a48a0a0cd40..389eb643563ad2386648618ca497c63b64e771d0 100644
--- a/cumulus/test/service/src/main.rs
+++ b/cumulus/test/service/src/main.rs
@@ -101,18 +101,44 @@ fn main() -> Result<(), sc_cli::Error> {
 				.unwrap_or(cumulus_test_service::Consensus::RelayChain);
 
 			let (mut task_manager, _, _, _, _, _) = tokio_runtime
-				.block_on(cumulus_test_service::start_node_impl(
-					config,
-					collator_key,
-					polkadot_config,
-					parachain_id.into(),
-					cli.disable_block_announcements.then(wrap_announce_block),
-					cli.fail_pov_recovery,
-					|_| Ok(jsonrpsee::RpcModule::new(())),
-					consensus,
-					collator_options,
-					true,
-				))
+				.block_on(async move {
+					match polkadot_config.network.network_backend {
+						sc_network::config::NetworkBackendType::Libp2p =>
+							cumulus_test_service::start_node_impl::<
+								_,
+								sc_network::NetworkWorker<_, _>,
+							>(
+								config,
+								collator_key,
+								polkadot_config,
+								parachain_id.into(),
+								cli.disable_block_announcements.then(wrap_announce_block),
+								cli.fail_pov_recovery,
+								|_| Ok(jsonrpsee::RpcModule::new(())),
+								consensus,
+								collator_options,
+								true,
+							)
+							.await,
+						sc_network::config::NetworkBackendType::Litep2p =>
+							cumulus_test_service::start_node_impl::<
+								_,
+								sc_network::Litep2pNetworkBackend,
+							>(
+								config,
+								collator_key,
+								polkadot_config,
+								parachain_id.into(),
+								cli.disable_block_announcements.then(wrap_announce_block),
+								cli.fail_pov_recovery,
+								|_| Ok(jsonrpsee::RpcModule::new(())),
+								consensus,
+								collator_options,
+								true,
+							)
+							.await,
+					}
+				})
 				.expect("could not create Cumulus test service");
 
 			tokio_runtime
diff --git a/polkadot/node/jaeger/Cargo.toml b/polkadot/node/jaeger/Cargo.toml
index 6fa3d41eddb1e93f6e3a2f8c1101cfd7c4eafe75..bee725c0876f0cd57692d754738219dd731f8d62 100644
--- a/polkadot/node/jaeger/Cargo.toml
+++ b/polkadot/node/jaeger/Cargo.toml
@@ -16,6 +16,7 @@ parking_lot = "0.12.1"
 polkadot-primitives = { path = "../../primitives" }
 polkadot-node-primitives = { path = "../primitives" }
 sc-network = { path = "../../../substrate/client/network" }
+sc-network-types = { path = "../../../substrate/client/network/types" }
 sp-core = { path = "../../../substrate/primitives/core" }
 thiserror = { workspace = true }
 tokio = "1.37"
diff --git a/polkadot/node/jaeger/src/spans.rs b/polkadot/node/jaeger/src/spans.rs
index 4816fccf3b96e741027d0b0897627486e420d80e..68fa57e2ca14f859b7bcfcf02884deaa34d7d156 100644
--- a/polkadot/node/jaeger/src/spans.rs
+++ b/polkadot/node/jaeger/src/spans.rs
@@ -86,7 +86,7 @@
 use parity_scale_codec::Encode;
 use polkadot_node_primitives::PoV;
 use polkadot_primitives::{BlakeTwo256, CandidateHash, Hash, HashT, Id as ParaId, ValidatorIndex};
-use sc_network::PeerId;
+use sc_network_types::PeerId;
 
 use std::{fmt, sync::Arc};
 
diff --git a/polkadot/node/network/availability-distribution/src/tests/mod.rs b/polkadot/node/network/availability-distribution/src/tests/mod.rs
index d0aee7e5e010499db55ed78b7b9627cd7cab6e9b..214498979fb68307ce45705e396a7e306d53da87 100644
--- a/polkadot/node/network/availability-distribution/src/tests/mod.rs
+++ b/polkadot/node/network/availability-distribution/src/tests/mod.rs
@@ -19,7 +19,7 @@ use std::collections::HashSet;
 use futures::{executor, future, Future};
 
 use polkadot_node_network_protocol::request_response::{IncomingRequest, ReqProtocolNames};
-use polkadot_primitives::{CoreState, Hash};
+use polkadot_primitives::{Block, CoreState, Hash};
 use sp_keystore::KeystorePtr;
 
 use polkadot_node_subsystem_test_helpers as test_helpers;
@@ -44,9 +44,14 @@ fn test_harness<T: Future<Output = ()>>(
 	let genesis_hash = Hash::repeat_byte(0xff);
 	let req_protocol_names = ReqProtocolNames::new(&genesis_hash, None);
 
-	let (pov_req_receiver, pov_req_cfg) = IncomingRequest::get_config_receiver(&req_protocol_names);
-	let (chunk_req_receiver, chunk_req_cfg) =
-		IncomingRequest::get_config_receiver(&req_protocol_names);
+	let (pov_req_receiver, pov_req_cfg) = IncomingRequest::get_config_receiver::<
+		Block,
+		sc_network::NetworkWorker<Block, Hash>,
+	>(&req_protocol_names);
+	let (chunk_req_receiver, chunk_req_cfg) = IncomingRequest::get_config_receiver::<
+		Block,
+		sc_network::NetworkWorker<Block, Hash>,
+	>(&req_protocol_names);
 	let subsystem = AvailabilityDistributionSubsystem::new(
 		keystore,
 		IncomingRequestReceivers { pov_req_receiver, chunk_req_receiver },
diff --git a/polkadot/node/network/availability-distribution/src/tests/state.rs b/polkadot/node/network/availability-distribution/src/tests/state.rs
index 1d814b4fd0edd9add989dd3191500056d39539c8..93411511e763af437d7813c9e5d83bac4609959e 100644
--- a/polkadot/node/network/availability-distribution/src/tests/state.rs
+++ b/polkadot/node/network/availability-distribution/src/tests/state.rs
@@ -337,7 +337,7 @@ fn to_incoming_req(
 
 			IncomingRequest::new(
 				// We don't really care:
-				network::PeerId::random(),
+				network::PeerId::random().into(),
 				payload,
 				tx,
 			)
diff --git a/polkadot/node/network/availability-recovery/src/tests.rs b/polkadot/node/network/availability-recovery/src/tests.rs
index f1dc5b98c09b895f987c070fee0cf3cca37e036e..909f6a25f46b1eca3074095b6f54f0415d62c562 100644
--- a/polkadot/node/network/availability-recovery/src/tests.rs
+++ b/polkadot/node/network/availability-recovery/src/tests.rs
@@ -40,7 +40,7 @@ use polkadot_node_subsystem_test_helpers::{
 };
 use polkadot_node_subsystem_util::TimeoutExt;
 use polkadot_primitives::{
-	AuthorityDiscoveryId, Hash, HeadData, IndexedVec, PersistedValidationData, ValidatorId,
+	AuthorityDiscoveryId, Block, Hash, HeadData, IndexedVec, PersistedValidationData, ValidatorId,
 };
 use polkadot_primitives_test_helpers::{dummy_candidate_receipt, dummy_hash};
 
@@ -52,7 +52,10 @@ const GENESIS_HASH: Hash = Hash::repeat_byte(0xff);
 fn request_receiver(
 	req_protocol_names: &ReqProtocolNames,
 ) -> IncomingRequestReceiver<AvailableDataFetchingRequest> {
-	let receiver = IncomingRequest::get_config_receiver(req_protocol_names);
+	let receiver = IncomingRequest::get_config_receiver::<
+		Block,
+		sc_network::NetworkWorker<Block, Hash>,
+	>(req_protocol_names);
 	// Don't close the sending end of the request protocol. Otherwise, the subsystem will terminate.
 	std::mem::forget(receiver.1.inbound_queue);
 	receiver.0
diff --git a/polkadot/node/network/bridge/src/network.rs b/polkadot/node/network/bridge/src/network.rs
index 21bed019256ac7a0e747ac041f8db32609f0d065..5691c8413ad997daf80fcd551b4265031d563d88 100644
--- a/polkadot/node/network/bridge/src/network.rs
+++ b/polkadot/node/network/bridge/src/network.rs
@@ -25,9 +25,8 @@ use parking_lot::Mutex;
 use parity_scale_codec::Encode;
 
 use sc_network::{
-	config::parse_addr, multiaddr::Multiaddr, types::ProtocolName, IfDisconnected, MessageSink,
-	NetworkPeers, NetworkRequest, NetworkService, OutboundFailure, ReputationChange,
-	RequestFailure,
+	config::parse_addr, multiaddr::Multiaddr, service::traits::NetworkService, types::ProtocolName,
+	IfDisconnected, MessageSink, OutboundFailure, ReputationChange, RequestFailure,
 };
 
 use polkadot_node_network_protocol::{
@@ -35,7 +34,7 @@ use polkadot_node_network_protocol::{
 	request_response::{OutgoingRequest, Recipient, ReqProtocolNames, Requests},
 	v1 as protocol_v1, v2 as protocol_v2, v3 as protocol_v3, PeerId,
 };
-use polkadot_primitives::{AuthorityDiscoveryId, Block, Hash};
+use polkadot_primitives::AuthorityDiscoveryId;
 
 use crate::{metrics::Metrics, validator_discovery::AuthorityDiscovery, WireMessage};
 
@@ -232,13 +231,13 @@ pub trait Network: Clone + Send + 'static {
 }
 
 #[async_trait]
-impl Network for Arc<NetworkService<Block, Hash>> {
+impl Network for Arc<dyn NetworkService> {
 	async fn set_reserved_peers(
 		&mut self,
 		protocol: ProtocolName,
 		multiaddresses: HashSet<Multiaddr>,
 	) -> Result<(), String> {
-		NetworkService::set_reserved_peers(&**self, protocol, multiaddresses)
+		<dyn NetworkService>::set_reserved_peers(&**self, protocol, multiaddresses)
 	}
 
 	async fn remove_from_peers_set(
@@ -246,15 +245,15 @@ impl Network for Arc<NetworkService<Block, Hash>> {
 		protocol: ProtocolName,
 		peers: Vec<PeerId>,
 	) -> Result<(), String> {
-		NetworkService::remove_peers_from_reserved_set(&**self, protocol, peers)
+		<dyn NetworkService>::remove_peers_from_reserved_set(&**self, protocol, peers)
 	}
 
 	fn report_peer(&self, who: PeerId, rep: ReputationChange) {
-		NetworkService::report_peer(&**self, who, rep);
+		<dyn NetworkService>::report_peer(&**self, who, rep);
 	}
 
 	fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName) {
-		NetworkService::disconnect_peer(&**self, who, protocol);
+		<dyn NetworkService>::disconnect_peer(&**self, who, protocol);
 	}
 
 	async fn start_request<AD: AuthorityDiscovery>(
@@ -289,7 +288,7 @@ impl Network for Arc<NetworkService<Block, Hash>> {
 						Ok(v) => v,
 						Err(_) => continue,
 					};
-					NetworkService::add_known_address(self, peer_id, addr);
+					<dyn NetworkService>::add_known_address(&**self, peer_id, addr);
 					found_peer_id = Some(peer_id);
 				}
 				found_peer_id
@@ -321,8 +320,8 @@ impl Network for Arc<NetworkService<Block, Hash>> {
 			"Starting request",
 		);
 
-		NetworkService::start_request(
-			self,
+		<dyn NetworkService>::start_request(
+			&**self,
 			peer_id,
 			req_protocol_names.get_name(protocol),
 			payload,
@@ -333,7 +332,7 @@ impl Network for Arc<NetworkService<Block, Hash>> {
 	}
 
 	fn peer_role(&self, who: PeerId, handshake: Vec<u8>) -> Option<sc_network::ObservedRole> {
-		NetworkService::peer_role(self, who, handshake)
+		<dyn NetworkService>::peer_role(&**self, who, handshake)
 	}
 }
 
diff --git a/polkadot/node/network/bridge/src/rx/tests.rs b/polkadot/node/network/bridge/src/rx/tests.rs
index 6847b8a7e24db5b13df2873d1b54f37506e76831..6182bf3d883b5be19231d534babf1c70973da00d 100644
--- a/polkadot/node/network/bridge/src/rx/tests.rs
+++ b/polkadot/node/network/bridge/src/rx/tests.rs
@@ -366,13 +366,13 @@ impl NotificationService for TestNotificationService {
 	}
 
 	/// Send synchronous `notification` to `peer`.
-	fn send_sync_notification(&self, _peer: &PeerId, _notification: Vec<u8>) {
+	fn send_sync_notification(&mut self, _peer: &PeerId, _notification: Vec<u8>) {
 		unimplemented!();
 	}
 
 	/// Send asynchronous `notification` to `peer`, allowing sender to exercise backpressure.
 	async fn send_async_notification(
-		&self,
+		&mut self,
 		_peer: &PeerId,
 		_notification: Vec<u8>,
 	) -> Result<(), sc_network::error::Error> {
diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs
index de561e9f77fcb1e2490cb5f5216064094924bfbc..689e03ce4737bcdd39d0651009f212e3cb364eea 100644
--- a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs
@@ -45,8 +45,9 @@ use polkadot_node_subsystem::{
 use polkadot_node_subsystem_test_helpers as test_helpers;
 use polkadot_node_subsystem_util::{reputation::add_reputation, TimeoutExt};
 use polkadot_primitives::{
-	AuthorityDiscoveryId, CollatorPair, ExecutorParams, GroupIndex, GroupRotationInfo, IndexedVec,
-	NodeFeatures, ScheduledCore, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex,
+	AuthorityDiscoveryId, Block, CollatorPair, ExecutorParams, GroupIndex, GroupRotationInfo,
+	IndexedVec, NodeFeatures, ScheduledCore, SessionIndex, SessionInfo, ValidatorId,
+	ValidatorIndex,
 };
 use polkadot_primitives_test_helpers::TestCandidateBuilder;
 use test_helpers::mock::new_leaf;
@@ -249,10 +250,14 @@ fn test_harness<T: Future<Output = TestHarness>>(
 	let genesis_hash = Hash::repeat_byte(0xff);
 	let req_protocol_names = ReqProtocolNames::new(&genesis_hash, None);
 
-	let (collation_req_receiver, req_v1_cfg) =
-		IncomingRequest::get_config_receiver(&req_protocol_names);
-	let (collation_req_v2_receiver, req_v2_cfg) =
-		IncomingRequest::get_config_receiver(&req_protocol_names);
+	let (collation_req_receiver, req_v1_cfg) = IncomingRequest::get_config_receiver::<
+		Block,
+		sc_network::NetworkWorker<Block, Hash>,
+	>(&req_protocol_names);
+	let (collation_req_v2_receiver, req_v2_cfg) = IncomingRequest::get_config_receiver::<
+		Block,
+		sc_network::NetworkWorker<Block, Hash>,
+	>(&req_protocol_names);
 	let subsystem = async {
 		run_inner(
 			context,
diff --git a/polkadot/node/network/dispute-distribution/src/tests/mod.rs b/polkadot/node/network/dispute-distribution/src/tests/mod.rs
index 5ad790fb01c22e511a2d5ae3d96c0c96f49bfc78..1d0d667f5ccf4a156316110f1493ac180615b877 100644
--- a/polkadot/node/network/dispute-distribution/src/tests/mod.rs
+++ b/polkadot/node/network/dispute-distribution/src/tests/mod.rs
@@ -57,8 +57,8 @@ use polkadot_node_subsystem_test_helpers::{
 	subsystem_test_harness, TestSubsystemContextHandle,
 };
 use polkadot_primitives::{
-	AuthorityDiscoveryId, CandidateHash, CandidateReceipt, ExecutorParams, Hash, NodeFeatures,
-	SessionIndex, SessionInfo,
+	AuthorityDiscoveryId, Block, CandidateHash, CandidateReceipt, ExecutorParams, Hash,
+	NodeFeatures, SessionIndex, SessionInfo,
 };
 
 use self::mock::{
@@ -879,7 +879,10 @@ where
 
 	let genesis_hash = Hash::repeat_byte(0xff);
 	let req_protocol_names = ReqProtocolNames::new(&genesis_hash, None);
-	let (req_receiver, req_cfg) = IncomingRequest::get_config_receiver(&req_protocol_names);
+	let (req_receiver, req_cfg) = IncomingRequest::get_config_receiver::<
+		Block,
+		sc_network::NetworkWorker<Block, Hash>,
+	>(&req_protocol_names);
 	let subsystem = DisputeDistributionSubsystem::new(
 		keystore,
 		req_receiver,
diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml
index 81936364897f926cb2eef810bf917c53e5d4e97a..0408e673791114793da1b231baa7448f185b0db6 100644
--- a/polkadot/node/network/protocol/Cargo.toml
+++ b/polkadot/node/network/protocol/Cargo.toml
@@ -18,7 +18,9 @@ polkadot-node-primitives = { path = "../../primitives" }
 polkadot-node-jaeger = { path = "../../jaeger" }
 parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
 sc-network = { path = "../../../../substrate/client/network" }
+sc-network-types = { path = "../../../../substrate/client/network/types" }
 sc-authority-discovery = { path = "../../../../substrate/client/authority-discovery" }
+sp-runtime = { path = "../../../../substrate/primitives/runtime" }
 strum = { version = "0.26.2", features = ["derive"] }
 futures = "0.3.30"
 thiserror = { workspace = true }
diff --git a/polkadot/node/network/protocol/src/authority_discovery.rs b/polkadot/node/network/protocol/src/authority_discovery.rs
index beb5409d4ac04a58b1ba276fdfd8f7c6cae030f7..fe01f3ccd336d8d1cfaf474a3bae705a69c4f89c 100644
--- a/polkadot/node/network/protocol/src/authority_discovery.rs
+++ b/polkadot/node/network/protocol/src/authority_discovery.rs
@@ -23,7 +23,8 @@ use async_trait::async_trait;
 use sc_authority_discovery::Service as AuthorityDiscoveryService;
 
 use polkadot_primitives::AuthorityDiscoveryId;
-use sc_network::{Multiaddr, PeerId};
+use sc_network::Multiaddr;
+use sc_network_types::PeerId;
 
 /// An abstraction over the authority discovery service.
 ///
diff --git a/polkadot/node/network/protocol/src/lib.rs b/polkadot/node/network/protocol/src/lib.rs
index 4dd94b5eac4fc430d1b28d5d906d922f0e970870..c38838b1ef9845eb85c929066eb7de108374fa0d 100644
--- a/polkadot/node/network/protocol/src/lib.rs
+++ b/polkadot/node/network/protocol/src/lib.rs
@@ -25,7 +25,8 @@ use std::{collections::HashMap, fmt};
 
 #[doc(hidden)]
 pub use polkadot_node_jaeger as jaeger;
-pub use sc_network::{IfDisconnected, PeerId};
+pub use sc_network::IfDisconnected;
+pub use sc_network_types::PeerId;
 #[doc(hidden)]
 pub use std::sync::Arc;
 
@@ -610,7 +611,7 @@ pub mod v1 {
 	///
 	/// The payload is the local peer id of the node, which serves to prove that it
 	/// controls the collator key it is declaring an intention to collate under.
-	pub fn declare_signature_payload(peer_id: &sc_network::PeerId) -> Vec<u8> {
+	pub fn declare_signature_payload(peer_id: &sc_network_types::PeerId) -> Vec<u8> {
 		let mut payload = peer_id.to_bytes();
 		payload.extend_from_slice(b"COLL");
 		payload
@@ -863,7 +864,7 @@ pub mod v2 {
 	///
 	/// The payload is the local peer id of the node, which serves to prove that it
 	/// controls the collator key it is declaring an intention to collate under.
-	pub fn declare_signature_payload(peer_id: &sc_network::PeerId) -> Vec<u8> {
+	pub fn declare_signature_payload(peer_id: &sc_network_types::PeerId) -> Vec<u8> {
 		let mut payload = peer_id.to_bytes();
 		payload.extend_from_slice(b"COLL");
 		payload
diff --git a/polkadot/node/network/protocol/src/peer_set.rs b/polkadot/node/network/protocol/src/peer_set.rs
index d0ae5b4a1bf3c5fa6b70fed76551dffbe75fc7f6..f80cbf227945d6f508e53d62fc3ebc0bf512261d 100644
--- a/polkadot/node/network/protocol/src/peer_set.rs
+++ b/polkadot/node/network/protocol/src/peer_set.rs
@@ -19,13 +19,14 @@
 use derive_more::Display;
 use polkadot_primitives::Hash;
 use sc_network::{
-	config::{NonDefaultSetConfig, SetConfig},
-	types::ProtocolName,
-	NotificationService,
+	config::SetConfig, peer_store::PeerStoreProvider, service::NotificationMetrics,
+	types::ProtocolName, NetworkBackend, NotificationService,
 };
+use sp_runtime::traits::Block;
 use std::{
 	collections::{hash_map::Entry, HashMap},
 	ops::{Index, IndexMut},
+	sync::Arc,
 };
 use strum::{EnumIter, IntoEnumIterator};
 
@@ -65,11 +66,13 @@ impl PeerSet {
 	///
 	/// Those should be used in the network configuration to register the protocols with the
 	/// network service.
-	pub fn get_info(
+	pub fn get_info<B: Block, N: NetworkBackend<B, <B as Block>::Hash>>(
 		self,
 		is_authority: IsAuthority,
 		peerset_protocol_names: &PeerSetProtocolNames,
-	) -> (NonDefaultSetConfig, (PeerSet, Box<dyn NotificationService>)) {
+		metrics: NotificationMetrics,
+		peer_store_handle: Arc<dyn PeerStoreProvider>,
+	) -> (N::NotificationProtocolConfig, (PeerSet, Box<dyn NotificationService>)) {
 		// Networking layer relies on `get_main_name()` being the main name of the protocol
 		// for peersets and connection management.
 		let protocol = peerset_protocol_names.get_main_name(self);
@@ -82,7 +85,7 @@ impl PeerSet {
 
 		match self {
 			PeerSet::Validation => {
-				let (config, notification_service) = NonDefaultSetConfig::new(
+				let (config, notification_service) = N::notification_config(
 					protocol,
 					fallback_names,
 					max_notification_size,
@@ -97,12 +100,14 @@ impl PeerSet {
 						reserved_nodes: Vec::new(),
 						non_reserved_mode: sc_network::config::NonReservedPeerMode::Accept,
 					},
+					metrics,
+					peer_store_handle,
 				);
 
 				(config, (PeerSet::Validation, notification_service))
 			},
 			PeerSet::Collation => {
-				let (config, notification_service) = NonDefaultSetConfig::new(
+				let (config, notification_service) = N::notification_config(
 					protocol,
 					fallback_names,
 					max_notification_size,
@@ -119,6 +124,8 @@ impl PeerSet {
 							sc_network::config::NonReservedPeerMode::Deny
 						},
 					},
+					metrics,
+					peer_store_handle,
 				);
 
 				(config, (PeerSet::Collation, notification_service))
@@ -207,12 +214,21 @@ impl<T> IndexMut<PeerSet> for PerPeerSet<T> {
 ///
 /// Should be used during network configuration (added to `NetworkConfiguration::extra_sets`)
 /// or shortly after startup to register the protocols with the network service.
-pub fn peer_sets_info(
+pub fn peer_sets_info<B: Block, N: NetworkBackend<B, <B as Block>::Hash>>(
 	is_authority: IsAuthority,
 	peerset_protocol_names: &PeerSetProtocolNames,
-) -> Vec<(NonDefaultSetConfig, (PeerSet, Box<dyn NotificationService>))> {
+	metrics: NotificationMetrics,
+	peer_store_handle: Arc<dyn PeerStoreProvider>,
+) -> Vec<(N::NotificationProtocolConfig, (PeerSet, Box<dyn NotificationService>))> {
 	PeerSet::iter()
-		.map(|s| s.get_info(is_authority, &peerset_protocol_names))
+		.map(|s| {
+			s.get_info::<B, N>(
+				is_authority,
+				&peerset_protocol_names,
+				metrics.clone(),
+				Arc::clone(&peer_store_handle),
+			)
+		})
 		.collect()
 }
 
diff --git a/polkadot/node/network/protocol/src/request_response/incoming/error.rs b/polkadot/node/network/protocol/src/request_response/incoming/error.rs
index efc3d8ecfcd4f6ab341a28dbfdf660f53190f7ff..7de9d919058ab05d3c91832c7d9829173d948e43 100644
--- a/polkadot/node/network/protocol/src/request_response/incoming/error.rs
+++ b/polkadot/node/network/protocol/src/request_response/incoming/error.rs
@@ -16,7 +16,7 @@
 
 //! Error handling related code and Error/Result definitions.
 
-use sc_network::PeerId;
+use sc_network_types::PeerId;
 
 use parity_scale_codec::Error as DecodingError;
 
diff --git a/polkadot/node/network/protocol/src/request_response/incoming/mod.rs b/polkadot/node/network/protocol/src/request_response/incoming/mod.rs
index 1d7c4a63e0c31e9440c1386909fe198a6909f740..e85390729ee3f99f4a74b4cf2b90ac225c819fc6 100644
--- a/polkadot/node/network/protocol/src/request_response/incoming/mod.rs
+++ b/polkadot/node/network/protocol/src/request_response/incoming/mod.rs
@@ -20,7 +20,9 @@ use futures::{channel::oneshot, StreamExt};
 
 use parity_scale_codec::{Decode, Encode};
 
-use sc_network::{config as netconfig, config::RequestResponseConfig, PeerId};
+use sc_network::{config as netconfig, NetworkBackend};
+use sc_network_types::PeerId;
+use sp_runtime::traits::Block;
 
 use super::{IsRequest, ReqProtocolNames};
 use crate::UnifiedReputationChange;
@@ -52,10 +54,10 @@ where
 	///
 	/// This Register that config with substrate networking and receive incoming requests via the
 	/// returned `IncomingRequestReceiver`.
-	pub fn get_config_receiver(
+	pub fn get_config_receiver<B: Block, N: NetworkBackend<B, <B as Block>::Hash>>(
 		req_protocol_names: &ReqProtocolNames,
-	) -> (IncomingRequestReceiver<Req>, RequestResponseConfig) {
-		let (raw, cfg) = Req::PROTOCOL.get_config(req_protocol_names);
+	) -> (IncomingRequestReceiver<Req>, N::RequestResponseProtocolConfig) {
+		let (raw, cfg) = Req::PROTOCOL.get_config::<B, N>(req_protocol_names);
 		(IncomingRequestReceiver { raw, phantom: PhantomData {} }, cfg)
 	}
 
diff --git a/polkadot/node/network/protocol/src/request_response/mod.rs b/polkadot/node/network/protocol/src/request_response/mod.rs
index 87217bf084fb9277a3e2930cb67f6a5a5ac1dcc8..cab02bb88a00b429a80ce25e342fd38e25b3f3d5 100644
--- a/polkadot/node/network/protocol/src/request_response/mod.rs
+++ b/polkadot/node/network/protocol/src/request_response/mod.rs
@@ -52,6 +52,8 @@
 use std::{collections::HashMap, time::Duration, u64};
 
 use polkadot_primitives::{MAX_CODE_SIZE, MAX_POV_SIZE};
+use sc_network::NetworkBackend;
+use sp_runtime::traits::Block;
 use strum::{EnumIter, IntoEnumIterator};
 
 pub use sc_network::{config as network, config::RequestResponseConfig, ProtocolName};
@@ -179,76 +181,76 @@ impl Protocol {
 	///
 	/// Returns a `ProtocolConfig` for this protocol.
 	/// Use this if you plan only to send requests for this protocol.
-	pub fn get_outbound_only_config(
+	pub fn get_outbound_only_config<B: Block, N: NetworkBackend<B, <B as Block>::Hash>>(
 		self,
 		req_protocol_names: &ReqProtocolNames,
-	) -> RequestResponseConfig {
-		self.create_config(req_protocol_names, None)
+	) -> N::RequestResponseProtocolConfig {
+		self.create_config::<B, N>(req_protocol_names, None)
 	}
 
 	/// Get a configuration for a given Request response protocol.
 	///
 	/// Returns a receiver for messages received on this protocol and the requested
 	/// `ProtocolConfig`.
-	pub fn get_config(
+	pub fn get_config<B: Block, N: NetworkBackend<B, <B as Block>::Hash>>(
 		self,
 		req_protocol_names: &ReqProtocolNames,
-	) -> (async_channel::Receiver<network::IncomingRequest>, RequestResponseConfig) {
+	) -> (async_channel::Receiver<network::IncomingRequest>, N::RequestResponseProtocolConfig) {
 		let (tx, rx) = async_channel::bounded(self.get_channel_size());
-		let cfg = self.create_config(req_protocol_names, Some(tx));
+		let cfg = self.create_config::<B, N>(req_protocol_names, Some(tx));
 		(rx, cfg)
 	}
 
-	fn create_config(
+	fn create_config<B: Block, N: NetworkBackend<B, <B as Block>::Hash>>(
 		self,
 		req_protocol_names: &ReqProtocolNames,
 		tx: Option<async_channel::Sender<network::IncomingRequest>>,
-	) -> RequestResponseConfig {
+	) -> N::RequestResponseProtocolConfig {
 		let name = req_protocol_names.get_name(self);
 		let legacy_names = self.get_legacy_name().into_iter().map(Into::into).collect();
 		match self {
-			Protocol::ChunkFetchingV1 => RequestResponseConfig {
+			Protocol::ChunkFetchingV1 => N::request_response_config(
 				name,
-				fallback_names: legacy_names,
-				max_request_size: 1_000,
-				max_response_size: POV_RESPONSE_SIZE as u64 * 3,
+				legacy_names,
+				1_000,
+				POV_RESPONSE_SIZE as u64 * 3,
 				// We are connected to all validators:
-				request_timeout: CHUNK_REQUEST_TIMEOUT,
-				inbound_queue: tx,
-			},
+				CHUNK_REQUEST_TIMEOUT,
+				tx,
+			),
 			Protocol::CollationFetchingV1 | Protocol::CollationFetchingV2 =>
-				RequestResponseConfig {
+				N::request_response_config(
 					name,
-					fallback_names: legacy_names,
-					max_request_size: 1_000,
-					max_response_size: POV_RESPONSE_SIZE,
+					legacy_names,
+					1_000,
+					POV_RESPONSE_SIZE,
 					// Taken from initial implementation in collator protocol:
-					request_timeout: POV_REQUEST_TIMEOUT_CONNECTED,
-					inbound_queue: tx,
-				},
-			Protocol::PoVFetchingV1 => RequestResponseConfig {
+					POV_REQUEST_TIMEOUT_CONNECTED,
+					tx,
+				),
+			Protocol::PoVFetchingV1 => N::request_response_config(
 				name,
-				fallback_names: legacy_names,
-				max_request_size: 1_000,
-				max_response_size: POV_RESPONSE_SIZE,
-				request_timeout: POV_REQUEST_TIMEOUT_CONNECTED,
-				inbound_queue: tx,
-			},
-			Protocol::AvailableDataFetchingV1 => RequestResponseConfig {
+				legacy_names,
+				1_000,
+				POV_RESPONSE_SIZE,
+				POV_REQUEST_TIMEOUT_CONNECTED,
+				tx,
+			),
+			Protocol::AvailableDataFetchingV1 => N::request_response_config(
 				name,
-				fallback_names: legacy_names,
-				max_request_size: 1_000,
+				legacy_names,
+				1_000,
 				// Available data size is dominated by the PoV size.
-				max_response_size: POV_RESPONSE_SIZE,
-				request_timeout: POV_REQUEST_TIMEOUT_CONNECTED,
-				inbound_queue: tx,
-			},
-			Protocol::StatementFetchingV1 => RequestResponseConfig {
+				POV_RESPONSE_SIZE,
+				POV_REQUEST_TIMEOUT_CONNECTED,
+				tx,
+			),
+			Protocol::StatementFetchingV1 => N::request_response_config(
 				name,
-				fallback_names: legacy_names,
-				max_request_size: 1_000,
+				legacy_names,
+				1_000,
 				// Available data size is dominated code size.
-				max_response_size: STATEMENT_RESPONSE_SIZE,
+				STATEMENT_RESPONSE_SIZE,
 				// We need statement fetching to be fast and will try our best at the responding
 				// side to answer requests within that timeout, assuming a bandwidth of 500Mbit/s
 				// - which is the recommended minimum bandwidth for nodes on Kusama as of April
@@ -258,27 +260,27 @@ impl Protocol {
 				// waiting for timeout on an overloaded node.  Fetches from slow nodes will likely
 				// fail, but this is desired, so we can quickly move on to a faster one - we should
 				// also decrease its reputation.
-				request_timeout: Duration::from_secs(1),
-				inbound_queue: tx,
-			},
-			Protocol::DisputeSendingV1 => RequestResponseConfig {
+				Duration::from_secs(1),
+				tx,
+			),
+			Protocol::DisputeSendingV1 => N::request_response_config(
 				name,
-				fallback_names: legacy_names,
-				max_request_size: 1_000,
+				legacy_names,
+				1_000,
 				// Responses are just confirmation, in essence not even a bit. So 100 seems
 				// plenty.
-				max_response_size: 100,
-				request_timeout: DISPUTE_REQUEST_TIMEOUT,
-				inbound_queue: tx,
-			},
-			Protocol::AttestedCandidateV2 => RequestResponseConfig {
+				100,
+				DISPUTE_REQUEST_TIMEOUT,
+				tx,
+			),
+			Protocol::AttestedCandidateV2 => N::request_response_config(
 				name,
-				fallback_names: legacy_names,
-				max_request_size: 1_000,
-				max_response_size: ATTESTED_CANDIDATE_RESPONSE_SIZE,
-				request_timeout: ATTESTED_CANDIDATE_TIMEOUT,
-				inbound_queue: tx,
-			},
+				legacy_names,
+				1_000,
+				ATTESTED_CANDIDATE_RESPONSE_SIZE,
+				ATTESTED_CANDIDATE_TIMEOUT,
+				tx,
+			),
 		}
 	}
 
diff --git a/polkadot/node/network/protocol/src/request_response/outgoing.rs b/polkadot/node/network/protocol/src/request_response/outgoing.rs
index 88439ad40367d7303b63a7fa97ebfb6fd9bb89e4..96ef4a6ab25dcc13949e6b3dfbc0e8c6856393f5 100644
--- a/polkadot/node/network/protocol/src/request_response/outgoing.rs
+++ b/polkadot/node/network/protocol/src/request_response/outgoing.rs
@@ -20,7 +20,7 @@ use network::ProtocolName;
 use parity_scale_codec::{Decode, Encode, Error as DecodingError};
 
 use sc_network as network;
-use sc_network::PeerId;
+use sc_network_types::PeerId;
 
 use polkadot_primitives::AuthorityDiscoveryId;
 
diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs
index 7d355cc887255a377ce54f786f68b13cd80895c5..0dea5ad0996e787553bfb597d455460208cc5e21 100644
--- a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs
+++ b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs
@@ -43,7 +43,7 @@ use polkadot_node_subsystem::{
 };
 use polkadot_node_subsystem_test_helpers::mock::{make_ferdie_keystore, new_leaf};
 use polkadot_primitives::{
-	ExecutorParams, GroupIndex, Hash, HeadData, Id as ParaId, IndexedVec, NodeFeatures,
+	Block, ExecutorParams, GroupIndex, Hash, HeadData, Id as ParaId, IndexedVec, NodeFeatures,
 	SessionInfo, ValidationCode,
 };
 use polkadot_primitives_test_helpers::{
@@ -768,8 +768,14 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() {
 	let (ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
 
 	let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
-	let (statement_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names);
-	let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names);
+	let (statement_req_receiver, _) = IncomingRequest::get_config_receiver::<
+		Block,
+		sc_network::NetworkWorker<Block, Hash>,
+	>(&req_protocol_names);
+	let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver::<
+		Block,
+		sc_network::NetworkWorker<Block, Hash>,
+	>(&req_protocol_names);
 
 	let bg = async move {
 		let s = StatementDistributionSubsystem {
@@ -1016,9 +1022,14 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing(
 	let (ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
 
 	let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
-	let (statement_req_receiver, mut req_cfg) =
-		IncomingRequest::get_config_receiver(&req_protocol_names);
-	let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names);
+	let (statement_req_receiver, mut req_cfg) = IncomingRequest::get_config_receiver::<
+		Block,
+		sc_network::NetworkWorker<Block, Hash>,
+	>(&req_protocol_names);
+	let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver::<
+		Block,
+		sc_network::NetworkWorker<Block, Hash>,
+	>(&req_protocol_names);
 
 	let bg = async move {
 		let s = StatementDistributionSubsystem {
@@ -1554,8 +1565,14 @@ fn delay_reputation_changes() {
 	let (ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
 
 	let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
-	let (statement_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names);
-	let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names);
+	let (statement_req_receiver, _) = IncomingRequest::get_config_receiver::<
+		Block,
+		sc_network::NetworkWorker<Block, Hash>,
+	>(&req_protocol_names);
+	let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver::<
+		Block,
+		sc_network::NetworkWorker<Block, Hash>,
+	>(&req_protocol_names);
 
 	let reputation_interval = Duration::from_millis(100);
 
@@ -2044,9 +2061,14 @@ fn share_prioritizes_backing_group() {
 	let (ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
 
 	let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
-	let (statement_req_receiver, mut req_cfg) =
-		IncomingRequest::get_config_receiver(&req_protocol_names);
-	let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names);
+	let (statement_req_receiver, mut req_cfg) = IncomingRequest::get_config_receiver::<
+		Block,
+		sc_network::NetworkWorker<Block, Hash>,
+	>(&req_protocol_names);
+	let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver::<
+		Block,
+		sc_network::NetworkWorker<Block, Hash>,
+	>(&req_protocol_names);
 
 	let bg = async move {
 		let s = StatementDistributionSubsystem {
@@ -2377,8 +2399,14 @@ fn peer_cant_flood_with_large_statements() {
 	let (ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
 
 	let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
-	let (statement_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names);
-	let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names);
+	let (statement_req_receiver, _) = IncomingRequest::get_config_receiver::<
+		Block,
+		sc_network::NetworkWorker<Block, Hash>,
+	>(&req_protocol_names);
+	let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver::<
+		Block,
+		sc_network::NetworkWorker<Block, Hash>,
+	>(&req_protocol_names);
 	let bg = async move {
 		let s = StatementDistributionSubsystem {
 			keystore: make_ferdie_keystore(),
@@ -2610,8 +2638,14 @@ fn handle_multiple_seconded_statements() {
 	let (ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
 
 	let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
-	let (statement_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names);
-	let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names);
+	let (statement_req_receiver, _) = IncomingRequest::get_config_receiver::<
+		Block,
+		sc_network::NetworkWorker<Block, Hash>,
+	>(&req_protocol_names);
+	let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver::<
+		Block,
+		sc_network::NetworkWorker<Block, Hash>,
+	>(&req_protocol_names);
 
 	let virtual_overseer_fut = async move {
 		let s = StatementDistributionSubsystem {
diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs
index e98b11079312b0539a39d82bdd57f4a8716112ee..8dda7219cd1251ccd7869408c51e80ca01513c2b 100644
--- a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs
@@ -33,7 +33,7 @@ use polkadot_node_subsystem::messages::{
 use polkadot_node_subsystem_test_helpers as test_helpers;
 use polkadot_node_subsystem_util::TimeoutExt;
 use polkadot_primitives::{
-	AssignmentPair, AsyncBackingParams, BlockNumber, CommittedCandidateReceipt, CoreState,
+	AssignmentPair, AsyncBackingParams, Block, BlockNumber, CommittedCandidateReceipt, CoreState,
 	GroupRotationInfo, HeadData, Header, IndexedVec, PersistedValidationData, ScheduledCore,
 	SessionIndex, SessionInfo, ValidatorPair,
 };
@@ -359,9 +359,14 @@ fn test_harness<T: Future<Output = VirtualOverseer>>(
 		Arc::new(LocalKeystore::in_memory()) as KeystorePtr
 	};
 	let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
-	let (statement_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names);
-	let (candidate_req_receiver, req_cfg) =
-		IncomingRequest::get_config_receiver(&req_protocol_names);
+	let (statement_req_receiver, _) = IncomingRequest::get_config_receiver::<
+		Block,
+		sc_network::NetworkWorker<Block, Hash>,
+	>(&req_protocol_names);
+	let (candidate_req_receiver, req_cfg) = IncomingRequest::get_config_receiver::<
+		Block,
+		sc_network::NetworkWorker<Block, Hash>,
+	>(&req_protocol_names);
 	let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(0);
 
 	let test_state = TestState::from_config(config, req_cfg.inbound_queue.unwrap(), &mut rng);
diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs
index 61076477f8e72fdc13d2357e8ff9100bb5836c0f..22231c84b1d9c377c24bf689e7c5a88d40dba62b 100644
--- a/polkadot/node/service/src/lib.rs
+++ b/polkadot/node/service/src/lib.rs
@@ -655,7 +655,7 @@ pub struct NewFull {
 	pub task_manager: TaskManager,
 	pub client: Arc<FullClient>,
 	pub overseer_handle: Option<Handle>,
-	pub network: Arc<sc_network::NetworkService<Block, <Block as BlockT>::Hash>>,
+	pub network: Arc<dyn sc_network::service::traits::NetworkService>,
 	pub sync_service: Arc<sc_network_sync::SyncingService<Block>>,
 	pub rpc_handlers: RpcHandlers,
 	pub backend: Arc<FullBackend>,
@@ -719,7 +719,10 @@ pub const AVAILABILITY_CONFIG: AvailabilityConfig = AvailabilityConfig {
 /// searched. If the path points to an executable rather then directory, that executable is used
 /// both as preparation and execution worker (supposed to be used for tests only).
 #[cfg(feature = "full-node")]
-pub fn new_full<OverseerGenerator: OverseerGen>(
+pub fn new_full<
+	OverseerGenerator: OverseerGen,
+	Network: sc_network::NetworkBackend<Block, <Block as BlockT>::Hash>,
+>(
 	mut config: Configuration,
 	NewFullParams {
 		is_parachain_node,
@@ -805,19 +808,29 @@ pub fn new_full<OverseerGenerator: OverseerGen>(
 		other: (rpc_extensions_builder, import_setup, rpc_setup, slot_duration, mut telemetry),
 	} = new_partial::<SelectRelayChain<_>>(&mut config, basics, select_chain)?;
 
+	let metrics = Network::register_notification_metrics(
+		config.prometheus_config.as_ref().map(|cfg| &cfg.registry),
+	);
 	let shared_voter_state = rpc_setup;
 	let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht;
 	let auth_disc_public_addresses = config.network.public_addresses.clone();
-	let mut net_config = sc_network::config::FullNetworkConfiguration::new(&config.network);
+
+	let mut net_config =
+		sc_network::config::FullNetworkConfiguration::<_, _, Network>::new(&config.network);
 
 	let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed");
+	let peer_store_handle = net_config.peer_store_handle();
 
 	// Note: GrandPa is pushed before the Polkadot-specific protocols. This doesn't change
 	// anything in terms of behaviour, but makes the logs more consistent with the other
 	// Substrate nodes.
 	let grandpa_protocol_name = grandpa::protocol_standard_name(&genesis_hash, &config.chain_spec);
 	let (grandpa_protocol_config, grandpa_notification_service) =
-		grandpa::grandpa_peers_set_config(grandpa_protocol_name.clone());
+		grandpa::grandpa_peers_set_config::<_, Network>(
+			grandpa_protocol_name.clone(),
+			metrics.clone(),
+			Arc::clone(&peer_store_handle),
+		);
 	net_config.add_notification_protocol(grandpa_protocol_config);
 
 	let beefy_gossip_proto_name =
@@ -825,7 +838,7 @@ pub fn new_full<OverseerGenerator: OverseerGen>(
 	// `beefy_on_demand_justifications_handler` is given to `beefy-gadget` task to be run,
 	// while `beefy_req_resp_cfg` is added to `config.network.request_response_protocols`.
 	let (beefy_on_demand_justifications_handler, beefy_req_resp_cfg) =
-		beefy::communication::request_response::BeefyJustifsRequestHandler::new(
+		beefy::communication::request_response::BeefyJustifsRequestHandler::new::<_, Network>(
 			&genesis_hash,
 			config.chain_spec.fork_id(),
 			client.clone(),
@@ -835,7 +848,11 @@ pub fn new_full<OverseerGenerator: OverseerGen>(
 		false => None,
 		true => {
 			let (beefy_notification_config, beefy_notification_service) =
-				beefy::communication::beefy_peers_set_config(beefy_gossip_proto_name.clone());
+				beefy::communication::beefy_peers_set_config::<_, Network>(
+					beefy_gossip_proto_name.clone(),
+					metrics.clone(),
+					Arc::clone(&peer_store_handle),
+				);
 
 			net_config.add_notification_protocol(beefy_notification_config);
 			net_config.add_request_response_protocol(beefy_req_resp_cfg);
@@ -857,13 +874,18 @@ pub fn new_full<OverseerGenerator: OverseerGen>(
 			use polkadot_network_bridge::{peer_sets_info, IsAuthority};
 			let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No };
 
-			peer_sets_info(is_authority, &peerset_protocol_names)
-				.into_iter()
-				.map(|(config, (peerset, service))| {
-					net_config.add_notification_protocol(config);
-					(peerset, service)
-				})
-				.collect::<HashMap<PeerSet, Box<dyn sc_network::NotificationService>>>()
+			peer_sets_info::<_, Network>(
+				is_authority,
+				&peerset_protocol_names,
+				metrics.clone(),
+				Arc::clone(&peer_store_handle),
+			)
+			.into_iter()
+			.map(|(config, (peerset, service))| {
+				net_config.add_notification_protocol(config);
+				(peerset, service)
+			})
+			.collect::<HashMap<PeerSet, Box<dyn sc_network::NotificationService>>>()
 		} else {
 			std::collections::HashMap::new()
 		};
@@ -871,17 +893,19 @@ pub fn new_full<OverseerGenerator: OverseerGen>(
 	let req_protocol_names = ReqProtocolNames::new(&genesis_hash, config.chain_spec.fork_id());
 
 	let (collation_req_v1_receiver, cfg) =
-		IncomingRequest::get_config_receiver(&req_protocol_names);
+		IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names);
 	net_config.add_request_response_protocol(cfg);
 	let (collation_req_v2_receiver, cfg) =
-		IncomingRequest::get_config_receiver(&req_protocol_names);
+		IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names);
 	net_config.add_request_response_protocol(cfg);
 	let (available_data_req_receiver, cfg) =
-		IncomingRequest::get_config_receiver(&req_protocol_names);
+		IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names);
 	net_config.add_request_response_protocol(cfg);
-	let (pov_req_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names);
+	let (pov_req_receiver, cfg) =
+		IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names);
 	net_config.add_request_response_protocol(cfg);
-	let (chunk_req_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names);
+	let (chunk_req_receiver, cfg) =
+		IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names);
 	net_config.add_request_response_protocol(cfg);
 
 	let grandpa_hard_forks = if config.chain_spec.is_kusama() {
@@ -924,12 +948,13 @@ pub fn new_full<OverseerGenerator: OverseerGen>(
 			None
 		};
 		let (statement_req_receiver, cfg) =
-			IncomingRequest::get_config_receiver(&req_protocol_names);
+			IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names);
 		net_config.add_request_response_protocol(cfg);
 		let (candidate_req_v2_receiver, cfg) =
-			IncomingRequest::get_config_receiver(&req_protocol_names);
+			IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names);
 		net_config.add_request_response_protocol(cfg);
-		let (dispute_req_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names);
+		let (dispute_req_receiver, cfg) =
+			IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names);
 		net_config.add_request_response_protocol(cfg);
 		let approval_voting_config = ApprovalVotingConfig {
 			col_approval_data: parachains_db::REAL_COLUMNS.col_approval_data,
@@ -970,6 +995,7 @@ pub fn new_full<OverseerGenerator: OverseerGen>(
 			block_announce_validator_builder: None,
 			warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)),
 			block_relay: None,
+			metrics,
 		})?;
 
 	if config.offchain_worker.enabled {
@@ -985,7 +1011,7 @@ pub fn new_full<OverseerGenerator: OverseerGen>(
 				transaction_pool: Some(OffchainTransactionPoolFactory::new(
 					transaction_pool.clone(),
 				)),
-				network_provider: network.clone(),
+				network_provider: Arc::new(network.clone()),
 				is_validator: role.is_authority(),
 				enable_http_requests: false,
 				custom_extensions: move |_| vec![],
@@ -1068,7 +1094,7 @@ pub fn new_full<OverseerGenerator: OverseerGen>(
 					..Default::default()
 				},
 				client.clone(),
-				network.clone(),
+				Arc::new(network.clone()),
 				Box::pin(dht_event_stream),
 				authority_discovery_role,
 				prometheus_registry.clone(),
@@ -1214,7 +1240,7 @@ pub fn new_full<OverseerGenerator: OverseerGen>(
 	if let Some(notification_service) = beefy_notification_service {
 		let justifications_protocol_name = beefy_on_demand_justifications_handler.protocol_name();
 		let network_params = beefy::BeefyNetworkParams {
-			network: network.clone(),
+			network: Arc::new(network.clone()),
 			sync: sync_service.clone(),
 			gossip_protocol_name: beefy_gossip_proto_name,
 			justifications_protocol_name,
@@ -1383,7 +1409,12 @@ pub fn build_full<OverseerGenerator: OverseerGen>(
 			capacity
 		});
 
-	new_full(config, params)
+	match config.network.network_backend {
+		sc_network::config::NetworkBackendType::Libp2p =>
+			new_full::<_, sc_network::NetworkWorker<Block, Hash>>(config, params),
+		sc_network::config::NetworkBackendType::Litep2p =>
+			new_full::<_, sc_network::Litep2pNetworkBackend>(config, params),
+	}
 }
 
 /// Reverts the node state down to at most the last finalized block.
diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs
index 9575b2458a259dfd2716aec6d688a95a73f5530d..26b1446bf515afa342bd53c6237a87e7e750022d 100644
--- a/polkadot/node/service/src/overseer.rs
+++ b/polkadot/node/service/src/overseer.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
 
-use super::{Block, Error, Hash, IsParachainNode, Registry};
+use super::{Error, IsParachainNode, Registry};
 use polkadot_node_subsystem_types::{ChainApiBackend, RuntimeApiSubsystemClient};
 use polkadot_overseer::{DummySubsystem, InitializedOverseerBuilder, SubsystemError};
 use sp_core::traits::SpawnNamed;
@@ -80,7 +80,7 @@ where
 	/// Runtime client generic, providing the `ProvideRuntimeApi` trait besides others.
 	pub runtime_client: Arc<RuntimeClient>,
 	/// Underlying network service implementation.
-	pub network_service: Arc<sc_network::NetworkService<Block, Hash>>,
+	pub network_service: Arc<dyn sc_network::service::traits::NetworkService>,
 	/// Underlying syncing service implementation.
 	pub sync_service: Arc<dyn consensus_common::SyncOracle + Send + Sync>,
 	/// Underlying authority discovery service.
@@ -183,11 +183,11 @@ pub fn validator_overseer_builder<Spawner, RuntimeClient>(
 		RuntimeApiSubsystem<RuntimeClient>,
 		AvailabilityStoreSubsystem,
 		NetworkBridgeRxSubsystem<
-			Arc<sc_network::NetworkService<Block, Hash>>,
+			Arc<dyn sc_network::service::traits::NetworkService>,
 			AuthorityDiscoveryService,
 		>,
 		NetworkBridgeTxSubsystem<
-			Arc<sc_network::NetworkService<Block, Hash>>,
+			Arc<dyn sc_network::service::traits::NetworkService>,
 			AuthorityDiscoveryService,
 		>,
 		ChainApiSubsystem<RuntimeClient>,
@@ -369,11 +369,11 @@ pub fn collator_overseer_builder<Spawner, RuntimeClient>(
 		RuntimeApiSubsystem<RuntimeClient>,
 		DummySubsystem,
 		NetworkBridgeRxSubsystem<
-			Arc<sc_network::NetworkService<Block, Hash>>,
+			Arc<dyn sc_network::service::traits::NetworkService>,
 			AuthorityDiscoveryService,
 		>,
 		NetworkBridgeTxSubsystem<
-			Arc<sc_network::NetworkService<Block, Hash>>,
+			Arc<dyn sc_network::service::traits::NetworkService>,
 			AuthorityDiscoveryService,
 		>,
 		ChainApiSubsystem<RuntimeClient>,
diff --git a/polkadot/node/subsystem-bench/Cargo.toml b/polkadot/node/subsystem-bench/Cargo.toml
index 37224d110e8813dbc7b66c2e3dce4a78b617f459..e534ac18e4b36bdb40737d4ad7052a008c0497e7 100644
--- a/polkadot/node/subsystem-bench/Cargo.toml
+++ b/polkadot/node/subsystem-bench/Cargo.toml
@@ -62,6 +62,7 @@ polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" }
 sp-keyring = { path = "../../../substrate/primitives/keyring" }
 sp-application-crypto = { path = "../../../substrate/primitives/application-crypto" }
 sc-network = { path = "../../../substrate/client/network" }
+sc-network-types = { path = "../../../substrate/client/network/types" }
 sc-service = { path = "../../../substrate/client/service" }
 sp-consensus = { path = "../../../substrate/primitives/consensus/common" }
 polkadot-node-metrics = { path = "../metrics" }
diff --git a/polkadot/node/subsystem-bench/src/lib/approval/helpers.rs b/polkadot/node/subsystem-bench/src/lib/approval/helpers.rs
index af5ff5aa1facc11f2d0caa8ec77276976a0554a5..ca58875c81393eebfea236e4fee0c25ca9d3a023 100644
--- a/polkadot/node/subsystem-bench/src/lib/approval/helpers.rs
+++ b/polkadot/node/subsystem-bench/src/lib/approval/helpers.rs
@@ -32,7 +32,7 @@ use polkadot_primitives::{
 use polkadot_primitives_test_helpers::dummy_candidate_receipt_bad_sig;
 use rand::{seq::SliceRandom, SeedableRng};
 use rand_chacha::ChaCha20Rng;
-use sc_network::PeerId;
+use sc_network_types::PeerId;
 use sp_consensus_babe::{
 	digests::{CompatibleDigestItem, PreDigest, SecondaryVRFPreDigest},
 	AllowedSlots, BabeEpochConfiguration, Epoch as BabeEpoch, VrfSignature, VrfTranscript,
diff --git a/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs b/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs
index 619a3617ca4ddd1ce13008be110415ab432e0405..219b2cb515d7da5df52df9902c0da28e09838763 100644
--- a/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs
+++ b/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs
@@ -48,7 +48,7 @@ use rand::{seq::SliceRandom, RngCore, SeedableRng};
 use rand_chacha::ChaCha20Rng;
 use rand_distr::{Distribution, Normal};
 use sc_keystore::LocalKeystore;
-use sc_network::PeerId;
+use sc_network_types::PeerId;
 use sc_service::SpawnTaskHandle;
 use sha1::Digest;
 use sp_application_crypto::AppCrypto;
diff --git a/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs b/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs
index f55ed99205ede2b8ed254248d9a024bde3d46a11..9641b62a94d8a8612cd394b5964fd66eb1d6a0b4 100644
--- a/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs
+++ b/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs
@@ -22,7 +22,7 @@ use itertools::Itertools;
 use parity_scale_codec::{Decode, Encode};
 use polkadot_node_network_protocol::v3 as protocol_v3;
 use polkadot_primitives::{CandidateIndex, Hash, ValidatorIndex};
-use sc_network::PeerId;
+use sc_network_types::PeerId;
 use std::collections::{HashMap, HashSet};
 
 #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)]
diff --git a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs
index fe98666906183df9a987a692b4a8be646eab60be..5b93c3d862de683ea35dde9f0a9e313b1a77ad4d 100644
--- a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs
+++ b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs
@@ -51,8 +51,9 @@ use polkadot_node_subsystem_types::{
 	Span,
 };
 use polkadot_overseer::{metrics::Metrics as OverseerMetrics, Handle as OverseerHandle};
-use polkadot_primitives::GroupIndex;
+use polkadot_primitives::{Block, GroupIndex, Hash};
 use sc_network::request_responses::{IncomingRequest as RawIncomingRequest, ProtocolConfig};
+
 use sc_service::SpawnTaskHandle;
 use serde::{Deserialize, Serialize};
 use std::{ops::Sub, sync::Arc, time::Instant};
@@ -140,20 +141,32 @@ pub fn prepare_test(
 	mode: TestDataAvailability,
 	with_prometheus_endpoint: bool,
 ) -> (TestEnvironment, Vec<ProtocolConfig>) {
-	let (collation_req_receiver, collation_req_cfg) =
-		IncomingRequest::get_config_receiver(&ReqProtocolNames::new(GENESIS_HASH, None));
-	let (pov_req_receiver, pov_req_cfg) =
-		IncomingRequest::get_config_receiver(&ReqProtocolNames::new(GENESIS_HASH, None));
-	let (chunk_req_receiver, chunk_req_cfg) =
-		IncomingRequest::get_config_receiver(&ReqProtocolNames::new(GENESIS_HASH, None));
-	let req_cfgs = vec![collation_req_cfg, pov_req_cfg];
-
 	let dependencies = TestEnvironmentDependencies::default();
 	let availability_state = NetworkAvailabilityState {
 		candidate_hashes: state.candidate_hashes.clone(),
 		available_data: state.available_data.clone(),
 		chunks: state.chunks.clone(),
 	};
+
+	let mut req_cfgs = Vec::new();
+
+	let (collation_req_receiver, collation_req_cfg) = IncomingRequest::get_config_receiver::<
+		Block,
+		sc_network::NetworkWorker<Block, Hash>,
+	>(&ReqProtocolNames::new(GENESIS_HASH, None));
+	req_cfgs.push(collation_req_cfg);
+
+	let (pov_req_receiver, pov_req_cfg) = IncomingRequest::get_config_receiver::<
+		Block,
+		sc_network::NetworkWorker<Block, Hash>,
+	>(&ReqProtocolNames::new(GENESIS_HASH, None));
+
+	let (chunk_req_receiver, chunk_req_cfg) = IncomingRequest::get_config_receiver::<
+		Block,
+		sc_network::NetworkWorker<Block, Hash>,
+	>(&ReqProtocolNames::new(GENESIS_HASH, None));
+	req_cfgs.push(pov_req_cfg);
+
 	let (network, network_interface, network_receiver) = new_network(
 		&state.config,
 		&dependencies,
diff --git a/polkadot/node/subsystem-bench/src/lib/configuration.rs b/polkadot/node/subsystem-bench/src/lib/configuration.rs
index 5725a5137ec4bb602db15f88dfce6a300f1c4bcd..1e0efb72a7dff163898bcf6974ce9bc048bfa086 100644
--- a/polkadot/node/subsystem-bench/src/lib/configuration.rs
+++ b/polkadot/node/subsystem-bench/src/lib/configuration.rs
@@ -21,7 +21,7 @@ use itertools::Itertools;
 use polkadot_primitives::{AssignmentId, AuthorityDiscoveryId, ValidatorId};
 use rand::thread_rng;
 use rand_distr::{Distribution, Normal, Uniform};
-use sc_network::PeerId;
+use sc_network_types::PeerId;
 use serde::{Deserialize, Serialize};
 use sp_consensus_babe::AuthorityId;
 use std::collections::HashMap;
diff --git a/polkadot/node/subsystem-bench/src/lib/network.rs b/polkadot/node/subsystem-bench/src/lib/network.rs
index 0f7b7d741e778e4535ccad1fe032f39d290e3ed2..9bf2415e5a864a9c4512350b9387b6c172b37114 100644
--- a/polkadot/node/subsystem-bench/src/lib/network.rs
+++ b/polkadot/node/subsystem-bench/src/lib/network.rs
@@ -67,8 +67,9 @@ use prometheus_endpoint::U64;
 use rand::{seq::SliceRandom, thread_rng};
 use sc_network::{
 	request_responses::{IncomingRequest, OutgoingResponse},
-	PeerId, RequestFailure,
+	RequestFailure,
 };
+use sc_network_types::PeerId;
 use sc_service::SpawnTaskHandle;
 use std::{
 	collections::HashMap,
diff --git a/polkadot/node/subsystem-types/Cargo.toml b/polkadot/node/subsystem-types/Cargo.toml
index 1019077638774ca62bdb370b3ed9425254935d90..93dd43c5dbfc495ff92fa76f28e3ad80992b8a6a 100644
--- a/polkadot/node/subsystem-types/Cargo.toml
+++ b/polkadot/node/subsystem-types/Cargo.toml
@@ -19,6 +19,7 @@ polkadot-statement-table = { path = "../../statement-table" }
 polkadot-node-jaeger = { path = "../jaeger" }
 orchestra = { version = "0.3.5", default-features = false, features = ["futures_channel"] }
 sc-network = { path = "../../../substrate/client/network" }
+sc-network-types = { path = "../../../substrate/client/network/types" }
 sp-api = { path = "../../../substrate/primitives/api" }
 sp-blockchain = { path = "../../../substrate/primitives/blockchain" }
 sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe" }
diff --git a/polkadot/node/subsystem-types/src/messages/network_bridge_event.rs b/polkadot/node/subsystem-types/src/messages/network_bridge_event.rs
index e6d7f64784ca0cffd1699fa59f7dcfce1cbd35a9..fa2c7687b38a52ed1a02cdd1373ab5fd41552470 100644
--- a/polkadot/node/subsystem-types/src/messages/network_bridge_event.rs
+++ b/polkadot/node/subsystem-types/src/messages/network_bridge_event.rs
@@ -16,7 +16,8 @@
 
 use std::{collections::HashSet, convert::TryFrom};
 
-pub use sc_network::{PeerId, ReputationChange};
+pub use sc_network::ReputationChange;
+pub use sc_network_types::PeerId;
 
 use polkadot_node_network_protocol::{
 	grid_topology::SessionGridTopology, peer_set::ProtocolVersion, ObservedRole, OurView, View,
diff --git a/polkadot/node/test/service/src/lib.rs b/polkadot/node/test/service/src/lib.rs
index eed11e62c21e4b811de7f286c2757a35ed5f19cf..d313c19333483095291b09c79103d681961dbff6 100644
--- a/polkadot/node/test/service/src/lib.rs
+++ b/polkadot/node/test/service/src/lib.rs
@@ -79,24 +79,46 @@ pub fn new_full<OverseerGenerator: OverseerGen>(
 ) -> Result<NewFull, Error> {
 	let workers_path = Some(workers_path.unwrap_or_else(get_relative_workers_path_for_test));
 
-	polkadot_service::new_full(
-		config,
-		polkadot_service::NewFullParams {
-			is_parachain_node,
-			enable_beefy: true,
-			force_authoring_backoff: false,
-			jaeger_agent: None,
-			telemetry_worker_handle: None,
-			node_version: None,
-			secure_validator_mode: false,
-			workers_path,
-			workers_names: None,
-			overseer_gen,
-			overseer_message_channel_capacity_override: None,
-			malus_finality_delay: None,
-			hwbench: None,
-		},
-	)
+	match config.network.network_backend {
+		sc_network::config::NetworkBackendType::Libp2p =>
+			polkadot_service::new_full::<_, sc_network::NetworkWorker<_, _>>(
+				config,
+				polkadot_service::NewFullParams {
+					is_parachain_node,
+					enable_beefy: true,
+					force_authoring_backoff: false,
+					jaeger_agent: None,
+					telemetry_worker_handle: None,
+					node_version: None,
+					secure_validator_mode: false,
+					workers_path,
+					workers_names: None,
+					overseer_gen,
+					overseer_message_channel_capacity_override: None,
+					malus_finality_delay: None,
+					hwbench: None,
+				},
+			),
+		sc_network::config::NetworkBackendType::Litep2p =>
+			polkadot_service::new_full::<_, sc_network::Litep2pNetworkBackend>(
+				config,
+				polkadot_service::NewFullParams {
+					is_parachain_node,
+					enable_beefy: true,
+					force_authoring_backoff: false,
+					jaeger_agent: None,
+					telemetry_worker_handle: None,
+					node_version: None,
+					secure_validator_mode: false,
+					workers_path,
+					workers_names: None,
+					overseer_gen,
+					overseer_message_channel_capacity_override: None,
+					malus_finality_delay: None,
+					hwbench: None,
+				},
+			),
+	}
 }
 
 fn get_relative_workers_path_for_test() -> PathBuf {
diff --git a/prdoc/pr_2944.prdoc b/prdoc/pr_2944.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..eafce7396c082e093d19c5b30ea1a2a8366b1c44
--- /dev/null
+++ b/prdoc/pr_2944.prdoc
@@ -0,0 +1,28 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: "Integrate litep2p into Polkadot SDK"
+
+doc:
+  - audience: Node Dev
+    description: |
+      litep2p is a libp2p-compatible P2P networking library. It supports all of the features of rust-libp2p
+      that are currently being utilized by Polkadot SDK and is a drop-in replacement for any node operator.
+
+      For node developers, introduction of litep2p implies specifying the networking backend that Polkadot SDK
+      shall use for P2P networking. A new trait called `NetworkBackend` is introduced which is implemented
+      by both the libp2p and litep2p backends and which is used to initialize any networking-related code.
+
+  - audience: Node Operator
+    description: |
+      litep2p is considered experimental and rust-libp2p will remain as the default networking backend
+      for Polkadot SDK but litep2p can be selected with `--network-backend litep2p`.
+
+crates:
+  - name: "sc-network"
+  - name: "sc-service"
+  - name: "minimal-template-node"
+  - name: "solochain-template-node"
+  - name: "staging-node-cli"
+  - name: "polkadot-service"
+  - name: "parachain-template-node"
diff --git a/substrate/bin/node/cli/benches/block_production.rs b/substrate/bin/node/cli/benches/block_production.rs
index d04780d5f9535791f5e904bbc89405355cbbd0ce..f60610873d8c5a3832c77864a2c93fed023998ea 100644
--- a/substrate/bin/node/cli/benches/block_production.rs
+++ b/substrate/bin/node/cli/benches/block_production.rs
@@ -104,8 +104,13 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase {
 		wasm_runtime_overrides: None,
 	};
 
-	node_cli::service::new_full_base(config, None, false, |_, _| ())
-		.expect("creating a full node doesn't fail")
+	node_cli::service::new_full_base::<sc_network::NetworkWorker<_, _>>(
+		config,
+		None,
+		false,
+		|_, _| (),
+	)
+	.expect("creating a full node doesn't fail")
 }
 
 fn extrinsic_set_time(now: u64) -> OpaqueExtrinsic {
diff --git a/substrate/bin/node/cli/benches/transaction_pool.rs b/substrate/bin/node/cli/benches/transaction_pool.rs
index de4eef1944d41bc6f37d06819ad70ca0a6dad109..1906ae697e9007ece7539757dbd40d9f504007bd 100644
--- a/substrate/bin/node/cli/benches/transaction_pool.rs
+++ b/substrate/bin/node/cli/benches/transaction_pool.rs
@@ -101,7 +101,13 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase {
 	};
 
 	tokio_handle.block_on(async move {
-		node_cli::service::new_full_base(config, None, false, |_, _| ()).expect("Creates node")
+		node_cli::service::new_full_base::<sc_network::NetworkWorker<_, _>>(
+			config,
+			None,
+			false,
+			|_, _| (),
+		)
+		.expect("Creates node")
 	})
 }
 
diff --git a/substrate/bin/node/cli/src/chain_spec.rs b/substrate/bin/node/cli/src/chain_spec.rs
index b6e8fb8a14edfa22a4d221515185b2748bc733c7..d48d4a50f85f2672d5e65c639a6a0b200c06756c 100644
--- a/substrate/bin/node/cli/src/chain_spec.rs
+++ b/substrate/bin/node/cli/src/chain_spec.rs
@@ -513,7 +513,7 @@ pub(crate) mod tests {
 
 		sc_service_test::connectivity(integration_test_config_with_two_authorities(), |config| {
 			let NewFullBase { task_manager, client, network, sync, transaction_pool, .. } =
-				new_full_base(config, None, false, |_, _| ())?;
+				new_full_base::<sc_network::NetworkWorker<_, _>>(config, None, false, |_, _| ())?;
 			Ok(sc_service_test::TestNetComponents::new(
 				task_manager,
 				client,
diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs
index d6e2a29d30b8ab99f45890424e99c60c26342151..5dc1193daf8d67f6f9067eb42a161dab9242efed 100644
--- a/substrate/bin/node/cli/src/service.rs
+++ b/substrate/bin/node/cli/src/service.rs
@@ -29,7 +29,9 @@ use kitchensink_runtime::RuntimeApi;
 use node_primitives::Block;
 use sc_client_api::{Backend, BlockBackend};
 use sc_consensus_babe::{self, SlotProportion};
-use sc_network::{event::Event, NetworkEventStream, NetworkService};
+use sc_network::{
+	event::Event, service::traits::NetworkService, NetworkBackend, NetworkEventStream,
+};
 use sc_network_sync::{strategy::warp::WarpSyncParams, SyncingService};
 use sc_service::{config::Configuration, error::Error as ServiceError, RpcHandlers, TaskManager};
 use sc_statement_store::Store as StatementStore;
@@ -368,7 +370,7 @@ pub struct NewFullBase {
 	/// The client instance of the node.
 	pub client: Arc<FullClient>,
 	/// The networking service of the node.
-	pub network: Arc<NetworkService<Block, <Block as BlockT>::Hash>>,
+	pub network: Arc<dyn NetworkService>,
 	/// The syncing service of the node.
 	pub sync: Arc<SyncingService<Block>>,
 	/// The transaction pool of the node.
@@ -378,7 +380,7 @@ pub struct NewFullBase {
 }
 
 /// Creates a full service from the configuration.
-pub fn new_full_base(
+pub fn new_full_base<N: NetworkBackend<Block, <Block as BlockT>::Hash>>(
 	config: Configuration,
 	mixnet_config: Option<sc_mixnet::Config>,
 	disable_hardware_benchmarks: bool,
@@ -420,15 +422,26 @@ pub fn new_full_base(
 			(rpc_builder, import_setup, rpc_setup, mut telemetry, statement_store, mixnet_api_backend),
 	} = new_partial(&config, mixnet_config.as_ref())?;
 
+	let metrics = N::register_notification_metrics(
+		config.prometheus_config.as_ref().map(|cfg| &cfg.registry),
+	);
 	let shared_voter_state = rpc_setup;
 	let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht;
 	let auth_disc_public_addresses = config.network.public_addresses.clone();
-	let mut net_config = sc_network::config::FullNetworkConfiguration::new(&config.network);
+
+	let mut net_config =
+		sc_network::config::FullNetworkConfiguration::<_, _, N>::new(&config.network);
+
 	let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed");
+	let peer_store_handle = net_config.peer_store_handle();
 
 	let grandpa_protocol_name = grandpa::protocol_standard_name(&genesis_hash, &config.chain_spec);
 	let (grandpa_protocol_config, grandpa_notification_service) =
-		grandpa::grandpa_peers_set_config(grandpa_protocol_name.clone());
+		grandpa::grandpa_peers_set_config::<_, N>(
+			grandpa_protocol_name.clone(),
+			metrics.clone(),
+			Arc::clone(&peer_store_handle),
+		);
 	net_config.add_notification_protocol(grandpa_protocol_config);
 
 	let beefy_gossip_proto_name =
@@ -436,7 +449,7 @@ pub fn new_full_base(
 	// `beefy_on_demand_justifications_handler` is given to `beefy-gadget` task to be run,
 	// while `beefy_req_resp_cfg` is added to `config.network.request_response_protocols`.
 	let (beefy_on_demand_justifications_handler, beefy_req_resp_cfg) =
-		beefy::communication::request_response::BeefyJustifsRequestHandler::new(
+		beefy::communication::request_response::BeefyJustifsRequestHandler::new::<_, N>(
 			&genesis_hash,
 			config.chain_spec.fork_id(),
 			client.clone(),
@@ -444,23 +457,33 @@ pub fn new_full_base(
 		);
 
 	let (beefy_notification_config, beefy_notification_service) =
-		beefy::communication::beefy_peers_set_config(beefy_gossip_proto_name.clone());
+		beefy::communication::beefy_peers_set_config::<_, N>(
+			beefy_gossip_proto_name.clone(),
+			metrics.clone(),
+			Arc::clone(&peer_store_handle),
+		);
 
 	net_config.add_notification_protocol(beefy_notification_config);
 	net_config.add_request_response_protocol(beefy_req_resp_cfg);
 
 	let (statement_handler_proto, statement_config) =
-		sc_network_statement::StatementHandlerPrototype::new(
+		sc_network_statement::StatementHandlerPrototype::new::<_, _, N>(
 			genesis_hash,
 			config.chain_spec.fork_id(),
+			metrics.clone(),
+			Arc::clone(&peer_store_handle),
 		);
 	net_config.add_notification_protocol(statement_config);
 
 	let mixnet_protocol_name =
 		sc_mixnet::protocol_name(genesis_hash.as_ref(), config.chain_spec.fork_id());
 	let mixnet_notification_service = mixnet_config.as_ref().map(|mixnet_config| {
-		let (config, notification_service) =
-			sc_mixnet::peers_set_config(mixnet_protocol_name.clone(), mixnet_config);
+		let (config, notification_service) = sc_mixnet::peers_set_config::<_, N>(
+			mixnet_protocol_name.clone(),
+			mixnet_config,
+			metrics.clone(),
+			Arc::clone(&peer_store_handle),
+		);
 		net_config.add_notification_protocol(config);
 		notification_service
 	});
@@ -482,6 +505,7 @@ pub fn new_full_base(
 			block_announce_validator_builder: None,
 			warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)),
 			block_relay: None,
+			metrics,
 		})?;
 
 	if let Some(mixnet_config) = mixnet_config {
@@ -615,7 +639,7 @@ pub fn new_full_base(
 					..Default::default()
 				},
 				client.clone(),
-				network.clone(),
+				Arc::new(network.clone()),
 				Box::pin(dht_event_stream),
 				authority_discovery_role,
 				prometheus_registry.clone(),
@@ -634,7 +658,7 @@ pub fn new_full_base(
 
 	// beefy is enabled if its notification service exists
 	let network_params = beefy::BeefyNetworkParams {
-		network: network.clone(),
+		network: Arc::new(network.clone()),
 		sync: sync_service.clone(),
 		gossip_protocol_name: beefy_gossip_proto_name,
 		justifications_protocol_name: beefy_on_demand_justifications_handler.protocol_name(),
@@ -746,7 +770,7 @@ pub fn new_full_base(
 				transaction_pool: Some(OffchainTransactionPoolFactory::new(
 					transaction_pool.clone(),
 				)),
-				network_provider: network.clone(),
+				network_provider: Arc::new(network.clone()),
 				is_validator: role.is_authority(),
 				enable_http_requests: true,
 				custom_extensions: move |_| {
@@ -773,8 +797,29 @@ pub fn new_full_base(
 pub fn new_full(config: Configuration, cli: Cli) -> Result<TaskManager, ServiceError> {
 	let mixnet_config = cli.mixnet_params.config(config.role.is_authority());
 	let database_path = config.database.path().map(Path::to_path_buf);
-	let task_manager = new_full_base(config, mixnet_config, cli.no_hardware_benchmarks, |_, _| ())
-		.map(|NewFullBase { task_manager, .. }| task_manager)?;
+
+	let task_manager = match config.network.network_backend {
+		sc_network::config::NetworkBackendType::Libp2p => {
+			let task_manager = new_full_base::<sc_network::NetworkWorker<_, _>>(
+				config,
+				mixnet_config,
+				cli.no_hardware_benchmarks,
+				|_, _| (),
+			)
+			.map(|NewFullBase { task_manager, .. }| task_manager)?;
+			task_manager
+		},
+		sc_network::config::NetworkBackendType::Litep2p => {
+			let task_manager = new_full_base::<sc_network::Litep2pNetworkBackend>(
+				config,
+				mixnet_config,
+				cli.no_hardware_benchmarks,
+				|_, _| (),
+			)
+			.map(|NewFullBase { task_manager, .. }| task_manager)?;
+			task_manager
+		},
+	};
 
 	if let Some(database_path) = database_path {
 		sc_storage_monitor::StorageMonitorService::try_spawn(
@@ -851,7 +896,7 @@ mod tests {
 			|config| {
 				let mut setup_handles = None;
 				let NewFullBase { task_manager, client, network, sync, transaction_pool, .. } =
-					new_full_base(
+					new_full_base::<sc_network::NetworkWorker<_, _>>(
 						config,
 						None,
 						false,
@@ -1029,7 +1074,12 @@ mod tests {
 			crate::chain_spec::tests::integration_test_config_with_two_authorities(),
 			|config| {
 				let NewFullBase { task_manager, client, network, sync, transaction_pool, .. } =
-					new_full_base(config, None, false, |_, _| ())?;
+					new_full_base::<sc_network::NetworkWorker<_, _>>(
+						config,
+						None,
+						false,
+						|_, _| (),
+					)?;
 				Ok(sc_service_test::TestNetComponents::new(
 					task_manager,
 					client,
diff --git a/substrate/client/authority-discovery/Cargo.toml b/substrate/client/authority-discovery/Cargo.toml
index dbd9ba0131a6825e708459ee86c702ea89974a57..0cf90ada8ac61bda63a12c5f3072017ccd067270 100644
--- a/substrate/client/authority-discovery/Cargo.toml
+++ b/substrate/client/authority-discovery/Cargo.toml
@@ -25,10 +25,7 @@ futures = "0.3.30"
 futures-timer = "3.0.1"
 ip_network = "0.4.1"
 libp2p = { version = "0.51.4", features = ["ed25519", "kad"] }
-multihash = { version = "0.18.1", default-features = false, features = [
-	"sha2",
-	"std",
-] }
+multihash = { version = "0.17.0", default-features = false, features = ["sha2", "std"] }
 linked_hash_set = "0.1.4"
 log = { workspace = true, default-features = true }
 prost = "0.12"
@@ -37,6 +34,7 @@ thiserror = { workspace = true }
 prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" }
 sc-client-api = { path = "../api" }
 sc-network = { path = "../network" }
+sc-network-types = { path = "../network/types" }
 sp-api = { path = "../../primitives/api" }
 sp-authority-discovery = { path = "../../primitives/authority-discovery" }
 sp-blockchain = { path = "../../primitives/blockchain" }
diff --git a/substrate/client/authority-discovery/src/error.rs b/substrate/client/authority-discovery/src/error.rs
index ca685115d49754d400afb2676db110549b53c73b..6f791237c2f229c3f94996e2a91c0c0de8800d0d 100644
--- a/substrate/client/authority-discovery/src/error.rs
+++ b/substrate/client/authority-discovery/src/error.rs
@@ -35,7 +35,7 @@ pub enum Error {
 	VerifyingDhtPayload,
 
 	#[error("Failed to hash the authority id to be used as a dht key.")]
-	HashingAuthorityId(#[from] libp2p::core::multiaddr::multihash::Error),
+	HashingAuthorityId(#[from] sc_network::multiaddr::multihash::Error),
 
 	#[error("Failed calling into the Substrate runtime: {0}")]
 	CallingRuntime(#[from] sp_blockchain::Error),
@@ -53,10 +53,10 @@ pub enum Error {
 	EncodingDecodingScale(#[from] codec::Error),
 
 	#[error("Failed to parse a libp2p multi address.")]
-	ParsingMultiaddress(#[from] libp2p::core::multiaddr::Error),
+	ParsingMultiaddress(#[from] sc_network::multiaddr::Error),
 
-	#[error("Failed to parse a libp2p key.")]
-	ParsingLibp2pIdentity(#[from] libp2p::identity::DecodingError),
+	#[error("Failed to parse a libp2p key: {0}")]
+	ParsingLibp2pIdentity(String),
 
 	#[error("Failed to sign: {0}.")]
 	CannotSign(String),
diff --git a/substrate/client/authority-discovery/src/lib.rs b/substrate/client/authority-discovery/src/lib.rs
index 281188de14324ed38793a8968341f6d450d3f8c0..e674c51571effe6c7d209c5eaaaad96da3b28a7a 100644
--- a/substrate/client/authority-discovery/src/lib.rs
+++ b/substrate/client/authority-discovery/src/lib.rs
@@ -40,8 +40,8 @@ use futures::{
 	Stream,
 };
 
-use libp2p::{Multiaddr, PeerId};
-use sc_network::event::DhtEvent;
+use sc_network::{event::DhtEvent, Multiaddr};
+use sc_network_types::PeerId;
 use sp_authority_discovery::AuthorityId;
 use sp_blockchain::HeaderBackend;
 use sp_runtime::traits::Block as BlockT;
@@ -117,16 +117,15 @@ impl Default for WorkerConfig {
 /// Create a new authority discovery [`Worker`] and [`Service`].
 ///
 /// See the struct documentation of each for more details.
-pub fn new_worker_and_service<Client, Network, Block, DhtEventStream>(
+pub fn new_worker_and_service<Client, Block, DhtEventStream>(
 	client: Arc<Client>,
-	network: Arc<Network>,
+	network: Arc<dyn NetworkProvider>,
 	dht_event_rx: DhtEventStream,
 	role: Role,
 	prometheus_registry: Option<prometheus_endpoint::Registry>,
-) -> (Worker<Client, Network, Block, DhtEventStream>, Service)
+) -> (Worker<Client, Block, DhtEventStream>, Service)
 where
 	Block: BlockT + Unpin + 'static,
-	Network: NetworkProvider,
 	Client: AuthorityDiscovery<Block> + Send + Sync + 'static + HeaderBackend<Block>,
 	DhtEventStream: Stream<Item = DhtEvent> + Unpin,
 {
@@ -143,17 +142,16 @@ where
 /// Same as [`new_worker_and_service`] but with support for providing the `config`.
 ///
 /// When in doubt use [`new_worker_and_service`] as it will use the default configuration.
-pub fn new_worker_and_service_with_config<Client, Network, Block, DhtEventStream>(
+pub fn new_worker_and_service_with_config<Client, Block, DhtEventStream>(
 	config: WorkerConfig,
 	client: Arc<Client>,
-	network: Arc<Network>,
+	network: Arc<dyn NetworkProvider>,
 	dht_event_rx: DhtEventStream,
 	role: Role,
 	prometheus_registry: Option<prometheus_endpoint::Registry>,
-) -> (Worker<Client, Network, Block, DhtEventStream>, Service)
+) -> (Worker<Client, Block, DhtEventStream>, Service)
 where
 	Block: BlockT + Unpin + 'static,
-	Network: NetworkProvider,
 	Client: AuthorityDiscovery<Block> + 'static,
 	DhtEventStream: Stream<Item = DhtEvent> + Unpin,
 {
diff --git a/substrate/client/authority-discovery/src/service.rs b/substrate/client/authority-discovery/src/service.rs
index 89ae058d17f7adeac08c2faeec959694061a3ff0..60c7a2b990378f0c91a16854bdb815de8661c53a 100644
--- a/substrate/client/authority-discovery/src/service.rs
+++ b/substrate/client/authority-discovery/src/service.rs
@@ -25,7 +25,8 @@ use futures::{
 	SinkExt,
 };
 
-use libp2p::{Multiaddr, PeerId};
+use sc_network::Multiaddr;
+use sc_network_types::PeerId;
 use sp_authority_discovery::AuthorityId;
 
 /// Service to interact with the [`crate::Worker`].
diff --git a/substrate/client/authority-discovery/src/tests.rs b/substrate/client/authority-discovery/src/tests.rs
index edd50d073c8d79729676e55300f37fd0f8ef0064..acfd0e61de01981e0bd9a69825b073e97d020322 100644
--- a/substrate/client/authority-discovery/src/tests.rs
+++ b/substrate/client/authority-discovery/src/tests.rs
@@ -25,13 +25,10 @@ use crate::{
 };
 
 use futures::{channel::mpsc::channel, executor::LocalPool, task::LocalSpawn};
-use libp2p::{
-	core::multiaddr::{Multiaddr, Protocol},
-	identity::ed25519,
-	PeerId,
-};
+use libp2p::identity::ed25519;
 use std::{collections::HashSet, sync::Arc};
 
+use sc_network::{multiaddr::Protocol, Multiaddr, PeerId};
 use sp_authority_discovery::AuthorityId;
 use sp_core::crypto::key_types;
 use sp_keystore::{testing::MemoryKeystore, Keystore};
@@ -78,7 +75,7 @@ fn get_addresses_and_authority_id() {
 		);
 		assert_eq!(
 			Some(HashSet::from([remote_authority_id])),
-			service.get_authority_ids_by_peer_id(remote_peer_id).await,
+			service.get_authority_ids_by_peer_id(remote_peer_id.into()).await,
 		);
 	});
 }
diff --git a/substrate/client/authority-discovery/src/worker.rs b/substrate/client/authority-discovery/src/worker.rs
index 546f8cdbffdc807c9efabaa4417766cd264b5da7..53418d2d38c4a0ce0ab2c2b938b288360d2fdbbc 100644
--- a/substrate/client/authority-discovery/src/worker.rs
+++ b/substrate/client/authority-discovery/src/worker.rs
@@ -34,9 +34,8 @@ use futures::{channel::mpsc, future, stream::Fuse, FutureExt, Stream, StreamExt}
 use addr_cache::AddrCache;
 use codec::{Decode, Encode};
 use ip_network::IpNetwork;
-use libp2p::{core::multiaddr, identity::PublicKey, multihash::Multihash, Multiaddr, PeerId};
 use linked_hash_set::LinkedHashSet;
-use multihash_codetable::{Code, MultihashDigest};
+use multihash::{Code, Multihash, MultihashDigest};
 
 use log::{debug, error, log_enabled};
 use prometheus_endpoint::{register, Counter, CounterVec, Gauge, Opts, U64};
@@ -44,8 +43,10 @@ use prost::Message;
 use rand::{seq::SliceRandom, thread_rng};
 
 use sc_network::{
-	event::DhtEvent, KademliaKey, NetworkDHTProvider, NetworkSigner, NetworkStateInfo, Signature,
+	event::DhtEvent, multiaddr, KademliaKey, Multiaddr, NetworkDHTProvider, NetworkSigner,
+	NetworkStateInfo,
 };
+use sc_network_types::PeerId;
 use sp_api::{ApiError, ProvideRuntimeApi};
 use sp_authority_discovery::{
 	AuthorityDiscoveryApi, AuthorityId, AuthorityPair, AuthoritySignature,
@@ -108,13 +109,13 @@ pub enum Role {
 ///    network peerset.
 ///
 ///    5. Allow querying of the collected addresses via the [`crate::Service`].
-pub struct Worker<Client, Network, Block, DhtEventStream> {
+pub struct Worker<Client, Block, DhtEventStream> {
 	/// Channel receiver for messages send by a [`crate::Service`].
 	from_service: Fuse<mpsc::Receiver<ServicetoWorkerMsg>>,
 
 	client: Arc<Client>,
 
-	network: Arc<Network>,
+	network: Arc<dyn NetworkProvider>,
 
 	/// Channel we receive Dht events on.
 	dht_event_rx: DhtEventStream,
@@ -192,10 +193,9 @@ where
 	}
 }
 
-impl<Client, Network, Block, DhtEventStream> Worker<Client, Network, Block, DhtEventStream>
+impl<Client, Block, DhtEventStream> Worker<Client, Block, DhtEventStream>
 where
 	Block: BlockT + Unpin + 'static,
-	Network: NetworkProvider,
 	Client: AuthorityDiscovery<Block> + 'static,
 	DhtEventStream: Stream<Item = DhtEvent> + Unpin,
 {
@@ -203,7 +203,7 @@ where
 	pub(crate) fn new(
 		from_service: mpsc::Receiver<ServicetoWorkerMsg>,
 		client: Arc<Client>,
-		network: Arc<Network>,
+		network: Arc<dyn NetworkProvider>,
 		dht_event_rx: DhtEventStream,
 		role: Role,
 		prometheus_registry: Option<prometheus_endpoint::Registry>,
@@ -406,10 +406,14 @@ where
 			Role::Discover => return Ok(()),
 		};
 
-		let keys = Worker::<Client, Network, Block, DhtEventStream>::get_own_public_keys_within_authority_set(
-			key_store.clone(),
-			self.client.as_ref(),
-		).await?.into_iter().collect::<HashSet<_>>();
+		let keys =
+			Worker::<Client, Block, DhtEventStream>::get_own_public_keys_within_authority_set(
+				key_store.clone(),
+				self.client.as_ref(),
+			)
+			.await?
+			.into_iter()
+			.collect::<HashSet<_>>();
 
 		if only_if_changed {
 			// If the authority keys did not change and the `publish_if_changed_interval` was
@@ -434,7 +438,7 @@ where
 		}
 
 		let serialized_record = serialize_authority_record(addresses)?;
-		let peer_signature = sign_record_with_peer_id(&serialized_record, self.network.as_ref())?;
+		let peer_signature = sign_record_with_peer_id(&serialized_record, &self.network)?;
 
 		let keys_vec = keys.iter().cloned().collect::<Vec<_>>();
 
@@ -634,12 +638,15 @@ where
 				// properly signed by the owner of the PeerId
 
 				if let Some(peer_signature) = peer_signature {
-					let public_key = PublicKey::try_decode_protobuf(&peer_signature.public_key)
-						.map_err(Error::ParsingLibp2pIdentity)?;
-					let signature = Signature { public_key, bytes: peer_signature.signature };
-
-					if !signature.verify(record, &remote_peer_id) {
-						return Err(Error::VerifyingDhtPayload)
+					match self.network.verify(
+						remote_peer_id.into(),
+						&peer_signature.public_key,
+						&peer_signature.signature,
+						&record,
+					) {
+						Ok(true) => {},
+						Ok(false) => return Err(Error::VerifyingDhtPayload),
+						Err(error) => return Err(Error::ParsingLibp2pIdentity(error)),
 					}
 				} else if self.strict_record_validation {
 					return Err(Error::MissingPeerIdSignature)
@@ -701,9 +708,15 @@ where
 /// NetworkProvider provides [`Worker`] with all necessary hooks into the
 /// underlying Substrate networking. Using this trait abstraction instead of
 /// `sc_network::NetworkService` directly is necessary to unit test [`Worker`].
-pub trait NetworkProvider: NetworkDHTProvider + NetworkStateInfo + NetworkSigner {}
+pub trait NetworkProvider:
+	NetworkDHTProvider + NetworkStateInfo + NetworkSigner + Send + Sync
+{
+}
 
-impl<T> NetworkProvider for T where T: NetworkDHTProvider + NetworkStateInfo + NetworkSigner {}
+impl<T> NetworkProvider for T where
+	T: NetworkDHTProvider + NetworkStateInfo + NetworkSigner + Send + Sync
+{
+}
 
 fn hash_authority_id(id: &[u8]) -> KademliaKey {
 	KademliaKey::new(&Code::Sha2_256.digest(id).digest())
@@ -741,7 +754,7 @@ fn sign_record_with_peer_id(
 	network: &impl NetworkSigner,
 ) -> Result<schema::PeerSignature> {
 	let signature = network
-		.sign_with_local_identity(serialized_record)
+		.sign_with_local_identity(serialized_record.to_vec())
 		.map_err(|e| Error::CannotSign(format!("{} (network packet)", e)))?;
 	let public_key = signature.public_key.encode_protobuf();
 	let signature = signature.bytes;
@@ -855,7 +868,7 @@ impl Metrics {
 
 // Helper functions for unit testing.
 #[cfg(test)]
-impl<Block, Client, Network, DhtEventStream> Worker<Client, Network, Block, DhtEventStream> {
+impl<Block, Client, DhtEventStream> Worker<Client, Block, DhtEventStream> {
 	pub(crate) fn inject_addresses(&mut self, authority: AuthorityId, addresses: Vec<Multiaddr>) {
 		self.addr_cache.insert(authority, addresses);
 	}
diff --git a/substrate/client/authority-discovery/src/worker/addr_cache.rs b/substrate/client/authority-discovery/src/worker/addr_cache.rs
index 8084b7f0a6dff04edb525db0034c01ae2398219c..6e3b3c8af20190f3711aac72fbd5e7cf3eb493c7 100644
--- a/substrate/client/authority-discovery/src/worker/addr_cache.rs
+++ b/substrate/client/authority-discovery/src/worker/addr_cache.rs
@@ -16,10 +16,8 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-use libp2p::{
-	core::multiaddr::{Multiaddr, Protocol},
-	PeerId,
-};
+use sc_network::{multiaddr::Protocol, Multiaddr};
+use sc_network_types::PeerId;
 use sp_authority_discovery::AuthorityId;
 use std::collections::{hash_map::Entry, HashMap, HashSet};
 
@@ -178,7 +176,7 @@ fn addresses_to_peer_ids(addresses: &HashSet<Multiaddr>) -> HashSet<PeerId> {
 mod tests {
 	use super::*;
 
-	use libp2p::multihash::{self, Multihash};
+	use multihash::{self, Multihash};
 	use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult};
 
 	use sp_authority_discovery::{AuthorityId, AuthorityPair};
diff --git a/substrate/client/authority-discovery/src/worker/schema/tests.rs b/substrate/client/authority-discovery/src/worker/schema/tests.rs
index c765e4e5384db483cd2e9c94156ff487c8410456..ef06ed7d336b0894f54511dfc87561a7b5d68d54 100644
--- a/substrate/client/authority-discovery/src/worker/schema/tests.rs
+++ b/substrate/client/authority-discovery/src/worker/schema/tests.rs
@@ -21,8 +21,9 @@ mod schema_v1 {
 }
 
 use super::*;
-use libp2p::{identity::Keypair, multiaddr::Multiaddr, PeerId};
+use libp2p::identity::Keypair;
 use prost::Message;
+use sc_network::{Multiaddr, PeerId};
 
 #[test]
 fn v2_decodes_v1() {
diff --git a/substrate/client/authority-discovery/src/worker/tests.rs b/substrate/client/authority-discovery/src/worker/tests.rs
index 6c684d88e5027b97c08fc29280e9686539440441..caeac56c54073e53026ec3f8c52c8e7f88958f2b 100644
--- a/substrate/client/authority-discovery/src/worker/tests.rs
+++ b/substrate/client/authority-discovery/src/worker/tests.rs
@@ -29,16 +29,11 @@ use futures::{
 	sink::SinkExt,
 	task::LocalSpawn,
 };
-use libp2p::{
-	core::multiaddr,
-	identity::{Keypair, SigningError},
-	kad::record::Key as KademliaKey,
-	PeerId,
-};
+use libp2p::{core::multiaddr, identity::SigningError, kad::record::Key as KademliaKey, PeerId};
 use prometheus_endpoint::prometheus::default_registry;
 
 use sc_client_api::HeaderBackend;
-use sc_network::Signature;
+use sc_network::{service::signature::Keypair, Signature};
 use sp_api::{ApiRef, ProvideRuntimeApi};
 use sp_keystore::{testing::MemoryKeystore, Keystore};
 use sp_runtime::traits::{Block as BlockT, NumberFor, Zero};
@@ -122,7 +117,7 @@ pub enum TestNetworkEvent {
 }
 
 pub struct TestNetwork {
-	peer_id: PeerId,
+	peer_id: sc_network_types::PeerId,
 	identity: Keypair,
 	external_addresses: Vec<Multiaddr>,
 	// Whenever functions on `TestNetwork` are called, the function arguments are added to the
@@ -158,10 +153,25 @@ impl Default for TestNetwork {
 impl NetworkSigner for TestNetwork {
 	fn sign_with_local_identity(
 		&self,
-		msg: impl AsRef<[u8]>,
+		msg: Vec<u8>,
 	) -> std::result::Result<Signature, SigningError> {
 		Signature::sign_message(msg, &self.identity)
 	}
+
+	fn verify(
+		&self,
+		peer_id: sc_network_types::PeerId,
+		public_key: &Vec<u8>,
+		signature: &Vec<u8>,
+		message: &Vec<u8>,
+	) -> std::result::Result<bool, String> {
+		let public_key = libp2p::identity::PublicKey::try_decode_protobuf(&public_key)
+			.map_err(|error| error.to_string())?;
+		let peer_id: PeerId = peer_id.into();
+		let remote: libp2p::PeerId = public_key.to_peer_id();
+
+		Ok(peer_id == remote && public_key.verify(message, signature))
+	}
 }
 
 impl NetworkDHTProvider for TestNetwork {
@@ -182,8 +192,8 @@ impl NetworkDHTProvider for TestNetwork {
 }
 
 impl NetworkStateInfo for TestNetwork {
-	fn local_peer_id(&self) -> PeerId {
-		self.peer_id
+	fn local_peer_id(&self) -> sc_network_types::PeerId {
+		self.peer_id.into()
 	}
 
 	fn external_addresses(&self) -> Vec<Multiaddr> {
@@ -202,10 +212,20 @@ struct TestSigner<'a> {
 impl<'a> NetworkSigner for TestSigner<'a> {
 	fn sign_with_local_identity(
 		&self,
-		msg: impl AsRef<[u8]>,
+		msg: Vec<u8>,
 	) -> std::result::Result<Signature, SigningError> {
 		Signature::sign_message(msg, self.keypair)
 	}
+
+	fn verify(
+		&self,
+		_: sc_network_types::PeerId,
+		_: &Vec<u8>,
+		_: &Vec<u8>,
+		_: &Vec<u8>,
+	) -> std::result::Result<bool, String> {
+		unimplemented!();
+	}
 }
 
 fn build_dht_event<Signer: NetworkSigner>(
@@ -500,7 +520,6 @@ struct DhtValueFoundTester {
 	pub local_worker: Option<
 		Worker<
 			TestApi,
-			TestNetwork,
 			sp_runtime::generic::Block<
 				sp_runtime::generic::Header<u64, sp_runtime::traits::BlakeTwo256>,
 				substrate_test_runtime_client::runtime::Extrinsic,
diff --git a/substrate/client/cli/src/arg_enums.rs b/substrate/client/cli/src/arg_enums.rs
index d436673cb9de77deac7625e0e401cfbd34f5a04f..b5819d03447a8bfb6eb3e9e40d2f129af80283ea 100644
--- a/substrate/client/cli/src/arg_enums.rs
+++ b/substrate/client/cli/src/arg_enums.rs
@@ -296,3 +296,23 @@ impl Into<sc_network::config::SyncMode> for SyncMode {
 		}
 	}
 }
+
+/// Network backend type.
+#[derive(Debug, Clone, Copy, ValueEnum, PartialEq)]
+#[value(rename_all = "lower")]
+pub enum NetworkBackendType {
+	/// Use libp2p for P2P networking.
+	Libp2p,
+
+	/// Use litep2p for P2P networking.
+	Litep2p,
+}
+
+impl Into<sc_network::config::NetworkBackendType> for NetworkBackendType {
+	fn into(self) -> sc_network::config::NetworkBackendType {
+		match self {
+			Self::Libp2p => sc_network::config::NetworkBackendType::Libp2p,
+			Self::Litep2p => sc_network::config::NetworkBackendType::Litep2p,
+		}
+	}
+}
diff --git a/substrate/client/cli/src/commands/build_spec_cmd.rs b/substrate/client/cli/src/commands/build_spec_cmd.rs
index aa5314f9cf5a4c244d9ca26f343f158e94d00908..df8c6b7d0baac2d7ba77a22752925c37daf0b85a 100644
--- a/substrate/client/cli/src/commands/build_spec_cmd.rs
+++ b/substrate/client/cli/src/commands/build_spec_cmd.rs
@@ -67,7 +67,7 @@ impl BuildSpecCmd {
 			let peer_id = keys.public().to_peer_id();
 			let addr = MultiaddrWithPeerId {
 				multiaddr: build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(30333u16)],
-				peer_id,
+				peer_id: peer_id.into(),
 			};
 			spec.add_boot_node(addr)
 		}
diff --git a/substrate/client/cli/src/params/network_params.rs b/substrate/client/cli/src/params/network_params.rs
index 94efb4280912041dcea512096226824012114b9b..748b84a50d2ae1cc60a8b5195e14e17615bc865a 100644
--- a/substrate/client/cli/src/params/network_params.rs
+++ b/substrate/client/cli/src/params/network_params.rs
@@ -16,7 +16,10 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-use crate::{arg_enums::SyncMode, params::node_key_params::NodeKeyParams};
+use crate::{
+	arg_enums::{NetworkBackendType, SyncMode},
+	params::node_key_params::NodeKeyParams,
+};
 use clap::Args;
 use sc_network::{
 	config::{
@@ -166,6 +169,20 @@ pub struct NetworkParams {
 	/// and observe block requests timing out.
 	#[arg(long, value_name = "COUNT", default_value_t = 64)]
 	pub max_blocks_per_request: u32,
+
+	/// Network backend used for P2P networking.
+	///
+	/// litep2p network backend is considered experimental and isn't as stable as the libp2p
+	/// network backend.
+	#[arg(
+		long,
+		value_enum,
+		value_name = "NETWORK_BACKEND",
+		default_value_t = NetworkBackendType::Libp2p,
+		ignore_case = true,
+		verbatim_doc_comment
+	)]
+	pub network_backend: NetworkBackendType,
 }
 
 impl NetworkParams {
@@ -261,6 +278,7 @@ impl NetworkParams {
 			yamux_window_size: None,
 			ipfs_server: self.ipfs_server,
 			sync_mode: self.sync.into(),
+			network_backend: self.network_backend.into(),
 		}
 	}
 }
diff --git a/substrate/client/consensus/beefy/Cargo.toml b/substrate/client/consensus/beefy/Cargo.toml
index c1d57baa394a379226a6ba8625143328f08a0235..7b61b3c6c01f39003d9212fcfc0ea4abc8963bd2 100644
--- a/substrate/client/consensus/beefy/Cargo.toml
+++ b/substrate/client/consensus/beefy/Cargo.toml
@@ -28,6 +28,7 @@ sc-consensus = { path = "../common" }
 sc-network = { path = "../../network" }
 sc-network-gossip = { path = "../../network-gossip" }
 sc-network-sync = { path = "../../network/sync" }
+sc-network-types = { path = "../../network/types" }
 sc-utils = { path = "../../utils" }
 sp-api = { path = "../../../primitives/api" }
 sp-application-crypto = { path = "../../../primitives/application-crypto" }
diff --git a/substrate/client/consensus/beefy/src/communication/gossip.rs b/substrate/client/consensus/beefy/src/communication/gossip.rs
index d31559131cc175fe6125c459c855fa6971e7ff4f..947fe13856f476042858b1a5698aa5333b1d287b 100644
--- a/substrate/client/consensus/beefy/src/communication/gossip.rs
+++ b/substrate/client/consensus/beefy/src/communication/gossip.rs
@@ -18,8 +18,9 @@
 
 use std::{collections::BTreeSet, sync::Arc, time::Duration};
 
-use sc_network::{NetworkPeers, PeerId, ReputationChange};
+use sc_network::{NetworkPeers, ReputationChange};
 use sc_network_gossip::{MessageIntent, ValidationResult, Validator, ValidatorContext};
+use sc_network_types::PeerId;
 use sp_runtime::traits::{Block, Hash, Header, NumberFor};
 
 use codec::{Decode, DecodeAll, Encode};
@@ -506,6 +507,7 @@ pub(crate) mod tests {
 		}
 	}
 
+	#[async_trait::async_trait]
 	impl NetworkPeers for TestNetwork {
 		fn set_authorized_peers(&self, _: std::collections::HashSet<PeerId>) {
 			unimplemented!()
@@ -581,6 +583,10 @@ pub(crate) mod tests {
 		fn peer_role(&self, _: PeerId, _: Vec<u8>) -> Option<sc_network::ObservedRole> {
 			unimplemented!()
 		}
+
+		async fn reserved_peers(&self) -> Result<Vec<PeerId>, ()> {
+			unimplemented!();
+		}
 	}
 
 	struct TestContext;
@@ -591,11 +597,11 @@ pub(crate) mod tests {
 
 		fn broadcast_message(&mut self, _topic: B::Hash, _message: Vec<u8>, _force: bool) {}
 
-		fn send_message(&mut self, _who: &sc_network::PeerId, _message: Vec<u8>) {
+		fn send_message(&mut self, _who: &sc_network_types::PeerId, _message: Vec<u8>) {
 			unimplemented!()
 		}
 
-		fn send_topic(&mut self, _who: &sc_network::PeerId, _topic: B::Hash, _force: bool) {
+		fn send_topic(&mut self, _who: &sc_network_types::PeerId, _topic: B::Hash, _force: bool) {
 			unimplemented!()
 		}
 	}
@@ -772,7 +778,7 @@ pub(crate) mod tests {
 			Arc::new(TestNetwork::new().0),
 		);
 		gv.update_filter(GossipFilterCfg { start: 0, end: 10, validator_set: &validator_set });
-		let sender = sc_network::PeerId::random();
+		let sender = sc_network_types::PeerId::random();
 		let topic = Default::default();
 		let intent = MessageIntent::Broadcast;
 
@@ -852,7 +858,7 @@ pub(crate) mod tests {
 			Arc::new(TestNetwork::new().0),
 		);
 		gv.update_filter(GossipFilterCfg { start: 0, end: 10, validator_set: &validator_set });
-		let sender = sc_network::PeerId::random();
+		let sender = sc_network_types::PeerId::random();
 		let topic = Default::default();
 
 		let vote = dummy_vote(1);
diff --git a/substrate/client/consensus/beefy/src/communication/mod.rs b/substrate/client/consensus/beefy/src/communication/mod.rs
index 09c540e3b8a8459826e7e338c6dc0ee83aa1823a..3c93368be3635a9f2b1fb7b03253ae66e1af1b39 100644
--- a/substrate/client/consensus/beefy/src/communication/mod.rs
+++ b/substrate/client/consensus/beefy/src/communication/mod.rs
@@ -65,17 +65,28 @@ pub(crate) mod beefy_protocol_name {
 /// Returns the configuration value to put in
 /// [`sc_network::config::FullNetworkConfiguration`].
 /// For standard protocol name see [`beefy_protocol_name::gossip_protocol_name`].
-pub fn beefy_peers_set_config(
+pub fn beefy_peers_set_config<
+	B: sp_runtime::traits::Block,
+	N: sc_network::NetworkBackend<B, <B as sp_runtime::traits::Block>::Hash>,
+>(
 	gossip_protocol_name: sc_network::ProtocolName,
-) -> (sc_network::config::NonDefaultSetConfig, Box<dyn sc_network::NotificationService>) {
-	let (mut cfg, notification_service) = sc_network::config::NonDefaultSetConfig::new(
+	metrics: sc_network::service::NotificationMetrics,
+	peer_store_handle: std::sync::Arc<dyn sc_network::peer_store::PeerStoreProvider>,
+) -> (N::NotificationProtocolConfig, Box<dyn sc_network::NotificationService>) {
+	let (cfg, notification_service) = N::notification_config(
 		gossip_protocol_name,
 		Vec::new(),
 		1024 * 1024,
 		None,
-		Default::default(),
+		sc_network::config::SetConfig {
+			in_peers: 25,
+			out_peers: 25,
+			reserved_nodes: Vec::new(),
+			non_reserved_mode: sc_network::config::NonReservedPeerMode::Accept,
+		},
+		metrics,
+		peer_store_handle,
 	);
-	cfg.allow_non_reserved(25, 25);
 	(cfg, notification_service)
 }
 
diff --git a/substrate/client/consensus/beefy/src/communication/peers.rs b/substrate/client/consensus/beefy/src/communication/peers.rs
index 8f2d5cc90a1d927b21cb0a0b6de8efee5a0d3a59..2d801aceaa8a76bf1c31820493bbd20b4f93f8da 100644
--- a/substrate/client/consensus/beefy/src/communication/peers.rs
+++ b/substrate/client/consensus/beefy/src/communication/peers.rs
@@ -18,7 +18,8 @@
 
 //! Logic for keeping track of BEEFY peers.
 
-use sc_network::{PeerId, ReputationChange};
+use sc_network::ReputationChange;
+use sc_network_types::PeerId;
 use sp_runtime::traits::{Block, NumberFor, Zero};
 use std::collections::{HashMap, VecDeque};
 
diff --git a/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs b/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs
index ce184769fa7748368acb25db3228e4547ade4a5b..7893066a01e06c967893bdedbd25ad3c2da9c3bd 100644
--- a/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs
+++ b/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs
@@ -21,9 +21,10 @@ use futures::{channel::oneshot, StreamExt};
 use log::{debug, trace};
 use sc_client_api::BlockBackend;
 use sc_network::{
-	config as netconfig, config::RequestResponseConfig, types::ProtocolName, PeerId,
-	ReputationChange,
+	config as netconfig, service::traits::RequestResponseConfig, types::ProtocolName,
+	NetworkBackend, ReputationChange,
 };
+use sc_network_types::PeerId;
 use sp_consensus_beefy::BEEFY_ENGINE_ID;
 use sp_runtime::traits::Block;
 use std::{marker::PhantomData, sync::Arc};
@@ -139,15 +140,15 @@ where
 	Client: BlockBackend<B> + Send + Sync,
 {
 	/// Create a new [`BeefyJustifsRequestHandler`].
-	pub fn new<Hash: AsRef<[u8]>>(
+	pub fn new<Hash: AsRef<[u8]>, Network: NetworkBackend<B, <B as Block>::Hash>>(
 		genesis_hash: Hash,
 		fork_id: Option<&str>,
 		client: Arc<Client>,
 		prometheus_registry: Option<prometheus::Registry>,
-	) -> (Self, RequestResponseConfig) {
-		let (request_receiver, config) =
-			on_demand_justifications_protocol_config(genesis_hash, fork_id);
-		let justif_protocol_name = config.name.clone();
+	) -> (Self, Network::RequestResponseProtocolConfig) {
+		let (request_receiver, config): (_, Network::RequestResponseProtocolConfig) =
+			on_demand_justifications_protocol_config::<_, _, Network>(genesis_hash, fork_id);
+		let justif_protocol_name = config.protocol_name().clone();
 		let metrics = register_metrics(prometheus_registry);
 		(
 			Self { request_receiver, justif_protocol_name, client, metrics, _block: PhantomData },
diff --git a/substrate/client/consensus/beefy/src/communication/request_response/mod.rs b/substrate/client/consensus/beefy/src/communication/request_response/mod.rs
index 4bad3b061c8e9e67d61685e8f59f24bf983faa83..3777d47719c57e65b664e37e612bbaa794effbf0 100644
--- a/substrate/client/consensus/beefy/src/communication/request_response/mod.rs
+++ b/substrate/client/consensus/beefy/src/communication/request_response/mod.rs
@@ -26,7 +26,8 @@ pub use incoming_requests_handler::BeefyJustifsRequestHandler;
 use std::time::Duration;
 
 use codec::{Decode, Encode, Error as CodecError};
-use sc_network::{config::RequestResponseConfig, PeerId};
+use sc_network::NetworkBackend;
+use sc_network_types::PeerId;
 use sp_runtime::traits::{Block, NumberFor};
 
 use crate::communication::{beefy_protocol_name::justifications_protocol_name, peers::PeerReport};
@@ -47,23 +48,27 @@ const BEEFY_SYNC_LOG_TARGET: &str = "beefy::sync";
 /// `ProtocolConfig`.
 ///
 /// Consider using [`BeefyJustifsRequestHandler`] instead of this low-level function.
-pub(crate) fn on_demand_justifications_protocol_config<Hash: AsRef<[u8]>>(
+pub(crate) fn on_demand_justifications_protocol_config<
+	Hash: AsRef<[u8]>,
+	B: Block,
+	Network: NetworkBackend<B, <B as Block>::Hash>,
+>(
 	genesis_hash: Hash,
 	fork_id: Option<&str>,
-) -> (IncomingRequestReceiver, RequestResponseConfig) {
+) -> (IncomingRequestReceiver, Network::RequestResponseProtocolConfig) {
 	let name = justifications_protocol_name(genesis_hash, fork_id);
 	let fallback_names = vec![];
 	let (tx, rx) = async_channel::bounded(JUSTIF_CHANNEL_SIZE);
 	let rx = IncomingRequestReceiver::new(rx);
-	let cfg = RequestResponseConfig {
+	let cfg = Network::request_response_config(
 		name,
 		fallback_names,
-		max_request_size: 32,
-		max_response_size: MAX_RESPONSE_SIZE,
+		32,
+		MAX_RESPONSE_SIZE,
 		// We are connected to all validators:
-		request_timeout: JUSTIF_REQUEST_TIMEOUT,
-		inbound_queue: Some(tx),
-	};
+		JUSTIF_REQUEST_TIMEOUT,
+		Some(tx),
+	);
 	(rx, cfg)
 }
 
diff --git a/substrate/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs b/substrate/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs
index 992b9fa08c093cfbb6bc65e83915997253c6c1dd..2ab072960900817155b42434f69f29236fcda54b 100644
--- a/substrate/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs
+++ b/substrate/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs
@@ -24,8 +24,9 @@ use log::{debug, warn};
 use parking_lot::Mutex;
 use sc_network::{
 	request_responses::{IfDisconnected, RequestFailure},
-	NetworkRequest, PeerId, ProtocolName,
+	NetworkRequest, ProtocolName,
 };
+use sc_network_types::PeerId;
 use sp_consensus_beefy::{ecdsa_crypto::AuthorityId, ValidatorSet};
 use sp_runtime::traits::{Block, NumberFor};
 use std::{collections::VecDeque, result::Result, sync::Arc};
diff --git a/substrate/client/consensus/beefy/src/tests.rs b/substrate/client/consensus/beefy/src/tests.rs
index d4ec6ffd497b441dbcf02d7e542ed5549c0fc50f..9b13d1da6d7da38b74864c9a750f515ef9472230 100644
--- a/substrate/client/consensus/beefy/src/tests.rs
+++ b/substrate/client/consensus/beefy/src/tests.rs
@@ -125,7 +125,11 @@ impl BeefyTestNet {
 		let mut net = BeefyTestNet { peers: Vec::with_capacity(n_authority), beefy_genesis };
 
 		for i in 0..n_authority {
-			let (rx, cfg) = on_demand_justifications_protocol_config(GENESIS_HASH, None);
+			let (rx, cfg) = on_demand_justifications_protocol_config::<
+				_,
+				Block,
+				sc_network::NetworkWorker<_, _>,
+			>(GENESIS_HASH, None);
 			let justif_protocol_name = cfg.name.clone();
 
 			net.add_authority_peer(vec![cfg]);
diff --git a/substrate/client/consensus/common/Cargo.toml b/substrate/client/consensus/common/Cargo.toml
index b2738a1d12d9735e21152e9129c9b33b907b6df9..6d642ec78fefa88692503152595f8548d715426c 100644
--- a/substrate/client/consensus/common/Cargo.toml
+++ b/substrate/client/consensus/common/Cargo.toml
@@ -19,7 +19,6 @@ targets = ["x86_64-unknown-linux-gnu"]
 async-trait = "0.1.79"
 futures = { version = "0.3.30", features = ["thread-pool"] }
 futures-timer = "3.0.1"
-libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] }
 log = { workspace = true, default-features = true }
 mockall = "0.11.3"
 parking_lot = "0.12.1"
@@ -27,6 +26,7 @@ serde = { features = ["derive"], workspace = true, default-features = true }
 thiserror = { workspace = true }
 prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" }
 sc-client-api = { path = "../../api" }
+sc-network-types = { path = "../../network/types" }
 sc-utils = { path = "../../utils" }
 sp-api = { path = "../../../primitives/api" }
 sp-blockchain = { path = "../../../primitives/blockchain" }
diff --git a/substrate/client/consensus/common/src/import_queue.rs b/substrate/client/consensus/common/src/import_queue.rs
index 062e244a912ef824b3a85574c5195889e2556a89..371465536c35a5feea5d6e157eb3ee8b019b94db 100644
--- a/substrate/client/consensus/common/src/import_queue.rs
+++ b/substrate/client/consensus/common/src/import_queue.rs
@@ -64,7 +64,7 @@ pub type BoxJustificationImport<B> =
 	Box<dyn JustificationImport<B, Error = ConsensusError> + Send + Sync>;
 
 /// Maps to the RuntimeOrigin used by the network.
-pub type RuntimeOrigin = libp2p_identity::PeerId;
+pub type RuntimeOrigin = sc_network_types::PeerId;
 
 /// Block data used by the queue.
 #[derive(Debug, PartialEq, Eq, Clone)]
diff --git a/substrate/client/consensus/common/src/import_queue/basic_queue.rs b/substrate/client/consensus/common/src/import_queue/basic_queue.rs
index 125d4f104c1918bf5a54e5a13f67b4603a3ae302..f4f618d1b31825e17987130b20a96c21e475b098 100644
--- a/substrate/client/consensus/common/src/import_queue/basic_queue.rs
+++ b/substrate/client/consensus/common/src/import_queue/basic_queue.rs
@@ -632,7 +632,7 @@ mod tests {
 			let hash = Hash::random();
 			finality_sender
 				.unbounded_send(worker_messages::ImportJustification(
-					libp2p_identity::PeerId::random(),
+					sc_network_types::PeerId::random(),
 					hash,
 					1,
 					(*b"TEST", Vec::new()),
diff --git a/substrate/client/consensus/grandpa/Cargo.toml b/substrate/client/consensus/grandpa/Cargo.toml
index 797b4ea35b29872e201b8abd6ae5781526f72108..e59c17b0680374d7195e823514d02f807f6f3cbf 100644
--- a/substrate/client/consensus/grandpa/Cargo.toml
+++ b/substrate/client/consensus/grandpa/Cargo.toml
@@ -41,6 +41,7 @@ sc-network = { path = "../../network" }
 sc-network-gossip = { path = "../../network-gossip" }
 sc-network-common = { path = "../../network/common" }
 sc-network-sync = { path = "../../network/sync" }
+sc-network-types = { path = "../../network/types" }
 sc-telemetry = { path = "../../telemetry" }
 sc-utils = { path = "../../utils" }
 sp-api = { path = "../../../primitives/api" }
diff --git a/substrate/client/consensus/grandpa/src/communication/gossip.rs b/substrate/client/consensus/grandpa/src/communication/gossip.rs
index 3a78b157d5b1bcb00b7ff83afb407a0015ed529f..88821faf0aba1a67ea65fe0e2a553e89eac40f47 100644
--- a/substrate/client/consensus/grandpa/src/communication/gossip.rs
+++ b/substrate/client/consensus/grandpa/src/communication/gossip.rs
@@ -90,9 +90,10 @@ use log::{debug, trace};
 use parity_scale_codec::{Decode, DecodeAll, Encode};
 use prometheus_endpoint::{register, CounterVec, Opts, PrometheusError, Registry, U64};
 use rand::seq::SliceRandom;
-use sc_network::{PeerId, ReputationChange};
+use sc_network::ReputationChange;
 use sc_network_common::role::ObservedRole;
 use sc_network_gossip::{MessageIntent, ValidatorContext};
+use sc_network_types::PeerId;
 use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG};
 use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender};
 use sp_consensus_grandpa::AuthorityId;
diff --git a/substrate/client/consensus/grandpa/src/communication/mod.rs b/substrate/client/consensus/grandpa/src/communication/mod.rs
index 6e87d6bf9a28924811efab920b98da7c51480481..cf78e1d4cf089a1097d662756bcf05f5528b48e0 100644
--- a/substrate/client/consensus/grandpa/src/communication/mod.rs
+++ b/substrate/client/consensus/grandpa/src/communication/mod.rs
@@ -488,7 +488,7 @@ impl<B: BlockT, N: Network<B>, S: Syncing<B>> NetworkBridge<B, N, S> {
 	/// connected to (NOTE: this assumption will change in the future #3629).
 	pub(crate) fn set_sync_fork_request(
 		&self,
-		peers: Vec<sc_network::PeerId>,
+		peers: Vec<sc_network_types::PeerId>,
 		hash: B::Hash,
 		number: NumberFor<B>,
 	) {
diff --git a/substrate/client/consensus/grandpa/src/communication/periodic.rs b/substrate/client/consensus/grandpa/src/communication/periodic.rs
index 9d0e76b7c80bd2911d49d14ddd3923f16dd6b211..70218ea33b4ed2d1fbe649ff04b7b9ca676d2a4d 100644
--- a/substrate/client/consensus/grandpa/src/communication/periodic.rs
+++ b/substrate/client/consensus/grandpa/src/communication/periodic.rs
@@ -27,7 +27,7 @@ use std::{
 	time::Duration,
 };
 
-use sc_network::PeerId;
+use sc_network_types::PeerId;
 use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender};
 use sp_runtime::traits::{Block as BlockT, NumberFor};
 
@@ -44,7 +44,7 @@ impl<B: BlockT> NeighborPacketSender<B> {
 	/// Send a neighbor packet for the background worker to gossip to peers.
 	pub fn send(
 		&self,
-		who: Vec<sc_network::PeerId>,
+		who: Vec<sc_network_types::PeerId>,
 		neighbor_packet: NeighborPacket<NumberFor<B>>,
 	) {
 		if let Err(err) = self.0.unbounded_send((who, neighbor_packet)) {
diff --git a/substrate/client/consensus/grandpa/src/communication/tests.rs b/substrate/client/consensus/grandpa/src/communication/tests.rs
index fe24fb3cb20ea055e8ebfe385a974f57af5e4396..40d901b2fec68449a29e6f4c2cf6ea03fa504a9e 100644
--- a/substrate/client/consensus/grandpa/src/communication/tests.rs
+++ b/substrate/client/consensus/grandpa/src/communication/tests.rs
@@ -30,14 +30,14 @@ use sc_network::{
 	event::Event as NetworkEvent,
 	service::traits::{Direction, MessageSink, NotificationEvent, NotificationService},
 	types::ProtocolName,
-	Multiaddr, NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers,
-	NetworkSyncForkRequest, NotificationSenderError, NotificationSenderT as NotificationSender,
-	PeerId, ReputationChange,
+	Multiaddr, NetworkBlock, NetworkEventStream, NetworkPeers, NetworkSyncForkRequest,
+	ReputationChange,
 };
 use sc_network_common::role::{ObservedRole, Roles};
 use sc_network_gossip::Validator;
 use sc_network_sync::{SyncEvent as SyncStreamEvent, SyncEventStream};
 use sc_network_test::{Block, Hash};
+use sc_network_types::PeerId;
 use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender};
 use sp_consensus_grandpa::AuthorityList;
 use sp_keyring::Ed25519Keyring;
@@ -62,6 +62,7 @@ pub(crate) struct TestNetwork {
 	sender: TracingUnboundedSender<Event>,
 }
 
+#[async_trait::async_trait]
 impl NetworkPeers for TestNetwork {
 	fn set_authorized_peers(&self, _peers: HashSet<PeerId>) {
 		unimplemented!();
@@ -134,6 +135,10 @@ impl NetworkPeers for TestNetwork {
 			.ok()
 			.and_then(|role| Some(ObservedRole::from(role)))
 	}
+
+	async fn reserved_peers(&self) -> Result<Vec<PeerId>, ()> {
+		unimplemented!();
+	}
 }
 
 impl NetworkEventStream for TestNetwork {
@@ -147,24 +152,6 @@ impl NetworkEventStream for TestNetwork {
 	}
 }
 
-impl NetworkNotification for TestNetwork {
-	fn write_notification(&self, target: PeerId, _protocol: ProtocolName, message: Vec<u8>) {
-		let _ = self.sender.unbounded_send(Event::WriteNotification(target, message));
-	}
-
-	fn notification_sender(
-		&self,
-		_target: PeerId,
-		_protocol: ProtocolName,
-	) -> Result<Box<dyn NotificationSender>, NotificationSenderError> {
-		unimplemented!();
-	}
-
-	fn set_notification_handshake(&self, _protocol: ProtocolName, _handshake: Vec<u8>) {
-		unimplemented!();
-	}
-}
-
 impl NetworkBlock<Hash, NumberFor<Block>> for TestNetwork {
 	fn announce_block(&self, hash: Hash, _data: Option<Vec<u8>>) {
 		let _ = self.sender.unbounded_send(Event::Announce(hash));
@@ -185,12 +172,7 @@ impl sc_network_gossip::ValidatorContext<Block> for TestNetwork {
 	fn broadcast_message(&mut self, _: Hash, _: Vec<u8>, _: bool) {}
 
 	fn send_message(&mut self, who: &PeerId, data: Vec<u8>) {
-		<Self as NetworkNotification>::write_notification(
-			self,
-			*who,
-			grandpa_protocol_name::NAME.into(),
-			data,
-		);
+		let _ = self.sender.unbounded_send(Event::WriteNotification(*who, data));
 	}
 
 	fn send_topic(&mut self, _: &PeerId, _: Hash, _: bool) {}
@@ -241,13 +223,13 @@ impl NotificationService for TestNotificationService {
 	}
 
 	/// Send synchronous `notification` to `peer`.
-	fn send_sync_notification(&self, peer: &PeerId, notification: Vec<u8>) {
+	fn send_sync_notification(&mut self, peer: &PeerId, notification: Vec<u8>) {
 		let _ = self.sender.unbounded_send(Event::WriteNotification(*peer, notification));
 	}
 
 	/// Send asynchronous `notification` to `peer`, allowing sender to exercise backpressure.
 	async fn send_async_notification(
-		&self,
+		&mut self,
 		_peer: &PeerId,
 		_notification: Vec<u8>,
 	) -> Result<(), sc_network::error::Error> {
diff --git a/substrate/client/consensus/grandpa/src/lib.rs b/substrate/client/consensus/grandpa/src/lib.rs
index b7cfc9f5b6019a5cea789dfcf385d7efc4e196e7..03452bd07c757fa9dac9b130a149aa264eb04834 100644
--- a/substrate/client/consensus/grandpa/src/lib.rs
+++ b/substrate/client/consensus/grandpa/src/lib.rs
@@ -67,7 +67,7 @@ use sc_client_api::{
 	BlockchainEvents, CallExecutor, ExecutorProvider, Finalizer, LockImportRun, StorageProvider,
 };
 use sc_consensus::BlockImport;
-use sc_network::{types::ProtocolName, NotificationService};
+use sc_network::{types::ProtocolName, NetworkBackend, NotificationService};
 use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO};
 use sc_transaction_pool_api::OffchainTransactionPoolFactory;
 use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver};
@@ -343,7 +343,7 @@ pub(crate) trait BlockSyncRequester<Block: BlockT> {
 	/// connected to (NOTE: this assumption will change in the future #3629).
 	fn set_sync_fork_request(
 		&self,
-		peers: Vec<sc_network::PeerId>,
+		peers: Vec<sc_network_types::PeerId>,
 		hash: Block::Hash,
 		number: NumberFor<Block>,
 	);
@@ -357,7 +357,7 @@ where
 {
 	fn set_sync_fork_request(
 		&self,
-		peers: Vec<sc_network::PeerId>,
+		peers: Vec<sc_network_types::PeerId>,
 		hash: Block::Hash,
 		number: NumberFor<Block>,
 	) {
@@ -707,11 +707,13 @@ pub struct GrandpaParams<Block: BlockT, C, N, S, SC, VR> {
 /// Returns the configuration value to put in
 /// [`sc_network::config::FullNetworkConfiguration`].
 /// For standard protocol name see [`crate::protocol_standard_name`].
-pub fn grandpa_peers_set_config(
+pub fn grandpa_peers_set_config<B: BlockT, N: NetworkBackend<B, <B as BlockT>::Hash>>(
 	protocol_name: ProtocolName,
-) -> (sc_network::config::NonDefaultSetConfig, Box<dyn NotificationService>) {
+	metrics: sc_network::service::NotificationMetrics,
+	peer_store_handle: Arc<dyn sc_network::peer_store::PeerStoreProvider>,
+) -> (N::NotificationProtocolConfig, Box<dyn NotificationService>) {
 	use communication::grandpa_protocol_name;
-	sc_network::config::NonDefaultSetConfig::new(
+	N::notification_config(
 		protocol_name,
 		grandpa_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect(),
 		// Notifications reach ~256kiB in size at the time of writing on Kusama and Polkadot.
@@ -723,6 +725,8 @@ pub fn grandpa_peers_set_config(
 			reserved_nodes: Vec::new(),
 			non_reserved_mode: sc_network::config::NonReservedPeerMode::Deny,
 		},
+		metrics,
+		peer_store_handle,
 	)
 }
 
diff --git a/substrate/client/consensus/grandpa/src/observer.rs b/substrate/client/consensus/grandpa/src/observer.rs
index 608ff5e46a0e825a6b7480a29258574f121e4aa2..90ad63ac78f3bec75dcc2faa6ce810573dbb9a95 100644
--- a/substrate/client/consensus/grandpa/src/observer.rs
+++ b/substrate/client/consensus/grandpa/src/observer.rs
@@ -410,7 +410,7 @@ mod tests {
 		communication::tests::{make_test_network, Event},
 	};
 	use assert_matches::assert_matches;
-	use sc_network::PeerId;
+	use sc_network_types::PeerId;
 	use sc_utils::mpsc::tracing_unbounded;
 	use sp_blockchain::HeaderBackend as _;
 	use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt};
diff --git a/substrate/client/consensus/grandpa/src/until_imported.rs b/substrate/client/consensus/grandpa/src/until_imported.rs
index f3874086b58143d185f87ad9c6e0eb009acf16e9..4103ee5279251d648f9e97168b952c582237d433 100644
--- a/substrate/client/consensus/grandpa/src/until_imported.rs
+++ b/substrate/client/consensus/grandpa/src/until_imported.rs
@@ -632,7 +632,7 @@ mod tests {
 	impl BlockSyncRequesterT<Block> for TestBlockSyncRequester {
 		fn set_sync_fork_request(
 			&self,
-			_peers: Vec<sc_network::PeerId>,
+			_peers: Vec<sc_network_types::PeerId>,
 			hash: Hash,
 			number: NumberFor<Block>,
 		) {
diff --git a/substrate/client/mixnet/Cargo.toml b/substrate/client/mixnet/Cargo.toml
index 3beeae9f9b140549b09a7e05b5ffb5c76da40ebd..65b81bda4b08de83101a01880d1aaac1a358e1ad 100644
--- a/substrate/client/mixnet/Cargo.toml
+++ b/substrate/client/mixnet/Cargo.toml
@@ -23,13 +23,13 @@ bytes = "1"
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 futures = "0.3.30"
 futures-timer = "3.0.2"
-libp2p-identity = { version = "0.1.3", features = ["peerid"] }
 log = { workspace = true, default-features = true }
 mixnet = "0.7.0"
 multiaddr = "0.17.1"
 parking_lot = "0.12.1"
 sc-client-api = { path = "../api" }
 sc-network = { path = "../network" }
+sc-network-types = { path = "../network/types" }
 sc-transaction-pool-api = { path = "../transaction-pool/api" }
 sp-api = { path = "../../primitives/api" }
 sp-consensus = { path = "../../primitives/consensus/common" }
diff --git a/substrate/client/mixnet/src/packet_dispatcher.rs b/substrate/client/mixnet/src/packet_dispatcher.rs
index 420e0c68847d86cb3cb76db3908994da44c09cb6..a7093527a0d9e7e3caaa115ffeeee14ec475f80b 100644
--- a/substrate/client/mixnet/src/packet_dispatcher.rs
+++ b/substrate/client/mixnet/src/packet_dispatcher.rs
@@ -20,11 +20,11 @@
 
 use super::peer_id::{from_core_peer_id, to_core_peer_id};
 use arrayvec::ArrayVec;
-use libp2p_identity::PeerId;
 use log::{debug, warn};
 use mixnet::core::{AddressedPacket, NetworkStatus, Packet, PeerId as CorePeerId};
 use parking_lot::Mutex;
 use sc_network::NotificationService;
+use sc_network_types::PeerId;
 use std::{collections::HashMap, future::Future, sync::Arc};
 
 const LOG_TARGET: &str = "mixnet";
diff --git a/substrate/client/mixnet/src/peer_id.rs b/substrate/client/mixnet/src/peer_id.rs
index 7984da8c75be77e245cd88b06451d38f443f904d..c9c53a0093c7a63473cd3c5b2bbefa4e37dc1510 100644
--- a/substrate/client/mixnet/src/peer_id.rs
+++ b/substrate/client/mixnet/src/peer_id.rs
@@ -16,21 +16,15 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-use libp2p_identity::PeerId;
 use mixnet::core::PeerId as CorePeerId;
+use sc_network_types::PeerId;
 
 /// Convert a libp2p [`PeerId`] into a mixnet core [`PeerId`](CorePeerId).
 ///
 /// This will succeed only if `peer_id` is an Ed25519 public key ("hashed" using the identity
 /// hasher). Returns `None` on failure.
 pub fn to_core_peer_id(peer_id: &PeerId) -> Option<CorePeerId> {
-	let hash = peer_id.as_ref();
-	if hash.code() != 0 {
-		// Hash is not identity
-		return None
-	}
-	let public = libp2p_identity::PublicKey::try_decode_protobuf(hash.digest()).ok()?;
-	public.try_into_ed25519().ok().map(|public| public.to_bytes())
+	peer_id.into_ed25519()
 }
 
 /// Convert a mixnet core [`PeerId`](CorePeerId) into a libp2p [`PeerId`].
@@ -38,7 +32,5 @@ pub fn to_core_peer_id(peer_id: &PeerId) -> Option<CorePeerId> {
 /// This will succeed only if `peer_id` represents a point on the Ed25519 curve. Returns `None` on
 /// failure.
 pub fn from_core_peer_id(core_peer_id: &CorePeerId) -> Option<PeerId> {
-	let public = libp2p_identity::ed25519::PublicKey::try_from_bytes(core_peer_id).ok()?;
-	let public: libp2p_identity::PublicKey = public.into();
-	Some(public.into())
+	PeerId::from_ed25519(core_peer_id)
 }
diff --git a/substrate/client/mixnet/src/protocol.rs b/substrate/client/mixnet/src/protocol.rs
index 955502a4856a9aa8f143baa1cee7323ed005a078..c3139fd235a7abb692301bf9db7dad59999e4648 100644
--- a/substrate/client/mixnet/src/protocol.rs
+++ b/substrate/client/mixnet/src/protocol.rs
@@ -19,9 +19,12 @@
 use super::config::Config;
 use mixnet::core::PACKET_SIZE;
 use sc_network::{
-	config::{NonDefaultSetConfig, NonReservedPeerMode, SetConfig},
-	NotificationService, ProtocolName,
+	config::{NonReservedPeerMode, SetConfig},
+	peer_store::PeerStoreProvider,
+	service::NotificationMetrics,
+	NetworkBackend, NotificationService, ProtocolName,
 };
+use sp_runtime::traits::Block as BlockT;
 
 /// Returns the protocol name to use for the mixnet controlled by the given chain.
 pub fn protocol_name(genesis_hash: &[u8], fork_id: Option<&str>) -> ProtocolName {
@@ -34,26 +37,37 @@ pub fn protocol_name(genesis_hash: &[u8], fork_id: Option<&str>) -> ProtocolName
 }
 
 /// Returns the peers set configuration for the mixnet protocol.
-pub fn peers_set_config(
+pub fn peers_set_config<Block: BlockT, Network: NetworkBackend<Block, <Block as BlockT>::Hash>>(
 	name: ProtocolName,
 	config: &Config,
-) -> (NonDefaultSetConfig, Box<dyn NotificationService>) {
-	let (mut set_config, service) = NonDefaultSetConfig::new(
-		name,
-		Vec::new(),
-		PACKET_SIZE as u64,
-		None,
+	metrics: NotificationMetrics,
+	peerstore_handle: std::sync::Arc<dyn PeerStoreProvider>,
+) -> (Network::NotificationProtocolConfig, Box<dyn NotificationService>) {
+	let set_config = if config.substrate.num_gateway_slots != 0 {
+		// out_peers is always 0; we are only interested in connecting to mixnodes, which we do by
+		// setting them as reserved nodes
+		SetConfig {
+			in_peers: config.substrate.num_gateway_slots,
+			out_peers: 0,
+			reserved_nodes: Vec::new(),
+			non_reserved_mode: NonReservedPeerMode::Accept,
+		}
+	} else {
 		SetConfig {
 			in_peers: 0,
 			out_peers: 0,
 			reserved_nodes: Vec::new(),
 			non_reserved_mode: NonReservedPeerMode::Deny,
-		},
-	);
-	if config.substrate.num_gateway_slots != 0 {
-		// out_peers is always 0; we are only interested in connecting to mixnodes, which we do by
-		// setting them as reserved nodes
-		set_config.allow_non_reserved(config.substrate.num_gateway_slots, 0);
-	}
-	(set_config, service)
+		}
+	};
+
+	Network::notification_config(
+		name,
+		Vec::new(),
+		PACKET_SIZE as u64,
+		None,
+		set_config,
+		metrics,
+		peerstore_handle,
+	)
 }
diff --git a/substrate/client/mixnet/src/run.rs b/substrate/client/mixnet/src/run.rs
index 14d188df097721bc33c54199ce9ca7bd13c38cb2..5ed45a0e799ed7e8bb5ca030fcaa69b7515d427e 100644
--- a/substrate/client/mixnet/src/run.rs
+++ b/substrate/client/mixnet/src/run.rs
@@ -44,8 +44,8 @@ use mixnet::{
 };
 use sc_client_api::{BlockchainEvents, HeaderBackend};
 use sc_network::{
-	service::traits::{NotificationEvent, ValidationResult},
-	NetworkNotification, NetworkPeers, NetworkStateInfo, NotificationService, ProtocolName,
+	service::traits::{NetworkService, NotificationEvent, ValidationResult},
+	NetworkPeers, NetworkStateInfo, NotificationService, ProtocolName,
 };
 use sc_transaction_pool_api::{
 	LocalTransactionPool, OffchainTransactionPoolFactory, TransactionPool,
@@ -146,12 +146,12 @@ fn time_until(instant: Instant) -> Duration {
 
 /// Run the mixnet service. If `keystore` is `None`, the service will not attempt to register the
 /// local node as a mixnode, even if `config.register` is `true`.
-pub async fn run<B, C, S, N, P>(
+pub async fn run<B, C, S, P>(
 	config: Config,
 	mut api_backend: ApiBackend,
 	client: Arc<C>,
 	sync: Arc<S>,
-	network: Arc<N>,
+	network: Arc<dyn NetworkService>,
 	protocol_name: ProtocolName,
 	transaction_pool: Arc<P>,
 	keystore: Option<KeystorePtr>,
@@ -161,7 +161,6 @@ pub async fn run<B, C, S, N, P>(
 	C: BlockchainEvents<B> + ProvideRuntimeApi<B> + HeaderBackend<B>,
 	C::Api: MixnetApi<B>,
 	S: SyncOracle,
-	N: NetworkStateInfo + NetworkNotification + NetworkPeers,
 	P: TransactionPool<Block = B> + LocalTransactionPool<Block = B> + 'static,
 {
 	let local_peer_id = network.local_peer_id();
diff --git a/substrate/client/mixnet/src/sync_with_runtime.rs b/substrate/client/mixnet/src/sync_with_runtime.rs
index f3be96025410dbbbf28cb24197a6e09ff436cae9..46c2334ceb46d8ef7966b666e480e83075a51703 100644
--- a/substrate/client/mixnet/src/sync_with_runtime.rs
+++ b/substrate/client/mixnet/src/sync_with_runtime.rs
@@ -20,13 +20,13 @@
 //! runtime to the core mixnet state. It is called every time a block is finalised.
 
 use super::peer_id::from_core_peer_id;
-use libp2p_identity::PeerId;
 use log::{debug, info};
 use mixnet::core::{
 	Mixnet, Mixnode as CoreMixnode, MixnodesErr as CoreMixnodesErr, RelSessionIndex,
 	SessionPhase as CoreSessionPhase, SessionStatus as CoreSessionStatus,
 };
 use multiaddr::{multiaddr, Multiaddr, Protocol};
+use sc_network_types::PeerId;
 use sp_api::{ApiError, ApiRef};
 use sp_mixnet::{
 	runtime_api::MixnetApi,
diff --git a/substrate/client/network-gossip/Cargo.toml b/substrate/client/network-gossip/Cargo.toml
index 346e6bd6a5c6ff665ac36991c46b562a235de1cf..ad81381edea1800035a96b608681c5f3691d2d05 100644
--- a/substrate/client/network-gossip/Cargo.toml
+++ b/substrate/client/network-gossip/Cargo.toml
@@ -28,6 +28,7 @@ prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../..
 sc-network = { path = "../network" }
 sc-network-common = { path = "../network/common" }
 sc-network-sync = { path = "../network/sync" }
+sc-network-types = { path = "../network/types" }
 sp-runtime = { path = "../../primitives/runtime" }
 
 [dev-dependencies]
diff --git a/substrate/client/network-gossip/src/bridge.rs b/substrate/client/network-gossip/src/bridge.rs
index 1d6a4bdd0c086996ed45a7074a1a51bcf34f12d9..cd344d9196d873b820c11246f1518e81a87f8683 100644
--- a/substrate/client/network-gossip/src/bridge.rs
+++ b/substrate/client/network-gossip/src/bridge.rs
@@ -32,9 +32,9 @@ use futures::{
 	channel::mpsc::{channel, Receiver, Sender},
 	prelude::*,
 };
-use libp2p::PeerId;
 use log::trace;
 use prometheus_endpoint::Registry;
+use sc_network_types::PeerId;
 use sp_runtime::traits::Block as BlockT;
 use std::{
 	collections::{HashMap, VecDeque},
@@ -359,9 +359,7 @@ mod tests {
 	use sc_network::{
 		config::MultiaddrWithPeerId,
 		service::traits::{Direction, MessageSink, NotificationEvent},
-		Event, NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers,
-		NotificationSenderError, NotificationSenderT as NotificationSender, NotificationService,
-		Roles,
+		Event, NetworkBlock, NetworkEventStream, NetworkPeers, NotificationService, Roles,
 	};
 	use sc_network_common::role::ObservedRole;
 	use sc_network_sync::SyncEventStream;
@@ -381,6 +379,7 @@ mod tests {
 	#[derive(Clone, Default)]
 	struct TestNetworkInner {}
 
+	#[async_trait::async_trait]
 	impl NetworkPeers for TestNetwork {
 		fn set_authorized_peers(&self, _peers: HashSet<PeerId>) {
 			unimplemented!();
@@ -453,28 +452,14 @@ mod tests {
 				.ok()
 				.and_then(|role| Some(ObservedRole::from(role)))
 		}
-	}
 
-	impl NetworkEventStream for TestNetwork {
-		fn event_stream(&self, _name: &'static str) -> Pin<Box<dyn Stream<Item = Event> + Send>> {
+		async fn reserved_peers(&self) -> Result<Vec<PeerId>, ()> {
 			unimplemented!();
 		}
 	}
 
-	impl NetworkNotification for TestNetwork {
-		fn write_notification(&self, _target: PeerId, _protocol: ProtocolName, _message: Vec<u8>) {
-			unimplemented!();
-		}
-
-		fn notification_sender(
-			&self,
-			_target: PeerId,
-			_protocol: ProtocolName,
-		) -> Result<Box<dyn NotificationSender>, NotificationSenderError> {
-			unimplemented!();
-		}
-
-		fn set_notification_handshake(&self, _protocol: ProtocolName, _handshake: Vec<u8>) {
+	impl NetworkEventStream for TestNetwork {
+		fn event_stream(&self, _name: &'static str) -> Pin<Box<dyn Stream<Item = Event> + Send>> {
 			unimplemented!();
 		}
 	}
@@ -544,12 +529,12 @@ mod tests {
 			unimplemented!();
 		}
 
-		fn send_sync_notification(&self, _peer: &PeerId, _notification: Vec<u8>) {
+		fn send_sync_notification(&mut self, _peer: &PeerId, _notification: Vec<u8>) {
 			unimplemented!();
 		}
 
 		async fn send_async_notification(
-			&self,
+			&mut self,
 			_peer: &PeerId,
 			_notification: Vec<u8>,
 		) -> Result<(), sc_network::error::Error> {
diff --git a/substrate/client/network-gossip/src/lib.rs b/substrate/client/network-gossip/src/lib.rs
index a77141ec6f63b52cd04e8dd60a00243b49ec0570..e04ea2a91e7cbc59ae652dace7ede5398bedf01c 100644
--- a/substrate/client/network-gossip/src/lib.rs
+++ b/substrate/client/network-gossip/src/lib.rs
@@ -67,11 +67,9 @@ pub use self::{
 	validator::{DiscardAll, MessageIntent, ValidationResult, Validator, ValidatorContext},
 };
 
-use libp2p::{multiaddr, PeerId};
-use sc_network::{
-	types::ProtocolName, NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers,
-};
+use sc_network::{multiaddr, types::ProtocolName, NetworkBlock, NetworkEventStream, NetworkPeers};
 use sc_network_sync::SyncEventStream;
+use sc_network_types::PeerId;
 use sp_runtime::traits::{Block as BlockT, NumberFor};
 use std::iter;
 
@@ -80,7 +78,7 @@ mod state_machine;
 mod validator;
 
 /// Abstraction over a network.
-pub trait Network<B: BlockT>: NetworkPeers + NetworkEventStream + NetworkNotification {
+pub trait Network<B: BlockT>: NetworkPeers + NetworkEventStream {
 	fn add_set_reserved(&self, who: PeerId, protocol: ProtocolName) {
 		let addr =
 			iter::once(multiaddr::Protocol::P2p(who.into())).collect::<multiaddr::Multiaddr>();
@@ -97,7 +95,7 @@ pub trait Network<B: BlockT>: NetworkPeers + NetworkEventStream + NetworkNotific
 	}
 }
 
-impl<T, B: BlockT> Network<B> for T where T: NetworkPeers + NetworkEventStream + NetworkNotification {}
+impl<T, B: BlockT> Network<B> for T where T: NetworkPeers + NetworkEventStream {}
 
 /// Abstraction over the syncing subsystem.
 pub trait Syncing<B: BlockT>: SyncEventStream + NetworkBlock<B::Hash, NumberFor<B>> {}
diff --git a/substrate/client/network-gossip/src/state_machine.rs b/substrate/client/network-gossip/src/state_machine.rs
index 069d7cdba16599b4b4da0965a5d8e4588478d633..016afa95eceaed228012641af0982e103c51eb64 100644
--- a/substrate/client/network-gossip/src/state_machine.rs
+++ b/substrate/client/network-gossip/src/state_machine.rs
@@ -19,7 +19,7 @@
 use crate::{MessageIntent, Network, ValidationResult, Validator, ValidatorContext};
 
 use ahash::AHashSet;
-use libp2p::PeerId;
+use sc_network_types::PeerId;
 use schnellru::{ByLength, LruMap};
 
 use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64};
@@ -546,8 +546,7 @@ mod tests {
 	use futures::prelude::*;
 	use sc_network::{
 		config::MultiaddrWithPeerId, event::Event, service::traits::NotificationEvent, MessageSink,
-		NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers,
-		NotificationSenderError, NotificationSenderT as NotificationSender, ReputationChange,
+		NetworkBlock, NetworkEventStream, NetworkPeers, ReputationChange,
 	};
 	use sp_runtime::{
 		testing::{Block as RawBlock, ExtrinsicWrapper, H256},
@@ -608,6 +607,7 @@ mod tests {
 		peer_reports: Vec<(PeerId, ReputationChange)>,
 	}
 
+	#[async_trait::async_trait]
 	impl NetworkPeers for NoOpNetwork {
 		fn set_authorized_peers(&self, _peers: HashSet<PeerId>) {
 			unimplemented!();
@@ -680,28 +680,14 @@ mod tests {
 		fn peer_role(&self, _peer_id: PeerId, _handshake: Vec<u8>) -> Option<ObservedRole> {
 			None
 		}
-	}
 
-	impl NetworkEventStream for NoOpNetwork {
-		fn event_stream(&self, _name: &'static str) -> Pin<Box<dyn Stream<Item = Event> + Send>> {
+		async fn reserved_peers(&self) -> Result<Vec<PeerId>, ()> {
 			unimplemented!();
 		}
 	}
 
-	impl NetworkNotification for NoOpNetwork {
-		fn write_notification(&self, _target: PeerId, _protocol: ProtocolName, _message: Vec<u8>) {
-			unimplemented!();
-		}
-
-		fn notification_sender(
-			&self,
-			_target: PeerId,
-			_protocol: ProtocolName,
-		) -> Result<Box<dyn NotificationSender>, NotificationSenderError> {
-			unimplemented!();
-		}
-
-		fn set_notification_handshake(&self, _protocol: ProtocolName, _handshake: Vec<u8>) {
+	impl NetworkEventStream for NoOpNetwork {
+		fn event_stream(&self, _name: &'static str) -> Pin<Box<dyn Stream<Item = Event> + Send>> {
 			unimplemented!();
 		}
 	}
@@ -736,13 +722,13 @@ mod tests {
 		}
 
 		/// Send synchronous `notification` to `peer`.
-		fn send_sync_notification(&self, _peer: &PeerId, _notification: Vec<u8>) {
+		fn send_sync_notification(&mut self, _peer: &PeerId, _notification: Vec<u8>) {
 			unimplemented!();
 		}
 
 		/// Send asynchronous `notification` to `peer`, allowing sender to exercise backpressure.
 		async fn send_async_notification(
-			&self,
+			&mut self,
 			_peer: &PeerId,
 			_notification: Vec<u8>,
 		) -> Result<(), sc_network::error::Error> {
diff --git a/substrate/client/network-gossip/src/validator.rs b/substrate/client/network-gossip/src/validator.rs
index 2272efba50652492178fe7cfa2c0bcbd731e9138..974c6d9e0cdc0e10e95ab59440241b908fcf2992 100644
--- a/substrate/client/network-gossip/src/validator.rs
+++ b/substrate/client/network-gossip/src/validator.rs
@@ -16,8 +16,8 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-use libp2p::PeerId;
 use sc_network_common::role::ObservedRole;
+use sc_network_types::PeerId;
 use sp_runtime::traits::Block as BlockT;
 
 /// Validates consensus messages.
diff --git a/substrate/client/network/Cargo.toml b/substrate/client/network/Cargo.toml
index a891336d24172627fcacaa3efaa3f73f98b448a0..0879481a419930fae590e3ac68cde17f1ceb2207 100644
--- a/substrate/client/network/Cargo.toml
+++ b/substrate/client/network/Cargo.toml
@@ -16,12 +16,16 @@ workspace = true
 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
 
+[build-dependencies]
+prost-build = "0.11"
+
 [dependencies]
 array-bytes = "6.1"
 async-channel = "1.8.0"
 async-trait = "0.1.79"
 asynchronous-codec = "0.6"
 bytes = "1"
+cid = "0.9.0"
 codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] }
 either = "1.5.3"
 fnv = "1.0.6"
@@ -42,17 +46,23 @@ smallvec = "1.11.0"
 thiserror = { workspace = true }
 tokio = { version = "1.22.0", features = ["macros", "sync"] }
 tokio-stream = "0.1.7"
-unsigned-varint = { version = "0.7.1", features = ["asynchronous_codec", "futures"] }
+unsigned-varint = { version = "0.7.2", features = ["asynchronous_codec", "futures"] }
 zeroize = "1.4.3"
 prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" }
+prost = "0.11"
 sc-client-api = { path = "../api" }
 sc-network-common = { path = "common" }
+sc-network-types = { path = "types" }
 sc-utils = { path = "../utils" }
 sp-arithmetic = { path = "../../primitives/arithmetic" }
 sp-blockchain = { path = "../../primitives/blockchain" }
 sp-core = { path = "../../primitives/core" }
 sp-runtime = { path = "../../primitives/runtime" }
 wasm-timer = "0.2"
+litep2p = { git = "https://github.com/paritytech/litep2p", branch = "master" }
+once_cell = "1.18.0"
+void = "1.0.2"
+schnellru = "0.2.1"
 
 [dev-dependencies]
 assert_matches = "1.3"
@@ -63,8 +73,11 @@ tempfile = "3.1.0"
 tokio = { version = "1.22.0", features = ["macros"] }
 tokio-util = { version = "0.7.4", features = ["compat"] }
 tokio-test = "0.4.2"
+sc-block-builder = { path = "../block-builder" }
 sc-network-light = { path = "light" }
 sc-network-sync = { path = "sync" }
+sp-crypto-hashing = { path = "../../primitives/crypto/hashing" }
+sp-consensus = { path = "../../primitives/consensus/common" }
 sp-test-primitives = { path = "../../primitives/test-primitives" }
 sp-tracing = { path = "../../primitives/tracing" }
 substrate-test-runtime = { path = "../../test-utils/runtime" }
diff --git a/substrate/client/network/bitswap/Cargo.toml b/substrate/client/network/bitswap/Cargo.toml
deleted file mode 100644
index 587e2e70867ba8f055bf0a3e267c48fcf4ea0cb9..0000000000000000000000000000000000000000
--- a/substrate/client/network/bitswap/Cargo.toml
+++ /dev/null
@@ -1,42 +0,0 @@
-[package]
-description = "Substrate bitswap protocol"
-name = "sc-network-bitswap"
-version = "0.33.0"
-license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
-authors.workspace = true
-edition.workspace = true
-homepage = "https://substrate.io"
-repository.workspace = true
-documentation = "https://docs.rs/sc-network-bitswap"
-
-[lints]
-workspace = true
-
-[package.metadata.docs.rs]
-targets = ["x86_64-unknown-linux-gnu"]
-
-[build-dependencies]
-prost-build = "0.11"
-
-[dependencies]
-async-channel = "1.8.0"
-cid = "0.9.0"
-futures = "0.3.30"
-libp2p-identity = { version = "0.1.3", features = ["peerid"] }
-log = { workspace = true, default-features = true }
-prost = "0.12"
-thiserror = { workspace = true }
-unsigned-varint = { version = "0.7.1", features = ["asynchronous_codec", "futures"] }
-sc-client-api = { path = "../../api" }
-sc-network = { path = ".." }
-sp-blockchain = { path = "../../../primitives/blockchain" }
-sp-runtime = { path = "../../../primitives/runtime" }
-
-[dev-dependencies]
-tokio = { version = "1.22.0", features = ["full"] }
-sc-block-builder = { path = "../../block-builder" }
-sc-consensus = { path = "../../consensus/common" }
-sp-crypto-hashing = { path = "../../../primitives/crypto/hashing" }
-sp-consensus = { path = "../../../primitives/consensus/common" }
-substrate-test-runtime = { path = "../../../test-utils/runtime" }
-substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" }
diff --git a/substrate/client/network/bitswap/build.rs b/substrate/client/network/build.rs
similarity index 100%
rename from substrate/client/network/bitswap/build.rs
rename to substrate/client/network/build.rs
diff --git a/substrate/client/network/common/Cargo.toml b/substrate/client/network/common/Cargo.toml
index f9248b0bb51cac8e605141d791688f73a65f602f..4478693456f7e32bcab4cf6ce8a94a48a5e7c727 100644
--- a/substrate/client/network/common/Cargo.toml
+++ b/substrate/client/network/common/Cargo.toml
@@ -27,6 +27,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", features = [
 futures = "0.3.30"
 libp2p-identity = { version = "0.1.3", features = ["peerid"] }
 sc-consensus = { path = "../../consensus/common" }
+sc-network-types = { path = "../types" }
 sp-consensus = { path = "../../../primitives/consensus/common" }
 sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa" }
 sp-runtime = { path = "../../../primitives/runtime" }
diff --git a/substrate/client/network/light/Cargo.toml b/substrate/client/network/light/Cargo.toml
index 2628fd07d3e4972de4e58135f23e4bc2e3cf008b..d75a2a908da54ceb7c27a3ba79e4ac6c0d2ba071 100644
--- a/substrate/client/network/light/Cargo.toml
+++ b/substrate/client/network/light/Cargo.toml
@@ -25,11 +25,11 @@ codec = { package = "parity-scale-codec", version = "3.6.1", features = [
 	"derive",
 ] }
 futures = "0.3.30"
-libp2p-identity = { version = "0.1.3", features = ["peerid"] }
 log = { workspace = true, default-features = true }
 prost = "0.12"
 sp-blockchain = { path = "../../../primitives/blockchain" }
 sc-client-api = { path = "../../api" }
+sc-network-types = { path = "../types" }
 sc-network = { path = ".." }
 sp-core = { path = "../../../primitives/core" }
 sp-runtime = { path = "../../../primitives/runtime" }
diff --git a/substrate/client/network/light/src/light_client_requests.rs b/substrate/client/network/light/src/light_client_requests.rs
index 4d2a301c00e6b491e204f008ca0393608e966933..e55ceb62d7cd80b26b8926c627f4407a918fc7aa 100644
--- a/substrate/client/network/light/src/light_client_requests.rs
+++ b/substrate/client/network/light/src/light_client_requests.rs
@@ -18,7 +18,8 @@
 
 //! Helpers for outgoing and incoming light client requests.
 
-use sc_network::{config::ProtocolId, request_responses::ProtocolConfig};
+use sc_network::{config::ProtocolId, request_responses::IncomingRequest, NetworkBackend};
+use sp_runtime::traits::Block;
 
 use std::time::Duration;
 
@@ -40,20 +41,24 @@ fn generate_legacy_protocol_name(protocol_id: &ProtocolId) -> String {
 	format!("/{}/light/2", protocol_id.as_ref())
 }
 
-/// Generates a [`ProtocolConfig`] for the light client request protocol, refusing incoming
-/// requests.
-pub fn generate_protocol_config<Hash: AsRef<[u8]>>(
+/// Generates a `RequestResponseProtocolConfig` for the light client request protocol, handling
+/// incoming requests through the provided `inbound_queue`.
+pub fn generate_protocol_config<
+	Hash: AsRef<[u8]>,
+	B: Block,
+	N: NetworkBackend<B, <B as Block>::Hash>,
+>(
 	protocol_id: &ProtocolId,
 	genesis_hash: Hash,
 	fork_id: Option<&str>,
-) -> ProtocolConfig {
-	ProtocolConfig {
-		name: generate_protocol_name(genesis_hash, fork_id).into(),
-		fallback_names: std::iter::once(generate_legacy_protocol_name(protocol_id).into())
-			.collect(),
-		max_request_size: 1 * 1024 * 1024,
-		max_response_size: 16 * 1024 * 1024,
-		request_timeout: Duration::from_secs(15),
-		inbound_queue: None,
-	}
+	inbound_queue: async_channel::Sender<IncomingRequest>,
+) -> N::RequestResponseProtocolConfig {
+	N::request_response_config(
+		generate_protocol_name(genesis_hash, fork_id).into(),
+		std::iter::once(generate_legacy_protocol_name(protocol_id).into()).collect(),
+		1 * 1024 * 1024,
+		16 * 1024 * 1024,
+		Duration::from_secs(15),
+		Some(inbound_queue),
+	)
 }
diff --git a/substrate/client/network/light/src/light_client_requests/handler.rs b/substrate/client/network/light/src/light_client_requests/handler.rs
index 8f2bcc7384b33515412dc7ccdc7d695ce6db87da..49bdf7007e26126b21d6e63c386d151c5bbfd400 100644
--- a/substrate/client/network/light/src/light_client_requests/handler.rs
+++ b/substrate/client/network/light/src/light_client_requests/handler.rs
@@ -25,15 +25,15 @@
 use crate::schema;
 use codec::{self, Decode, Encode};
 use futures::prelude::*;
-use libp2p_identity::PeerId;
 use log::{debug, trace};
 use prost::Message;
 use sc_client_api::{BlockBackend, ProofProvider};
 use sc_network::{
 	config::ProtocolId,
-	request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig},
-	ReputationChange,
+	request_responses::{IncomingRequest, OutgoingResponse},
+	NetworkBackend, ReputationChange,
 };
+use sc_network_types::PeerId;
 use sp_core::{
 	hexdisplay::HexDisplay,
 	storage::{ChildInfo, ChildType, PrefixedStorageKey},
@@ -61,14 +61,14 @@ where
 	Client: BlockBackend<B> + ProofProvider<B> + Send + Sync + 'static,
 {
 	/// Create a new [`LightClientRequestHandler`].
-	pub fn new(
+	pub fn new<N: NetworkBackend<B, <B as Block>::Hash>>(
 		protocol_id: &ProtocolId,
 		fork_id: Option<&str>,
 		client: Arc<Client>,
-	) -> (Self, ProtocolConfig) {
+	) -> (Self, N::RequestResponseProtocolConfig) {
 		let (tx, request_receiver) = async_channel::bounded(MAX_LIGHT_REQUEST_QUEUE);
 
-		let mut protocol_config = super::generate_protocol_config(
+		let protocol_config = super::generate_protocol_config::<_, B, N>(
 			protocol_id,
 			client
 				.block_hash(0u32.into())
@@ -76,8 +76,8 @@ where
 				.flatten()
 				.expect("Genesis block exists; qed"),
 			fork_id,
+			tx,
 		);
-		protocol_config.inbound_queue = Some(tx);
 
 		(Self { client, request_receiver, _block: PhantomData::default() }, protocol_config)
 	}
diff --git a/substrate/client/network/src/behaviour.rs b/substrate/client/network/src/behaviour.rs
index 1f234683392f176d4e20f15ffed71b661daeed3f..833ff5d09e5eeac2fe6ba3ab74a2c499281a27e0 100644
--- a/substrate/client/network/src/behaviour.rs
+++ b/substrate/client/network/src/behaviour.rs
@@ -20,7 +20,7 @@ use crate::{
 	discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut},
 	event::DhtEvent,
 	peer_info,
-	peer_store::PeerStoreHandle,
+	peer_store::PeerStoreProvider,
 	protocol::{CustomMessageOutcome, NotificationsSink, Protocol},
 	protocol_controller::SetId,
 	request_responses::{self, IfDisconnected, ProtocolConfig, RequestFailure},
@@ -173,7 +173,7 @@ impl<B: BlockT> Behaviour<B> {
 		local_public_key: PublicKey,
 		disco_config: DiscoveryConfig,
 		request_response_protocols: Vec<ProtocolConfig>,
-		peer_store_handle: PeerStoreHandle,
+		peer_store_handle: Arc<dyn PeerStoreProvider>,
 		external_addresses: Arc<Mutex<HashSet<Multiaddr>>>,
 	) -> Result<Self, request_responses::RegisterError> {
 		Ok(Self {
@@ -186,7 +186,7 @@ impl<B: BlockT> Behaviour<B> {
 			discovery: disco_config.finish(),
 			request_responses: request_responses::RequestResponsesBehaviour::new(
 				request_response_protocols.into_iter(),
-				Box::new(peer_store_handle),
+				peer_store_handle,
 			)?,
 		})
 	}
diff --git a/substrate/client/network/bitswap/src/lib.rs b/substrate/client/network/src/bitswap/mod.rs
similarity index 99%
rename from substrate/client/network/bitswap/src/lib.rs
rename to substrate/client/network/src/bitswap/mod.rs
index 1ba95e30bad101e92adc2291b5d0928896f33269..22f1973adcb2ece0b2fe751ef9c5e94b1bb1fd85 100644
--- a/substrate/client/network/bitswap/src/lib.rs
+++ b/substrate/client/network/src/bitswap/mod.rs
@@ -20,16 +20,17 @@
 //! Only supports bitswap 1.2.0.
 //! CID is expected to reference 256-bit Blake2b transaction hash.
 
+use crate::{
+	request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig},
+	types::ProtocolName,
+};
+
 use cid::{self, Version};
 use futures::StreamExt;
-use libp2p_identity::PeerId;
 use log::{debug, error, trace};
 use prost::Message;
 use sc_client_api::BlockBackend;
-use sc_network::{
-	request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig},
-	types::ProtocolName,
-};
+use sc_network_types::PeerId;
 use schema::bitswap::{
 	message::{wantlist::WantType, Block as MessageBlock, BlockPresence, BlockPresenceType},
 	Message as BitswapMessage,
diff --git a/substrate/client/network/bitswap/src/schema.rs b/substrate/client/network/src/bitswap/schema.rs
similarity index 100%
rename from substrate/client/network/bitswap/src/schema.rs
rename to substrate/client/network/src/bitswap/schema.rs
diff --git a/substrate/client/network/src/config.rs b/substrate/client/network/src/config.rs
index 24e96843c32d62187a666e1e7ee0a053c5af2482..e6cc9de56942700940da2644b2c5b57c0abd144d 100644
--- a/substrate/client/network/src/config.rs
+++ b/substrate/client/network/src/config.rs
@@ -23,21 +23,26 @@
 
 pub use crate::{
 	discovery::DEFAULT_KADEMLIA_REPLICATION_FACTOR,
+	peer_store::PeerStoreProvider,
 	protocol::{notification_service, NotificationsSink, ProtocolHandlePair},
 	request_responses::{
 		IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig,
 	},
-	service::traits::NotificationService,
+	service::{
+		metrics::NotificationMetrics,
+		traits::{NotificationConfig, NotificationService, PeerStore},
+	},
 	types::ProtocolName,
 };
 
 pub use libp2p::{
 	build_multiaddr,
 	identity::{self, ed25519, Keypair},
-	multiaddr, Multiaddr, PeerId,
+	multiaddr, Multiaddr,
 };
+use sc_network_types::PeerId;
 
-use crate::peer_store::PeerStoreHandle;
+use crate::service::{ensure_addresses_consistent_with_transport, traits::NetworkBackend};
 use codec::Encode;
 use prometheus_endpoint::Registry;
 use zeroize::Zeroize;
@@ -61,6 +66,7 @@ use std::{
 	path::{Path, PathBuf},
 	pin::Pin,
 	str::{self, FromStr},
+	sync::Arc,
 };
 
 /// Protocol name prefix, transmitted on the wire for legacy protocol names.
@@ -99,7 +105,7 @@ impl fmt::Debug for ProtocolId {
 /// let (peer_id, addr) = parse_str_addr(
 /// 	"/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"
 /// ).unwrap();
-/// assert_eq!(peer_id, "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".parse::<PeerId>().unwrap());
+/// assert_eq!(peer_id, "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".parse::<PeerId>().unwrap().into());
 /// assert_eq!(addr, "/ip4/198.51.100.19/tcp/30333".parse::<Multiaddr>().unwrap());
 /// ```
 pub fn parse_str_addr(addr_str: &str) -> Result<(PeerId, Multiaddr), ParseErr> {
@@ -569,6 +575,17 @@ impl NonDefaultSetConfig {
 	}
 }
 
+impl NotificationConfig for NonDefaultSetConfig {
+	fn set_config(&self) -> &SetConfig {
+		&self.set_config
+	}
+
+	/// Get reference to protocol name.
+	fn protocol_name(&self) -> &ProtocolName {
+		&self.protocol_name
+	}
+}
+
 /// Network service configuration.
 #[derive(Clone, Debug)]
 pub struct NetworkConfiguration {
@@ -655,6 +672,9 @@ pub struct NetworkConfiguration {
 	/// a modification of the way the implementation works. Different nodes with different
 	/// configured values remain compatible with each other.
 	pub yamux_window_size: Option<u32>,
+
+	/// Networking backend used for P2P communication.
+	pub network_backend: NetworkBackendType,
 }
 
 impl NetworkConfiguration {
@@ -687,6 +707,7 @@ impl NetworkConfiguration {
 				.expect("value is a constant; constant is non-zero; qed."),
 			yamux_window_size: None,
 			ipfs_server: false,
+			network_backend: NetworkBackendType::Libp2p,
 		}
 	}
 
@@ -722,18 +743,15 @@ impl NetworkConfiguration {
 }
 
 /// Network initialization parameters.
-pub struct Params<Block: BlockT> {
+pub struct Params<Block: BlockT, H: ExHashT, N: NetworkBackend<Block, H>> {
 	/// Assigned role for our node (full, light, ...).
 	pub role: Role,
 
 	/// How to spawn background tasks.
-	pub executor: Box<dyn Fn(Pin<Box<dyn Future<Output = ()> + Send>>) + Send>,
+	pub executor: Box<dyn Fn(Pin<Box<dyn Future<Output = ()> + Send>>) + Send + Sync>,
 
 	/// Network layer configuration.
-	pub network_config: FullNetworkConfiguration,
-
-	/// Peer store with known nodes, peer reputations, etc.
-	pub peer_store: PeerStoreHandle,
+	pub network_config: FullNetworkConfiguration<Block, H, N>,
 
 	/// Legacy name of the protocol to use on the wire. Should be different for each chain.
 	pub protocol_id: ProtocolId,
@@ -749,25 +767,43 @@ pub struct Params<Block: BlockT> {
 	pub metrics_registry: Option<Registry>,
 
 	/// Block announce protocol configuration
-	pub block_announce_config: NonDefaultSetConfig,
+	pub block_announce_config: N::NotificationProtocolConfig,
+
+	/// Bitswap configuration, if the server has been enabled.
+	pub bitswap_config: Option<N::BitswapConfig>,
+
+	/// Notification metrics.
+	pub notification_metrics: NotificationMetrics,
 }
 
 /// Full network configuration.
-pub struct FullNetworkConfiguration {
+pub struct FullNetworkConfiguration<B: BlockT + 'static, H: ExHashT, N: NetworkBackend<B, H>> {
 	/// Installed notification protocols.
-	pub(crate) notification_protocols: Vec<NonDefaultSetConfig>,
+	pub(crate) notification_protocols: Vec<N::NotificationProtocolConfig>,
 
 	/// List of request-response protocols that the node supports.
-	pub(crate) request_response_protocols: Vec<RequestResponseConfig>,
+	pub(crate) request_response_protocols: Vec<N::RequestResponseProtocolConfig>,
 
 	/// Network configuration.
 	pub network_config: NetworkConfiguration,
+
+	/// [`PeerStore`](crate::peer_store::PeerStore).
+	peer_store: Option<N::PeerStore>,
+
+	/// Handle to [`PeerStore`](crate::peer_store::PeerStore).
+	peer_store_handle: Arc<dyn PeerStoreProvider>,
 }
 
-impl FullNetworkConfiguration {
+impl<B: BlockT + 'static, H: ExHashT, N: NetworkBackend<B, H>> FullNetworkConfiguration<B, H, N> {
 	/// Create new [`FullNetworkConfiguration`].
 	pub fn new(network_config: &NetworkConfiguration) -> Self {
+		let bootnodes = network_config.boot_nodes.iter().map(|bootnode| bootnode.peer_id).collect();
+		let peer_store = N::peer_store(bootnodes);
+		let peer_store_handle = peer_store.handle();
+
 		Self {
+			peer_store: Some(peer_store),
+			peer_store_handle,
 			notification_protocols: Vec::new(),
 			request_response_protocols: Vec::new(),
 			network_config: network_config.clone(),
@@ -775,19 +811,131 @@ impl FullNetworkConfiguration {
 	}
 
 	/// Add a notification protocol.
-	pub fn add_notification_protocol(&mut self, config: NonDefaultSetConfig) {
+	pub fn add_notification_protocol(&mut self, config: N::NotificationProtocolConfig) {
 		self.notification_protocols.push(config);
 	}
 
 	/// Get reference to installed notification protocols.
-	pub fn notification_protocols(&self) -> &Vec<NonDefaultSetConfig> {
+	pub fn notification_protocols(&self) -> &Vec<N::NotificationProtocolConfig> {
 		&self.notification_protocols
 	}
 
 	/// Add a request-response protocol.
-	pub fn add_request_response_protocol(&mut self, config: RequestResponseConfig) {
+	pub fn add_request_response_protocol(&mut self, config: N::RequestResponseProtocolConfig) {
 		self.request_response_protocols.push(config);
 	}
+
+	/// Get handle to [`PeerStore`].
+	pub fn peer_store_handle(&self) -> Arc<dyn PeerStoreProvider> {
+		Arc::clone(&self.peer_store_handle)
+	}
+
+	/// Take [`PeerStore`].
+	///
+	/// `PeerStore` is created when `FullNetworkConfiguration` is initialized so that `PeerStoreHandle`s
+	/// can be passed onto notification protocols. `PeerStore` itself should be started only once
+	/// and since technically it's not a libp2p task, it should be started with `SpawnHandle` in
+	/// `builder.rs` instead of using the libp2p/litep2p executor in the networking backend. This
+	/// function consumes `PeerStore` and starts its event loop in the appropriate place.
+	pub fn take_peer_store(&mut self) -> N::PeerStore {
+		self.peer_store
+			.take()
+			.expect("`PeerStore` can only be taken once when it's started; qed")
+	}
+
+	/// Verify addresses are consistent with enabled transports.
+	pub fn sanity_check_addresses(&self) -> Result<(), crate::error::Error> {
+		ensure_addresses_consistent_with_transport(
+			self.network_config.listen_addresses.iter(),
+			&self.network_config.transport,
+		)?;
+		ensure_addresses_consistent_with_transport(
+			self.network_config.boot_nodes.iter().map(|x| &x.multiaddr),
+			&self.network_config.transport,
+		)?;
+		ensure_addresses_consistent_with_transport(
+			self.network_config
+				.default_peers_set
+				.reserved_nodes
+				.iter()
+				.map(|x| &x.multiaddr),
+			&self.network_config.transport,
+		)?;
+
+		for notification_protocol in &self.notification_protocols {
+			ensure_addresses_consistent_with_transport(
+				notification_protocol.set_config().reserved_nodes.iter().map(|x| &x.multiaddr),
+				&self.network_config.transport,
+			)?;
+		}
+		ensure_addresses_consistent_with_transport(
+			self.network_config.public_addresses.iter(),
+			&self.network_config.transport,
+		)?;
+
+		Ok(())
+	}
+
+	/// Check for duplicate bootnodes.
+	pub fn sanity_check_bootnodes(&self) -> Result<(), crate::error::Error> {
+		self.network_config.boot_nodes.iter().try_for_each(|bootnode| {
+			if let Some(other) = self
+				.network_config
+				.boot_nodes
+				.iter()
+				.filter(|o| o.multiaddr == bootnode.multiaddr)
+				.find(|o| o.peer_id != bootnode.peer_id)
+			{
+				Err(crate::error::Error::DuplicateBootnode {
+					address: bootnode.multiaddr.clone(),
+					first_id: bootnode.peer_id.into(),
+					second_id: other.peer_id.into(),
+				})
+			} else {
+				Ok(())
+			}
+		})
+	}
+
+	/// Collect all reserved nodes and bootnodes addresses.
+	pub fn known_addresses(&self) -> Vec<(PeerId, Multiaddr)> {
+		let mut addresses: Vec<_> = self
+			.network_config
+			.default_peers_set
+			.reserved_nodes
+			.iter()
+			.map(|reserved| (reserved.peer_id, reserved.multiaddr.clone()))
+			.chain(self.notification_protocols.iter().flat_map(|protocol| {
+				protocol
+					.set_config()
+					.reserved_nodes
+					.iter()
+					.map(|reserved| (reserved.peer_id, reserved.multiaddr.clone()))
+			}))
+			.chain(
+				self.network_config
+					.boot_nodes
+					.iter()
+					.map(|bootnode| (bootnode.peer_id, bootnode.multiaddr.clone())),
+			)
+			.collect();
+
+		// Remove possible duplicates.
+		addresses.sort();
+		addresses.dedup();
+
+		addresses
+	}
+}
+
+/// Network backend type.
+#[derive(Debug, Clone)]
+pub enum NetworkBackendType {
+	/// Use libp2p for P2P networking.
+	Libp2p,
+
+	/// Use litep2p for P2P networking.
+	Litep2p,
 }
 
 #[cfg(test)]
diff --git a/substrate/client/network/src/error.rs b/substrate/client/network/src/error.rs
index 01e8356fb55355cd50633d0fe8d2f9f565957b23..b776e3e1ad9de8f84118f293f28ad00c88969ba0 100644
--- a/substrate/client/network/src/error.rs
+++ b/substrate/client/network/src/error.rs
@@ -77,6 +77,9 @@ pub enum Error {
 	/// Connection closed.
 	#[error("Connection closed")]
 	ConnectionClosed,
+	/// Litep2p error.
+	#[error("Litep2p error: `{0}`")]
+	Litep2p(litep2p::Error),
 }
 
 // Make `Debug` use the `Display` implementation.
diff --git a/substrate/client/network/src/lib.rs b/substrate/client/network/src/lib.rs
index 4c39c57e8dfcad122f7a2ecf207bc0b877862b78..8f479825c8d77c7684333981d1763099b8a537c9 100644
--- a/substrate/client/network/src/lib.rs
+++ b/substrate/client/network/src/lib.rs
@@ -243,6 +243,8 @@
 //! More precise usage details are still being worked on and will likely change in the future.
 
 mod behaviour;
+mod bitswap;
+mod litep2p;
 mod protocol;
 
 #[cfg(test)]
@@ -262,27 +264,28 @@ pub mod transport;
 pub mod types;
 pub mod utils;
 
+pub use crate::litep2p::Litep2pNetworkBackend;
 pub use event::{DhtEvent, Event};
 #[doc(inline)]
-pub use libp2p::{multiaddr, Multiaddr, PeerId};
 pub use request_responses::{Config, IfDisconnected, RequestFailure};
 pub use sc_network_common::{
 	role::{ObservedRole, Roles},
 	types::ReputationChange,
 };
 pub use service::{
+	metrics::NotificationMetrics,
 	signature::Signature,
 	traits::{
-		KademliaKey, MessageSink, NetworkBlock, NetworkDHTProvider, NetworkEventStream,
-		NetworkNotification, NetworkPeers, NetworkRequest, NetworkSigner, NetworkStateInfo,
-		NetworkStatus, NetworkStatusProvider, NetworkSyncForkRequest,
+		KademliaKey, MessageSink, NetworkBackend, NetworkBlock, NetworkDHTProvider,
+		NetworkEventStream, NetworkPeers, NetworkRequest, NetworkSigner, NetworkStateInfo,
+		NetworkStatus, NetworkStatusProvider, NetworkSyncForkRequest, NotificationConfig,
 		NotificationSender as NotificationSenderT, NotificationSenderError,
 		NotificationSenderReady, NotificationService,
 	},
 	DecodingError, Keypair, NetworkService, NetworkWorker, NotificationSender, OutboundFailure,
 	PublicKey,
 };
-pub use types::ProtocolName;
+pub use types::{multiaddr, Multiaddr, PeerId, ProtocolName};
 
 /// The maximum allowed number of established connections per peer.
 ///
diff --git a/substrate/client/network/src/litep2p/discovery.rs b/substrate/client/network/src/litep2p/discovery.rs
new file mode 100644
index 0000000000000000000000000000000000000000..27f4d5473722186e986deef29bb8a9b3eb0aeaec
--- /dev/null
+++ b/substrate/client/network/src/litep2p/discovery.rs
@@ -0,0 +1,528 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! libp2p-related discovery code for litep2p backend.
+
+use crate::{
+	config::{NetworkConfiguration, ProtocolId},
+	multiaddr::Protocol,
+	peer_store::PeerStoreProvider,
+	Multiaddr,
+};
+
+use array_bytes::bytes2hex;
+use futures::{FutureExt, Stream};
+use futures_timer::Delay;
+use ip_network::IpNetwork;
+use libp2p::kad::record::Key as KademliaKey;
+use litep2p::{
+	protocol::{
+		libp2p::{
+			identify::{Config as IdentifyConfig, IdentifyEvent},
+			kademlia::{
+				Config as KademliaConfig, ConfigBuilder as KademliaConfigBuilder, KademliaEvent,
+				KademliaHandle, QueryId, Quorum, Record, RecordKey,
+			},
+			ping::{Config as PingConfig, PingEvent},
+		},
+		mdns::{Config as MdnsConfig, MdnsEvent},
+	},
+	PeerId, ProtocolName,
+};
+use parking_lot::RwLock;
+use schnellru::{ByLength, LruMap};
+
+use std::{
+	cmp,
+	collections::{HashMap, HashSet, VecDeque},
+	pin::Pin,
+	sync::Arc,
+	task::{Context, Poll},
+	time::Duration,
+};
+
+/// Logging target for the file.
+const LOG_TARGET: &str = "sub-libp2p::discovery";
+
+/// Kademlia query interval.
+const KADEMLIA_QUERY_INTERVAL: Duration = Duration::from_secs(5);
+
+/// mDNS query interval.
+const MDNS_QUERY_INTERVAL: Duration = Duration::from_secs(30);
+
+/// Minimum number of confirmations received before an address is verified.
+const MIN_ADDRESS_CONFIRMATIONS: usize = 5;
+
+/// Discovery events.
+#[derive(Debug)]
+pub enum DiscoveryEvent {
+	/// Ping RTT measured for peer.
+	Ping {
+		/// Remote peer ID.
+		peer: PeerId,
+
+		/// Ping round-trip time.
+		rtt: Duration,
+	},
+
+	/// Peer identified over `/ipfs/identify/1.0.0` protocol.
+	Identified {
+		/// Peer ID.
+		peer: PeerId,
+
+		/// Identify protocol version.
+		protocol_version: Option<String>,
+
+		/// Identify user agent version.
+		user_agent: Option<String>,
+
+		/// Observed address.
+		observed_address: Multiaddr,
+
+		/// Listen addresses.
+		listen_addresses: Vec<Multiaddr>,
+
+		/// Supported protocols.
+		supported_protocols: HashSet<ProtocolName>,
+	},
+
+	/// One or more addresses discovered.
+	Discovered {
+		/// Discovered addresses.
+		addresses: Vec<Multiaddr>,
+	},
+
+	/// Routing table has been updated.
+	RoutingTableUpdate {
+		/// Peers that were added to routing table.
+		peers: HashSet<PeerId>,
+	},
+
+	/// New external address discovered.
+	ExternalAddressDiscovered {
+		/// Discovered addresses.
+		address: Multiaddr,
+	},
+
+	/// Record was found from the DHT.
+	GetRecordSuccess {
+		/// Query ID.
+		query_id: QueryId,
+
+		/// Record.
+		record: Record,
+	},
+
+	/// Record was successfully stored on the DHT.
+	PutRecordSuccess {
+		/// Query ID.
+		query_id: QueryId,
+	},
+
+	/// Query failed.
+	QueryFailed {
+		/// Query ID.
+		query_id: QueryId,
+	},
+}
+
+/// Discovery.
+pub struct Discovery {
+	/// Ping event stream.
+	ping_event_stream: Box<dyn Stream<Item = PingEvent> + Send + Unpin>,
+
+	/// Identify event stream.
+	identify_event_stream: Box<dyn Stream<Item = IdentifyEvent> + Send + Unpin>,
+
+	/// mDNS event stream, if enabled.
+	mdns_event_stream: Option<Box<dyn Stream<Item = MdnsEvent> + Send + Unpin>>,
+
+	/// Kademlia handle.
+	kademlia_handle: KademliaHandle,
+
+	/// `Peerstore` handle.
+	_peerstore_handle: Arc<dyn PeerStoreProvider>,
+
+	/// Next Kademlia query for a random peer ID.
+	///
+	/// If `None`, there is currently a query pending.
+	next_kad_query: Option<Delay>,
+
+	/// Active `FIND_NODE` query if it exists.
+	find_node_query_id: Option<QueryId>,
+
+	/// Pending events.
+	pending_events: VecDeque<DiscoveryEvent>,
+
+	/// Allow non-global addresses in the DHT.
+	allow_non_global_addresses: bool,
+
+	/// Protocols supported by the local node.
+	local_protocols: HashSet<ProtocolName>,
+
+	/// Public addresses.
+	public_addresses: HashSet<Multiaddr>,
+
+	/// Listen addresses.
+	listen_addresses: Arc<RwLock<HashSet<Multiaddr>>>,
+
+	/// External address confirmations.
+	address_confirmations: LruMap<Multiaddr, usize>,
+
+	/// Delay to next `FIND_NODE` query.
+	duration_to_next_find_query: Duration,
+}
+
+/// Legacy (fallback) Kademlia protocol name based on `protocol_id`.
+fn legacy_kademlia_protocol_name(id: &ProtocolId) -> ProtocolName {
+	ProtocolName::from(format!("/{}/kad", id.as_ref()))
+}
+
+/// Kademlia protocol name based on `genesis_hash` and `fork_id`.
+fn kademlia_protocol_name<Hash: AsRef<[u8]>>(
+	genesis_hash: Hash,
+	fork_id: Option<&str>,
+) -> ProtocolName {
+	let genesis_hash_hex = bytes2hex("", genesis_hash.as_ref());
+	let protocol = if let Some(fork_id) = fork_id {
+		format!("/{}/{}/kad", genesis_hash_hex, fork_id)
+	} else {
+		format!("/{}/kad", genesis_hash_hex)
+	};
+
+	ProtocolName::from(protocol)
+}
+
+impl Discovery {
+	/// Create new [`Discovery`].
+	///
+	/// Enables `/ipfs/ping/1.0.0` and `/ipfs/identify/1.0.0` by default and starts
+	/// the mDNS peer discovery if it was enabled.
+	pub fn new<Hash: AsRef<[u8]> + Clone>(
+		config: &NetworkConfiguration,
+		genesis_hash: Hash,
+		fork_id: Option<&str>,
+		protocol_id: &ProtocolId,
+		known_peers: HashMap<PeerId, Vec<Multiaddr>>,
+		listen_addresses: Arc<RwLock<HashSet<Multiaddr>>>,
+		_peerstore_handle: Arc<dyn PeerStoreProvider>,
+	) -> (Self, PingConfig, IdentifyConfig, KademliaConfig, Option<MdnsConfig>) {
+		let (ping_config, ping_event_stream) = PingConfig::default();
+		let user_agent = format!("{} ({})", config.client_version, config.node_name);
+		let (identify_config, identify_event_stream) = IdentifyConfig::new(
+			"/substrate/1.0".to_string(),
+			Some(user_agent),
+			config.public_addresses.clone(),
+		);
+
+		let (mdns_config, mdns_event_stream) = match config.transport {
+			crate::config::TransportConfig::Normal { enable_mdns, .. } => match enable_mdns {
+				true => {
+					let (mdns_config, mdns_event_stream) = MdnsConfig::new(MDNS_QUERY_INTERVAL);
+					(Some(mdns_config), Some(mdns_event_stream))
+				},
+				false => (None, None),
+			},
+			_ => panic!("memory transport not supported"),
+		};
+
+		let (kademlia_config, kademlia_handle) = {
+			let protocol_names = vec![
+				kademlia_protocol_name(genesis_hash.clone(), fork_id),
+				legacy_kademlia_protocol_name(protocol_id),
+			];
+
+			KademliaConfigBuilder::new()
+				.with_known_peers(known_peers)
+				.with_protocol_names(protocol_names)
+				.build()
+		};
+
+		(
+			Self {
+				ping_event_stream,
+				identify_event_stream,
+				mdns_event_stream,
+				kademlia_handle,
+				_peerstore_handle,
+				listen_addresses,
+				find_node_query_id: None,
+				pending_events: VecDeque::new(),
+				duration_to_next_find_query: Duration::from_secs(1),
+				address_confirmations: LruMap::new(ByLength::new(8)),
+				allow_non_global_addresses: config.allow_non_globals_in_dht,
+				public_addresses: config.public_addresses.iter().cloned().collect(),
+				next_kad_query: Some(Delay::new(KADEMLIA_QUERY_INTERVAL)),
+				local_protocols: HashSet::from_iter([
+					kademlia_protocol_name(genesis_hash, fork_id),
+					legacy_kademlia_protocol_name(protocol_id),
+				]),
+			},
+			ping_config,
+			identify_config,
+			kademlia_config,
+			mdns_config,
+		)
+	}
+
+	/// Add known peer to `Kademlia`.
+	#[allow(unused)]
+	pub async fn add_known_peer(&mut self, peer: PeerId, addresses: Vec<Multiaddr>) {
+		self.kademlia_handle.add_known_peer(peer, addresses).await;
+	}
+
+	/// Add self-reported addresses to routing table if `peer` supports
+	/// at least one of the locally supported DHT protocol.
+	pub async fn add_self_reported_address(
+		&mut self,
+		peer: PeerId,
+		supported_protocols: HashSet<ProtocolName>,
+		addresses: Vec<Multiaddr>,
+	) {
+		if self.local_protocols.is_disjoint(&supported_protocols) {
+			return
+		}
+
+		let addresses = addresses
+			.into_iter()
+			.filter_map(|address| {
+				if !self.allow_non_global_addresses && !Discovery::can_add_to_dht(&address) {
+					log::trace!(
+						target: LOG_TARGET,
+						"ignoring self-reported non-global address {address} from {peer}."
+					);
+
+					return None
+				}
+
+				Some(address)
+			})
+			.collect();
+
+		log::trace!(
+			target: LOG_TARGET,
+			"add self-reported addresses for {peer:?}: {addresses:?}",
+		);
+
+		self.kademlia_handle.add_known_peer(peer, addresses).await;
+	}
+
+	/// Start Kademlia `GET_VALUE` query for `key`.
+	pub async fn get_value(&mut self, key: KademliaKey) -> QueryId {
+		self.kademlia_handle
+			.get_record(RecordKey::new(&key.to_vec()), Quorum::One)
+			.await
+	}
+
+	/// Publish value on the DHT using Kademlia `PUT_VALUE`.
+	pub async fn put_value(&mut self, key: KademliaKey, value: Vec<u8>) -> QueryId {
+		self.kademlia_handle
+			.put_record(Record::new(RecordKey::new(&key.to_vec()), value))
+			.await
+	}
+
+	/// Check if the observed address is a known address.
+	fn is_known_address(known: &Multiaddr, observed: &Multiaddr) -> bool {
+		let mut known = known.iter();
+		let mut observed = observed.iter();
+
+		loop {
+			match (known.next(), observed.next()) {
+				(None, None) => return true,
+				(None, Some(Protocol::P2p(_))) => return true,
+				(Some(Protocol::P2p(_)), None) => return true,
+				(known, observed) if known != observed => return false,
+				_ => {},
+			}
+		}
+	}
+
+	/// Can `address` be added to DHT.
+	fn can_add_to_dht(address: &Multiaddr) -> bool {
+		let ip = match address.iter().next() {
+			Some(Protocol::Ip4(ip)) => IpNetwork::from(ip),
+			Some(Protocol::Ip6(ip)) => IpNetwork::from(ip),
+			Some(Protocol::Dns(_)) | Some(Protocol::Dns4(_)) | Some(Protocol::Dns6(_)) =>
+				return true,
+			_ => return false,
+		};
+
+		ip.is_global()
+	}
+
+	/// Check if `address` can be considered a new external address.
+	fn is_new_external_address(&mut self, address: &Multiaddr) -> bool {
+		log::trace!(target: LOG_TARGET, "verify new external address: {address}");
+
+		// is the address one of our known addresses
+		if self
+			.listen_addresses
+			.read()
+			.iter()
+			.chain(self.public_addresses.iter())
+			.any(|known_address| Discovery::is_known_address(&known_address, &address))
+		{
+			return true
+		}
+
+		match self.address_confirmations.get(address) {
+			Some(confirmations) => {
+				*confirmations += 1usize;
+
+				if *confirmations >= MIN_ADDRESS_CONFIRMATIONS {
+					return true
+				}
+			},
+			None => {
+				self.address_confirmations.insert(address.clone(), 1usize);
+			},
+		}
+
+		false
+	}
+}
+
+impl Stream for Discovery {
+	type Item = DiscoveryEvent;
+
+	fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+		let this = Pin::into_inner(self);
+
+		if let Some(event) = this.pending_events.pop_front() {
+			return Poll::Ready(Some(event))
+		}
+
+		if let Some(mut delay) = this.next_kad_query.take() {
+			match delay.poll_unpin(cx) {
+				Poll::Pending => {
+					this.next_kad_query = Some(delay);
+				},
+				Poll::Ready(()) => {
+					let peer = PeerId::random();
+
+					log::trace!(target: LOG_TARGET, "start next kademlia query for {peer:?}");
+
+					match this.kademlia_handle.try_find_node(peer) {
+						Ok(query_id) => {
+							this.find_node_query_id = Some(query_id);
+						},
+						Err(()) => {
+							this.duration_to_next_find_query = cmp::min(
+								this.duration_to_next_find_query * 2,
+								Duration::from_secs(60),
+							);
+							this.next_kad_query =
+								Some(Delay::new(this.duration_to_next_find_query));
+						},
+					}
+				},
+			}
+		}
+
+		match Pin::new(&mut this.kademlia_handle).poll_next(cx) {
+			Poll::Pending => {},
+			Poll::Ready(None) => return Poll::Ready(None),
+			Poll::Ready(Some(KademliaEvent::FindNodeSuccess { peers, .. })) => {
+				// the addresses are already inserted into the DHT and in `TransportManager` so
+				// there is no need to add them again. The found peers must be registered to
+				// `Peerstore` so other protocols are aware of them through `Peerset`.
+				log::trace!(target: LOG_TARGET, "dht random walk yielded {} peers", peers.len());
+
+				this.next_kad_query = Some(Delay::new(KADEMLIA_QUERY_INTERVAL));
+
+				return Poll::Ready(Some(DiscoveryEvent::RoutingTableUpdate {
+					peers: peers.into_iter().map(|(peer, _)| peer).collect(),
+				}))
+			},
+			Poll::Ready(Some(KademliaEvent::RoutingTableUpdate { peers })) => {
+				log::trace!(target: LOG_TARGET, "routing table update, discovered {} peers", peers.len());
+
+				return Poll::Ready(Some(DiscoveryEvent::RoutingTableUpdate {
+					peers: peers.into_iter().collect(),
+				}))
+			},
+			Poll::Ready(Some(KademliaEvent::GetRecordSuccess { query_id, record })) => {
+				log::trace!(
+					target: LOG_TARGET,
+					"`GET_RECORD` succeeded for {query_id:?}: {record:?}",
+				);
+
+				return Poll::Ready(Some(DiscoveryEvent::GetRecordSuccess { query_id, record }));
+			},
+			Poll::Ready(Some(KademliaEvent::PutRecordSucess { query_id, key: _ })) =>
+				return Poll::Ready(Some(DiscoveryEvent::PutRecordSuccess { query_id })),
+			Poll::Ready(Some(KademliaEvent::QueryFailed { query_id })) => {
+				match this.find_node_query_id == Some(query_id) {
+					true => {
+						this.find_node_query_id = None;
+						this.duration_to_next_find_query =
+							cmp::min(this.duration_to_next_find_query * 2, Duration::from_secs(60));
+						this.next_kad_query = Some(Delay::new(this.duration_to_next_find_query));
+					},
+					false => return Poll::Ready(Some(DiscoveryEvent::QueryFailed { query_id })),
+				}
+			},
+		}
+
+		match Pin::new(&mut this.identify_event_stream).poll_next(cx) {
+			Poll::Pending => {},
+			Poll::Ready(None) => return Poll::Ready(None),
+			Poll::Ready(Some(IdentifyEvent::PeerIdentified {
+				peer,
+				protocol_version,
+				user_agent,
+				listen_addresses,
+				supported_protocols,
+				observed_address,
+			})) => {
+				if this.is_new_external_address(&observed_address) {
+					this.pending_events.push_back(DiscoveryEvent::ExternalAddressDiscovered {
+						address: observed_address.clone(),
+					});
+				}
+
+				return Poll::Ready(Some(DiscoveryEvent::Identified {
+					peer,
+					protocol_version,
+					user_agent,
+					listen_addresses,
+					observed_address,
+					supported_protocols,
+				}));
+			},
+		}
+
+		match Pin::new(&mut this.ping_event_stream).poll_next(cx) {
+			Poll::Pending => {},
+			Poll::Ready(None) => return Poll::Ready(None),
+			Poll::Ready(Some(PingEvent::Ping { peer, ping })) =>
+				return Poll::Ready(Some(DiscoveryEvent::Ping { peer, rtt: ping })),
+		}
+
+		if let Some(ref mut mdns_event_stream) = &mut this.mdns_event_stream {
+			match Pin::new(mdns_event_stream).poll_next(cx) {
+				Poll::Pending => {},
+				Poll::Ready(None) => return Poll::Ready(None),
+				Poll::Ready(Some(MdnsEvent::Discovered(addresses))) =>
+					return Poll::Ready(Some(DiscoveryEvent::Discovered { addresses })),
+			}
+		}
+
+		Poll::Pending
+	}
+}
diff --git a/substrate/client/network/src/litep2p/mod.rs b/substrate/client/network/src/litep2p/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..1137c73b56db8292fdee437ee3a656f38e789f51
--- /dev/null
+++ b/substrate/client/network/src/litep2p/mod.rs
@@ -0,0 +1,989 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! `NetworkBackend` implementation for `litep2p`.
+
+use crate::{
+	config::{
+		FullNetworkConfiguration, IncomingRequest, NodeKeyConfig, NotificationHandshake, Params,
+		SetConfig, TransportConfig,
+	},
+	error::Error,
+	event::{DhtEvent, Event},
+	litep2p::{
+		discovery::{Discovery, DiscoveryEvent},
+		peerstore::Peerstore,
+		service::{Litep2pNetworkService, NetworkServiceCommand},
+		shim::{
+			bitswap::BitswapServer,
+			notification::{
+				config::{NotificationProtocolConfig, ProtocolControlHandle},
+				peerset::PeersetCommand,
+			},
+			request_response::{RequestResponseConfig, RequestResponseProtocol},
+		},
+	},
+	multiaddr::{Multiaddr, Protocol},
+	peer_store::PeerStoreProvider,
+	protocol,
+	service::{
+		metrics::{register_without_sources, MetricSources, Metrics, NotificationMetrics},
+		out_events,
+		traits::{BandwidthSink, NetworkBackend, NetworkService},
+	},
+	NetworkStatus, NotificationService, ProtocolName,
+};
+
+use codec::Encode;
+use futures::StreamExt;
+use libp2p::kad::RecordKey;
+use litep2p::{
+	config::ConfigBuilder,
+	crypto::ed25519::{Keypair, SecretKey},
+	executor::Executor,
+	protocol::{
+		libp2p::{bitswap::Config as BitswapConfig, kademlia::QueryId},
+		request_response::ConfigBuilder as RequestResponseConfigBuilder,
+	},
+	transport::{
+		tcp::config::Config as TcpTransportConfig,
+		websocket::config::Config as WebSocketTransportConfig, Endpoint,
+	},
+	types::ConnectionId,
+	Error as Litep2pError, Litep2p, Litep2pEvent, ProtocolName as Litep2pProtocolName,
+};
+use parking_lot::RwLock;
+use prometheus_endpoint::Registry;
+
+use sc_client_api::BlockBackend;
+use sc_network_common::{role::Roles, ExHashT};
+use sc_network_types::PeerId;
+use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver};
+use sp_runtime::traits::Block as BlockT;
+
+use std::{
+	cmp,
+	collections::{hash_map::Entry, HashMap, HashSet},
+	fs,
+	future::Future,
+	io, iter,
+	pin::Pin,
+	sync::{
+		atomic::{AtomicUsize, Ordering},
+		Arc,
+	},
+	time::{Duration, Instant},
+};
+
+mod discovery;
+mod peerstore;
+mod service;
+mod shim;
+
/// Litep2p bandwidth sink.
struct Litep2pBandwidthSink {
	/// Underlying `litep2p` bandwidth sink providing the raw inbound/outbound byte counts.
	sink: litep2p::BandwidthSink,
}
+
+impl BandwidthSink for Litep2pBandwidthSink {
+	fn total_inbound(&self) -> u64 {
+		self.sink.inbound() as u64
+	}
+
+	fn total_outbound(&self) -> u64 {
+		self.sink.outbound() as u64
+	}
+}
+
/// Litep2p task executor.
struct Litep2pExecutor {
	/// Closure that spawns the given boxed future onto the host runtime.
	executor: Box<dyn Fn(Pin<Box<dyn Future<Output = ()> + Send>>) + Send + Sync>,
}
+
+impl Executor for Litep2pExecutor {
+	fn run(&self, future: Pin<Box<dyn Future<Output = ()> + Send>>) {
+		(self.executor)(future)
+	}
+
+	fn run_with_name(&self, _: &'static str, future: Pin<Box<dyn Future<Output = ()> + Send>>) {
+		(self.executor)(future)
+	}
+}
+
+/// Logging target for the file.
+const LOG_TARGET: &str = "sub-libp2p";
+
/// Peer context.
struct ConnectionContext {
	/// Active endpoints of the peer, keyed by connection ID.
	endpoints: HashMap<ConnectionId, Endpoint>,

	/// Number of active connections.
	num_connections: usize,
}
+
/// Networking backend for `litep2p`.
pub struct Litep2pNetworkBackend {
	/// Main `litep2p` object.
	litep2p: Litep2p,

	/// `NetworkService` implementation for `Litep2pNetworkBackend`.
	network_service: Arc<dyn NetworkService>,

	/// RX channel for receiving commands from `Litep2pNetworkService`.
	cmd_rx: TracingUnboundedReceiver<NetworkServiceCommand>,

	/// `Peerset` handles to notification protocols.
	peerset_handles: HashMap<ProtocolName, ProtocolControlHandle>,

	/// Pending `GET_VALUE` queries, mapping query ID to the record key and the time
	/// the query was started (used for the Kademlia query-duration metric).
	pending_get_values: HashMap<QueryId, (RecordKey, Instant)>,

	/// Pending `PUT_VALUE` queries, mapping query ID to the record key and the time
	/// the query was started (used for the Kademlia query-duration metric).
	pending_put_values: HashMap<QueryId, (RecordKey, Instant)>,

	/// Discovery.
	discovery: Discovery,

	/// Number of connected peers, shared with the Prometheus metric sources.
	num_connected: Arc<AtomicUsize>,

	/// Connected peers and their endpoints.
	peers: HashMap<litep2p::PeerId, ConnectionContext>,

	/// Peerstore.
	peerstore_handle: Arc<dyn PeerStoreProvider>,

	/// Block announce protocol name.
	block_announce_protocol: ProtocolName,

	/// Sender for DHT events.
	event_streams: out_events::OutChannels,

	/// Prometheus metrics.
	metrics: Option<Metrics>,

	/// External addresses, shared with `Litep2pNetworkService`.
	external_addresses: Arc<RwLock<HashSet<Multiaddr>>>,
}
+
+impl Litep2pNetworkBackend {
+	/// From an iterator of multiaddress(es), parse and group all addresses of peers
+	/// so that litep2p can consume the information easily.
+	fn parse_addresses(
+		addresses: impl Iterator<Item = Multiaddr>,
+	) -> HashMap<PeerId, Vec<Multiaddr>> {
+		addresses
+			.into_iter()
+			.filter_map(|address| match address.iter().next() {
+				Some(
+					Protocol::Dns(_) |
+					Protocol::Dns4(_) |
+					Protocol::Dns6(_) |
+					Protocol::Ip6(_) |
+					Protocol::Ip4(_),
+				) => match address.iter().find(|protocol| std::matches!(protocol, Protocol::P2p(_)))
+				{
+					Some(Protocol::P2p(multihash)) => PeerId::from_multihash(multihash)
+						.map_or(None, |peer| Some((peer, Some(address)))),
+					_ => None,
+				},
+				Some(Protocol::P2p(multihash)) =>
+					PeerId::from_multihash(multihash).map_or(None, |peer| Some((peer, None))),
+				_ => None,
+			})
+			.fold(HashMap::new(), |mut acc, (peer, maybe_address)| {
+				let entry = acc.entry(peer).or_default();
+				maybe_address.map(|address| entry.push(address));
+
+				acc
+			})
+	}
+
+	/// Add new known addresses to `litep2p` and return the parsed peer IDs.
+	fn add_addresses(&mut self, peers: impl Iterator<Item = Multiaddr>) -> HashSet<PeerId> {
+		Self::parse_addresses(peers.into_iter())
+			.into_iter()
+			.filter_map(|(peer, addresses)| {
+				// `peers` contained multiaddress in the form `/p2p/<peer ID>`
+				if addresses.is_empty() {
+					return Some(peer)
+				}
+
+				if self.litep2p.add_known_address(peer.into(), addresses.clone().into_iter()) == 0 {
+					log::warn!(
+						target: LOG_TARGET,
+						"couldn't add any addresses for {peer:?} and it won't be added as reserved peer",
+					);
+					return None
+				}
+
+				self.peerstore_handle.add_known_peer(peer);
+				Some(peer)
+			})
+			.collect()
+	}
+}
+
+impl Litep2pNetworkBackend {
+	/// Get `litep2p` keypair from `NodeKeyConfig`.
+	fn get_keypair(node_key: &NodeKeyConfig) -> Result<(Keypair, litep2p::PeerId), Error> {
+		let secret = libp2p::identity::Keypair::try_into_ed25519(node_key.clone().into_keypair()?)
+			.map_err(|error| {
+				log::error!(target: LOG_TARGET, "failed to convert to ed25519: {error:?}");
+				Error::Io(io::ErrorKind::InvalidInput.into())
+			})?
+			.secret();
+
+		let mut secret = secret.as_ref().iter().cloned().collect::<Vec<_>>();
+		let secret = SecretKey::from_bytes(&mut secret)
+			.map_err(|_| Error::Io(io::ErrorKind::InvalidInput.into()))?;
+		let local_identity = Keypair::from(secret);
+		let local_public = local_identity.public();
+		let local_peer_id = local_public.to_peer_id();
+
+		Ok((local_identity, local_peer_id))
+	}
+
+	/// Configure transport protocols for `Litep2pNetworkBackend`.
+	fn configure_transport<B: BlockT + 'static, H: ExHashT>(
+		config: &FullNetworkConfiguration<B, H, Self>,
+	) -> ConfigBuilder {
+		let _ = match config.network_config.transport {
+			TransportConfig::MemoryOnly => panic!("memory transport not supported"),
+			TransportConfig::Normal { .. } => false,
+		};
+		let config_builder = ConfigBuilder::new();
+
+		// The yamux buffer size limit is configured to be equal to the maximum frame size
+		// of all protocols. 10 bytes are added to each limit for the length prefix that
+		// is not included in the upper layer protocols limit but is still present in the
+		// yamux buffer. These 10 bytes correspond to the maximum size required to encode
+		// a variable-length-encoding 64bits number. In other words, we make the
+		// assumption that no notification larger than 2^64 will ever be sent.
+		let yamux_maximum_buffer_size = {
+			let requests_max = config
+				.request_response_protocols
+				.iter()
+				.map(|cfg| usize::try_from(cfg.max_request_size).unwrap_or(usize::MAX));
+			let responses_max = config
+				.request_response_protocols
+				.iter()
+				.map(|cfg| usize::try_from(cfg.max_response_size).unwrap_or(usize::MAX));
+			let notifs_max = config
+				.notification_protocols
+				.iter()
+				.map(|cfg| usize::try_from(cfg.max_notification_size()).unwrap_or(usize::MAX));
+
+			// A "default" max is added to cover all the other protocols: ping, identify,
+			// kademlia, block announces, and transactions.
+			let default_max = cmp::max(
+				1024 * 1024,
+				usize::try_from(protocol::BLOCK_ANNOUNCES_TRANSACTIONS_SUBSTREAM_SIZE)
+					.unwrap_or(usize::MAX),
+			);
+
+			iter::once(default_max)
+				.chain(requests_max)
+				.chain(responses_max)
+				.chain(notifs_max)
+				.max()
+				.expect("iterator known to always yield at least one element; qed")
+				.saturating_add(10)
+		};
+
+		let yamux_config = {
+			let mut yamux_config = litep2p::yamux::Config::default();
+			// Enable proper flow-control: window updates are only sent when
+			// buffered data has been consumed.
+			yamux_config.set_window_update_mode(litep2p::yamux::WindowUpdateMode::OnRead);
+			yamux_config.set_max_buffer_size(yamux_maximum_buffer_size);
+
+			if let Some(yamux_window_size) = config.network_config.yamux_window_size {
+				yamux_config.set_receive_window(yamux_window_size);
+			}
+
+			yamux_config
+		};
+
+		let (tcp, websocket): (Vec<Option<_>>, Vec<Option<_>>) = config
+			.network_config
+			.listen_addresses
+			.iter()
+			.filter_map(|address| {
+				let mut iter = address.iter();
+
+				match iter.next() {
+					Some(Protocol::Ip4(_) | Protocol::Ip6(_)) => {},
+					protocol => {
+						log::error!(
+							target: LOG_TARGET,
+							"unknown protocol {protocol:?}, ignoring {address:?}",
+						);
+
+						return None
+					},
+				}
+
+				match iter.next() {
+					Some(Protocol::Tcp(_)) => match iter.next() {
+						Some(Protocol::Ws(_) | Protocol::Wss(_)) =>
+							Some((None, Some(address.clone()))),
+						Some(Protocol::P2p(_)) | None => Some((Some(address.clone()), None)),
+						protocol => {
+							log::error!(
+								target: LOG_TARGET,
+								"unknown protocol {protocol:?}, ignoring {address:?}",
+							);
+							None
+						},
+					},
+					protocol => {
+						log::error!(
+							target: LOG_TARGET,
+							"unknown protocol {protocol:?}, ignoring {address:?}",
+						);
+						None
+					},
+				}
+			})
+			.unzip();
+
+		config_builder
+			.with_websocket(WebSocketTransportConfig {
+				listen_addresses: websocket.into_iter().flatten().collect(),
+				yamux_config: yamux_config.clone(),
+				..Default::default()
+			})
+			.with_tcp(TcpTransportConfig {
+				listen_addresses: tcp.into_iter().flatten().collect(),
+				yamux_config,
+				..Default::default()
+			})
+	}
+}
+
+#[async_trait::async_trait]
+impl<B: BlockT + 'static, H: ExHashT> NetworkBackend<B, H> for Litep2pNetworkBackend {
+	type NotificationProtocolConfig = NotificationProtocolConfig;
+	type RequestResponseProtocolConfig = RequestResponseConfig;
+	type NetworkService<Block, Hash> = Arc<Litep2pNetworkService>;
+	type PeerStore = Peerstore;
+	type BitswapConfig = BitswapConfig;
+
+	fn new(mut params: Params<B, H, Self>) -> Result<Self, Error>
+	where
+		Self: Sized,
+	{
+		let (keypair, local_peer_id) =
+			Self::get_keypair(&params.network_config.network_config.node_key)?;
+		let (cmd_tx, cmd_rx) = tracing_unbounded("mpsc_network_worker", 100_000);
+
+		params.network_config.network_config.boot_nodes = params
+			.network_config
+			.network_config
+			.boot_nodes
+			.into_iter()
+			.filter(|boot_node| boot_node.peer_id != local_peer_id.into())
+			.collect();
+		params.network_config.network_config.default_peers_set.reserved_nodes = params
+			.network_config
+			.network_config
+			.default_peers_set
+			.reserved_nodes
+			.into_iter()
+			.filter(|reserved_node| {
+				if reserved_node.peer_id == local_peer_id.into() {
+					log::warn!(
+						target: LOG_TARGET,
+						"Local peer ID used in reserved node, ignoring: {reserved_node}",
+					);
+					false
+				} else {
+					true
+				}
+			})
+			.collect();
+
+		if let Some(path) = &params.network_config.network_config.net_config_path {
+			fs::create_dir_all(path)?;
+		}
+
+		log::info!(target: LOG_TARGET, "Local node identity is: {local_peer_id}");
+		log::info!(target: LOG_TARGET, "Running litep2p network backend");
+
+		params.network_config.sanity_check_addresses()?;
+		params.network_config.sanity_check_bootnodes()?;
+
+		let mut config_builder =
+			Self::configure_transport(&params.network_config).with_keypair(keypair.clone());
+		let known_addresses = params.network_config.known_addresses();
+		let peer_store_handle = params.network_config.peer_store_handle();
+		let executor = Arc::new(Litep2pExecutor { executor: params.executor });
+
+		let FullNetworkConfiguration {
+			notification_protocols,
+			request_response_protocols,
+			network_config,
+			..
+		} = params.network_config;
+
+		// initialize notification protocols
+		//
+		// pass the protocol configuration to `Litep2pConfigBuilder` and save the TX channel
+		// to the protocol's `Peerset` together with the protocol name to allow other subsystems
+		// of Polkadot SDK to control connectivity of the notification protocol
+		let block_announce_protocol = params.block_announce_config.protocol_name().clone();
+		let mut notif_protocols = HashMap::from_iter([(
+			params.block_announce_config.protocol_name().clone(),
+			params.block_announce_config.handle,
+		)]);
+
+		// handshake for all but the syncing protocol is set to node role
+		config_builder = notification_protocols
+			.into_iter()
+			.fold(config_builder, |config_builder, mut config| {
+				config.config.set_handshake(Roles::from(&params.role).encode());
+				notif_protocols.insert(config.protocol_name, config.handle);
+
+				config_builder.with_notification_protocol(config.config)
+			})
+			.with_notification_protocol(params.block_announce_config.config);
+
+		// initialize request-response protocols
+		let metrics = match &params.metrics_registry {
+			Some(registry) => Some(register_without_sources(registry)?),
+			None => None,
+		};
+
+		// create channels that are used to send request before initializing protocols so the
+		// senders can be passed onto all request-response protocols
+		//
+		// all protocols must have each others' senders so they can send the fallback request in
+		// case the main protocol is not supported by the remote peer and user specified a fallback
+		let (mut request_response_receivers, request_response_senders): (
+			HashMap<_, _>,
+			HashMap<_, _>,
+		) = request_response_protocols
+			.iter()
+			.map(|config| {
+				let (tx, rx) = tracing_unbounded("outbound-requests", 10_000);
+				((config.protocol_name.clone(), rx), (config.protocol_name.clone(), tx))
+			})
+			.unzip();
+
+		config_builder = request_response_protocols.into_iter().fold(
+			config_builder,
+			|config_builder, config| {
+				let (protocol_config, handle) = RequestResponseConfigBuilder::new(
+					Litep2pProtocolName::from(config.protocol_name.clone()),
+				)
+				.with_max_size(cmp::max(config.max_request_size, config.max_response_size) as usize)
+				.with_fallback_names(config.fallback_names.into_iter().map(From::from).collect())
+				.with_timeout(config.request_timeout)
+				.build();
+
+				let protocol = RequestResponseProtocol::new(
+					config.protocol_name.clone(),
+					handle,
+					Arc::clone(&peer_store_handle),
+					config.inbound_queue,
+					request_response_receivers
+						.remove(&config.protocol_name)
+						.expect("receiver exists as it was just added and there are no duplicate protocols; qed"),
+					request_response_senders.clone(),
+					metrics.clone(),
+				);
+
+				executor.run(Box::pin(async move {
+					protocol.run().await;
+				}));
+
+				config_builder.with_request_response_protocol(protocol_config)
+			},
+		);
+
+		// collect known addresses
+		let known_addresses: HashMap<litep2p::PeerId, Vec<Multiaddr>> =
+			known_addresses.into_iter().fold(HashMap::new(), |mut acc, (peer, address)| {
+				let address = match address.iter().last() {
+					Some(Protocol::Ws(_) | Protocol::Wss(_) | Protocol::Tcp(_)) =>
+						address.with(Protocol::P2p(peer.into())),
+					Some(Protocol::P2p(_)) => address,
+					_ => return acc,
+				};
+
+				acc.entry(peer.into()).or_default().push(address);
+				peer_store_handle.add_known_peer(peer);
+
+				acc
+			});
+
+		// enable ipfs ping, identify and kademlia, and potentially mdns if user enabled it
+		let listen_addresses = Arc::new(Default::default());
+		let (discovery, ping_config, identify_config, kademlia_config, maybe_mdns_config) =
+			Discovery::new(
+				&network_config,
+				params.genesis_hash,
+				params.fork_id.as_deref(),
+				&params.protocol_id,
+				known_addresses.clone(),
+				Arc::clone(&listen_addresses),
+				Arc::clone(&peer_store_handle),
+			);
+
+		config_builder = config_builder
+			.with_known_addresses(known_addresses.clone().into_iter())
+			.with_libp2p_ping(ping_config)
+			.with_libp2p_identify(identify_config)
+			.with_libp2p_kademlia(kademlia_config)
+			.with_executor(executor);
+
+		if let Some(config) = maybe_mdns_config {
+			config_builder = config_builder.with_mdns(config);
+		}
+
+		if let Some(config) = params.bitswap_config {
+			config_builder = config_builder.with_libp2p_bitswap(config);
+		}
+
+		let litep2p =
+			Litep2p::new(config_builder.build()).map_err(|error| Error::Litep2p(error))?;
+
+		let external_addresses: Arc<RwLock<HashSet<Multiaddr>>> = Arc::new(RwLock::new(
+			HashSet::from_iter(network_config.public_addresses.iter().cloned()),
+		));
+		litep2p.listen_addresses().for_each(|address| {
+			log::debug!(target: LOG_TARGET, "listening on: {address}");
+
+			listen_addresses.write().insert(address.clone());
+		});
+
+		let network_service = Arc::new(Litep2pNetworkService::new(
+			local_peer_id,
+			keypair.clone(),
+			cmd_tx,
+			Arc::clone(&peer_store_handle),
+			notif_protocols.clone(),
+			block_announce_protocol.clone(),
+			request_response_senders,
+			Arc::clone(&listen_addresses),
+			Arc::clone(&external_addresses),
+		));
+
+		// register rest of the metrics now that `Litep2p` has been created
+		let num_connected = Arc::new(Default::default());
+		let bandwidth: Arc<dyn BandwidthSink> =
+			Arc::new(Litep2pBandwidthSink { sink: litep2p.bandwidth_sink() });
+
+		if let Some(registry) = &params.metrics_registry {
+			MetricSources::register(registry, bandwidth, Arc::clone(&num_connected))?;
+		}
+
+		Ok(Self {
+			network_service,
+			cmd_rx,
+			metrics,
+			peerset_handles: notif_protocols,
+			num_connected,
+			discovery,
+			pending_put_values: HashMap::new(),
+			pending_get_values: HashMap::new(),
+			peerstore_handle: peer_store_handle,
+			block_announce_protocol,
+			event_streams: out_events::OutChannels::new(None)?,
+			peers: HashMap::new(),
+			litep2p,
+			external_addresses,
+		})
+	}
+
+	fn network_service(&self) -> Arc<dyn NetworkService> {
+		Arc::clone(&self.network_service)
+	}
+
+	fn peer_store(bootnodes: Vec<sc_network_types::PeerId>) -> Self::PeerStore {
+		Peerstore::new(bootnodes)
+	}
+
+	fn register_notification_metrics(registry: Option<&Registry>) -> NotificationMetrics {
+		NotificationMetrics::new(registry)
+	}
+
+	/// Create Bitswap server.
+	fn bitswap_server(
+		client: Arc<dyn BlockBackend<B> + Send + Sync>,
+	) -> (Pin<Box<dyn Future<Output = ()> + Send>>, Self::BitswapConfig) {
+		BitswapServer::new(client)
+	}
+
+	/// Create notification protocol configuration for `protocol`.
+	fn notification_config(
+		protocol_name: ProtocolName,
+		fallback_names: Vec<ProtocolName>,
+		max_notification_size: u64,
+		handshake: Option<NotificationHandshake>,
+		set_config: SetConfig,
+		metrics: NotificationMetrics,
+		peerstore_handle: Arc<dyn PeerStoreProvider>,
+	) -> (Self::NotificationProtocolConfig, Box<dyn NotificationService>) {
+		Self::NotificationProtocolConfig::new(
+			protocol_name,
+			fallback_names,
+			max_notification_size as usize,
+			handshake,
+			set_config,
+			metrics,
+			peerstore_handle,
+		)
+	}
+
+	/// Create request-response protocol configuration.
+	fn request_response_config(
+		protocol_name: ProtocolName,
+		fallback_names: Vec<ProtocolName>,
+		max_request_size: u64,
+		max_response_size: u64,
+		request_timeout: Duration,
+		inbound_queue: Option<async_channel::Sender<IncomingRequest>>,
+	) -> Self::RequestResponseProtocolConfig {
+		Self::RequestResponseProtocolConfig::new(
+			protocol_name,
+			fallback_names,
+			max_request_size,
+			max_response_size,
+			request_timeout,
+			inbound_queue,
+		)
+	}
+
+	/// Start [`Litep2pNetworkBackend`] event loop.
+	async fn run(mut self) {
+		log::debug!(target: LOG_TARGET, "starting litep2p network backend");
+
+		loop {
+			let num_connected_peers = self
+				.peerset_handles
+				.get(&self.block_announce_protocol)
+				.map_or(0usize, |handle| handle.connected_peers.load(Ordering::Relaxed));
+			self.num_connected.store(num_connected_peers, Ordering::Relaxed);
+
+			tokio::select! {
+				command = self.cmd_rx.next() => match command {
+					None => return,
+					Some(command) => match command {
+						NetworkServiceCommand::GetValue{ key } => {
+							let query_id = self.discovery.get_value(key.clone()).await;
+							self.pending_get_values.insert(query_id, (key, Instant::now()));
+						}
+						NetworkServiceCommand::PutValue { key, value } => {
+							let query_id = self.discovery.put_value(key.clone(), value).await;
+							self.pending_put_values.insert(query_id, (key, Instant::now()));
+						}
+						NetworkServiceCommand::EventStream { tx } => {
+							self.event_streams.push(tx);
+						}
+						NetworkServiceCommand::Status { tx } => {
+							let _ = tx.send(NetworkStatus {
+								num_connected_peers: self
+									.peerset_handles
+									.get(&self.block_announce_protocol)
+									.map_or(0usize, |handle| handle.connected_peers.load(Ordering::Relaxed)),
+								total_bytes_inbound: self.litep2p.bandwidth_sink().inbound() as u64,
+								total_bytes_outbound: self.litep2p.bandwidth_sink().outbound() as u64,
+							});
+						}
+						NetworkServiceCommand::AddPeersToReservedSet {
+							protocol,
+							peers,
+						} => {
+							let peers = self.add_addresses(peers.into_iter());
+
+							match self.peerset_handles.get(&protocol) {
+								Some(handle) => {
+									let _ = handle.tx.unbounded_send(PeersetCommand::AddReservedPeers { peers });
+								}
+								None => log::warn!(target: LOG_TARGET, "protocol {protocol} doens't exist"),
+							};
+						}
+						NetworkServiceCommand::AddKnownAddress { peer, mut address } => {
+							if !address.iter().any(|protocol| std::matches!(protocol, Protocol::P2p(_))) {
+								address.push(Protocol::P2p(peer.into()));
+							}
+
+							if self.litep2p.add_known_address(peer.into(), iter::once(address.clone())) == 0usize {
+								log::warn!(
+									target: LOG_TARGET,
+									"couldn't add known address ({address}) for {peer:?}, unsupported transport"
+								);
+							}
+						},
+						NetworkServiceCommand::SetReservedPeers { protocol, peers } => {
+							let peers = self.add_addresses(peers.into_iter());
+
+							match self.peerset_handles.get(&protocol) {
+								Some(handle) => {
+									let _ = handle.tx.unbounded_send(PeersetCommand::SetReservedPeers { peers });
+								}
+								None => log::warn!(target: LOG_TARGET, "protocol {protocol} doens't exist"),
+							}
+
+						},
+						NetworkServiceCommand::DisconnectPeer {
+							protocol,
+							peer,
+						} => {
+							let Some(handle) = self.peerset_handles.get(&protocol) else {
+								log::warn!(target: LOG_TARGET, "protocol {protocol} doens't exist");
+								continue
+							};
+
+							let _ = handle.tx.unbounded_send(PeersetCommand::DisconnectPeer { peer });
+						}
+						NetworkServiceCommand::SetReservedOnly {
+							protocol,
+							reserved_only,
+						} => {
+							let Some(handle) = self.peerset_handles.get(&protocol) else {
+								log::warn!(target: LOG_TARGET, "protocol {protocol} doens't exist");
+								continue
+							};
+
+							let _ = handle.tx.unbounded_send(PeersetCommand::SetReservedOnly { reserved_only });
+						}
+						NetworkServiceCommand::RemoveReservedPeers {
+							protocol,
+							peers,
+						} => {
+							let Some(handle) = self.peerset_handles.get(&protocol) else {
+								log::warn!(target: LOG_TARGET, "protocol {protocol} doens't exist");
+								continue
+							};
+
+							let _ = handle.tx.unbounded_send(PeersetCommand::RemoveReservedPeers { peers });
+						}
+					}
+				},
+				event = self.discovery.next() => match event {
+					None => return,
+					Some(DiscoveryEvent::Discovered { addresses }) => {
+						// if at least one address was added for the peer, report the peer to `Peerstore`
+						for (peer, addresses) in Litep2pNetworkBackend::parse_addresses(addresses.into_iter()) {
+							if self.litep2p.add_known_address(peer.into(), addresses.clone().into_iter()) > 0 {
+								self.peerstore_handle.add_known_peer(peer);
+							}
+						}
+					}
+					Some(DiscoveryEvent::RoutingTableUpdate { peers }) => {
+						for peer in peers {
+							self.peerstore_handle.add_known_peer(peer.into());
+						}
+					}
+					Some(DiscoveryEvent::GetRecordSuccess { query_id, record }) => {
+						match self.pending_get_values.remove(&query_id) {
+							None => log::warn!(
+								target: LOG_TARGET,
+								"`GET_VALUE` succeeded for a non-existent query",
+							),
+							Some((_key, started)) => {
+								log::trace!(
+									target: LOG_TARGET,
+									"`GET_VALUE` for {:?} ({query_id:?}) succeeded",
+									record.key,
+								);
+
+								self.event_streams.send(Event::Dht(
+									DhtEvent::ValueFound(vec![
+										(libp2p::kad::RecordKey::new(&record.key), record.value)
+									])
+								));
+
+								if let Some(ref metrics) = self.metrics {
+									metrics
+										.kademlia_query_duration
+										.with_label_values(&["value-get"])
+										.observe(started.elapsed().as_secs_f64());
+								}
+							}
+						}
+					}
+					Some(DiscoveryEvent::PutRecordSuccess { query_id }) => {
+						match self.pending_put_values.remove(&query_id) {
+							None => log::warn!(
+								target: LOG_TARGET,
+								"`PUT_VALUE` succeeded for a non-existent query",
+							),
+							Some((key, started)) => {
+								log::trace!(
+									target: LOG_TARGET,
+									"`PUT_VALUE` for {key:?} ({query_id:?}) succeeded",
+								);
+
+								if let Some(ref metrics) = self.metrics {
+									metrics
+										.kademlia_query_duration
+										.with_label_values(&["value-put"])
+										.observe(started.elapsed().as_secs_f64());
+								}
+							}
+						}
+					}
+					Some(DiscoveryEvent::QueryFailed { query_id }) => {
+						match self.pending_get_values.remove(&query_id) {
+							None => match self.pending_put_values.remove(&query_id) {
+								None => log::warn!(
+									target: LOG_TARGET,
+									"non-existent query failed ({query_id:?})",
+								),
+								Some((key, started)) => {
+									log::debug!(
+										target: LOG_TARGET,
+										"`PUT_VALUE` ({query_id:?}) failed for key {key:?}",
+									);
+
+									self.event_streams.send(Event::Dht(
+										DhtEvent::ValuePutFailed(libp2p::kad::RecordKey::new(&key))
+									));
+
+									if let Some(ref metrics) = self.metrics {
+										metrics
+											.kademlia_query_duration
+											.with_label_values(&["value-put-failed"])
+											.observe(started.elapsed().as_secs_f64());
+									}
+								}
+							}
+							Some((key, started)) => {
+								log::debug!(
+									target: LOG_TARGET,
+									"`GET_VALUE` ({query_id:?}) failed for key {key:?}",
+								);
+
+								self.event_streams.send(Event::Dht(
+									DhtEvent::ValueNotFound(libp2p::kad::RecordKey::new(&key))
+								));
+
+								if let Some(ref metrics) = self.metrics {
+									metrics
+										.kademlia_query_duration
+										.with_label_values(&["value-get-failed"])
+										.observe(started.elapsed().as_secs_f64());
+								}
+							}
+						}
+					}
+					Some(DiscoveryEvent::Identified { peer, listen_addresses, supported_protocols, .. }) => {
+						self.discovery.add_self_reported_address(peer, supported_protocols, listen_addresses).await;
+					}
+					Some(DiscoveryEvent::ExternalAddressDiscovered { address }) => {
+						let mut addresses = self.external_addresses.write();
+
+						if addresses.insert(address.clone()) {
+							log::info!(target: LOG_TARGET, "discovered new external address for our node: {address}");
+						}
+					}
+					Some(DiscoveryEvent::Ping { peer, rtt }) => {
+						log::trace!(
+							target: LOG_TARGET,
+							"ping time with {peer:?}: {rtt:?}",
+						);
+					}
+				},
+				event = self.litep2p.next_event() => match event {
+					Some(Litep2pEvent::ConnectionEstablished { peer, endpoint }) => {
+						let Some(metrics) = &self.metrics else {
+							continue;
+						};
+
+						let direction = match endpoint {
+							Endpoint::Dialer { .. } => "out",
+							Endpoint::Listener { .. } => "in",
+						};
+						metrics.connections_opened_total.with_label_values(&[direction]).inc();
+
+						match self.peers.entry(peer) {
+							Entry::Vacant(entry) => {
+								entry.insert(ConnectionContext {
+									endpoints: HashMap::from_iter([(endpoint.connection_id(), endpoint)]),
+									num_connections: 1usize,
+								});
+								metrics.distinct_peers_connections_opened_total.inc();
+							}
+							Entry::Occupied(entry) => {
+								let entry = entry.into_mut();
+								entry.num_connections += 1;
+								entry.endpoints.insert(endpoint.connection_id(), endpoint);
+							}
+						}
+					}
+					Some(Litep2pEvent::ConnectionClosed { peer, connection_id }) => {
+						let Some(metrics) = &self.metrics else {
+							continue;
+						};
+
+						let Some(context) = self.peers.get_mut(&peer) else {
+							log::debug!(target: LOG_TARGET, "unknown peer disconnected: {peer:?} ({connection_id:?})");
+							continue
+						};
+
+						let direction = match context.endpoints.remove(&connection_id) {
+							None => {
+								log::debug!(target: LOG_TARGET, "connection {connection_id:?} doesn't exist for {peer:?} ");
+								continue
+							}
+							Some(endpoint) => {
+								context.num_connections -= 1;
+
+								match endpoint {
+									Endpoint::Dialer { .. } => "out",
+									Endpoint::Listener { .. } => "in",
+								}
+							}
+						};
+
+						metrics.connections_closed_total.with_label_values(&[direction, "actively-closed"]).inc();
+
+						if context.num_connections == 0 {
+							self.peers.remove(&peer);
+							metrics.distinct_peers_connections_closed_total.inc();
+						}
+					}
+					Some(Litep2pEvent::DialFailure { address, error }) => {
+						log::trace!(
+							target: LOG_TARGET,
+							"failed to dial peer at {address:?}: {error:?}",
+						);
+
+						let reason = match error {
+							Litep2pError::PeerIdMismatch(_, _) => "invalid-peer-id",
+							Litep2pError::Timeout | Litep2pError::TransportError(_) |
+							Litep2pError::IoError(_) | Litep2pError::WebSocket(_) => "transport-error",
+							_ => "other",
+						};
+
+						if let Some(metrics) = &self.metrics {
+							metrics.pending_connections_errors_total.with_label_values(&[reason]).inc();
+						}
+					}
+					_ => {}
+				},
+			}
+		}
+	}
+}
diff --git a/substrate/client/network/src/litep2p/peerstore.rs b/substrate/client/network/src/litep2p/peerstore.rs
new file mode 100644
index 0000000000000000000000000000000000000000..dd377ea09af9b3c9769f06bdba8078a4fbc41a8e
--- /dev/null
+++ b/substrate/client/network/src/litep2p/peerstore.rs
@@ -0,0 +1,391 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! `Peerstore` implementation for `litep2p`.
+//!
+//! `Peerstore` is responsible for storing information about remote peers
+//! such as their addresses, reputations, supported protocols etc.
+
+use crate::{
+	peer_store::{PeerStoreProvider, ProtocolHandle},
+	service::traits::PeerStore,
+	ObservedRole, ReputationChange,
+};
+
+use parking_lot::Mutex;
+use wasm_timer::Delay;
+
+use sc_network_types::PeerId;
+
+use std::{
+	collections::{HashMap, HashSet},
+	sync::Arc,
+	time::{Duration, Instant},
+};
+
+/// Logging target for the file.
+const LOG_TARGET: &str = "sub-libp2p::peerstore";
+
+/// We don't accept nodes whose reputation is under this value.
+pub const BANNED_THRESHOLD: i32 = 82 * (i32::MIN / 100);
+
+/// Relative decrement of a reputation value that is applied every second. I.e., for inverse
+/// decrement of 50 we decrease absolute value of the reputation by 1/50. This corresponds to a
+/// factor of `k = 0.98`. It takes ~ `ln(0.5) / ln(k)` seconds to reduce the reputation by half,
+/// or 34.3 seconds for the values above. In this setup the maximum allowed absolute value of
+/// `i32::MAX` becomes 0 in ~1100 seconds (actually less due to integer arithmetic).
+const INVERSE_DECREMENT: i32 = 50;
+
+/// Amount of time between the moment we last updated the [`PeerStore`] entry and the moment we
+/// remove it, once the reputation value reaches 0.
+const FORGET_AFTER: Duration = Duration::from_secs(3600);
+
+/// Peer information.
+#[derive(Debug, Clone, Copy)]
+struct PeerInfo {
+	/// Reputation of the peer.
+	reputation: i32,
+
+	/// Instant when the peer was last updated.
+	last_updated: Instant,
+
+	/// Role of the peer, if known.
+	role: Option<ObservedRole>,
+}
+
+impl Default for PeerInfo {
+	fn default() -> Self {
+		Self { reputation: 0i32, last_updated: Instant::now(), role: None }
+	}
+}
+
+impl PeerInfo {
+	/// Whether the peer's reputation is below [`BANNED_THRESHOLD`].
+	fn is_banned(&self) -> bool {
+		self.reputation < BANNED_THRESHOLD
+	}
+
+	/// Decay the reputation towards zero by applying `seconds_passed` decrement steps.
+	///
+	/// Each step removes `1 / INVERSE_DECREMENT` of the current value — at least one
+	/// unit, so small magnitudes still converge — and the loop stops early once zero is
+	/// reached.
+	fn decay_reputation(&mut self, seconds_passed: u64) {
+		// Note that decaying the reputation value happens "on its own",
+		// so we don't do `bump_last_updated()`.
+		for _ in 0..seconds_passed {
+			let mut diff = self.reputation / INVERSE_DECREMENT;
+			if diff == 0 && self.reputation < 0 {
+				// Proportional step rounded down to nothing: force one unit towards zero.
+				diff = -1;
+			} else if diff == 0 && self.reputation > 0 {
+				diff = 1;
+			}
+
+			self.reputation = self.reputation.saturating_sub(diff);
+
+			if self.reputation == 0 {
+				break
+			}
+		}
+	}
+}
+
+/// Shared state behind a [`PeerstoreHandle`], protected by a mutex.
+#[derive(Debug, Default)]
+pub struct PeerstoreHandleInner {
+	/// Known peers and their bookkeeping information.
+	peers: HashMap<PeerId, PeerInfo>,
+	/// Protocols that are asked to disconnect a peer once it gets banned.
+	protocols: Vec<Arc<dyn ProtocolHandle>>,
+}
+
+/// Cloneable, thread-safe handle to the peer store, shared between protocols.
+#[derive(Debug, Clone, Default)]
+pub struct PeerstoreHandle(Arc<Mutex<PeerstoreHandleInner>>);
+
+impl PeerstoreHandle {
+	/// Add known peer to [`Peerstore`].
+	///
+	/// If the peer is already known, only its `last_updated` timestamp is refreshed and
+	/// the accumulated reputation and role are preserved. This matches the semantics of
+	/// [`PeerStoreProvider::add_known_peer`] and prevents a re-announced peer (e.g. a
+	/// bootnode registered twice) from having its reputation — including a ban — wiped
+	/// by re-inserting a default [`PeerInfo`].
+	pub fn add_known_peer(&self, peer: PeerId) {
+		self.0.lock().peers.entry(peer).or_default().last_updated = Instant::now();
+	}
+
+	/// Get the number of currently known peers.
+	pub fn peer_count(&self) -> usize {
+		self.0.lock().peers.len()
+	}
+
+	/// Decay all peer reputations towards zero by `seconds_passed` decrement steps and
+	/// prune entries whose reputation reached zero and whose last update is older than
+	/// [`FORGET_AFTER`].
+	fn progress_time(&self, seconds_passed: u64) {
+		if seconds_passed == 0 {
+			return
+		}
+
+		let mut lock = self.0.lock();
+
+		// Drive reputation values towards 0.
+		lock.peers
+			.iter_mut()
+			.for_each(|(_, info)| info.decay_reputation(seconds_passed));
+
+		// Retain only entries with non-zero reputation values or not expired ones.
+		let now = Instant::now();
+		lock.peers
+			.retain(|_, info| info.reputation != 0 || info.last_updated + FORGET_AFTER > now);
+	}
+}
+
+impl PeerStoreProvider for PeerstoreHandle {
+	/// Check whether `peer` is currently banned, i.e., its reputation is below
+	/// [`BANNED_THRESHOLD`]. Unknown peers are not banned.
+	fn is_banned(&self, peer: &PeerId) -> bool {
+		self.0.lock().peers.get(peer).map_or(false, |info| info.is_banned())
+	}
+
+	/// Register a protocol handle to disconnect peers whose reputation drops below the threshold.
+	fn register_protocol(&self, protocol_handle: Arc<dyn ProtocolHandle>) {
+		self.0.lock().protocols.push(protocol_handle);
+	}
+
+	/// Report peer disconnection for reputation adjustment.
+	fn report_disconnect(&self, _peer: PeerId) {
+		unimplemented!();
+	}
+
+	/// Adjust peer reputation.
+	///
+	/// If the adjusted reputation falls below [`BANNED_THRESHOLD`], all registered
+	/// protocols are asked to disconnect the peer.
+	fn report_peer(&self, peer: PeerId, reputation_change: ReputationChange) {
+		let mut lock = self.0.lock();
+
+		log::trace!(target: LOG_TARGET, "report peer {reputation_change:?}");
+
+		// Single map lookup: insert a default entry for an unknown peer and apply the
+		// reputation delta in place. An unknown peer ends up with exactly
+		// `reputation_change.value`, same as inserting it explicitly. Note that
+		// `last_updated` is intentionally not refreshed for already-known peers.
+		let info = lock.peers.entry(peer).or_default();
+		info.reputation = info.reputation.saturating_add(reputation_change.value);
+		let is_banned = info.is_banned();
+
+		if is_banned {
+			log::warn!(target: LOG_TARGET, "{peer:?} banned, disconnecting, reason: {}", reputation_change.reason);
+
+			for sender in &lock.protocols {
+				sender.disconnect_peer(peer);
+			}
+		}
+	}
+
+	/// Set peer role.
+	fn set_peer_role(&self, peer: &PeerId, role: ObservedRole) {
+		self.0.lock().peers.entry(*peer).or_default().role = Some(role);
+	}
+
+	/// Get peer reputation. Unknown peers have a neutral (zero) reputation.
+	fn peer_reputation(&self, peer: &PeerId) -> i32 {
+		self.0.lock().peers.get(peer).map_or(0i32, |info| info.reputation)
+	}
+
+	/// Get peer role, if available.
+	fn peer_role(&self, peer: &PeerId) -> Option<ObservedRole> {
+		self.0.lock().peers.get(peer).and_then(|info| info.role)
+	}
+
+	/// Get candidates with highest reputations for initiating outgoing connections.
+	///
+	/// Banned peers and peers in `ignored` are excluded. Ties between peers with equal
+	/// reputation are broken by `HashMap` iteration order, i.e. arbitrarily.
+	fn outgoing_candidates(&self, count: usize, ignored: HashSet<PeerId>) -> Vec<PeerId> {
+		let handle = self.0.lock();
+
+		let mut candidates = handle
+			.peers
+			.iter()
+			.filter_map(|(peer, info)| {
+				(!ignored.contains(&peer) && !info.is_banned()).then_some((*peer, info.reputation))
+			})
+			.collect::<Vec<(PeerId, _)>>();
+		// Sort by reputation, best first.
+		candidates.sort_by(|(_, a), (_, b)| b.cmp(a));
+		candidates
+			.into_iter()
+			.take(count)
+			.map(|(peer, _score)| peer)
+			.collect::<Vec<_>>()
+	}
+
+	/// Get the number of known peers.
+	///
+	/// This number might not include some connected peers in rare cases when their reputation
+	/// was not updated for one hour, because their entries in [`PeerStore`] were dropped.
+	fn num_known_peers(&self) -> usize {
+		self.0.lock().peers.len()
+	}
+
+	/// Add known peer, preserving any existing reputation and role.
+	fn add_known_peer(&self, peer: PeerId) {
+		self.0.lock().peers.entry(peer).or_default().last_updated = Instant::now();
+	}
+}
+
+/// `Peerstore` handle for testing.
+///
+/// This instance of `Peerstore` is not shared between protocols.
+#[cfg(test)]
+pub fn peerstore_handle_test() -> PeerstoreHandle {
+	// The derived `Default` produces the same empty, non-shared inner state.
+	PeerstoreHandle::default()
+}
+
+/// Peerstore implementation.
+pub struct Peerstore {
+	/// Handle to `Peerstore`.
+	peerstore_handle: PeerstoreHandle,
+}
+
+impl Peerstore {
+	/// Create new [`Peerstore`] with `bootnodes` registered as known peers.
+	pub fn new(bootnodes: Vec<PeerId>) -> Self {
+		let peerstore_handle = PeerstoreHandle(Arc::new(Mutex::new(Default::default())));
+
+		for bootnode in bootnodes {
+			peerstore_handle.add_known_peer(bootnode);
+		}
+
+		Self { peerstore_handle }
+	}
+
+	/// Create new [`Peerstore`] from a [`PeerstoreHandle`], registering `bootnodes` as
+	/// known peers in the existing handle.
+	pub fn from_handle(peerstore_handle: PeerstoreHandle, bootnodes: Vec<PeerId>) -> Self {
+		for bootnode in bootnodes {
+			peerstore_handle.add_known_peer(bootnode);
+		}
+
+		Self { peerstore_handle }
+	}
+
+	/// Get mutable reference to the underlying [`PeerstoreHandle`].
+	pub fn handle(&mut self) -> &mut PeerstoreHandle {
+		&mut self.peerstore_handle
+	}
+
+	/// Add known peer to [`Peerstore`].
+	pub fn add_known_peer(&mut self, peer: PeerId) {
+		self.peerstore_handle.add_known_peer(peer);
+	}
+
+	/// Start [`Peerstore`] event loop.
+	///
+	/// Roughly once per second, decays all peer reputations towards zero and prunes stale
+	/// entries. Runs forever; intended to be spawned as a background task.
+	async fn run(self) {
+		let started = Instant::now();
+		let mut latest_time_update = started;
+
+		loop {
+			let now = Instant::now();
+			// We basically do `(now - self.latest_update).as_secs()`, except that by the way we do
+			// it we know that we're not going to miss seconds because of rounding to integers.
+			let seconds_passed = {
+				let elapsed_latest = latest_time_update - started;
+				let elapsed_now = now - started;
+				latest_time_update = now;
+				elapsed_now.as_secs() - elapsed_latest.as_secs()
+			};
+
+			self.peerstore_handle.progress_time(seconds_passed);
+			let _ = Delay::new(Duration::from_secs(1)).await;
+		}
+	}
+}
+
+#[async_trait::async_trait]
+impl PeerStore for Peerstore {
+	/// Get handle to `PeerStore`.
+	// Cloning the handle is cheap: it only bumps the `Arc` refcount, the inner state is
+	// shared with this `Peerstore`.
+	fn handle(&self) -> Arc<dyn PeerStoreProvider> {
+		Arc::new(self.peerstore_handle.clone())
+	}
+
+	/// Start running `PeerStore` event loop.
+	async fn run(self) {
+		self.run().await;
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::PeerInfo;
+
+	// Zero is the fixed point of the decay: no amount of elapsed time changes it.
+	#[test]
+	fn decaying_zero_reputation_yields_zero() {
+		let mut peer_info = PeerInfo::default();
+		assert_eq!(peer_info.reputation, 0);
+
+		peer_info.decay_reputation(1);
+		assert_eq!(peer_info.reputation, 0);
+
+		peer_info.decay_reputation(100_000);
+		assert_eq!(peer_info.reputation, 0);
+	}
+
+	// A positive reputation moves strictly towards zero without overshooting.
+	#[test]
+	fn decaying_positive_reputation_decreases_it() {
+		const INITIAL_REPUTATION: i32 = 100;
+
+		let mut peer_info = PeerInfo::default();
+		peer_info.reputation = INITIAL_REPUTATION;
+
+		peer_info.decay_reputation(1);
+		assert!(peer_info.reputation >= 0);
+		assert!(peer_info.reputation < INITIAL_REPUTATION);
+	}
+
+	// A negative reputation moves strictly towards zero without overshooting.
+	#[test]
+	fn decaying_negative_reputation_increases_it() {
+		const INITIAL_REPUTATION: i32 = -100;
+
+		let mut peer_info = PeerInfo::default();
+		peer_info.reputation = INITIAL_REPUTATION;
+
+		peer_info.decay_reputation(1);
+		assert!(peer_info.reputation <= 0);
+		assert!(peer_info.reputation > INITIAL_REPUTATION);
+	}
+
+	// Even the extreme `i32::MAX` reputation decays to exactly zero within 1000 steps
+	// (see the `INVERSE_DECREMENT` doc comment for the expected time bound).
+	#[test]
+	fn decaying_max_reputation_finally_yields_zero() {
+		const INITIAL_REPUTATION: i32 = i32::MAX;
+		const SECONDS: u64 = 1000;
+
+		let mut peer_info = PeerInfo::default();
+		peer_info.reputation = INITIAL_REPUTATION;
+
+		peer_info.decay_reputation(SECONDS / 2);
+		assert!(peer_info.reputation > 0);
+
+		peer_info.decay_reputation(SECONDS / 2);
+		assert_eq!(peer_info.reputation, 0);
+	}
+
+	// Symmetric check for the extreme negative end of the range.
+	#[test]
+	fn decaying_min_reputation_finally_yields_zero() {
+		const INITIAL_REPUTATION: i32 = i32::MIN;
+		const SECONDS: u64 = 1000;
+
+		let mut peer_info = PeerInfo::default();
+		peer_info.reputation = INITIAL_REPUTATION;
+
+		peer_info.decay_reputation(SECONDS / 2);
+		assert!(peer_info.reputation < 0);
+
+		peer_info.decay_reputation(SECONDS / 2);
+		assert_eq!(peer_info.reputation, 0);
+	}
+}
diff --git a/substrate/client/network/src/litep2p/service.rs b/substrate/client/network/src/litep2p/service.rs
new file mode 100644
index 0000000000000000000000000000000000000000..86f11aa6e142e2a1bdc161e7a92e34f5833cd1bb
--- /dev/null
+++ b/substrate/client/network/src/litep2p/service.rs
@@ -0,0 +1,469 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! `NetworkService` implementation for `litep2p`.
+
+use crate::{
+	config::MultiaddrWithPeerId,
+	litep2p::shim::{
+		notification::{config::ProtocolControlHandle, peerset::PeersetCommand},
+		request_response::OutboundRequest,
+	},
+	multiaddr::Protocol,
+	network_state::NetworkState,
+	peer_store::PeerStoreProvider,
+	service::out_events,
+	Event, IfDisconnected, NetworkDHTProvider, NetworkEventStream, NetworkPeers, NetworkRequest,
+	NetworkSigner, NetworkStateInfo, NetworkStatus, NetworkStatusProvider, ProtocolName,
+	RequestFailure, Signature,
+};
+
+use codec::DecodeAll;
+use futures::{channel::oneshot, stream::BoxStream};
+use libp2p::{identity::SigningError, kad::record::Key as KademliaKey, Multiaddr};
+use litep2p::crypto::ed25519::Keypair;
+use parking_lot::RwLock;
+
+use sc_network_common::{
+	role::{ObservedRole, Roles},
+	types::ReputationChange,
+};
+use sc_network_types::PeerId;
+use sc_utils::mpsc::TracingUnboundedSender;
+
+use std::{
+	collections::{HashMap, HashSet},
+	sync::{atomic::Ordering, Arc},
+};
+
+/// Logging target for the file.
+const LOG_TARGET: &str = "sub-libp2p";
+
+/// Commands sent by [`Litep2pNetworkService`] to
+/// [`Litep2pNetworkBackend`](super::Litep2pNetworkBackend).
+#[derive(Debug)]
+pub enum NetworkServiceCommand {
+	/// Get value from DHT.
+	GetValue {
+		/// Record key.
+		key: KademliaKey,
+	},
+
+	/// Put value to DHT.
+	PutValue {
+		/// Record key.
+		key: KademliaKey,
+
+		/// Record value.
+		value: Vec<u8>,
+	},
+
+	/// Query network status.
+	Status {
+		/// `oneshot::Sender` for sending the status.
+		tx: oneshot::Sender<NetworkStatus>,
+	},
+
+	/// Add `peers` to `protocol`'s reserved set.
+	AddPeersToReservedSet {
+		/// Protocol.
+		protocol: ProtocolName,
+
+		/// Reserved peers.
+		peers: HashSet<Multiaddr>,
+	},
+
+	/// Add known address for peer.
+	AddKnownAddress {
+		/// Peer ID.
+		peer: PeerId,
+
+		/// Address.
+		address: Multiaddr,
+	},
+
+	/// Set reserved peers for `protocol`.
+	SetReservedPeers {
+		/// Protocol.
+		protocol: ProtocolName,
+
+		/// Reserved peers.
+		peers: HashSet<Multiaddr>,
+	},
+
+	/// Disconnect peer from protocol.
+	DisconnectPeer {
+		/// Protocol.
+		protocol: ProtocolName,
+
+		/// Peer ID.
+		peer: PeerId,
+	},
+
+	/// Set protocol to reserved only (true/false) mode.
+	SetReservedOnly {
+		/// Protocol.
+		protocol: ProtocolName,
+
+		/// Reserved only?
+		reserved_only: bool,
+	},
+
+	/// Remove reserved peers from protocol.
+	RemoveReservedPeers {
+		/// Protocol.
+		protocol: ProtocolName,
+
+		/// Peers to remove from the reserved set.
+		peers: HashSet<PeerId>,
+	},
+
+	/// Create event stream for DHT events.
+	EventStream {
+		/// Sender for the events.
+		tx: out_events::Sender,
+	},
+}
+
+/// `NetworkService` implementation for `litep2p`.
+#[derive(Debug, Clone)]
+pub struct Litep2pNetworkService {
+	/// Local peer ID.
+	local_peer_id: litep2p::PeerId,
+
+	/// The `KeyPair` that defines the `PeerId` of the local node.
+	keypair: Keypair,
+
+	/// TX channel for sending commands to [`Litep2pNetworkBackend`](super::Litep2pNetworkBackend).
+	cmd_tx: TracingUnboundedSender<NetworkServiceCommand>,
+
+	/// Handle to `PeerStore`.
+	peer_store_handle: Arc<dyn PeerStoreProvider>,
+
+	/// Peerset handles.
+	peerset_handles: HashMap<ProtocolName, ProtocolControlHandle>,
+
+	/// Name for the block announce protocol.
+	block_announce_protocol: ProtocolName,
+
+	/// Installed request-response protocols.
+	request_response_protocols: HashMap<ProtocolName, TracingUnboundedSender<OutboundRequest>>,
+
+	/// Listen addresses.
+	listen_addresses: Arc<RwLock<HashSet<Multiaddr>>>,
+
+	/// External addresses.
+	external_addresses: Arc<RwLock<HashSet<Multiaddr>>>,
+}
+
+impl Litep2pNetworkService {
+	/// Create new [`Litep2pNetworkService`].
+	pub fn new(
+		local_peer_id: litep2p::PeerId,
+		keypair: Keypair,
+		cmd_tx: TracingUnboundedSender<NetworkServiceCommand>,
+		peer_store_handle: Arc<dyn PeerStoreProvider>,
+		peerset_handles: HashMap<ProtocolName, ProtocolControlHandle>,
+		block_announce_protocol: ProtocolName,
+		request_response_protocols: HashMap<ProtocolName, TracingUnboundedSender<OutboundRequest>>,
+		listen_addresses: Arc<RwLock<HashSet<Multiaddr>>>,
+		external_addresses: Arc<RwLock<HashSet<Multiaddr>>>,
+	) -> Self {
+		Self {
+			local_peer_id,
+			keypair,
+			cmd_tx,
+			peer_store_handle,
+			peerset_handles,
+			block_announce_protocol,
+			request_response_protocols,
+			listen_addresses,
+			external_addresses,
+		}
+	}
+}
+
+impl NetworkSigner for Litep2pNetworkService {
+	/// Sign `msg` with the node's ed25519 keypair, returning the corresponding public
+	/// key alongside the signature bytes.
+	fn sign_with_local_identity(&self, msg: Vec<u8>) -> Result<Signature, SigningError> {
+		let public_key = self.keypair.public();
+		let bytes = self.keypair.sign(msg.as_ref());
+
+		Ok(Signature {
+			public_key: crate::service::signature::PublicKey::Litep2p(
+				litep2p::crypto::PublicKey::Ed25519(public_key),
+			),
+			bytes,
+		})
+	}
+
+	/// Verify that `signature` over `message` was produced by the protobuf-encoded
+	/// `public_key` AND that the key corresponds to `peer`. Returns `Err` only if the
+	/// public key fails to decode; a mismatching peer/signature yields `Ok(false)`.
+	fn verify(
+		&self,
+		peer: PeerId,
+		public_key: &Vec<u8>,
+		signature: &Vec<u8>,
+		message: &Vec<u8>,
+	) -> Result<bool, String> {
+		let public_key = litep2p::crypto::PublicKey::from_protobuf_encoding(&public_key)
+			.map_err(|error| error.to_string())?;
+		let peer: litep2p::PeerId = peer.into();
+
+		Ok(peer == public_key.to_peer_id() && public_key.verify(message, signature))
+	}
+}
+
+impl NetworkDHTProvider for Litep2pNetworkService {
+	// DHT operations are fire-and-forget commands forwarded to the backend event loop.
+	// Send errors are ignored: they only occur if the backend has shut down.
+	fn get_value(&self, key: &KademliaKey) {
+		let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::GetValue { key: key.clone() });
+	}
+
+	fn put_value(&self, key: KademliaKey, value: Vec<u8>) {
+		let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::PutValue { key, value });
+	}
+}
+
+#[async_trait::async_trait]
+impl NetworkStatusProvider for Litep2pNetworkService {
+	/// Query network status from the backend over a oneshot channel.
+	///
+	/// Returns `Err(())` if the backend has shut down (command or reply channel closed).
+	async fn status(&self) -> Result<NetworkStatus, ()> {
+		let (tx, rx) = oneshot::channel();
+		self.cmd_tx
+			.unbounded_send(NetworkServiceCommand::Status { tx })
+			.map_err(|_| ())?;
+
+		rx.await.map_err(|_| ())
+	}
+
+	/// Build a partial [`NetworkState`] from the locally cached address sets.
+	///
+	/// Connected/not-connected peer maps and the peerset dump are placeholders for now.
+	async fn network_state(&self) -> Result<NetworkState, ()> {
+		Ok(NetworkState {
+			peer_id: self.local_peer_id.to_base58(),
+			listened_addresses: self.listen_addresses.read().iter().cloned().collect(),
+			external_addresses: self.external_addresses.read().iter().cloned().collect(),
+			connected_peers: HashMap::new(),
+			not_connected_peers: HashMap::new(),
+			// TODO: Check what info we can include here.
+			//       Issue reference: https://github.com/paritytech/substrate/issues/14160.
+			peerset: serde_json::json!(
+				"Unimplemented. See https://github.com/paritytech/substrate/issues/14160."
+			),
+		})
+	}
+}
+
+// Manual implementation to avoid extra boxing here
+// TODO: functions modifying peerset state could be modified to call peerset directly if the
+// `Multiaddr` only contains a `PeerId`
+//
+// Most methods forward fire-and-forget commands to the backend event loop; send failures
+// are ignored because they only occur when the backend has shut down.
+#[async_trait::async_trait]
+impl NetworkPeers for Litep2pNetworkService {
+	// Replaces the block announce protocol's reserved set. Peer IDs are wrapped into
+	// bare `/p2p/<peer>` multiaddresses since the command carries addresses.
+	fn set_authorized_peers(&self, peers: HashSet<PeerId>) {
+		let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::SetReservedPeers {
+			protocol: self.block_announce_protocol.clone(),
+			peers: peers
+				.into_iter()
+				.map(|peer| Multiaddr::empty().with(Protocol::P2p(peer.into())))
+				.collect(),
+		});
+	}
+
+	fn set_authorized_only(&self, reserved_only: bool) {
+		let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::SetReservedOnly {
+			protocol: self.block_announce_protocol.clone(),
+			reserved_only,
+		});
+	}
+
+	fn add_known_address(&self, peer: PeerId, address: Multiaddr) {
+		let _ = self
+			.cmd_tx
+			.unbounded_send(NetworkServiceCommand::AddKnownAddress { peer, address });
+	}
+
+	// Reputation queries/updates go straight to the shared peer store, no backend round-trip.
+	fn peer_reputation(&self, peer_id: &PeerId) -> i32 {
+		self.peer_store_handle.peer_reputation(peer_id)
+	}
+
+	fn report_peer(&self, peer: PeerId, cost_benefit: ReputationChange) {
+		self.peer_store_handle.report_peer(peer, cost_benefit);
+	}
+
+	fn disconnect_peer(&self, peer: PeerId, protocol: ProtocolName) {
+		let _ = self
+			.cmd_tx
+			.unbounded_send(NetworkServiceCommand::DisconnectPeer { protocol, peer });
+	}
+
+	// `accept_unreserved_peers`/`deny_unreserved_peers` toggle reserved-only mode for the
+	// block announce protocol.
+	fn accept_unreserved_peers(&self) {
+		let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::SetReservedOnly {
+			protocol: self.block_announce_protocol.clone(),
+			reserved_only: false,
+		});
+	}
+
+	fn deny_unreserved_peers(&self) {
+		let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::SetReservedOnly {
+			protocol: self.block_announce_protocol.clone(),
+			reserved_only: true,
+		});
+	}
+
+	// Always returns `Ok`: the command is fire-and-forget.
+	fn add_reserved_peer(&self, peer: MultiaddrWithPeerId) -> Result<(), String> {
+		let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::AddPeersToReservedSet {
+			protocol: self.block_announce_protocol.clone(),
+			// `concat()` folds the peer ID back into the multiaddress.
+			peers: HashSet::from_iter([peer.concat()]),
+		});
+
+		Ok(())
+	}
+
+	fn remove_reserved_peer(&self, peer: PeerId) {
+		let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::RemoveReservedPeers {
+			protocol: self.block_announce_protocol.clone(),
+			peers: HashSet::from_iter([peer]),
+		});
+	}
+
+	fn set_reserved_peers(
+		&self,
+		protocol: ProtocolName,
+		peers: HashSet<Multiaddr>,
+	) -> Result<(), String> {
+		let _ = self
+			.cmd_tx
+			.unbounded_send(NetworkServiceCommand::SetReservedPeers { protocol, peers });
+		Ok(())
+	}
+
+	fn add_peers_to_reserved_set(
+		&self,
+		protocol: ProtocolName,
+		peers: HashSet<Multiaddr>,
+	) -> Result<(), String> {
+		let _ = self
+			.cmd_tx
+			.unbounded_send(NetworkServiceCommand::AddPeersToReservedSet { protocol, peers });
+		Ok(())
+	}
+
+	fn remove_peers_from_reserved_set(
+		&self,
+		protocol: ProtocolName,
+		peers: Vec<PeerId>,
+	) -> Result<(), String> {
+		let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::RemoveReservedPeers {
+			protocol,
+			peers: peers.into_iter().map(From::from).collect(),
+		});
+
+		Ok(())
+	}
+
+	// Relaxed atomic read of the block announce protocol's connected-peer counter;
+	// 0 if the protocol isn't registered.
+	fn sync_num_connected(&self) -> usize {
+		self.peerset_handles
+			.get(&self.block_announce_protocol)
+			.map_or(0usize, |handle| handle.connected_peers.load(Ordering::Relaxed))
+	}
+
+	// Decode the role from the handshake; fall back to the peer store's cached role if
+	// the handshake doesn't carry one.
+	fn peer_role(&self, peer: PeerId, handshake: Vec<u8>) -> Option<ObservedRole> {
+		match Roles::decode_all(&mut &handshake[..]) {
+			Ok(role) => Some(role.into()),
+			Err(_) => {
+				log::debug!(target: LOG_TARGET, "handshake doesn't contain peer role: {handshake:?}");
+				self.peer_store_handle.peer_role(&(peer.into()))
+			},
+		}
+	}
+
+	/// Get the list of reserved peers.
+	///
+	/// Returns an error if the `NetworkWorker` is no longer running.
+	async fn reserved_peers(&self) -> Result<Vec<PeerId>, ()> {
+		let Some(handle) = self.peerset_handles.get(&self.block_announce_protocol) else {
+			return Err(())
+		};
+		let (tx, rx) = oneshot::channel();
+
+		handle
+			.tx
+			.unbounded_send(PeersetCommand::GetReservedPeers { tx })
+			.map_err(|_| ())?;
+
+		// the channel can only be closed if `Peerset` no longer exists
+		rx.await.map_err(|_| ())
+	}
+}
+
+impl NetworkEventStream for Litep2pNetworkService {
+	// Creates a buffered (100_000 events) stream and registers its sender with the
+	// backend via `NetworkServiceCommand::EventStream`.
+	fn event_stream(&self, stream_name: &'static str) -> BoxStream<'static, Event> {
+		let (tx, rx) = out_events::channel(stream_name, 100_000);
+		let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::EventStream { tx });
+		Box::pin(rx)
+	}
+}
+
+impl NetworkStateInfo for Litep2pNetworkService {
+	// Snapshots of the shared address sets; order is unspecified (`HashSet` iteration).
+	fn external_addresses(&self) -> Vec<Multiaddr> {
+		self.external_addresses.read().iter().cloned().collect()
+	}
+
+	fn listen_addresses(&self) -> Vec<Multiaddr> {
+		self.listen_addresses.read().iter().cloned().collect()
+	}
+
+	fn local_peer_id(&self) -> PeerId {
+		self.local_peer_id.into()
+	}
+}
+
+// Manual implementation to avoid extra boxing here
+#[async_trait::async_trait]
+impl NetworkRequest for Litep2pNetworkService {
+	/// The blocking request API is not supported by this backend; use
+	/// [`Self::start_request`] instead. Calling this panics.
+	async fn request(
+		&self,
+		_target: PeerId,
+		_protocol: ProtocolName,
+		_request: Vec<u8>,
+		_fallback_request: Option<(Vec<u8>, ProtocolName)>,
+		_connect: IfDisconnected,
+	) -> Result<(Vec<u8>, ProtocolName), RequestFailure> {
+		unimplemented!();
+	}
+
+	/// Forward an outbound request to the handler of `protocol`. If the protocol is not
+	/// installed, the request is dropped with a warning and `sender` is simply dropped,
+	/// which the caller observes as a canceled oneshot.
+	fn start_request(
+		&self,
+		peer: PeerId,
+		protocol: ProtocolName,
+		request: Vec<u8>,
+		fallback_request: Option<(Vec<u8>, ProtocolName)>,
+		sender: oneshot::Sender<Result<(Vec<u8>, ProtocolName), RequestFailure>>,
+		connect: IfDisconnected,
+	) {
+		match self.request_response_protocols.get(&protocol) {
+			Some(tx) => {
+				let _ = tx.unbounded_send(OutboundRequest::new(
+					peer,
+					request,
+					sender,
+					fallback_request,
+					connect,
+				));
+			},
+			None => log::warn!(
+				target: LOG_TARGET,
+				"{protocol} doesn't exist, cannot send request to {peer:?}"
+			),
+		}
+	}
+}
diff --git a/substrate/client/network/src/litep2p/shim/bitswap.rs b/substrate/client/network/src/litep2p/shim/bitswap.rs
new file mode 100644
index 0000000000000000000000000000000000000000..d347317ae9f98f09423053fb9d32bbac5314ba57
--- /dev/null
+++ b/substrate/client/network/src/litep2p/shim/bitswap.rs
@@ -0,0 +1,104 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Shim for litep2p's Bitswap implementation to make it work with `sc-network`.
+
+use futures::StreamExt;
+use litep2p::protocol::libp2p::bitswap::{
+	BitswapEvent, BitswapHandle, BlockPresenceType, Config, ResponseType, WantType,
+};
+
+use sc_client_api::BlockBackend;
+use sp_runtime::traits::Block as BlockT;
+
+use std::{future::Future, pin::Pin, sync::Arc};
+
+/// Logging target for the file.
+const LOG_TARGET: &str = "sub-libp2p::bitswap";
+
+/// Bitswap server, answering incoming bitswap requests with transactions indexed by the
+/// blockchain client.
+pub struct BitswapServer<Block: BlockT> {
+	/// Bitswap handle.
+	handle: BitswapHandle,
+
+	/// Blockchain client.
+	client: Arc<dyn BlockBackend<Block> + Send + Sync>,
+}
+
+impl<Block: BlockT> BitswapServer<Block> {
+	/// Create new [`BitswapServer`].
+	///
+	/// Returns the server's event loop as a boxed future (to be spawned by the caller)
+	/// together with the litep2p bitswap [`Config`] to install in the transport.
+	pub fn new(
+		client: Arc<dyn BlockBackend<Block> + Send + Sync>,
+	) -> (Pin<Box<dyn Future<Output = ()> + Send>>, Config) {
+		let (config, handle) = Config::new();
+		let bitswap = Self { client, handle };
+
+		(Box::pin(async move { bitswap.run().await }), config)
+	}
+
+	/// Run the bitswap request loop until the handle's event stream ends.
+	async fn run(mut self) {
+		log::debug!(target: LOG_TARGET, "starting bitswap server");
+
+		while let Some(event) = self.handle.next().await {
+			match event {
+				BitswapEvent::Request { peer, cids } => {
+					log::debug!(target: LOG_TARGET, "handle bitswap request from {peer:?} for {cids:?}");
+
+					let response: Vec<ResponseType> = cids
+						.into_iter()
+						.map(|(cid, want_type)| {
+							// The CID digest comes from an untrusted remote peer: verify
+							// it is exactly as long as `Block::Hash` before copying. The
+							// previous unconditional `digest()[0..32]`/`copy_from_slice`
+							// panicked on a short digest (or a non-32-byte block hash),
+							// letting a malformed request crash the server.
+							let digest = cid.hash().digest();
+							let mut hash = Block::Hash::default();
+							if digest.len() != hash.as_ref().len() {
+								log::debug!(
+									target: LOG_TARGET,
+									"unexpected digest length {} for cid {cid:?}",
+									digest.len(),
+								);
+								return ResponseType::Presence {
+									cid,
+									presence: BlockPresenceType::DontHave,
+								}
+							}
+							hash.as_mut().copy_from_slice(digest);
+
+							// Look up the transaction indexed by the decoded hash.
+							let transaction = match self.client.indexed_transaction(hash) {
+								Ok(ex) => ex,
+								Err(error) => {
+									log::error!(target: LOG_TARGET, "error retrieving transaction {hash}: {error}");
+									None
+								},
+							};
+
+							match transaction {
+								Some(transaction) => {
+									log::trace!(target: LOG_TARGET, "found cid {cid:?}, hash {hash:?}");
+
+									match want_type {
+										WantType::Block =>
+											ResponseType::Block { cid, block: transaction },
+										// `WantType::Have` (or future variants): only
+										// announce availability, don't ship the block.
+										_ => ResponseType::Presence {
+											cid,
+											presence: BlockPresenceType::Have,
+										},
+									}
+								},
+								None => {
+									log::trace!(target: LOG_TARGET, "missing cid {cid:?}, hash {hash:?}");
+
+									ResponseType::Presence {
+										cid,
+										presence: BlockPresenceType::DontHave,
+									}
+								},
+							}
+						})
+						.collect();
+
+					self.handle.send_response(peer, response).await;
+				},
+			}
+		}
+	}
+}
diff --git a/substrate/client/network/src/litep2p/shim/mod.rs b/substrate/client/network/src/litep2p/shim/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..5eaf77ff0a4b4ab1e021528d26f1062135ef8a69
--- /dev/null
+++ b/substrate/client/network/src/litep2p/shim/mod.rs
@@ -0,0 +1,23 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Shims for fitting `litep2p` APIs to `sc-network` APIs.
+
+pub(crate) mod bitswap;
+pub(crate) mod notification;
+pub(crate) mod request_response;
diff --git a/substrate/client/network/src/litep2p/shim/notification/config.rs b/substrate/client/network/src/litep2p/shim/notification/config.rs
new file mode 100644
index 0000000000000000000000000000000000000000..70e136da4ed998028032f475ac8bd7e481dc8b2a
--- /dev/null
+++ b/substrate/client/network/src/litep2p/shim/notification/config.rs
@@ -0,0 +1,168 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! `litep2p` notification protocol configuration.
+
+use crate::{
+	config::{MultiaddrWithPeerId, NonReservedPeerMode, NotificationHandshake, SetConfig},
+	litep2p::shim::notification::{
+		peerset::{Peerset, PeersetCommand},
+		NotificationProtocol,
+	},
+	peer_store::PeerStoreProvider,
+	service::{metrics::NotificationMetrics, traits::NotificationConfig},
+	NotificationService, ProtocolName,
+};
+
+use litep2p::protocol::notification::{Config, ConfigBuilder};
+
+use sc_utils::mpsc::TracingUnboundedSender;
+
+use std::sync::{atomic::AtomicUsize, Arc};
+
+/// Handle for controlling the notification protocol.
+#[derive(Debug, Clone)]
+pub struct ProtocolControlHandle {
+	/// TX channel for sending commands to `Peerset` of the notification protocol.
+	pub tx: TracingUnboundedSender<PeersetCommand>,
+
+	/// Peers currently connected to this protocol.
+	pub connected_peers: Arc<AtomicUsize>,
+}
+
+impl ProtocolControlHandle {
+	/// Create new [`ProtocolControlHandle`].
+	pub fn new(
+		tx: TracingUnboundedSender<PeersetCommand>,
+		connected_peers: Arc<AtomicUsize>,
+	) -> Self {
+		Self { tx, connected_peers }
+	}
+}
+
+/// Configuration for the notification protocol.
+#[derive(Debug)]
+pub struct NotificationProtocolConfig {
+	/// Name of the notifications protocols of this set. A substream on this set will be
+	/// considered established once this protocol is open.
+	pub protocol_name: ProtocolName,
+
+	/// Maximum allowed size of single notifications.
+	max_notification_size: usize,
+
+	/// Base configuration.
+	set_config: SetConfig,
+
+	/// `litep2p` notification config.
+	pub config: Config,
+
+	/// Handle for controlling the notification protocol.
+	pub handle: ProtocolControlHandle,
+}
+
+impl NotificationProtocolConfig {
+	/// Create new [`NotificationProtocolConfig`].
+	pub fn new(
+		protocol_name: ProtocolName,
+		fallback_names: Vec<ProtocolName>,
+		max_notification_size: usize,
+		handshake: Option<NotificationHandshake>,
+		set_config: SetConfig,
+		metrics: NotificationMetrics,
+		peerstore_handle: Arc<dyn PeerStoreProvider>,
+	) -> (Self, Box<dyn NotificationService>) {
+		// create `Peerset`/`Peerstore` handle for the protocol
+		let connected_peers = Arc::new(Default::default());
+		let (peerset, peerset_tx) = Peerset::new(
+			protocol_name.clone(),
+			set_config.out_peers as usize,
+			set_config.in_peers as usize,
+			set_config.non_reserved_mode == NonReservedPeerMode::Deny,
+			set_config.reserved_nodes.iter().map(|address| address.peer_id).collect(),
+			Arc::clone(&connected_peers),
+			peerstore_handle,
+		);
+
+		// create `litep2p` notification protocol configuration for the protocol
+		//
+		// NOTE: currently only dummy value is given as the handshake as protocols (apart from
+		// syncing) are not configuring their own handshake and instead default to role being the
+		// handshake. As the time of writing this, most protocols are not aware of the role and
+		// that should be refactored in the future.
+		let (config, handle) = ConfigBuilder::new(protocol_name.clone().into())
+			.with_handshake(handshake.map_or(vec![1], |handshake| (*handshake).to_vec()))
+			.with_max_size(max_notification_size as usize)
+			.with_auto_accept_inbound(true)
+			.with_fallback_names(fallback_names.into_iter().map(From::from).collect())
+			.build();
+
+		// initialize the actual object implementing `NotificationService` and combine the
+		// `litep2p::NotificationHandle` with `Peerset` to implement a full and independent
+		// notification protocol runner
+		let protocol = NotificationProtocol::new(protocol_name.clone(), handle, peerset, metrics);
+
+		(
+			Self {
+				protocol_name,
+				max_notification_size,
+				set_config,
+				config,
+				handle: ProtocolControlHandle::new(peerset_tx, connected_peers),
+			},
+			Box::new(protocol),
+		)
+	}
+
+	/// Get reference to protocol name.
+	pub fn protocol_name(&self) -> &ProtocolName {
+		&self.protocol_name
+	}
+
+	/// Get reference to `SetConfig`.
+	pub fn set_config(&self) -> &SetConfig {
+		&self.set_config
+	}
+
+	/// Modifies the configuration to allow non-reserved nodes.
+	pub fn allow_non_reserved(&mut self, in_peers: u32, out_peers: u32) {
+		self.set_config.in_peers = in_peers;
+		self.set_config.out_peers = out_peers;
+		self.set_config.non_reserved_mode = NonReservedPeerMode::Accept;
+	}
+
+	/// Add a node to the list of reserved nodes.
+	pub fn add_reserved(&mut self, peer: MultiaddrWithPeerId) {
+		self.set_config.reserved_nodes.push(peer);
+	}
+
+	/// Get maximum notification size.
+	pub fn max_notification_size(&self) -> usize {
+		self.max_notification_size
+	}
+}
+
+impl NotificationConfig for NotificationProtocolConfig {
+	fn set_config(&self) -> &SetConfig {
+		&self.set_config
+	}
+
+	/// Get reference to protocol name.
+	fn protocol_name(&self) -> &ProtocolName {
+		&self.protocol_name
+	}
+}
diff --git a/substrate/client/network/src/litep2p/shim/notification/mod.rs b/substrate/client/network/src/litep2p/shim/notification/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..8a320a00b000d4441c6e4b497d1b5cdcaaaff15c
--- /dev/null
+++ b/substrate/client/network/src/litep2p/shim/notification/mod.rs
@@ -0,0 +1,374 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Shim for `litep2p::NotificationHandle` to combine `Peerset`-like behavior
+//! with `NotificationService`.
+
+use crate::{
+	error::Error,
+	litep2p::shim::notification::peerset::{OpenResult, Peerset, PeersetNotificationCommand},
+	service::{
+		metrics::NotificationMetrics,
+		traits::{NotificationEvent as SubstrateNotificationEvent, ValidationResult},
+	},
+	MessageSink, NotificationService, ProtocolName,
+};
+
+use futures::{future::BoxFuture, stream::FuturesUnordered, StreamExt};
+use litep2p::protocol::notification::{
+	NotificationEvent, NotificationHandle, NotificationSink,
+	ValidationResult as Litep2pValidationResult,
+};
+use tokio::sync::oneshot;
+
+use sc_network_types::PeerId;
+
+use std::{collections::HashSet, fmt};
+
+pub mod config;
+pub mod peerset;
+
+#[cfg(test)]
+mod tests;
+
+/// Logging target for the file.
+const LOG_TARGET: &str = "sub-libp2p::notification";
+
+/// Wrapper over `litep2p`'s notification sink.
+pub struct Litep2pMessageSink {
+	/// Protocol.
+	protocol: ProtocolName,
+
+	/// Remote peer ID.
+	peer: PeerId,
+
+	/// Notification sink.
+	sink: NotificationSink,
+
+	/// Notification metrics.
+	metrics: NotificationMetrics,
+}
+
+impl Litep2pMessageSink {
+	/// Create new [`Litep2pMessageSink`].
+	fn new(
+		peer: PeerId,
+		protocol: ProtocolName,
+		sink: NotificationSink,
+		metrics: NotificationMetrics,
+	) -> Self {
+		Self { protocol, peer, sink, metrics }
+	}
+}
+
+#[async_trait::async_trait]
+impl MessageSink for Litep2pMessageSink {
+	/// Send synchronous `notification` to the peer associated with this [`MessageSink`].
+	fn send_sync_notification(&self, notification: Vec<u8>) {
+		let size = notification.len();
+
+		match self.sink.send_sync_notification(notification) {
+			Ok(_) => self.metrics.register_notification_sent(&self.protocol, size),
+			Err(error) => log::trace!(
+				target: LOG_TARGET,
+				"{}: failed to send sync notification to {:?}: {error:?}",
+				self.protocol,
+				self.peer,
+			),
+		}
+	}
+
+	/// Send an asynchronous `notification` to the peer associated with this [`MessageSink`],
+	/// allowing sender to exercise backpressure.
+	///
+	/// Returns an error if the peer does not exist.
+	async fn send_async_notification(&self, notification: Vec<u8>) -> Result<(), Error> {
+		let size = notification.len();
+
+		match self.sink.send_async_notification(notification).await {
+			Ok(_) => {
+				self.metrics.register_notification_sent(&self.protocol, size);
+				Ok(())
+			},
+			Err(error) => {
+				log::trace!(
+					target: LOG_TARGET,
+					"{}: failed to send async notification to {:?}: {error:?}",
+					self.protocol,
+					self.peer,
+				);
+
+				Err(Error::Litep2p(error))
+			},
+		}
+	}
+}
+
+/// Notification protocol implementation.
+pub struct NotificationProtocol {
+	/// Protocol name.
+	protocol: ProtocolName,
+
+	/// `litep2p` notification handle.
+	handle: NotificationHandle,
+
+	/// Peerset for the notification protocol.
+	///
+	/// Listens to peering-related events and either opens or closes substreams to remote peers.
+	peerset: Peerset,
+
+	/// Pending validations for inbound substreams.
+	pending_validations: FuturesUnordered<
+		BoxFuture<'static, (PeerId, Result<ValidationResult, oneshot::error::RecvError>)>,
+	>,
+
+	/// Pending cancels.
+	pending_cancels: HashSet<litep2p::PeerId>,
+
+	/// Notification metrics.
+	metrics: NotificationMetrics,
+}
+
+impl fmt::Debug for NotificationProtocol {
+	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+		f.debug_struct("NotificationProtocol")
+			.field("protocol", &self.protocol)
+			.field("handle", &self.handle)
+			.finish()
+	}
+}
+
+impl NotificationProtocol {
+	/// Create new [`NotificationProtocol`].
+	pub fn new(
+		protocol: ProtocolName,
+		handle: NotificationHandle,
+		peerset: Peerset,
+		metrics: NotificationMetrics,
+	) -> Self {
+		Self {
+			protocol,
+			handle,
+			peerset,
+			metrics,
+			pending_cancels: HashSet::new(),
+			pending_validations: FuturesUnordered::new(),
+		}
+	}
+
+	/// Handle `Peerset` command.
+	async fn on_peerset_command(&mut self, command: PeersetNotificationCommand) {
+		match command {
+			PeersetNotificationCommand::OpenSubstream { peers } => {
+				log::debug!(target: LOG_TARGET, "{}: open substreams to {peers:?}", self.protocol);
+
+				let _ = self.handle.open_substream_batch(peers.into_iter().map(From::from)).await;
+			},
+			PeersetNotificationCommand::CloseSubstream { peers } => {
+				log::debug!(target: LOG_TARGET, "{}: close substreams to {peers:?}", self.protocol);
+
+				self.handle.close_substream_batch(peers.into_iter().map(From::from)).await;
+			},
+		}
+	}
+}
+
+#[async_trait::async_trait]
+impl NotificationService for NotificationProtocol {
+	async fn open_substream(&mut self, _peer: PeerId) -> Result<(), ()> {
+		unimplemented!();
+	}
+
+	async fn close_substream(&mut self, _peer: PeerId) -> Result<(), ()> {
+		unimplemented!();
+	}
+
+	fn send_sync_notification(&mut self, peer: &PeerId, notification: Vec<u8>) {
+		let size = notification.len();
+
+		if let Ok(_) = self.handle.send_sync_notification(peer.into(), notification) {
+			self.metrics.register_notification_sent(&self.protocol, size);
+		}
+	}
+
+	async fn send_async_notification(
+		&mut self,
+		peer: &PeerId,
+		notification: Vec<u8>,
+	) -> Result<(), Error> {
+		let size = notification.len();
+
+		match self.handle.send_async_notification(peer.into(), notification).await {
+			Ok(_) => {
+				self.metrics.register_notification_sent(&self.protocol, size);
+				Ok(())
+			},
+			Err(_) => Err(Error::ChannelClosed),
+		}
+	}
+
+	/// Set handshake for the notification protocol replacing the old handshake.
+	async fn set_handshake(&mut self, handshake: Vec<u8>) -> Result<(), ()> {
+		self.handle.set_handshake(handshake);
+
+		Ok(())
+	}
+
+	/// Set handshake for the notification protocol replacing the old handshake.
+	///
+	/// For `litep2p` this is identical to `NotificationService::set_handshake()` since `litep2p`
+	/// allows updating the handshake synchronously.
+	fn try_set_handshake(&mut self, handshake: Vec<u8>) -> Result<(), ()> {
+		self.handle.set_handshake(handshake);
+
+		Ok(())
+	}
+
+	/// Make a copy of the object so it can be shared between protocol components
+	/// who wish to have access to the same underlying notification protocol.
+	fn clone(&mut self) -> Result<Box<dyn NotificationService>, ()> {
+		unimplemented!("clonable `NotificationService` not supported by `litep2p`");
+	}
+
+	/// Get protocol name of the `NotificationService`.
+	fn protocol(&self) -> &ProtocolName {
+		&self.protocol
+	}
+
+	/// Get message sink of the peer.
+	fn message_sink(&self, peer: &PeerId) -> Option<Box<dyn MessageSink>> {
+		self.handle.notification_sink(peer.into()).map(|sink| {
+			let sink: Box<dyn MessageSink> = Box::new(Litep2pMessageSink::new(
+				*peer,
+				self.protocol.clone(),
+				sink,
+				self.metrics.clone(),
+			));
+			sink
+		})
+	}
+
+	/// Get next event from the `Notifications` event stream.
+	async fn next_event(&mut self) -> Option<SubstrateNotificationEvent> {
+		loop {
+			tokio::select! {
+				biased;
+
+				event = self.handle.next() => match event? {
+					NotificationEvent::ValidateSubstream { peer, handshake, .. } => {
+						if let ValidationResult::Reject = self.peerset.report_inbound_substream(peer.into()) {
+							self.handle.send_validation_result(peer, Litep2pValidationResult::Reject);
+							continue;
+						}
+
+						let (tx, rx) = oneshot::channel();
+						self.pending_validations.push(Box::pin(async move { (peer.into(), rx.await) }));
+
+						log::trace!(target: LOG_TARGET, "{}: validate substream for {peer:?}", self.protocol);
+
+						return Some(SubstrateNotificationEvent::ValidateInboundSubstream {
+							peer: peer.into(),
+							handshake,
+							result_tx: tx,
+						});
+					}
+					NotificationEvent::NotificationStreamOpened {
+						peer,
+						fallback,
+						handshake,
+						direction,
+						..
+					} => {
+						self.metrics.register_substream_opened(&self.protocol);
+
+						match self.peerset.report_substream_opened(peer.into(), direction.into()) {
+							OpenResult::Reject => {
+								let _ = self.handle.close_substream_batch(vec![peer].into_iter().map(From::from)).await;
+								self.pending_cancels.insert(peer);
+
+								continue
+							}
+							OpenResult::Accept { direction } => {
+								log::trace!(target: LOG_TARGET, "{}: substream opened for {peer:?}", self.protocol);
+
+								return Some(SubstrateNotificationEvent::NotificationStreamOpened {
+									peer: peer.into(),
+									handshake,
+									direction,
+									negotiated_fallback: fallback.map(From::from),
+								});
+							}
+						}
+					}
+					NotificationEvent::NotificationStreamClosed {
+						peer,
+					} => {
+						log::trace!(target: LOG_TARGET, "{}: substream closed for {peer:?}", self.protocol);
+
+						self.metrics.register_substream_closed(&self.protocol);
+						self.peerset.report_substream_closed(peer.into());
+
+						if self.pending_cancels.remove(&peer) {
+							log::debug!(
+								target: LOG_TARGET,
+								"{}: substream closed to canceled peer ({peer:?})",
+								self.protocol
+							);
+							continue
+						}
+
+						return Some(SubstrateNotificationEvent::NotificationStreamClosed { peer: peer.into() })
+					}
+					NotificationEvent::NotificationStreamOpenFailure {
+						peer,
+						error,
+					} => {
+						log::trace!(target: LOG_TARGET, "{}: open failure for {peer:?}", self.protocol);
+						self.peerset.report_substream_open_failure(peer.into(), error);
+					}
+					NotificationEvent::NotificationReceived {
+						peer,
+						notification,
+					} => {
+						self.metrics.register_notification_received(&self.protocol, notification.len());
+
+						if !self.pending_cancels.contains(&peer) {
+							return Some(SubstrateNotificationEvent::NotificationReceived {
+								peer: peer.into(),
+								notification: notification.to_vec(),
+							});
+						}
+					}
+				},
+				result = self.pending_validations.next(), if !self.pending_validations.is_empty() => {
+					let (peer, result) = result?;
+					let validation_result = match result {
+						Ok(ValidationResult::Accept) => Litep2pValidationResult::Accept,
+						_ => {
+							self.peerset.report_substream_rejected(peer);
+							Litep2pValidationResult::Reject
+						}
+					};
+
+					self.handle.send_validation_result(peer.into(), validation_result);
+				}
+				command = self.peerset.next() => self.on_peerset_command(command?).await,
+			}
+		}
+	}
+}
diff --git a/substrate/client/network/src/litep2p/shim/notification/peerset.rs b/substrate/client/network/src/litep2p/shim/notification/peerset.rs
new file mode 100644
index 0000000000000000000000000000000000000000..2fd7920909e334c2f4756982bbd6937781dea4a7
--- /dev/null
+++ b/substrate/client/network/src/litep2p/shim/notification/peerset.rs
@@ -0,0 +1,1419 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! [`Peerset`] implementation for `litep2p`.
+//!
+//! [`Peerset`] is a separate but related component running alongside the notification protocol,
+//! responsible for maintaining connectivity to remote peers. [`Peerset`] has an imperfect view of
+//! the network as the notification protocol is behind an asynchronous channel. Based on this
+//! imperfect view, it tries to connect to remote peers and disconnect peers that should be
+//! disconnected from.
+//!
+//! [`Peerset`] knows of two types of peers:
+//!  - normal peers
+//!  - reserved peers
+//!
+//! Reserved peers are those to which the [`Peerset`] should be connected at all times and it will
+//! make an effort to do so by constantly checking that there are no disconnected reserved peers
+//! (except banned) and if there are, it will open substreams to them.
+//!
+//! [`Peerset`] may also contain "slots", both inbound and outbound, which mark how many incoming
+//! and outgoing connections it should maintain at all times. Peers for the inbound slots are filled
+//! by remote peers opening inbound substreams towards the local node and peers for the outbound
+//! slots are filled by querying the `Peerstore` which contains all peers known to `sc-network`.
+//! Peers for outbound slots are selected in a decreasing order of reputation.
+
+use crate::{
+	peer_store::{PeerStoreProvider, ProtocolHandle},
+	service::traits::{self, ValidationResult},
+	ProtocolName, ReputationChange as Reputation,
+};
+
+use futures::{channel::oneshot, future::BoxFuture, stream::FuturesUnordered, Stream, StreamExt};
+use futures_timer::Delay;
+use litep2p::protocol::notification::NotificationError;
+
+use sc_network_types::PeerId;
+use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender};
+
+use std::{
+	collections::{HashMap, HashSet},
+	future::Future,
+	pin::Pin,
+	sync::{
+		atomic::{AtomicUsize, Ordering},
+		Arc,
+	},
+	task::{Context, Poll},
+	time::Duration,
+};
+
+/// Logging target for the file.
+const LOG_TARGET: &str = "sub-libp2p::peerset";
+
+/// Default backoff for connection re-attempts.
+const DEFAULT_BACKOFF: Duration = Duration::from_secs(5);
+
+/// Open failure backoff.
+const OPEN_FAILURE_BACKOFF: Duration = Duration::from_secs(5);
+
+/// Slot allocation frequency.
+///
+/// How often should [`Peerset`] attempt to establish outbound connections.
+const SLOT_ALLOCATION_FREQUENCY: Duration = Duration::from_secs(1);
+
+/// Reputation adjustment when a peer gets disconnected.
+///
+/// Lessens the likelihood of the peer getting selected for an outbound connection soon.
+const DISCONNECT_ADJUSTMENT: Reputation = Reputation::new(-256, "Peer disconnected");
+
+/// Reputation adjustment when a substream fails to open.
+///
+/// Lessens the likelihood of the peer getting selected for an outbound connection soon.
+const OPEN_FAILURE_ADJUSTMENT: Reputation = Reputation::new(-1024, "Open failure");
+
+/// Is the peer reserved?
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub enum Reserved {
+	Yes,
+	No,
+}
+
+impl From<bool> for Reserved {
+	fn from(value: bool) -> Reserved {
+		match value {
+			true => Reserved::Yes,
+			false => Reserved::No,
+		}
+	}
+}
+
+impl From<Reserved> for bool {
+	fn from(value: Reserved) -> bool {
+		std::matches!(value, Reserved::Yes)
+	}
+}
+
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub enum Direction {
+	/// Inbound substream.
+	Inbound(Reserved),
+
+	/// Outbound substream.
+	Outbound(Reserved),
+}
+
+impl From<Direction> for traits::Direction {
+	fn from(direction: Direction) -> traits::Direction {
+		match direction {
+			Direction::Inbound(_) => traits::Direction::Inbound,
+			Direction::Outbound(_) => traits::Direction::Outbound,
+		}
+	}
+}
+
+/// Open result for a fully-opened connection.
+#[derive(PartialEq, Eq)]
+pub enum OpenResult {
+	/// Accept the connection.
+	Accept {
+		/// Direction which [`Peerset`] considers to be correct.
+		direction: traits::Direction,
+	},
+
+	/// Reject the connection because it was canceled while it was opening.
+	Reject,
+}
+
+/// Commands emitted by other subsystems of the blockchain to [`Peerset`].
+#[derive(Debug)]
+pub enum PeersetCommand {
+	/// Set current reserved peer set.
+	///
+	/// This command removes all reserved peers that are not in `peers`.
+	SetReservedPeers {
+		/// New reserved peer set.
+		peers: HashSet<PeerId>,
+	},
+
+	/// Add one or more reserved peers.
+	///
+	/// This command doesn't remove any reserved peers but only add new peers.
+	AddReservedPeers {
+		/// Reserved peers to add.
+		peers: HashSet<PeerId>,
+	},
+
+	/// Remove reserved peers.
+	RemoveReservedPeers {
+		/// Reserved peers to remove.
+		peers: HashSet<PeerId>,
+	},
+
+	/// Set reserved-only mode to true/false.
+	SetReservedOnly {
+		/// Should the protocol only accept/establish connections to reserved peers.
+		reserved_only: bool,
+	},
+
+	/// Disconnect peer.
+	DisconnectPeer {
+		/// Peer ID.
+		peer: PeerId,
+	},
+
+	/// Get reserved peers.
+	GetReservedPeers {
+		/// `oneshot::Sender` for sending the current set of reserved peers.
+		tx: oneshot::Sender<Vec<PeerId>>,
+	},
+}
+
+/// Commands emitted by [`Peerset`] to the notification protocol.
+#[derive(Debug)]
+pub enum PeersetNotificationCommand {
+	/// Open substreams to one or more peers.
+	OpenSubstream {
+		/// Peer IDs.
+		peers: Vec<PeerId>,
+	},
+
+	/// Close substream to one or more peers.
+	CloseSubstream {
+		/// Peer IDs.
+		peers: Vec<PeerId>,
+	},
+}
+
+/// Peer state.
+///
+/// Peer can be in 6 different states:
+///  - disconnected
+///  - connected
+///  - connection is opening
+///  - connection is closing
+///  - connection is backed-off
+///  - connection is canceled
+///
+/// Opening and closing are separate states as litep2p guarantees to report when the substream is
+/// either fully open or fully closed and the slot allocation for opening a substream is tied to a
+/// state transition which moves the peer to [`PeerState::Opening`]. This is because it allows
+/// reserving a slot for peer to prevent infinite outbound substreams. If the substream is opened
+/// successfully, peer is moved to state [`PeerState::Connected`] but there is no modification to
+/// the slot count as an outbound slot was already allocated for the peer. If the substream fails to
+/// open, the event is reported by litep2p and [`Peerset::report_substream_open_failure()`] is
+/// called which will decrease the outbound slot count. Similarly for inbound streams, the slot is
+/// allocated in [`Peerset::report_inbound_substream()`] which will prevent `Peerset` from accepting
+/// infinite inbound substreams. If the inbound substream fails to open and since [`Peerset`] was
+/// notified of it, litep2p will report the open failure and the inbound slot count is once again
+/// decreased in [`Peerset::report_substream_open_failure()`]. If the substream is opened
+/// successfully, the slot count is not modified.
+///
+/// Since closing a substream is not instantaneous, there is a separate [`PeerState::Closing`]
+/// state which indicates that the substream is being closed but hasn't been closed by litep2p yet.
+/// This state is used to prevent invalid state transitions where, for example, [`Peerset`] would
+/// close a substream and then try to reopen it immediately.
+///
+/// Irrespective of which side closed the substream (local/remote), the substream is chilled for a
+/// small amount of time ([`DEFAULT_BACKOFF`]) and during this time no inbound or outbound
+/// substreams are accepted/established. Any request to open an outbound substream while the peer
+/// is backed-off is ignored. If the peer is a reserved peer, an outbound substream is not opened
+/// for them immediately but after the back-off has expired, `Peerset` will attempt to open a
+/// substream to the peer if it's still counted as a reserved peer.
+///
+/// Disconnections and open failures will contribute negatively to the peer score to prevent it from
+/// being selected for another outbound substream request soon after the failure/disconnection. The
+/// reputation decays towards zero over time and eventually the peer will be as likely to be
+/// selected for an outbound substream as any other freshly added peer.
+///
+/// [`Peerset`] must also be able to handle the case where an outbound substream was opened to peer
+/// and while it was opening, an inbound substream was received from that same peer. Since `litep2p`
+/// is the source of truth of the actual state of the connection, [`Peerset`] must compensate for
+/// this and if it happens that inbound substream is opened for a peer that was marked outbound, it
+/// will attempt to allocate an inbound slot for the peer. If it fails to do so, the inbound
+/// substream is rejected and the peer is marked as canceled.
+///
+/// Since substream is not opened immediately, a peer can be disconnected even if the substream was
+/// not yet open. This can happen, for example, when a peer has connected over the syncing protocol
+/// and it was added to, e.g., GRANDPA's reserved peers, an outbound substream was opened
+/// ([`PeerState::Opening`]) and then the peer disconnected. This state transition is handled by the
+/// [`Peerset`] with `PeerState::Canceled` which indicates that should the substream open
+/// successfully, it should be closed immediately and if the connection is opened successfully while
+/// the peer was marked as canceled, the substream will be closed without notifying the protocol
+/// about the substream.
+#[derive(Debug, PartialEq, Eq)]
+pub enum PeerState {
+	/// No active connection to peer.
+	Disconnected,
+
+	/// Substream to peer was recently closed and the peer is currently backed off.
+	///
+	/// Backoff only applies to outbound substreams. Inbound substream will not experience any sort
+	/// of "banning" even if the peer is backed off and an inbound substream for the peer is
+	/// received.
+	Backoff,
+
+	/// Connection to peer is pending.
+	Opening {
+		/// Direction of the connection.
+		direction: Direction,
+	},
+
+	/// Connected to peer.
+	Connected {
+		/// Is the peer inbound or outbound.
+		direction: Direction,
+	},
+
+	/// Substream was opened and while it was opening (no response had been heard from litep2p),
+	/// the substream was canceled by either calling `disconnect_peer()` or by removing peer
+	/// from the reserved set.
+	///
+	/// After the opened substream is acknowledged by litep2p (open success/failure), the peer is
+	/// moved to [`PeerState::Backoff`] from which it will then be moved to
+	/// [`PeerState::Disconnected`].
+	Canceled {
+		/// Is the peer inbound or outbound.
+		direction: Direction,
+	},
+
+	/// Connection to peer is closing.
+	///
+	/// State implies that the substream was asked to be closed by the local node and litep2p is
+	/// closing the substream. No command modifying the connection state is accepted until the
+	/// state has been set to [`PeerState::Disconnected`].
+	Closing {
+		/// Is the peer inbound or outbound.
+		direction: Direction,
+	},
+}
+
+/// `Peerset` implementation.
+///
+/// `Peerset` allows other subsystems of the blockchain to modify the connection state
+/// of the notification protocol by adding and removing reserved peers.
+///
+/// `Peerset` is also responsible for maintaining the desired amount of peers the protocol is
+/// connected to by establishing outbound connections and accepting/rejecting inbound connections.
+#[derive(Debug)]
+pub struct Peerset {
+	/// Protocol name.
+	protocol: ProtocolName,
+
+	/// RX channel for receiving commands.
+	cmd_rx: TracingUnboundedReceiver<PeersetCommand>,
+
+	/// Maximum number of outbound peers.
+	max_out: usize,
+
+	/// Current number of outbound peers.
+	num_out: usize,
+
+	/// Maximum number of inbound peers.
+	max_in: usize,
+
+	/// Current number of inbound peers.
+	num_in: usize,
+
+	/// Only connect to/accept connections from reserved peers.
+	reserved_only: bool,
+
+	/// Current reserved peer set.
+	reserved_peers: HashSet<PeerId>,
+
+	/// Handle to `Peerstore`.
+	peerstore_handle: Arc<dyn PeerStoreProvider>,
+
+	/// Peers.
+	peers: HashMap<PeerId, PeerState>,
+
+	/// Counter for connected peers.
+	connected_peers: Arc<AtomicUsize>,
+
+	/// Pending backoffs for peers who recently disconnected.
+	pending_backoffs: FuturesUnordered<BoxFuture<'static, (PeerId, Reputation)>>,
+
+	/// Next time when [`Peerset`] should perform slot allocation.
+	next_slot_allocation: Delay,
+}
+
+macro_rules! decrement_or_warn {
+    ($slot:expr, $protocol:expr, $peer:expr, $direction:expr) => {{
+		match $slot.checked_sub(1) {
+			Some(value) => {
+				$slot = value;
+			}
+			None => {
+				log::warn!(
+					target: LOG_TARGET,
+					"{}: state mismatch, {:?} is not counted as part of {:?} slots",
+					$protocol, $peer, $direction
+				);
+				debug_assert!(false);
+			}
+		}
+    }};
+}
+
+/// Handle to [`Peerset`], given to `Peerstore`.
+#[derive(Debug)]
+struct PeersetHandle {
+	/// TX channel for sending commands to [`Peerset`].
+	tx: TracingUnboundedSender<PeersetCommand>,
+}
+
+impl ProtocolHandle for PeersetHandle {
+	/// Disconnect peer, as a result of a ban.
+	fn disconnect_peer(&self, peer: PeerId) {
+		let _ = self.tx.unbounded_send(PeersetCommand::DisconnectPeer { peer });
+	}
+}
+
+impl Peerset {
+	/// Create new [`Peerset`].
+	///
+	/// Returns the [`Peerset`] together with the TX side of its command channel, which can be
+	/// used to send [`PeersetCommand`]s to the event loop.
+	pub fn new(
+		protocol: ProtocolName,
+		max_out: usize,
+		max_in: usize,
+		reserved_only: bool,
+		reserved_peers: HashSet<PeerId>,
+		connected_peers: Arc<AtomicUsize>,
+		peerstore_handle: Arc<dyn PeerStoreProvider>,
+	) -> (Self, TracingUnboundedSender<PeersetCommand>) {
+		let (cmd_tx, cmd_rx) = tracing_unbounded("mpsc-peerset-protocol", 100_000);
+
+		// all reserved peers are initially known but disconnected
+		let mut peers = HashMap::new();
+		for peer in reserved_peers.iter() {
+			peers.insert(*peer, PeerState::Disconnected);
+		}
+
+		// register protocol's command channel to `Peerstore` so it can issue disconnect commands
+		// if some connected peer gets banned.
+		peerstore_handle.register_protocol(Arc::new(PeersetHandle { tx: cmd_tx.clone() }));
+
+		let peerset = Self {
+			protocol,
+			cmd_rx,
+			max_out,
+			num_out: 0,
+			max_in,
+			num_in: 0,
+			reserved_only,
+			reserved_peers,
+			peerstore_handle,
+			peers,
+			connected_peers,
+			pending_backoffs: FuturesUnordered::new(),
+			next_slot_allocation: Delay::new(SLOT_ALLOCATION_FREQUENCY),
+		};
+
+		(peerset, cmd_tx)
+	}
+
+	/// Report to [`Peerset`] that a substream was opened.
+	///
+	/// Slot for the stream was "preallocated" when it was initiated (outbound) or accepted
+	/// (inbound) by the local node which is why this function doesn't allocate a slot for the peer.
+	///
+	/// Returns [`OpenResult::Accept`] if the substream should be kept open and
+	/// [`OpenResult::Reject`] if the substream had been canceled while it was opening (or the
+	/// peer is in an unexpected state) and litep2p should close the substream.
+	pub fn report_substream_opened(
+		&mut self,
+		peer: PeerId,
+		direction: traits::Direction,
+	) -> OpenResult {
+		log::trace!(
+			target: LOG_TARGET,
+			"{}: substream opened to {peer:?}, direction {direction:?}, reserved peer {}",
+			self.protocol,
+			self.reserved_peers.contains(&peer),
+		);
+
+		let Some(state) = self.peers.get_mut(&peer) else {
+			log::warn!(target: LOG_TARGET, "{}: substream opened for unknown peer {peer:?}", self.protocol);
+			debug_assert!(false);
+			return OpenResult::Reject
+		};
+
+		match state {
+			PeerState::Opening { direction: substream_direction } => {
+				let real_direction: traits::Direction = (*substream_direction).into();
+
+				*state = PeerState::Connected { direction: *substream_direction };
+				self.connected_peers.fetch_add(1usize, Ordering::Relaxed);
+
+				return OpenResult::Accept { direction: real_direction }
+			},
+			// litep2p doesn't support the ability to cancel an opening substream so if the
+			// substream was closed while it was opening, it was marked as canceled and if the
+			// substream opens successfully, it will be closed
+			PeerState::Canceled { direction: substream_direction } => {
+				log::trace!(
+					target: LOG_TARGET,
+					"{}: substream to {peer:?} is canceled, issue disconnection request",
+					self.protocol,
+				);
+
+				self.connected_peers.fetch_add(1usize, Ordering::Relaxed);
+				*state = PeerState::Closing { direction: *substream_direction };
+
+				return OpenResult::Reject
+			},
+			// state mismatch: log a warning, trip the `debug_assert` in debug builds and reject
+			// the substream instead of bringing down the whole node with a panic. This matches
+			// how every other state mismatch is handled in this file.
+			state => {
+				log::warn!(
+					target: LOG_TARGET,
+					"{}: invalid state for open substream {peer:?} {state:?}",
+					self.protocol,
+				);
+				debug_assert!(false);
+				return OpenResult::Reject
+			},
+		}
+	}
+
+	/// Report to [`Peerset`] that a substream was closed.
+	///
+	/// If the peer was not a reserved peer, the inbound/outbound slot count is adjusted to account
+	/// for the disconnected peer. After the connection is closed, the peer is chilled for a
+	/// duration of [`DEFAULT_BACKOFF`] which prevents [`Peerset`] from establishing/accepting new
+	/// connections for that time period.
+	pub fn report_substream_closed(&mut self, peer: PeerId) {
+		log::trace!(target: LOG_TARGET, "{}: substream closed to {peer:?}", self.protocol);
+
+		let Some(state) = self.peers.get_mut(&peer) else {
+			log::warn!(target: LOG_TARGET, "{}: substream closed for unknown peer {peer:?}", self.protocol);
+			debug_assert!(false);
+			return
+		};
+
+		match &state {
+			// close was initiated either by remote ([`PeerState::Connected`]) or local node
+			// ([`PeerState::Closing`]) and it was a non-reserved peer
+			PeerState::Connected { direction: Direction::Inbound(Reserved::No) } |
+			PeerState::Closing { direction: Direction::Inbound(Reserved::No) } => {
+				log::trace!(
+					target: LOG_TARGET,
+					"{}: inbound substream closed to non-reserved peer {peer:?}: {state:?}",
+					self.protocol,
+				);
+
+				// arguments follow the macro's expected order: (slot, protocol, peer, direction)
+				decrement_or_warn!(
+					self.num_in,
+					self.protocol,
+					peer,
+					Direction::Inbound(Reserved::No)
+				);
+			},
+			// close was initiated either by remote ([`PeerState::Connected`]) or local node
+			// ([`PeerState::Closing`]) and it was a non-reserved peer
+			PeerState::Connected { direction: Direction::Outbound(Reserved::No) } |
+			PeerState::Closing { direction: Direction::Outbound(Reserved::No) } => {
+				log::trace!(
+					target: LOG_TARGET,
+					"{}: outbound substream closed to non-reserved peer {peer:?} {state:?}",
+					self.protocol,
+				);
+
+				decrement_or_warn!(
+					self.num_out,
+					self.protocol,
+					peer,
+					Direction::Outbound(Reserved::No)
+				);
+			},
+			// reserved peers don't require adjustments to slot counts
+			PeerState::Closing { .. } | PeerState::Connected { .. } => {
+				log::debug!(target: LOG_TARGET, "{}: reserved peer {peer:?} disconnected", self.protocol);
+			},
+			state => {
+				log::warn!(target: LOG_TARGET, "{}: invalid state for disconnected peer {peer:?}: {state:?}", self.protocol);
+				debug_assert!(false);
+			},
+		}
+		*state = PeerState::Backoff;
+
+		// chill the peer: once the back-off expires, apply a small reputation adjustment
+		self.connected_peers.fetch_sub(1usize, Ordering::Relaxed);
+		self.pending_backoffs.push(Box::pin(async move {
+			Delay::new(DEFAULT_BACKOFF).await;
+			(peer, DISCONNECT_ADJUSTMENT)
+		}));
+	}
+
+	/// Report to [`Peerset`] that an inbound substream was opened and that it should validate it.
+	///
+	/// Returns [`ValidationResult::Accept`] if the peer is a reserved peer or a free inbound
+	/// slot is available, otherwise [`ValidationResult::Reject`].
+	pub fn report_inbound_substream(&mut self, peer: PeerId) -> ValidationResult {
+		log::trace!(target: LOG_TARGET, "{}: inbound substream from {peer:?}", self.protocol);
+
+		// banned peers are rejected outright, regardless of their connectivity state
+		if self.peerstore_handle.is_banned(&peer) {
+			log::debug!(
+				target: LOG_TARGET,
+				"{}: rejecting banned peer {peer:?}",
+				self.protocol,
+			);
+
+			return ValidationResult::Reject;
+		}
+
+		let state = self.peers.entry(peer).or_insert(PeerState::Disconnected);
+		let is_reserved_peer = self.reserved_peers.contains(&peer);
+
+		match state {
+			// disconnected peers proceed directly to inbound slot allocation
+			PeerState::Disconnected => {},
+			// peer is backed off but if it can be accepted (either a reserved peer or inbound slot
+			// available), accept the peer and then just ignore the back-off timer when it expires
+			PeerState::Backoff =>
+				if !is_reserved_peer && self.num_in == self.max_in {
+					log::trace!(
+						target: LOG_TARGET,
+						"{}: ({peer:?}) is backed-off and cannot accept, reject inbound substream",
+						self.protocol,
+					);
+
+					return ValidationResult::Reject
+				},
+			// `Peerset` had initiated an outbound substream but litep2p had received an inbound
+			// substream before the command to open the substream was received, meaning local and
+			// remote desired to open a connection at the same time. Since outbound substreams
+			// cannot be canceled with litep2p and the command has already been registered, accept
+			// the inbound peer since the local node had wished a connection to be opened either way
+			// but keep the direction of the substream as it was (outbound).
+			//
+			// litep2p doesn't care what `Peerset` considers the substream direction to be and since
+			// it's used for bookkeeping for substream counts, keeping the substream direction
+			// unmodified simplies the implementation a lot. The direction would otherwise be
+			// irrelevant for protocols but because `SyncingEngine` has a hack to reject excess
+			// inbound substreams, that system has to be kept working for the time being. Once that
+			// issue is fixed, this approach can be re-evaluated if need be.
+			PeerState::Opening { direction: Direction::Outbound(reserved) } => {
+				log::trace!(
+					target: LOG_TARGET,
+					"{}: inbound substream received for {peer:?} ({reserved:?}) that was marked outbound",
+					self.protocol,
+				);
+
+				return ValidationResult::Accept;
+			},
+			PeerState::Canceled { direction } => {
+				log::trace!(
+					target: LOG_TARGET,
+					"{}: {peer:?} is canceled, rejecting substream",
+					self.protocol,
+				);
+
+				// NOTE(review): this writes back the value the state already holds, i.e. the peer
+				// stays `Canceled` — confirm the reassignment is intentional.
+				*state = PeerState::Canceled { direction: *direction };
+				return ValidationResult::Reject
+			},
+			state => {
+				log::warn!(
+					target: LOG_TARGET,
+					"{}: invalid state ({state:?}) for inbound substream, peer {peer:?}",
+					self.protocol
+				);
+				debug_assert!(false);
+				return ValidationResult::Reject
+			},
+		}
+
+		// reserved peers are always accepted and never consume an inbound slot
+		if is_reserved_peer {
+			log::trace!(
+				target: LOG_TARGET,
+				"{}: {peer:?} accepting peer as reserved peer",
+				self.protocol,
+			);
+
+			*state = PeerState::Opening { direction: Direction::Inbound(is_reserved_peer.into()) };
+			return ValidationResult::Accept
+		}
+
+		// NOTE(review): `self.reserved_only` is not consulted here, so non-reserved inbound
+		// peers are accepted whenever a slot is free — TODO confirm this is intended when the
+		// peerset is operating in reserved-only mode.
+		if self.num_in < self.max_in {
+			log::trace!(
+				target: LOG_TARGET,
+				"{}: {peer:?} accepting peer as regular peer",
+				self.protocol,
+			);
+
+			self.num_in += 1;
+
+			*state = PeerState::Opening { direction: Direction::Inbound(is_reserved_peer.into()) };
+			return ValidationResult::Accept
+		}
+
+		log::trace!(
+			target: LOG_TARGET,
+			"{}: reject {peer:?}, not a reserved peer and no free inbound slots",
+			self.protocol,
+		);
+
+		*state = PeerState::Disconnected;
+		return ValidationResult::Reject
+	}
+
+	/// Report to [`Peerset`] that there was an error opening a substream.
+	///
+	/// Any inbound/outbound slot that was preallocated for the non-reserved peer is released
+	/// and the peer is put on a back-off; once the back-off expires, a reputation adjustment
+	/// is applied via the `Peerstore`.
+	pub fn report_substream_open_failure(&mut self, peer: PeerId, error: NotificationError) {
+		log::trace!(
+			target: LOG_TARGET,
+			"{}: failed to open substream to {peer:?}: {error:?}",
+			self.protocol,
+		);
+
+		match self.peers.get(&peer) {
+			Some(PeerState::Opening { direction: Direction::Outbound(Reserved::No) }) => {
+				decrement_or_warn!(
+					self.num_out,
+					self.protocol,
+					peer,
+					Direction::Outbound(Reserved::No)
+				);
+			},
+			Some(PeerState::Opening { direction: Direction::Inbound(Reserved::No) }) => {
+				decrement_or_warn!(
+					self.num_in,
+					self.protocol,
+					peer,
+					Direction::Inbound(Reserved::No)
+				);
+			},
+			// the substream was canceled while it was opening; the slot is still held and must
+			// be released for non-reserved peers
+			Some(PeerState::Canceled { direction }) => match direction {
+				Direction::Inbound(Reserved::No) => {
+					decrement_or_warn!(
+						self.num_in,
+						self.protocol,
+						peer,
+						Direction::Inbound(Reserved::No)
+					);
+				},
+				Direction::Outbound(Reserved::No) => {
+					decrement_or_warn!(
+						self.num_out,
+						self.protocol,
+						peer,
+						Direction::Outbound(Reserved::No)
+					);
+				},
+				_ => {},
+			},
+			// reserved peers do not require change in the slot counts
+			Some(PeerState::Opening { direction: Direction::Inbound(Reserved::Yes) }) |
+			Some(PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) }) => {
+				log::debug!(
+					target: LOG_TARGET,
+					"{}: substream open failure for reserved peer {peer:?}",
+					self.protocol,
+				);
+			},
+			// unexpected state: leave the peer's state untouched and skip the back-off below
+			state => {
+				log::debug!(
+					target: LOG_TARGET,
+					"{}: substream open failure for a unknown state: {state:?}",
+					self.protocol,
+				);
+
+				return;
+			},
+		}
+
+		// back the peer off; the reputation adjustment is applied once the back-off expires
+		self.peers.insert(peer, PeerState::Backoff);
+		self.pending_backoffs.push(Box::pin(async move {
+			Delay::new(OPEN_FAILURE_BACKOFF).await;
+			(peer, OPEN_FAILURE_ADJUSTMENT)
+		}));
+	}
+
+	/// [`Peerset`] had accepted a peer but it was then rejected by the protocol.
+	///
+	/// Any slot preallocated for a non-reserved opening peer is released and the peer is marked
+	/// [`PeerState::Disconnected`]; peers in other states keep their current state.
+	pub fn report_substream_rejected(&mut self, peer: PeerId) {
+		log::trace!(target: LOG_TARGET, "{}: {peer:?} rejected by the protocol", self.protocol);
+
+		match self.peers.remove(&peer) {
+			Some(PeerState::Opening { direction }) => match direction {
+				Direction::Inbound(Reserved::Yes) | Direction::Outbound(Reserved::Yes) => {
+					log::warn!(
+						target: LOG_TARGET,
+						"{}: reserved peer {peer:?} rejected by the protocol",
+						self.protocol,
+					);
+					self.peers.insert(peer, PeerState::Disconnected);
+				},
+				Direction::Inbound(Reserved::No) => {
+					// arguments follow the macro's expected order:
+					// (slot, protocol, peer, direction)
+					decrement_or_warn!(
+						self.num_in,
+						self.protocol,
+						peer,
+						Direction::Inbound(Reserved::No)
+					);
+					self.peers.insert(peer, PeerState::Disconnected);
+				},
+				Direction::Outbound(Reserved::No) => {
+					decrement_or_warn!(
+						self.num_out,
+						self.protocol,
+						peer,
+						Direction::Outbound(Reserved::No)
+					);
+					self.peers.insert(peer, PeerState::Disconnected);
+				},
+			},
+			Some(state @ PeerState::Canceled { .. }) => {
+				log::debug!(
+					target: LOG_TARGET,
+					"{}: substream to {peer:?} rejected by protocol but already canceled",
+					self.protocol,
+				);
+
+				self.peers.insert(peer, state);
+			},
+			Some(state) => {
+				log::debug!(
+					target: LOG_TARGET,
+					"{}: {peer:?} rejected by the protocol but not opening anymore: {state:?}",
+					self.protocol,
+				);
+
+				self.peers.insert(peer, state);
+			},
+			None => {},
+		}
+	}
+
+	/// Calculate how many of the connected peers were counted as normal inbound/outbound peers
+	/// which is needed to adjust slot counts when new reserved peers are added.
+	///
+	/// As a side effect, every peer in `peers` that currently occupies a regular slot is
+	/// re-marked as reserved, and unknown peers are inserted as [`PeerState::Disconnected`].
+	/// Returns `(inbound, outbound)` counts of re-marked peers.
+	fn calculate_slot_adjustment<'a>(
+		&'a mut self,
+		peers: impl Iterator<Item = &'a PeerId>,
+	) -> (usize, usize) {
+		let mut inbound = 0;
+		let mut outbound = 0;
+
+		for peer in peers {
+			match self.peers.get_mut(peer) {
+				// not connected: no slot held, nothing to adjust
+				Some(PeerState::Disconnected | PeerState::Backoff) => {},
+				Some(
+					PeerState::Opening { ref mut direction } |
+					PeerState::Connected { ref mut direction } |
+					PeerState::Canceled { ref mut direction } |
+					PeerState::Closing { ref mut direction },
+				) => {
+					// a peer holding a regular slot becomes reserved; count it so the caller
+					// can release the slot
+					*direction = match *direction {
+						Direction::Inbound(Reserved::No) => {
+							inbound += 1;
+							Direction::Inbound(Reserved::Yes)
+						},
+						Direction::Outbound(Reserved::No) => {
+							outbound += 1;
+							Direction::Outbound(Reserved::Yes)
+						},
+						other => other,
+					};
+				},
+				None => {
+					self.peers.insert(*peer, PeerState::Disconnected);
+				},
+			}
+		}
+
+		(inbound, outbound)
+	}
+
+	/// Get the number of inbound peers (test-only accessor).
+	#[cfg(test)]
+	pub fn num_in(&self) -> usize {
+		self.num_in
+	}
+
+	/// Get the number of outbound peers (test-only accessor).
+	#[cfg(test)]
+	pub fn num_out(&self) -> usize {
+		self.num_out
+	}
+
+	/// Get reference to known peers (test-only accessor).
+	#[cfg(test)]
+	pub fn peers(&self) -> &HashMap<PeerId, PeerState> {
+		&self.peers
+	}
+
+	/// Get reference to reserved peers (test-only accessor).
+	#[cfg(test)]
+	pub fn reserved_peers(&self) -> &HashSet<PeerId> {
+		&self.reserved_peers
+	}
+}
+
+impl Stream for Peerset {
+	type Item = PeersetNotificationCommand;
+
+	// Event loop of the peerset: drains expired back-offs, processes commands sent by the
+	// protocol/`Peerstore` and performs periodic slot allocation, emitting substream
+	// open/close commands towards litep2p.
+	fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+		while let Poll::Ready(Some((peer, reputation))) = self.pending_backoffs.poll_next_unpin(cx)
+		{
+			log::trace!(target: LOG_TARGET, "{}: backoff expired for {peer:?}", self.protocol);
+
+			// only reset the peer to `Disconnected` if it is still backed off; it may have
+			// reconnected (e.g. as a reserved peer) while the back-off was pending
+			if std::matches!(self.peers.get(&peer), None | Some(PeerState::Backoff)) {
+				self.peers.insert(peer, PeerState::Disconnected);
+			}
+
+			self.peerstore_handle.report_peer(peer, reputation);
+		}
+
+		if let Poll::Ready(Some(action)) = Pin::new(&mut self.cmd_rx).poll_next(cx) {
+			match action {
+				PeersetCommand::DisconnectPeer { peer } if !self.reserved_peers.contains(&peer) =>
+					match self.peers.remove(&peer) {
+						Some(PeerState::Connected { direction }) => {
+							log::trace!(
+								target: LOG_TARGET,
+								"{}: close connection to {peer:?}, direction {direction:?}",
+								self.protocol,
+							);
+
+							self.peers.insert(peer, PeerState::Closing { direction });
+							return Poll::Ready(Some(PeersetNotificationCommand::CloseSubstream {
+								peers: vec![peer],
+							}))
+						},
+						Some(PeerState::Backoff) => {
+							log::trace!(
+								target: LOG_TARGET,
+								"{}: cannot disconnect {peer:?}, already backed-off",
+								self.protocol,
+							);
+
+							self.peers.insert(peer, PeerState::Backoff);
+						},
+						// substream might have been opening but not yet fully open when the
+						// protocol or `Peerstore` request the connection to be closed
+						//
+						// if the substream opens successfully, close it immediately and mark the
+						// peer as `Disconnected`
+						Some(PeerState::Opening { direction }) => {
+							log::trace!(
+								target: LOG_TARGET,
+								"{}: canceling substream to disconnect peer {peer:?}",
+								self.protocol,
+							);
+
+							self.peers.insert(peer, PeerState::Canceled { direction });
+						},
+						// protocol had issued two disconnection requests in rapid succession and
+						// the substream hadn't closed before the second disconnection request was
+						// received, this is harmless and can be ignored.
+						Some(state @ PeerState::Closing { .. }) => {
+							log::trace!(
+								target: LOG_TARGET,
+								"{}: cannot disconnect {peer:?}, already closing ({state:?})",
+								self.protocol,
+							);
+
+							self.peers.insert(peer, state);
+						},
+						// if peer is banned, e.g. due to genesis mismatch, `Peerstore` will issue a
+						// global disconnection request to all protocols, irrespective of the
+						// connectivity state. Peer isn't necessarily connected to all protocols at
+						// all times so this is a harmless state to be in if a disconnection request
+						// is received.
+						Some(state @ PeerState::Disconnected) => {
+							self.peers.insert(peer, state);
+						},
+						// peer had an opening substream earlier which was canceled and then,
+						// e.g., the peer was banned which caused it to be disconnected again
+						Some(state @ PeerState::Canceled { .. }) => {
+							log::debug!(
+								target: LOG_TARGET,
+								"{}: cannot disconnect {peer:?}, already canceled ({state:?})",
+								self.protocol,
+							);
+
+							self.peers.insert(peer, state);
+						},
+						// peer doesn't exist
+						//
+						// this can happen, for example, when peer connects over
+						// `/block-announces/1` and it has wrong genesis hash which initiates a ban
+						// for that peer. Since the ban is reported to all protocols but the peer
+						// mightn't have been registered to GRANDPA or transactions yet, the peer
+						// doesn't exist in their `Peerset`s and the error can just be ignored.
+						None => {
+							log::debug!(target: LOG_TARGET, "{}: {peer:?} doesn't exist", self.protocol);
+						},
+					},
+				PeersetCommand::DisconnectPeer { peer } => {
+					log::debug!(
+						target: LOG_TARGET,
+						"{}: ignoring disconnection request for reserved peer {peer}",
+						self.protocol,
+					);
+				},
+				// set new reserved peers for the protocol
+				//
+				// current reserved peers not in the new set are disconnected and the new reserved
+				// peers are scheduled for outbound substreams
+				PeersetCommand::SetReservedPeers { peers } => {
+					log::debug!(target: LOG_TARGET, "{}: set reserved peers {peers:?}", self.protocol);
+
+					// reserved peers don't consume any slots so if there are any regular connected
+					// peers, inbound/outbound slot count must be adjusted to not account for these
+					// peers anymore
+					//
+					// calculate how many of the previously connected peers were counted as regular
+					// peers and subtract these counts from `num_out`/`num_in`
+					//
+					// NOTE(review): assumes the adjustment never exceeds the current slot counts —
+					// an underflow here would panic in debug builds.
+					let (in_peers, out_peers) = self.calculate_slot_adjustment(peers.iter());
+					self.num_out -= out_peers;
+					self.num_in -= in_peers;
+
+					// add all unknown peers to `self.peers`
+					peers.iter().for_each(|peer| {
+						if !self.peers.contains_key(peer) {
+							self.peers.insert(*peer, PeerState::Disconnected);
+						}
+					});
+
+					// collect all peers who are not in the new reserved set
+					let peers_to_remove = self
+						.peers
+						.iter()
+						.filter_map(|(peer, _)| (!peers.contains(peer)).then_some(*peer))
+						.collect::<HashSet<_>>();
+
+					self.reserved_peers = peers;
+
+					let peers = peers_to_remove
+						.into_iter()
+						.filter(|peer| {
+							match self.peers.remove(&peer) {
+								Some(PeerState::Connected { direction }) => {
+									log::trace!(
+										target: LOG_TARGET,
+										"{}: close connection to {peer:?}, direction {direction:?}",
+										self.protocol,
+									);
+
+									self.peers.insert(*peer, PeerState::Closing { direction });
+									true
+								},
+								// substream might have been opening but not yet fully open when
+								// the protocol request the reserved set to be changed
+								Some(PeerState::Opening { direction }) => {
+									log::trace!(
+										target: LOG_TARGET,
+										"{}: cancel substream to {peer:?}, direction {direction:?}",
+										self.protocol,
+									);
+
+									self.peers.insert(*peer, PeerState::Canceled { direction });
+									false
+								},
+								Some(state) => {
+									self.peers.insert(*peer, state);
+									false
+								},
+								None => {
+									log::debug!(target: LOG_TARGET, "{}: {peer:?} doesn't exist", self.protocol);
+									debug_assert!(false);
+									false
+								},
+							}
+						})
+						.collect();
+
+					log::trace!(
+						target: LOG_TARGET,
+						"{}: close substreams to {peers:?}",
+						self.protocol,
+					);
+
+					return Poll::Ready(Some(PeersetNotificationCommand::CloseSubstream { peers }))
+				},
+				PeersetCommand::AddReservedPeers { peers } => {
+					log::debug!(target: LOG_TARGET, "{}: add reserved peers {peers:?}", self.protocol);
+
+					// reserved peers don't consume any slots so if there are any regular connected
+					// peers, inbound/outbound slot count must be adjusted to not account for these
+					// peers anymore
+					//
+					// calculate how many of the previously connected peers were counted as regular
+					// peers and subtract these counts from `num_out`/`num_in`
+					let (in_peers, out_peers) = self.calculate_slot_adjustment(peers.iter());
+					self.num_out -= out_peers;
+					self.num_in -= in_peers;
+
+					let peers = peers
+						.iter()
+						.filter_map(|peer| {
+							if !self.reserved_peers.insert(*peer) {
+								log::warn!(
+									target: LOG_TARGET,
+									"{}: {peer:?} is already a reserved peer",
+									self.protocol,
+								);
+								return None
+							}
+
+							// only schedule outbound substreams for peers that are currently
+							// disconnected (or unknown)
+							std::matches!(
+								self.peers.get_mut(peer),
+								None | Some(PeerState::Disconnected)
+							)
+							.then(|| {
+								self.peers.insert(
+									*peer,
+									PeerState::Opening {
+										direction: Direction::Outbound(Reserved::Yes),
+									},
+								);
+								*peer
+							})
+						})
+						.collect();
+
+					log::debug!(target: LOG_TARGET, "{}: start connecting to {peers:?}", self.protocol);
+
+					return Poll::Ready(Some(PeersetNotificationCommand::OpenSubstream { peers }))
+				},
+				PeersetCommand::RemoveReservedPeers { peers } => {
+					log::debug!(target: LOG_TARGET, "{}: remove reserved peers {peers:?}", self.protocol);
+
+					let peers_to_remove = peers
+						.iter()
+						.filter_map(|peer| {
+							if !self.reserved_peers.remove(peer) {
+								log::debug!(
+									target: LOG_TARGET,
+									"{}: {peer} is not a reserved peer",
+									self.protocol,
+								);
+								return None
+							}
+
+							match self.peers.remove(peer)? {
+								// peer might have already disconnected by the time request to
+								// disconnect them was received and the peer was backed off but
+								// it had not expired by the time the request to disconnect the
+								// peer was received
+								PeerState::Backoff => {
+									log::trace!(
+										target: LOG_TARGET,
+										"{}: cannot disconnect removed reserved peer {peer:?}, already backed-off",
+										self.protocol,
+									);
+
+									self.peers.insert(*peer, PeerState::Backoff);
+									None
+								},
+								// if there is a rapid change in substream state, the peer may
+								// be canceled when the substream is asked to be closed.
+								//
+								// this can happen if the substream is first opened and then very
+								// soon after canceled. The substream may not have had time to
+								// open yet and second open is ignored. If the substream is now
+								// closed again before it has had time to open, it will be in
+								// canceled state since `Peerset` is still waiting to hear
+								// either success/failure on the original substream it tried to
+								// cancel.
+								PeerState::Canceled { direction } => {
+									log::trace!(
+										target: LOG_TARGET,
+										"{}: cannot disconnect removed reserved peer {peer:?}, already canceled",
+										self.protocol,
+									);
+
+									self.peers.insert(*peer, PeerState::Canceled { direction });
+									None
+								},
+								// substream to the peer might have failed to open which caused
+								// the peer to be backed off
+								//
+								// the back-off might've expired by the time the peer was
+								// disconnected at which point the peer is already disconnected
+								// when the protocol asked the peer to be disconnected
+								PeerState::Disconnected => {
+									log::trace!(
+										target: LOG_TARGET,
+										"{}: cannot disconnect removed reserved peer {peer:?}, already disconnected",
+										self.protocol,
+									);
+
+									self.peers.insert(*peer, PeerState::Disconnected);
+									None
+								},
+								// if a node disconnects, it's put into `PeerState::Closing`
+								// which indicates that `Peerset` wants the substream closed and
+								// has asked litep2p to close it but it hasn't yet received a
+								// confirmation. If the peer is added as a reserved peer while
+								// the substream is closing, the peer will remain in the closing
+								// state as `Peerset` can't do anything with the peer until it
+								// has heard from litep2p. It's possible that the peer is then
+								// removed from the reserved set before substream close event
+								// has been reported to `Peerset` (which the code below is
+								// handling) and it will once again be ignored until the close
+								// event is heard from litep2p.
+								PeerState::Closing { direction } => {
+									log::trace!(
+										target: LOG_TARGET,
+										"{}: cannot disconnect removed reserved peer {peer:?}, already closing",
+										self.protocol,
+									);
+
+									self.peers.insert(*peer, PeerState::Closing { direction });
+									None
+								},
+								// peer is currently connected as a reserved peer
+								//
+								// check if the peer can be accepted as a regular peer based on its
+								// substream direction and available slots
+								//
+								// if there are enough slots, the peer is just converted to
+								// a regular peer and the used slot count is increased and if the
+								// peer cannot be accepted, litep2p is asked to close the substream.
+								PeerState::Connected { direction } => match direction {
+									Direction::Inbound(_) => match self.num_in < self.max_in {
+										true => {
+											log::trace!(
+												target: LOG_TARGET,
+												"{}: {peer:?} converted to regular inbound peer (inbound open)",
+												self.protocol,
+											);
+
+											self.num_in += 1;
+											self.peers.insert(
+												*peer,
+												PeerState::Connected {
+													direction: Direction::Inbound(Reserved::No),
+												},
+											);
+
+											None
+										},
+										false => {
+											self.peers.insert(
+												*peer,
+												PeerState::Closing {
+													direction: Direction::Inbound(Reserved::Yes),
+												},
+											);
+
+											Some(*peer)
+										},
+									},
+									Direction::Outbound(_) => match self.num_out < self.max_out {
+										true => {
+											log::trace!(
+												target: LOG_TARGET,
+												"{}: {peer:?} converted to regular outbound peer (outbound open)",
+												self.protocol,
+											);
+
+											self.num_out += 1;
+											self.peers.insert(
+												*peer,
+												PeerState::Connected {
+													direction: Direction::Outbound(Reserved::No),
+												},
+											);
+
+											None
+										},
+										false => {
+											self.peers.insert(
+												*peer,
+												PeerState::Closing {
+													direction: Direction::Outbound(Reserved::Yes),
+												},
+											);
+
+											Some(*peer)
+										},
+									},
+								},
+								PeerState::Opening { direction } => match direction {
+									Direction::Inbound(_) => match self.num_in < self.max_in {
+										true => {
+											log::trace!(
+												target: LOG_TARGET,
+												"{}: {peer:?} converted to regular inbound peer (inbound opening)",
+												self.protocol,
+											);
+
+											self.num_in += 1;
+											self.peers.insert(
+												*peer,
+												PeerState::Opening {
+													direction: Direction::Inbound(Reserved::No),
+												},
+											);
+
+											None
+										},
+										false => {
+											self.peers.insert(
+												*peer,
+												PeerState::Canceled {
+													direction: Direction::Inbound(Reserved::Yes),
+												},
+											);
+
+											None
+										},
+									},
+									Direction::Outbound(_) => match self.num_out < self.max_out {
+										true => {
+											log::trace!(
+												target: LOG_TARGET,
+												"{}: {peer:?} converted to regular outbound peer (outbound opening)",
+												self.protocol,
+											);
+
+											self.num_out += 1;
+											self.peers.insert(
+												*peer,
+												PeerState::Opening {
+													direction: Direction::Outbound(Reserved::No),
+												},
+											);
+
+											None
+										},
+										false => {
+											self.peers.insert(
+												*peer,
+												PeerState::Canceled {
+													direction: Direction::Outbound(Reserved::Yes),
+												},
+											);
+
+											None
+										},
+									},
+								},
+							}
+						})
+						.collect();
+
+					log::debug!(
+						target: LOG_TARGET,
+						"{}: close substreams to {peers_to_remove:?}",
+						self.protocol,
+					);
+
+					return Poll::Ready(Some(PeersetNotificationCommand::CloseSubstream {
+						peers: peers_to_remove,
+					}))
+				},
+				PeersetCommand::SetReservedOnly { reserved_only } => {
+					log::debug!(target: LOG_TARGET, "{}: set reserved only mode to {reserved_only}", self.protocol);
+
+					// update mode and if it's set to true, disconnect all non-reserved peers
+					self.reserved_only = reserved_only;
+
+					if reserved_only {
+						let peers_to_remove = self
+							.peers
+							.iter()
+							.filter_map(|(peer, state)| {
+								(!self.reserved_peers.contains(peer) &&
+									std::matches!(state, PeerState::Connected { .. }))
+								.then_some(*peer)
+							})
+							.collect::<Vec<_>>();
+
+						// set peers to correct states
+
+						// peers who are connected are moved to [`PeerState::Closing`]
+						// and peers who are already opening are moved to [`PeerState::Canceled`]
+						// and if the substream for them opens, it will be closed right after.
+						//
+						// NOTE(review): this iterates over ALL peers, including reserved ones,
+						// while only non-reserved connected peers are sent a close command above —
+						// confirm reserved peers cannot get stuck in `Closing`/`Canceled` here.
+						self.peers.iter_mut().for_each(|(_, state)| match state {
+							PeerState::Connected { direction } => {
+								*state = PeerState::Closing { direction: *direction };
+							},
+							// peer for whom a substream was opening are canceled and if the
+							// substream opens successfully, it will be closed immediately
+							PeerState::Opening { direction } => {
+								*state = PeerState::Canceled { direction: *direction };
+							},
+							_ => {},
+						});
+
+						return Poll::Ready(Some(PeersetNotificationCommand::CloseSubstream {
+							peers: peers_to_remove,
+						}))
+					}
+				},
+				PeersetCommand::GetReservedPeers { tx } => {
+					let _ = tx.send(self.reserved_peers.iter().cloned().collect());
+				},
+			}
+		}
+
+		// periodically check if `Peerset` is currently not connected to some reserved peers
+		// it should be connected to
+		//
+		// also check if there are free outbound slots and if so, fetch peers with highest
+		// reputations from `Peerstore` and start opening substreams to these peers
+		if let Poll::Ready(()) = Pin::new(&mut self.next_slot_allocation).poll(cx) {
+			let mut connect_to = self
+				.peers
+				.iter()
+				.filter_map(|(peer, state)| {
+					(self.reserved_peers.contains(peer) &&
+						std::matches!(state, PeerState::Disconnected) &&
+						!self.peerstore_handle.is_banned(peer))
+					.then_some(*peer)
+				})
+				.collect::<Vec<_>>();
+
+			connect_to.iter().for_each(|peer| {
+				self.peers.insert(
+					*peer,
+					PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) },
+				);
+			});
+
+			// if the number of outbound peers is lower than the desired amount of outbound peers,
+			// query `PeerStore` and try to get new outbound candidates.
+			if self.num_out < self.max_out && !self.reserved_only {
+				let ignore: HashSet<PeerId> = self
+					.peers
+					.iter()
+					.filter_map(|(peer, state)| {
+						(!std::matches!(state, PeerState::Disconnected)).then_some(*peer)
+					})
+					.collect();
+
+				let peers: Vec<_> =
+					self.peerstore_handle.outgoing_candidates(self.max_out - self.num_out, ignore);
+
+				if peers.len() > 0 {
+					peers.iter().for_each(|peer| {
+						self.peers.insert(
+							*peer,
+							PeerState::Opening { direction: Direction::Outbound(Reserved::No) },
+						);
+					});
+
+					self.num_out += peers.len();
+					connect_to.extend(peers);
+				}
+			}
+
+			// start timer for the next allocation and if there were peers which the `Peerset`
+			// wasn't connected but should be, send command to litep2p to start opening substreams.
+			self.next_slot_allocation = Delay::new(SLOT_ALLOCATION_FREQUENCY);
+
+			if !connect_to.is_empty() {
+				log::trace!(
+					target: LOG_TARGET,
+					"{}: start connecting to peers {connect_to:?}",
+					self.protocol,
+				);
+
+				return Poll::Ready(Some(PeersetNotificationCommand::OpenSubstream {
+					peers: connect_to,
+				}))
+			}
+		}
+
+		Poll::Pending
+	}
+}
diff --git a/substrate/client/network/src/litep2p/shim/notification/tests/fuzz.rs b/substrate/client/network/src/litep2p/shim/notification/tests/fuzz.rs
new file mode 100644
index 0000000000000000000000000000000000000000..d0970e89cf3457f601269ff41583c6090f5043a4
--- /dev/null
+++ b/substrate/client/network/src/litep2p/shim/notification/tests/fuzz.rs
@@ -0,0 +1,384 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Fuzz test emulates network events and peer connection handling by `Peerset`
+//! and `PeerStore` to discover possible inconsistencies in peer management.
+
+use crate::{
+	litep2p::{
+		peerstore::Peerstore,
+		shim::notification::peerset::{
+			OpenResult, Peerset, PeersetCommand, PeersetNotificationCommand,
+		},
+	},
+	service::traits::{Direction, PeerStore, ValidationResult},
+	ProtocolName,
+};
+
+use futures::{FutureExt, StreamExt};
+use litep2p::protocol::notification::NotificationError;
+use rand::{
+	distributions::{Distribution, Uniform, WeightedIndex},
+	seq::IteratorRandom,
+};
+
+use sc_network_common::types::ReputationChange;
+use sc_network_types::PeerId;
+
+use std::{
+	collections::{HashMap, HashSet},
+	sync::Arc,
+};
+
+#[tokio::test]
+#[cfg(debug_assertions)]
+async fn run() {
+	sp_tracing::try_init_simple();
+
+	for _ in 0..50 {
+		test_once().await;
+	}
+}
+
+#[cfg(debug_assertions)]
+async fn test_once() {
+	// PRNG to use.
+	let mut rng = rand::thread_rng();
+
+	// peers that the peerset knows about.
+	let mut known_peers = HashSet::<PeerId>::new();
+
+	// peers that we have reserved. Always a subset of `known_peers`.
+	let mut reserved_peers = HashSet::<PeerId>::new();
+
+	// reserved only mode
+	let mut reserved_only = Uniform::new_inclusive(0, 10).sample(&mut rng) == 0;
+
+	// Bootnodes for `PeerStore` initialization.
+	let bootnodes = (0..Uniform::new_inclusive(0, 4).sample(&mut rng))
+		.map(|_| {
+			let id = PeerId::random();
+			known_peers.insert(id);
+			id
+		})
+		.collect();
+
+	let peerstore = Peerstore::new(bootnodes);
+	let peer_store_handle = peerstore.handle();
+
+	let (mut peerset, to_peerset) = Peerset::new(
+		ProtocolName::from("/notif/1"),
+		Uniform::new_inclusive(0, 25).sample(&mut rng),
+		Uniform::new_inclusive(0, 25).sample(&mut rng),
+		reserved_only,
+		(0..Uniform::new_inclusive(0, 2).sample(&mut rng))
+			.map(|_| {
+				let id = PeerId::random();
+				known_peers.insert(id);
+				reserved_peers.insert(id);
+				id
+			})
+			.collect(),
+		Default::default(),
+		Arc::clone(&peer_store_handle),
+	);
+
+	tokio::spawn(peerstore.run());
+
+	// opening substreams
+	let mut opening = HashMap::<PeerId, Direction>::new();
+
+	// open substreams
+	let mut open = HashMap::<PeerId, Direction>::new();
+
+	// closing substreams
+	let mut closing = HashSet::<PeerId>::new();
+
+	// closed substreams
+	let mut closed = HashSet::<PeerId>::new();
+
+	// perform a certain number of actions while checking that the state is consistent.
+	//
+	// if we reach the end of the loop, the run has succeeded
+	let _ = tokio::task::spawn_blocking(move || {
+		// PRNG to use in `spawn_blocking` context.
+		let mut rng = rand::thread_rng();
+
+		for _ in 0..2500 {
+			// each of these weights corresponds to an action that we may perform
+			let action_weights =
+				[300, 110, 110, 110, 110, 90, 70, 30, 110, 110, 110, 110, 20, 110, 50, 110];
+
+			match WeightedIndex::new(&action_weights).unwrap().sample(&mut rng) {
+				0 => match peerset.next().now_or_never() {
+					// open substreams to `peers`
+					Some(Some(PeersetNotificationCommand::OpenSubstream { peers })) =>
+						for peer in peers {
+							opening.insert(peer, Direction::Outbound);
+							closed.remove(&peer);
+
+							assert!(!closing.contains(&peer));
+							assert!(!open.contains_key(&peer));
+						},
+					// close substreams to `peers`
+					Some(Some(PeersetNotificationCommand::CloseSubstream { peers })) =>
+						for peer in peers {
+							assert!(closing.insert(peer));
+							assert!(open.remove(&peer).is_some());
+							assert!(!opening.contains_key(&peer));
+						},
+					Some(None) => panic!("peerset exited"),
+					None => {},
+				},
+				// get inbound connection from an unknown peer
+				1 => {
+					let new_peer = PeerId::random();
+					peer_store_handle.add_known_peer(new_peer);
+
+					match peerset.report_inbound_substream(new_peer) {
+						ValidationResult::Accept => {
+							opening.insert(new_peer, Direction::Inbound);
+						},
+						ValidationResult::Reject => {},
+					}
+				},
+				// substream opened successfully
+				//
+				// remove peer from `opening` (which contains its direction), report the open
+				// substream to `Peerset` and move peer state to `open`.
+				//
+				// if the substream was canceled while it was opening, move peer to `closing`
+				2 =>
+					if let Some(peer) = opening.keys().choose(&mut rng).copied() {
+						let direction = opening.remove(&peer).unwrap();
+						match peerset.report_substream_opened(peer, direction) {
+							OpenResult::Accept { .. } => {
+								assert!(open.insert(peer, direction).is_none());
+							},
+							OpenResult::Reject => {
+								assert!(closing.insert(peer));
+							},
+						}
+					},
+				// substream failed to open
+				3 =>
+					if let Some(peer) = opening.keys().choose(&mut rng).copied() {
+						let _ = opening.remove(&peer).unwrap();
+						peerset.report_substream_open_failure(peer, NotificationError::Rejected);
+					},
+				// substream was closed by remote peer
+				4 =>
+					if let Some(peer) = open.keys().choose(&mut rng).copied() {
+						let _ = open.remove(&peer).unwrap();
+						peerset.report_substream_closed(peer);
+						assert!(closed.insert(peer));
+					},
+				// substream was closed by local node
+				5 =>
+					if let Some(peer) = closing.iter().choose(&mut rng).copied() {
+						assert!(closing.remove(&peer));
+						assert!(closed.insert(peer));
+						peerset.report_substream_closed(peer);
+					},
+				// random connected peer was disconnected by the protocol
+				6 =>
+					if let Some(peer) = open.keys().choose(&mut rng).copied() {
+						to_peerset.unbounded_send(PeersetCommand::DisconnectPeer { peer }).unwrap();
+					},
+				// ban random peer
+				7 =>
+					if let Some(peer) = known_peers.iter().choose(&mut rng).copied() {
+						peer_store_handle.report_peer(peer, ReputationChange::new_fatal(""));
+					},
+				// inbound substream is received for a peer that was considered
+				// outbound
+				8 => {
+					let outbound_peers = opening
+						.iter()
+						.filter_map(|(peer, direction)| {
+							std::matches!(direction, Direction::Outbound).then_some(*peer)
+						})
+						.collect::<HashSet<_>>();
+
+					if let Some(peer) = outbound_peers.iter().choose(&mut rng).copied() {
+						match peerset.report_inbound_substream(peer) {
+							ValidationResult::Accept => {
+								opening.insert(peer, Direction::Inbound);
+							},
+							ValidationResult::Reject => {},
+						}
+					}
+				},
+				// set reserved peers
+				//
+				// choose peers from all available sets (open, opening, closing, closed) + some new
+				// peers
+				9 => {
+					let num_open = Uniform::new_inclusive(0, open.len()).sample(&mut rng);
+					let num_opening = Uniform::new_inclusive(0, opening.len()).sample(&mut rng);
+					let num_closing = Uniform::new_inclusive(0, closing.len()).sample(&mut rng);
+					let num_closed = Uniform::new_inclusive(0, closed.len()).sample(&mut rng);
+
+					let peers = open
+						.keys()
+						.copied()
+						.choose_multiple(&mut rng, num_open)
+						.into_iter()
+						.chain(
+							opening
+								.keys()
+								.copied()
+								.choose_multiple(&mut rng, num_opening)
+								.into_iter(),
+						)
+						.chain(
+							closing
+								.iter()
+								.copied()
+								.choose_multiple(&mut rng, num_closing)
+								.into_iter(),
+						)
+						.chain(
+							closed
+								.iter()
+								.copied()
+								.choose_multiple(&mut rng, num_closed)
+								.into_iter(),
+						)
+						.chain((0..5).map(|_| {
+							let peer = PeerId::random();
+							known_peers.insert(peer);
+							peer_store_handle.add_known_peer(peer);
+							peer
+						}))
+						.filter(|peer| !reserved_peers.contains(peer))
+						.collect::<HashSet<_>>();
+
+					reserved_peers.extend(peers.clone().into_iter());
+					to_peerset.unbounded_send(PeersetCommand::SetReservedPeers { peers }).unwrap();
+				},
+				// add reserved peers
+				10 => {
+					let num_open = Uniform::new_inclusive(0, open.len()).sample(&mut rng);
+					let num_opening = Uniform::new_inclusive(0, opening.len()).sample(&mut rng);
+					let num_closing = Uniform::new_inclusive(0, closing.len()).sample(&mut rng);
+					let num_closed = Uniform::new_inclusive(0, closed.len()).sample(&mut rng);
+
+					let peers = open
+						.keys()
+						.copied()
+						.choose_multiple(&mut rng, num_open)
+						.into_iter()
+						.chain(
+							opening
+								.keys()
+								.copied()
+								.choose_multiple(&mut rng, num_opening)
+								.into_iter(),
+						)
+						.chain(
+							closing
+								.iter()
+								.copied()
+								.choose_multiple(&mut rng, num_closing)
+								.into_iter(),
+						)
+						.chain(
+							closed
+								.iter()
+								.copied()
+								.choose_multiple(&mut rng, num_closed)
+								.into_iter(),
+						)
+						.chain((0..5).map(|_| {
+							let peer = PeerId::random();
+							known_peers.insert(peer);
+							peer_store_handle.add_known_peer(peer);
+							peer
+						}))
+						.filter(|peer| !reserved_peers.contains(peer))
+						.collect::<HashSet<_>>();
+
+					reserved_peers.extend(peers.clone().into_iter());
+					to_peerset.unbounded_send(PeersetCommand::AddReservedPeers { peers }).unwrap();
+				},
+				// remove reserved peers
+				11 => {
+					let num_to_remove =
+						Uniform::new_inclusive(0, reserved_peers.len()).sample(&mut rng);
+					let peers = reserved_peers
+						.iter()
+						.copied()
+						.choose_multiple(&mut rng, num_to_remove)
+						.into_iter()
+						.collect::<HashSet<_>>();
+
+					peers.iter().for_each(|peer| {
+						assert!(reserved_peers.remove(peer));
+					});
+
+					to_peerset
+						.unbounded_send(PeersetCommand::RemoveReservedPeers { peers })
+						.unwrap();
+				},
+				// set reserved only
+				12 => {
+					reserved_only = !reserved_only;
+
+					let _ = to_peerset
+						.unbounded_send(PeersetCommand::SetReservedOnly { reserved_only });
+				},
+				// discover a new node and add it to the
+				// set of known peers.
+				13 => {
+					let new_peer = PeerId::random();
+					known_peers.insert(new_peer);
+					peer_store_handle.add_known_peer(new_peer);
+				},
+				// protocol rejected a substream that was accepted by `Peerset`
+				14 => {
+					let inbound_peers = opening
+						.iter()
+						.filter_map(|(peer, direction)| {
+							std::matches!(direction, Direction::Inbound).then_some(*peer)
+						})
+						.collect::<HashSet<_>>();
+
+					if let Some(peer) = inbound_peers.iter().choose(&mut rng).copied() {
+						peerset.report_substream_rejected(peer);
+						opening.remove(&peer);
+					}
+				},
+				// inbound substream received for a peer in `closed`
+				15 =>
+					if let Some(peer) = closed.iter().choose(&mut rng).copied() {
+						match peerset.report_inbound_substream(peer) {
+							ValidationResult::Accept => {
+								assert!(closed.remove(&peer));
+								opening.insert(peer, Direction::Inbound);
+							},
+							ValidationResult::Reject => {},
+						}
+					},
+				_ => unreachable!(),
+			}
+		}
+	})
+	.await
+	.unwrap();
+}
diff --git a/substrate/client/network/src/litep2p/shim/notification/tests/mod.rs b/substrate/client/network/src/litep2p/shim/notification/tests/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..a303862e7c7b48ec09a1dc846662654bd85bb3ae
--- /dev/null
+++ b/substrate/client/network/src/litep2p/shim/notification/tests/mod.rs
@@ -0,0 +1,22 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+#[cfg(test)]
+mod fuzz;
+#[cfg(test)]
+mod peerset;
diff --git a/substrate/client/network/src/litep2p/shim/notification/tests/peerset.rs b/substrate/client/network/src/litep2p/shim/notification/tests/peerset.rs
new file mode 100644
index 0000000000000000000000000000000000000000..4f7bfffaa1fc6cdf677188449dd67e97695ff614
--- /dev/null
+++ b/substrate/client/network/src/litep2p/shim/notification/tests/peerset.rs
@@ -0,0 +1,891 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use crate::{
+	litep2p::{
+		peerstore::peerstore_handle_test,
+		shim::notification::peerset::{
+			Direction, OpenResult, PeerState, Peerset, PeersetCommand, PeersetNotificationCommand,
+			Reserved,
+		},
+	},
+	service::traits::{self, ValidationResult},
+	ProtocolName,
+};
+
+use futures::prelude::*;
+use litep2p::protocol::notification::NotificationError;
+
+use sc_network_types::PeerId;
+
+use std::{
+	collections::HashSet,
+	sync::{atomic::Ordering, Arc},
+	task::Poll,
+};
+
+// outbound substream was initiated for a peer but an inbound substream from that same peer
+// was received while the `Peerset` was waiting for the outbound substream to be opened
+//
+// verify that the peer state is updated correctly
+#[tokio::test]
+async fn inbound_substream_for_outbound_peer() {
+	let peerstore_handle = Arc::new(peerstore_handle_test());
+	let peers = (0..3)
+		.map(|_| {
+			let peer = PeerId::random();
+			peerstore_handle.add_known_peer(peer);
+			peer
+		})
+		.collect::<Vec<_>>();
+	let inbound_peer = *peers.iter().next().unwrap();
+
+	let (mut peerset, _to_peerset) = Peerset::new(
+		ProtocolName::from("/notif/1"),
+		25,
+		25,
+		false,
+		Default::default(),
+		Default::default(),
+		peerstore_handle,
+	);
+	assert_eq!(peerset.num_in(), 0usize);
+	assert_eq!(peerset.num_out(), 0usize);
+
+	match peerset.next().await {
+		Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
+			assert_eq!(out_peers.len(), 3usize);
+			assert_eq!(peerset.num_in(), 0usize);
+			assert_eq!(peerset.num_out(), 3usize);
+			assert_eq!(
+				peerset.peers().get(&inbound_peer),
+				Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::No) })
+			);
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+
+	// inbound substream was received from peer who was marked outbound
+	//
+	// verify that the peer state and inbound/outbound counts are updated correctly
+	assert_eq!(peerset.report_inbound_substream(inbound_peer), ValidationResult::Accept);
+	assert_eq!(peerset.num_in(), 0usize);
+	assert_eq!(peerset.num_out(), 3usize);
+	assert_eq!(
+		peerset.peers().get(&inbound_peer),
+		Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::No) })
+	);
+}
+
+// substream was opening to peer but then it was canceled and before the substream
+// was fully closed, the peer got banned
+#[tokio::test]
+async fn canceled_peer_gets_banned() {
+	sp_tracing::try_init_simple();
+
+	let peerstore_handle = Arc::new(peerstore_handle_test());
+	let peers = HashSet::from_iter([PeerId::random(), PeerId::random(), PeerId::random()]);
+
+	let (mut peerset, to_peerset) = Peerset::new(
+		ProtocolName::from("/notif/1"),
+		0,
+		0,
+		true,
+		peers.clone(),
+		Default::default(),
+		peerstore_handle,
+	);
+	assert_eq!(peerset.num_in(), 0usize);
+	assert_eq!(peerset.num_out(), 0usize);
+
+	match peerset.next().await {
+		Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
+			assert_eq!(peerset.num_in(), 0usize);
+			assert_eq!(peerset.num_out(), 0usize);
+
+			for outbound_peer in &out_peers {
+				assert!(peers.contains(outbound_peer));
+				assert_eq!(
+					peerset.peers().get(&outbound_peer),
+					Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) })
+				);
+			}
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+
+	// remove all reserved peers
+	to_peerset
+		.unbounded_send(PeersetCommand::RemoveReservedPeers { peers: peers.clone() })
+		.unwrap();
+
+	match peerset.next().await {
+		Some(PeersetNotificationCommand::CloseSubstream { peers: out_peers }) => {
+			assert!(out_peers.is_empty());
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+
+	// verify all reserved peers are canceled
+	for (_, state) in peerset.peers() {
+		assert_eq!(state, &PeerState::Canceled { direction: Direction::Outbound(Reserved::Yes) });
+	}
+}
+
+#[tokio::test]
+async fn peer_added_and_removed_from_peerset() {
+	sp_tracing::try_init_simple();
+
+	let peerstore_handle = Arc::new(peerstore_handle_test());
+	let (mut peerset, to_peerset) = Peerset::new(
+		ProtocolName::from("/notif/1"),
+		0,
+		0,
+		true,
+		Default::default(),
+		Default::default(),
+		peerstore_handle,
+	);
+	assert_eq!(peerset.num_in(), 0usize);
+	assert_eq!(peerset.num_out(), 0usize);
+
+	// add peers to reserved set
+	let peers = HashSet::from_iter([PeerId::random(), PeerId::random(), PeerId::random()]);
+	to_peerset
+		.unbounded_send(PeersetCommand::AddReservedPeers { peers: peers.clone() })
+		.unwrap();
+
+	match peerset.next().await {
+		Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
+			assert_eq!(peerset.num_in(), 0usize);
+			assert_eq!(peerset.num_out(), 0usize);
+
+			for outbound_peer in &out_peers {
+				assert!(peers.contains(outbound_peer));
+				assert!(peerset.reserved_peers().contains(outbound_peer));
+				assert_eq!(
+					peerset.peers().get(&outbound_peer),
+					Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) })
+				);
+			}
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+
+	// report that all substreams were opened
+	for peer in &peers {
+		assert!(std::matches!(
+			peerset.report_substream_opened(*peer, traits::Direction::Outbound),
+			OpenResult::Accept { .. }
+		));
+		assert_eq!(
+			peerset.peers().get(peer),
+			Some(&PeerState::Connected { direction: Direction::Outbound(Reserved::Yes) })
+		);
+	}
+
+	// remove all reserved peers
+	to_peerset
+		.unbounded_send(PeersetCommand::RemoveReservedPeers { peers: peers.clone() })
+		.unwrap();
+
+	match peerset.next().await {
+		Some(PeersetNotificationCommand::CloseSubstream { peers: out_peers }) => {
+			assert!(!out_peers.is_empty());
+
+			for peer in &out_peers {
+				assert!(peers.contains(peer));
+				assert!(!peerset.reserved_peers().contains(peer));
+				assert_eq!(
+					peerset.peers().get(peer),
+					Some(&PeerState::Closing { direction: Direction::Outbound(Reserved::Yes) }),
+				);
+			}
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+
+	// add the peers again and verify that the command is ignored because the substreams are closing
+	to_peerset
+		.unbounded_send(PeersetCommand::AddReservedPeers { peers: peers.clone() })
+		.unwrap();
+
+	match peerset.next().await {
+		Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
+			assert!(out_peers.is_empty());
+
+			for peer in &peers {
+				assert!(peerset.reserved_peers().contains(peer));
+				assert_eq!(
+					peerset.peers().get(peer),
+					Some(&PeerState::Closing { direction: Direction::Outbound(Reserved::Yes) }),
+				);
+			}
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+
+	// remove the peers again and verify the state remains as `Closing`
+	to_peerset
+		.unbounded_send(PeersetCommand::RemoveReservedPeers { peers: peers.clone() })
+		.unwrap();
+
+	match peerset.next().await {
+		Some(PeersetNotificationCommand::CloseSubstream { peers: out_peers }) => {
+			assert!(out_peers.is_empty());
+
+			for peer in &peers {
+				assert!(!peerset.reserved_peers().contains(peer));
+				assert_eq!(
+					peerset.peers().get(peer),
+					Some(&PeerState::Closing { direction: Direction::Outbound(Reserved::Yes) }),
+				);
+			}
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+}
+
+#[tokio::test]
+async fn set_reserved_peers() {
+	sp_tracing::try_init_simple();
+
+	let reserved = HashSet::from_iter([PeerId::random(), PeerId::random(), PeerId::random()]);
+	let (mut peerset, to_peerset) = Peerset::new(
+		ProtocolName::from("/notif/1"),
+		25,
+		25,
+		true,
+		reserved.clone(),
+		Default::default(),
+		Arc::new(peerstore_handle_test()),
+	);
+	assert_eq!(peerset.num_in(), 0usize);
+	assert_eq!(peerset.num_out(), 0usize);
+
+	match peerset.next().await {
+		Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
+			assert_eq!(peerset.num_in(), 0usize);
+			assert_eq!(peerset.num_out(), 0usize);
+
+			for outbound_peer in &out_peers {
+				assert!(reserved.contains(outbound_peer));
+				assert!(peerset.reserved_peers().contains(outbound_peer));
+				assert_eq!(
+					peerset.peers().get(&outbound_peer),
+					Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) })
+				);
+			}
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+
+	// report that all substreams were opened
+	for peer in &reserved {
+		assert!(std::matches!(
+			peerset.report_substream_opened(*peer, traits::Direction::Outbound),
+			OpenResult::Accept { .. }
+		));
+		assert_eq!(
+			peerset.peers().get(peer),
+			Some(&PeerState::Connected { direction: Direction::Outbound(Reserved::Yes) })
+		);
+	}
+
+	// add a totally new set of reserved peers
+	let new_reserved_peers =
+		HashSet::from_iter([PeerId::random(), PeerId::random(), PeerId::random()]);
+	to_peerset
+		.unbounded_send(PeersetCommand::SetReservedPeers { peers: new_reserved_peers.clone() })
+		.unwrap();
+
+	match peerset.next().await {
+		Some(PeersetNotificationCommand::CloseSubstream { peers: out_peers }) => {
+			assert!(!out_peers.is_empty());
+			assert_eq!(out_peers.len(), 3);
+
+			for peer in &out_peers {
+				assert!(reserved.contains(peer));
+				assert!(!peerset.reserved_peers().contains(peer));
+				assert_eq!(
+					peerset.peers().get(peer),
+					Some(&PeerState::Closing { direction: Direction::Outbound(Reserved::Yes) }),
+				);
+			}
+
+			for peer in &new_reserved_peers {
+				assert!(peerset.reserved_peers().contains(peer));
+			}
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+
+	match peerset.next().await {
+		Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
+			assert!(!out_peers.is_empty());
+			assert_eq!(out_peers.len(), 3);
+
+			for peer in &new_reserved_peers {
+				assert!(peerset.reserved_peers().contains(peer));
+				assert_eq!(
+					peerset.peers().get(peer),
+					Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) }),
+				);
+			}
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+}
+
+#[tokio::test]
+async fn set_reserved_peers_one_peer_already_in_the_set() {
+	sp_tracing::try_init_simple();
+
+	let reserved = HashSet::from_iter([PeerId::random(), PeerId::random(), PeerId::random()]);
+	let common_peer = *reserved.iter().next().unwrap();
+	let (mut peerset, to_peerset) = Peerset::new(
+		ProtocolName::from("/notif/1"),
+		25,
+		25,
+		true,
+		reserved.clone(),
+		Default::default(),
+		Arc::new(peerstore_handle_test()),
+	);
+	assert_eq!(peerset.num_in(), 0usize);
+	assert_eq!(peerset.num_out(), 0usize);
+
+	match peerset.next().await {
+		Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
+			assert_eq!(peerset.num_in(), 0usize);
+			assert_eq!(peerset.num_out(), 0usize);
+
+			for outbound_peer in &out_peers {
+				assert!(reserved.contains(outbound_peer));
+				assert!(peerset.reserved_peers().contains(outbound_peer));
+				assert_eq!(
+					peerset.peers().get(&outbound_peer),
+					Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) })
+				);
+			}
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+
+	// report that all substreams were opened
+	for peer in &reserved {
+		assert!(std::matches!(
+			peerset.report_substream_opened(*peer, traits::Direction::Outbound),
+			OpenResult::Accept { .. }
+		));
+		assert_eq!(
+			peerset.peers().get(peer),
+			Some(&PeerState::Connected { direction: Direction::Outbound(Reserved::Yes) })
+		);
+	}
+
+	// add a new set of reserved peers with one peer from the original set
+	let new_reserved_peers = HashSet::from_iter([PeerId::random(), PeerId::random(), common_peer]);
+	to_peerset
+		.unbounded_send(PeersetCommand::SetReservedPeers { peers: new_reserved_peers.clone() })
+		.unwrap();
+
+	match peerset.next().await {
+		Some(PeersetNotificationCommand::CloseSubstream { peers: out_peers }) => {
+			assert_eq!(out_peers.len(), 2);
+
+			for peer in &out_peers {
+				assert!(reserved.contains(peer));
+
+				if peer != &common_peer {
+					assert!(!peerset.reserved_peers().contains(peer));
+					assert_eq!(
+						peerset.peers().get(peer),
+						Some(&PeerState::Closing { direction: Direction::Outbound(Reserved::Yes) }),
+					);
+				} else {
+					panic!("common peer disconnected");
+				}
+			}
+
+			for peer in &new_reserved_peers {
+				assert!(peerset.reserved_peers().contains(peer));
+			}
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+
+	// verify the `common_peer` shared between the reserved sets is still in the state `Connected`
+	assert_eq!(
+		peerset.peers().get(&common_peer),
+		Some(&PeerState::Connected { direction: Direction::Outbound(Reserved::Yes) })
+	);
+
+	match peerset.next().await {
+		Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
+			assert!(!out_peers.is_empty());
+			assert_eq!(out_peers.len(), 2);
+
+			for peer in &new_reserved_peers {
+				assert!(peerset.reserved_peers().contains(peer));
+
+				if peer != &common_peer {
+					assert_eq!(
+						peerset.peers().get(peer),
+						Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) }),
+					);
+				}
+			}
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+}
+
+#[tokio::test]
+async fn add_reserved_peers_one_peer_already_in_the_set() {
+	sp_tracing::try_init_simple();
+
+	let peerstore_handle = Arc::new(peerstore_handle_test());
+	let reserved = (0..3)
+		.map(|_| {
+			let peer = PeerId::random();
+			peerstore_handle.add_known_peer(peer);
+			peer
+		})
+		.collect::<Vec<_>>();
+	let common_peer = *reserved.iter().next().unwrap();
+	let (mut peerset, to_peerset) = Peerset::new(
+		ProtocolName::from("/notif/1"),
+		25,
+		25,
+		true,
+		reserved.iter().cloned().collect(),
+		Default::default(),
+		peerstore_handle,
+	);
+	assert_eq!(peerset.num_in(), 0usize);
+	assert_eq!(peerset.num_out(), 0usize);
+
+	match peerset.next().await {
+		Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
+			assert_eq!(peerset.num_in(), 0usize);
+			assert_eq!(peerset.num_out(), 0usize);
+			assert_eq!(out_peers.len(), 3);
+
+			for outbound_peer in &out_peers {
+				assert!(reserved.contains(outbound_peer));
+				assert!(peerset.reserved_peers().contains(outbound_peer));
+				assert_eq!(
+					peerset.peers().get(&outbound_peer),
+					Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) })
+				);
+			}
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+
+	// report that all substreams were opened
+	for peer in &reserved {
+		assert!(std::matches!(
+			peerset.report_substream_opened(*peer, traits::Direction::Outbound),
+			OpenResult::Accept { .. }
+		));
+		assert_eq!(
+			peerset.peers().get(peer),
+			Some(&PeerState::Connected { direction: Direction::Outbound(Reserved::Yes) })
+		);
+	}
+
+	// add a new set of reserved peers with one peer from the original set
+	let new_reserved_peers = HashSet::from_iter([PeerId::random(), PeerId::random(), common_peer]);
+	to_peerset
+		.unbounded_send(PeersetCommand::AddReservedPeers { peers: new_reserved_peers.clone() })
+		.unwrap();
+
+	match peerset.next().await {
+		Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
+			assert_eq!(out_peers.len(), 2);
+			assert!(!out_peers.iter().any(|peer| peer == &common_peer));
+
+			for peer in &out_peers {
+				assert!(!reserved.contains(peer));
+
+				if peer != &common_peer {
+					assert!(peerset.reserved_peers().contains(peer));
+					assert_eq!(
+						peerset.peers().get(peer),
+						Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) }),
+					);
+				}
+			}
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+
+	// verify the `common_peer` shared between the reserved sets is still in the state `Connected`
+	assert_eq!(
+		peerset.peers().get(&common_peer),
+		Some(&PeerState::Connected { direction: Direction::Outbound(Reserved::Yes) })
+	);
+}
+
+#[tokio::test]
+async fn opening_peer_gets_canceled_and_disconnected() {
+	sp_tracing::try_init_simple();
+
+	let peerstore_handle = Arc::new(peerstore_handle_test());
+	let _known_peers = (0..1)
+		.map(|_| {
+			let peer = PeerId::random();
+			peerstore_handle.add_known_peer(peer);
+			peer
+		})
+		.collect::<Vec<_>>();
+	let num_connected = Arc::new(Default::default());
+	let (mut peerset, to_peerset) = Peerset::new(
+		ProtocolName::from("/notif/1"),
+		25,
+		25,
+		false,
+		Default::default(),
+		Arc::clone(&num_connected),
+		peerstore_handle,
+	);
+	assert_eq!(peerset.num_in(), 0);
+	assert_eq!(peerset.num_out(), 0);
+
+	let peer = match peerset.next().await {
+		Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
+			assert_eq!(peerset.num_in(), 0);
+			assert_eq!(peerset.num_out(), 1);
+			assert_eq!(out_peers.len(), 1);
+
+			for peer in &out_peers {
+				assert_eq!(
+					peerset.peers().get(&peer),
+					Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::No) })
+				);
+			}
+
+			out_peers[0]
+		},
+		event => panic!("invalid event: {event:?}"),
+	};
+
+	// disconnect the now-opening peer
+	to_peerset.unbounded_send(PeersetCommand::DisconnectPeer { peer }).unwrap();
+
+	// poll `Peerset` to register the command and verify the peer is now in state `Canceled`
+	futures::future::poll_fn(|cx| match peerset.poll_next_unpin(cx) {
+		Poll::Pending => Poll::Ready(()),
+		_ => panic!("unexpected event"),
+	})
+	.await;
+
+	assert_eq!(
+		peerset.peers().get(&peer),
+		Some(&PeerState::Canceled { direction: Direction::Outbound(Reserved::No) })
+	);
+	assert_eq!(peerset.num_out(), 1);
+
+	// report to `Peerset` that the substream was opened, verify that it gets closed
+	assert!(std::matches!(
+		peerset.report_substream_opened(peer, traits::Direction::Outbound),
+		OpenResult::Reject { .. }
+	));
+	assert_eq!(
+		peerset.peers().get(&peer),
+		Some(&PeerState::Closing { direction: Direction::Outbound(Reserved::No) })
+	);
+	assert_eq!(num_connected.load(Ordering::SeqCst), 1);
+	assert_eq!(peerset.num_out(), 1);
+
+	// report close event to `Peerset` and verify state
+	peerset.report_substream_closed(peer);
+	assert_eq!(peerset.num_out(), 0);
+	assert_eq!(num_connected.load(Ordering::SeqCst), 0);
+	assert_eq!(peerset.peers().get(&peer), Some(&PeerState::Backoff));
+}
+
+#[tokio::test]
+async fn open_failure_for_canceled_peer() {
+	sp_tracing::try_init_simple();
+
+	let peerstore_handle = Arc::new(peerstore_handle_test());
+	let _known_peers = (0..1)
+		.map(|_| {
+			let peer = PeerId::random();
+			peerstore_handle.add_known_peer(peer);
+			peer
+		})
+		.collect::<Vec<_>>();
+	let (mut peerset, to_peerset) = Peerset::new(
+		ProtocolName::from("/notif/1"),
+		25,
+		25,
+		false,
+		Default::default(),
+		Default::default(),
+		peerstore_handle,
+	);
+	assert_eq!(peerset.num_in(), 0usize);
+	assert_eq!(peerset.num_out(), 0usize);
+
+	let peer = match peerset.next().await {
+		Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
+			assert_eq!(peerset.num_in(), 0usize);
+			assert_eq!(peerset.num_out(), 1usize);
+			assert_eq!(out_peers.len(), 1);
+
+			for peer in &out_peers {
+				assert_eq!(
+					peerset.peers().get(&peer),
+					Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::No) })
+				);
+			}
+
+			out_peers[0]
+		},
+		event => panic!("invalid event: {event:?}"),
+	};
+
+	// disconnect the now-opening peer
+	to_peerset.unbounded_send(PeersetCommand::DisconnectPeer { peer }).unwrap();
+
+	// poll `Peerset` to register the command and verify the peer is now in state `Canceled`
+	futures::future::poll_fn(|cx| match peerset.poll_next_unpin(cx) {
+		Poll::Pending => Poll::Ready(()),
+		_ => panic!("unexpected event"),
+	})
+	.await;
+
+	assert_eq!(
+		peerset.peers().get(&peer),
+		Some(&PeerState::Canceled { direction: Direction::Outbound(Reserved::No) })
+	);
+
+	// the substream failed to open, verify that peer state is now `Backoff`
+	// and that `Peerset` doesn't emit any events
+	peerset.report_substream_open_failure(peer, NotificationError::NoConnection);
+	assert_eq!(peerset.peers().get(&peer), Some(&PeerState::Backoff));
+
+	futures::future::poll_fn(|cx| match peerset.poll_next_unpin(cx) {
+		Poll::Pending => Poll::Ready(()),
+		_ => panic!("unexpected event"),
+	})
+	.await;
+}
+
+#[tokio::test]
+async fn peer_disconnected_when_being_validated_then_rejected() {
+	sp_tracing::try_init_simple();
+
+	let peerstore_handle = Arc::new(peerstore_handle_test());
+	let (mut peerset, _to_peerset) = Peerset::new(
+		ProtocolName::from("/notif/1"),
+		25,
+		25,
+		false,
+		Default::default(),
+		Default::default(),
+		peerstore_handle,
+	);
+	assert_eq!(peerset.num_in(), 0usize);
+	assert_eq!(peerset.num_out(), 0usize);
+
+	// inbound substream received
+	let peer = PeerId::random();
+	assert_eq!(peerset.report_inbound_substream(peer), ValidationResult::Accept);
+
+	// substream failed to open while it was being validated by the protocol
+	peerset.report_substream_open_failure(peer, NotificationError::NoConnection);
+	assert_eq!(peerset.peers().get(&peer), Some(&PeerState::Backoff));
+
+	// protocol rejected substream, verify
+	peerset.report_substream_rejected(peer);
+	assert_eq!(peerset.peers().get(&peer), Some(&PeerState::Backoff));
+}
+
+#[tokio::test]
+async fn removed_reserved_peer_kept_due_to_free_slots() {
+	sp_tracing::try_init_simple();
+
+	let peerstore_handle = Arc::new(peerstore_handle_test());
+	let peers = HashSet::from_iter([PeerId::random(), PeerId::random(), PeerId::random()]);
+
+	let (mut peerset, to_peerset) = Peerset::new(
+		ProtocolName::from("/notif/1"),
+		25,
+		25,
+		true,
+		peers.clone(),
+		Default::default(),
+		peerstore_handle,
+	);
+	assert_eq!(peerset.num_in(), 0usize);
+	assert_eq!(peerset.num_out(), 0usize);
+
+	match peerset.next().await {
+		Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
+			assert_eq!(peerset.num_in(), 0usize);
+			assert_eq!(peerset.num_out(), 0usize);
+
+			for outbound_peer in &out_peers {
+				assert!(peers.contains(outbound_peer));
+				assert_eq!(
+					peerset.peers().get(&outbound_peer),
+					Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) })
+				);
+			}
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+
+	// remove all reserved peers
+	to_peerset
+		.unbounded_send(PeersetCommand::RemoveReservedPeers { peers: peers.clone() })
+		.unwrap();
+
+	match peerset.next().await {
+		Some(PeersetNotificationCommand::CloseSubstream { peers: out_peers }) => {
+			assert!(out_peers.is_empty());
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+
+	// verify all former reserved peers were kept as regular (non-reserved) outbound peers
+	for (_, state) in peerset.peers() {
+		assert_eq!(state, &PeerState::Opening { direction: Direction::Outbound(Reserved::No) });
+	}
+
+	assert_eq!(peerset.num_in(), 0usize);
+	assert_eq!(peerset.num_out(), 3usize);
+}
+
+#[tokio::test]
+async fn set_reserved_peers_but_available_slots() {
+	sp_tracing::try_init_simple();
+
+	let peerstore_handle = Arc::new(peerstore_handle_test());
+	let known_peers = (0..3)
+		.map(|_| {
+			let peer = PeerId::random();
+			peerstore_handle.add_known_peer(peer);
+			peer
+		})
+		.collect::<Vec<_>>();
+
+	// one peer is common across operations meaning an outbound substream will be opened to them
+	// when `Peerset` is polled (along with two random peers) and later on `SetReservedPeers`
+	// is called with the common peer and with two new random peers
+	let common_peer = *known_peers.iter().next().unwrap();
+	let disconnected_peers = known_peers.iter().skip(1).copied().collect::<HashSet<_>>();
+	assert_eq!(disconnected_peers.len(), 2);
+
+	let (mut peerset, to_peerset) = Peerset::new(
+		ProtocolName::from("/notif/1"),
+		25,
+		25,
+		false,
+		Default::default(),
+		Default::default(),
+		peerstore_handle,
+	);
+	assert_eq!(peerset.num_in(), 0usize);
+	assert_eq!(peerset.num_out(), 0usize);
+
+	match peerset.next().await {
+		Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
+			assert_eq!(out_peers.len(), 3);
+
+			for peer in &out_peers {
+				assert_eq!(
+					peerset.peers().get(&peer),
+					Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::No) })
+				);
+			}
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+
+	// verify all three peers are counted as outbound peers
+	assert_eq!(peerset.num_in(), 0usize);
+	assert_eq!(peerset.num_out(), 3usize);
+
+	// report that all substreams were opened
+	for peer in &known_peers {
+		assert!(std::matches!(
+			peerset.report_substream_opened(*peer, traits::Direction::Outbound),
+			OpenResult::Accept { .. }
+		));
+		assert_eq!(
+			peerset.peers().get(peer),
+			Some(&PeerState::Connected { direction: Direction::Outbound(Reserved::No) })
+		);
+	}
+
+	// set reserved peers with `common_peer` being one of them
+	let reserved_peers = HashSet::from_iter([common_peer, PeerId::random(), PeerId::random()]);
+	to_peerset
+		.unbounded_send(PeersetCommand::SetReservedPeers { peers: reserved_peers.clone() })
+		.unwrap();
+
+	match peerset.next().await {
+		Some(PeersetNotificationCommand::CloseSubstream { peers: out_peers }) => {
+			assert_eq!(out_peers.len(), 2);
+
+			for peer in &out_peers {
+				assert!(disconnected_peers.contains(peer));
+				assert_eq!(
+					peerset.peers().get(peer),
+					Some(&PeerState::Closing { direction: Direction::Outbound(Reserved::No) }),
+				);
+			}
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+
+	// verify that `Peerset` is aware of five peers, with two of them as outbound
+	// (the two disconnected peers)
+	assert_eq!(peerset.peers().len(), 5);
+	assert_eq!(peerset.num_in(), 0usize);
+	assert_eq!(peerset.num_out(), 2usize);
+
+	match peerset.next().await {
+		Some(PeersetNotificationCommand::OpenSubstream { peers }) => {
+			assert_eq!(peers.len(), 2);
+			assert!(!peers.contains(&common_peer));
+
+			for peer in &peers {
+				assert!(reserved_peers.contains(peer));
+				assert!(peerset.reserved_peers().contains(peer));
+				assert_eq!(
+					peerset.peers().get(peer),
+					Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) }),
+				);
+			}
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+
+	assert_eq!(peerset.peers().len(), 5);
+	assert_eq!(peerset.num_in(), 0usize);
+
+	// two substreams are still closing
+	assert_eq!(peerset.num_out(), 2usize);
+}
diff --git a/substrate/client/network/src/litep2p/shim/request_response/metrics.rs b/substrate/client/network/src/litep2p/shim/request_response/metrics.rs
new file mode 100644
index 0000000000000000000000000000000000000000..b04b6ed92de48e312fcbb02268a4dcafc199b32d
--- /dev/null
+++ b/substrate/client/network/src/litep2p/shim/request_response/metrics.rs
@@ -0,0 +1,78 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Metrics for [`RequestResponseProtocol`](super::RequestResponseProtocol).
+
+use crate::{service::metrics::Metrics, types::ProtocolName};
+
+use std::time::Duration;
+
+/// Request-response metrics.
+pub struct RequestResponseMetrics {
+	/// Metrics.
+	metrics: Option<Metrics>,
+
+	/// Protocol name.
+	protocol: ProtocolName,
+}
+
+impl RequestResponseMetrics {
+	pub fn new(metrics: Option<Metrics>, protocol: ProtocolName) -> Self {
+		Self { metrics, protocol }
+	}
+
+	/// Register inbound request failure to Prometheus
+	pub fn register_inbound_request_failure(&self, reason: &str) {
+		if let Some(metrics) = &self.metrics {
+			metrics
+				.requests_in_failure_total
+				.with_label_values(&[&self.protocol, reason])
+				.inc();
+		}
+	}
+
+	/// Register inbound request success to Prometheus
+	pub fn register_inbound_request_success(&self, serve_time: Duration) {
+		if let Some(metrics) = &self.metrics {
+			metrics
+				.requests_in_success_total
+				.with_label_values(&[&self.protocol])
+				.observe(serve_time.as_secs_f64());
+		}
+	}
+
+	/// Register outbound request failure to Prometheus
+	pub fn register_outbound_request_failure(&self, reason: &str) {
+		if let Some(metrics) = &self.metrics {
+			metrics
+				.requests_out_failure_total
+				.with_label_values(&[&self.protocol, reason])
+				.inc();
+		}
+	}
+
+	/// Register outbound request success to Prometheus
+	pub fn register_outbound_request_success(&self, duration: Duration) {
+		if let Some(metrics) = &self.metrics {
+			metrics
+				.requests_out_success_total
+				.with_label_values(&[&self.protocol])
+				.observe(duration.as_secs_f64());
+		}
+	}
+}
diff --git a/substrate/client/network/src/litep2p/shim/request_response/mod.rs b/substrate/client/network/src/litep2p/shim/request_response/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..82d01c01236f215a5123c34128f9b550e7abd4e9
--- /dev/null
+++ b/substrate/client/network/src/litep2p/shim/request_response/mod.rs
@@ -0,0 +1,529 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Shim for litep2p's request-response implementation to make it work with `sc_network`'s
+//! request-response API.
+
+use crate::{
+	litep2p::shim::request_response::metrics::RequestResponseMetrics,
+	peer_store::PeerStoreProvider,
+	request_responses::{IncomingRequest, OutgoingResponse},
+	service::{metrics::Metrics, traits::RequestResponseConfig as RequestResponseConfigT},
+	IfDisconnected, ProtocolName, RequestFailure,
+};
+
+use futures::{channel::oneshot, future::BoxFuture, stream::FuturesUnordered, StreamExt};
+use litep2p::{
+	protocol::request_response::{
+		DialOptions, RequestResponseError, RequestResponseEvent, RequestResponseHandle,
+	},
+	types::RequestId,
+};
+
+use sc_network_types::PeerId;
+use sc_utils::mpsc::{TracingUnboundedReceiver, TracingUnboundedSender};
+
+use std::{
+	collections::HashMap,
+	sync::Arc,
+	time::{Duration, Instant},
+};
+
+mod metrics;
+
+#[cfg(test)]
+mod tests;
+
+/// Logging target for the file.
+const LOG_TARGET: &str = "sub-libp2p::request-response";
+
+/// Type containing information related to an outbound request.
+#[derive(Debug)]
+pub struct OutboundRequest {
+	/// Peer ID.
+	peer: PeerId,
+
+	/// Request.
+	request: Vec<u8>,
+
+	/// Fallback request, if provided.
+	fallback_request: Option<(Vec<u8>, ProtocolName)>,
+
+	/// `oneshot::Sender` for sending the received response, or failure.
+	sender: oneshot::Sender<Result<(Vec<u8>, ProtocolName), RequestFailure>>,
+
+	/// What should the node do if `peer` is disconnected.
+	dial_behavior: IfDisconnected,
+}
+
+impl OutboundRequest {
+	/// Create new [`OutboundRequest`].
+	pub fn new(
+		peer: PeerId,
+		request: Vec<u8>,
+		sender: oneshot::Sender<Result<(Vec<u8>, ProtocolName), RequestFailure>>,
+		fallback_request: Option<(Vec<u8>, ProtocolName)>,
+		dial_behavior: IfDisconnected,
+	) -> Self {
+		OutboundRequest { peer, request, sender, fallback_request, dial_behavior }
+	}
+}
+
+/// Pending request.
+struct PendingRequest {
+	tx: oneshot::Sender<Result<(Vec<u8>, ProtocolName), RequestFailure>>,
+	started: Instant,
+	fallback_request: Option<(Vec<u8>, ProtocolName)>,
+}
+
+impl PendingRequest {
+	/// Create new [`PendingRequest`].
+	fn new(
+		tx: oneshot::Sender<Result<(Vec<u8>, ProtocolName), RequestFailure>>,
+		started: Instant,
+		fallback_request: Option<(Vec<u8>, ProtocolName)>,
+	) -> Self {
+		Self { tx, started, fallback_request }
+	}
+}
+
+/// Request-response protocol configuration.
+///
+/// See [`RequestResponseConfiguration`](crate::request_response::ProtocolConfig) for more details.
+#[derive(Debug)]
+pub struct RequestResponseConfig {
+	/// Name of the protocol on the wire. Should be something like `/foo/bar`.
+	pub protocol_name: ProtocolName,
+
+	/// Fallback on the wire protocol names to support.
+	pub fallback_names: Vec<ProtocolName>,
+
+	/// Maximum allowed size, in bytes, of a request.
+	pub max_request_size: u64,
+
+	/// Maximum allowed size, in bytes, of a response.
+	pub max_response_size: u64,
+
+	/// Duration after which emitted requests are considered timed out.
+	pub request_timeout: Duration,
+
+	/// Channel on which the networking service will send incoming requests.
+	pub inbound_queue: Option<async_channel::Sender<IncomingRequest>>,
+}
+
+impl RequestResponseConfig {
+	/// Create new [`RequestResponseConfig`].
+	pub(crate) fn new(
+		protocol_name: ProtocolName,
+		fallback_names: Vec<ProtocolName>,
+		max_request_size: u64,
+		max_response_size: u64,
+		request_timeout: Duration,
+		inbound_queue: Option<async_channel::Sender<IncomingRequest>>,
+	) -> Self {
+		Self {
+			protocol_name,
+			fallback_names,
+			max_request_size,
+			max_response_size,
+			request_timeout,
+			inbound_queue,
+		}
+	}
+}
+
+impl RequestResponseConfigT for RequestResponseConfig {
+	fn protocol_name(&self) -> &ProtocolName {
+		&self.protocol_name
+	}
+}
+
+/// Request-response protocol.
+///
+/// This is slightly different from the `RequestResponsesBehaviour` in that it is protocol-specific,
+/// meaning there is an instance of `RequestResponseProtocol` for each installed request-response
+/// protocol and that instance deals only with the requests and responses of that protocol, nothing
+/// else. It also differs from the other implementation by combining both inbound and outbound
+/// requests under one instance so all request-response-related behavior of any given protocol is
+/// handled through one instance of `RequestResponseProtocol`.
+pub struct RequestResponseProtocol {
+	/// Protocol name.
+	protocol: ProtocolName,
+
+	/// Handle to request-response protocol.
+	handle: RequestResponseHandle,
+
+	/// Inbound queue for sending received requests to protocol implementation in Polkadot SDK.
+	inbound_queue: Option<async_channel::Sender<IncomingRequest>>,
+
+	/// Handle to `Peerstore`.
+	peerstore_handle: Arc<dyn PeerStoreProvider>,
+
+	/// Pending inbound responses for requests this node has sent out.
+	pending_inbound_responses: HashMap<RequestId, PendingRequest>,
+
+	/// Pending outbound responses.
+	pending_outbound_responses: FuturesUnordered<
+		BoxFuture<'static, (litep2p::PeerId, RequestId, Result<OutgoingResponse, ()>, Instant)>,
+	>,
+
+	/// RX channel for receiving info for outbound requests.
+	request_rx: TracingUnboundedReceiver<OutboundRequest>,
+
+	/// Map of supported request-response protocols which are used to support fallback requests.
+	///
+	/// If negotiation for the main protocol fails and the request was sent with a fallback,
+	/// [`RequestResponseProtocol`] queries this map and sends the request to that protocol for
+	/// processing.
+	request_tx: HashMap<ProtocolName, TracingUnboundedSender<OutboundRequest>>,
+
+	/// Metrics, if enabled.
+	metrics: RequestResponseMetrics,
+}
+
+impl RequestResponseProtocol {
+	/// Create new [`RequestResponseProtocol`].
+	pub fn new(
+		protocol: ProtocolName,
+		handle: RequestResponseHandle,
+		peerstore_handle: Arc<dyn PeerStoreProvider>,
+		inbound_queue: Option<async_channel::Sender<IncomingRequest>>,
+		request_rx: TracingUnboundedReceiver<OutboundRequest>,
+		request_tx: HashMap<ProtocolName, TracingUnboundedSender<OutboundRequest>>,
+		metrics: Option<Metrics>,
+	) -> Self {
+		Self {
+			handle,
+			request_rx,
+			request_tx,
+			inbound_queue,
+			peerstore_handle,
+			protocol: protocol.clone(),
+			pending_inbound_responses: HashMap::new(),
+			pending_outbound_responses: FuturesUnordered::new(),
+			metrics: RequestResponseMetrics::new(metrics, protocol),
+		}
+	}
+
+	/// Send `request` to `peer`, reporting failures back to the caller through `tx`.
+	async fn on_send_request(
+		&mut self,
+		peer: PeerId,
+		request: Vec<u8>,
+		fallback_request: Option<(Vec<u8>, ProtocolName)>,
+		tx: oneshot::Sender<Result<(Vec<u8>, ProtocolName), RequestFailure>>,
+		connect: IfDisconnected,
+	) {
+		let dial_options = match connect {
+			IfDisconnected::TryConnect => DialOptions::Dial,
+			IfDisconnected::ImmediateError => DialOptions::Reject,
+		};
+
+		log::trace!(
+			target: LOG_TARGET,
+			"{}: send request to {:?} (fallback {:?}) (dial options: {:?})",
+			self.protocol,
+			peer,
+			fallback_request,
+			dial_options,
+		);
+
+		match self.handle.try_send_request(peer.into(), request, dial_options) {
+			Ok(request_id) => {
+				self.pending_inbound_responses
+					.insert(request_id, PendingRequest::new(tx, Instant::now(), fallback_request));
+			},
+			Err(error) => {
+				log::warn!(
+					target: LOG_TARGET,
+					"{}: failed to send request to {peer:?}: {error:?}",
+					self.protocol,
+				);
+
+				let _ = tx.send(Err(RequestFailure::Refused));
+				// this is a failure of an *outbound* request; record it under the
+				// outbound failure counter, not the inbound one
+				self.metrics.register_outbound_request_failure(error.to_string().as_ref());
+			},
+		}
+	}
+
+	/// Handle inbound request from `peer`
+	///
+	/// If the protocol is configured outbound only, reject the request immediately.
+	fn on_inbound_request(
+		&mut self,
+		peer: litep2p::PeerId,
+		fallback: Option<litep2p::ProtocolName>,
+		request_id: RequestId,
+		request: Vec<u8>,
+	) {
+		let Some(inbound_queue) = &self.inbound_queue else {
+			log::trace!(
+				target: LOG_TARGET,
+				"{}: rejecting inbound request from {peer:?}, protocol configured as outbound only",
+				self.protocol,
+			);
+
+			self.handle.reject_request(request_id);
+			return;
+		};
+
+		log::trace!(
+			target: LOG_TARGET,
+			"{}: request received from {peer:?} ({fallback:?} {request_id:?}), request size {:?}",
+			self.protocol,
+			request.len(),
+		);
+		let (tx, rx) = oneshot::channel();
+
+		match inbound_queue.try_send(IncomingRequest {
+			peer: peer.into(),
+			payload: request,
+			pending_response: tx,
+		}) {
+			Ok(_) => {
+				self.pending_outbound_responses.push(Box::pin(async move {
+					(peer, request_id, rx.await.map_err(|_| ()), Instant::now())
+				}));
+			},
+			Err(error) => {
+				log::trace!(
+					target: LOG_TARGET,
+					"{:?}: dropping request from {peer:?} ({request_id:?}), inbound queue full",
+					self.protocol,
+				);
+
+				self.handle.reject_request(request_id);
+				self.metrics.register_inbound_request_failure(error.to_string().as_ref());
+			},
+		}
+	}
+
+	/// Handle received inbound response.
+	fn on_inbound_response(
+		&mut self,
+		peer: litep2p::PeerId,
+		request_id: RequestId,
+		fallback: Option<litep2p::ProtocolName>,
+		response: Vec<u8>,
+	) {
+		match self.pending_inbound_responses.remove(&request_id) {
+			None => log::warn!(
+				target: LOG_TARGET,
+				"{:?}: response received for {peer:?} but {request_id:?} doesn't exist",
+				self.protocol,
+			),
+			Some(PendingRequest { tx, started, .. }) => {
+				log::trace!(
+					target: LOG_TARGET,
+					"{:?}: response received for {peer:?} ({request_id:?}), response size {:?}",
+					self.protocol,
+					response.len(),
+				);
+
+				let _ = tx.send(Ok((
+					response,
+					fallback.map_or_else(|| self.protocol.clone(), Into::into),
+				)));
+				self.metrics.register_outbound_request_success(started.elapsed());
+			},
+		}
+	}
+
+	/// Handle failed outbound request.
+	fn on_request_failed(
+		&mut self,
+		peer: litep2p::PeerId,
+		request_id: RequestId,
+		error: RequestResponseError,
+	) {
+		log::debug!(
+			target: LOG_TARGET,
+			"{:?}: request failed for {peer:?} ({request_id:?}): {error:?}",
+			self.protocol
+		);
+
+		let Some(PendingRequest { tx, fallback_request, .. }) =
+			self.pending_inbound_responses.remove(&request_id)
+		else {
+			log::warn!(
+				target: LOG_TARGET,
+				"{:?}: request failed for peer {peer:?} but {request_id:?} doesn't exist",
+				self.protocol,
+			);
+
+			return
+		};
+
+		let error = match error {
+			RequestResponseError::NotConnected => Some(RequestFailure::NotConnected),
+			RequestResponseError::Rejected | RequestResponseError::Timeout =>
+				Some(RequestFailure::Refused),
+			RequestResponseError::Canceled => {
+				log::debug!(
+					target: LOG_TARGET,
+					"{}: request canceled by local node to {peer:?} ({request_id:?})",
+					self.protocol,
+				);
+				None
+			},
+			RequestResponseError::TooLargePayload => {
+				log::warn!(
+					target: LOG_TARGET,
+					"{}: tried to send too large request to {peer:?} ({request_id:?})",
+					self.protocol,
+				);
+				Some(RequestFailure::Refused)
+			},
+			RequestResponseError::UnsupportedProtocol => match fallback_request {
+				Some((request, protocol)) => match self.request_tx.get(&protocol) {
+					Some(sender) => {
+						log::debug!(
+							target: LOG_TARGET,
+							"{}: failed to negotiate protocol with {:?}, try fallback request: ({})",
+							self.protocol,
+							peer,
+							protocol,
+						);
+
+						let outbound_request = OutboundRequest::new(
+							peer.into(),
+							request,
+							tx,
+							None,
+							IfDisconnected::ImmediateError,
+						);
+
+						// since remote peer doesn't support the main protocol (`self.protocol`),
+						// try to send the request over a fallback protocol by creating a new
+						// `OutboundRequest` from the original data, now with the fallback request
+						// payload, and send it over to the (fallback) request handler like it was
+						// a normal request.
+						let _ = sender.unbounded_send(outbound_request);
+
+						return;
+					},
+					None => {
+						log::warn!(
+							target: LOG_TARGET,
+							"{}: fallback request provided but protocol ({}) doesn't exist (peer {:?})",
+							self.protocol,
+							protocol,
+							peer,
+						);
+
+						Some(RequestFailure::Refused)
+					},
+				},
+				None => Some(RequestFailure::Refused),
+			},
+		};
+
+		if let Some(error) = error {
+			self.metrics.register_outbound_request_failure(error.to_string().as_ref());
+			let _ = tx.send(Err(error));
+		}
+	}
+
+	/// Handle outbound response.
+	fn on_outbound_response(
+		&mut self,
+		peer: litep2p::PeerId,
+		request_id: RequestId,
+		response: OutgoingResponse,
+		started: Instant,
+	) {
+		let OutgoingResponse { result, reputation_changes, sent_feedback } = response;
+
+		for change in reputation_changes {
+			log::trace!(target: LOG_TARGET, "{}: report {peer:?}: {change:?}", self.protocol);
+			self.peerstore_handle.report_peer(peer.into(), change);
+		}
+
+		match result {
+			Err(()) => {
+				log::debug!(
+					target: LOG_TARGET,
+					"{}: response rejected ({request_id:?}) for {peer:?}",
+					self.protocol,
+				);
+
+				self.handle.reject_request(request_id);
+				self.metrics.register_inbound_request_failure("rejected");
+			},
+			Ok(response) => {
+				log::trace!(
+					target: LOG_TARGET,
+					"{}: send response ({request_id:?}) to {peer:?}, response size {}",
+					self.protocol,
+					response.len(),
+				);
+
+				match sent_feedback {
+					None => self.handle.send_response(request_id, response),
+					Some(feedback) =>
+						self.handle.send_response_with_feedback(request_id, response, feedback),
+				}
+
+				self.metrics.register_inbound_request_success(started.elapsed());
+			},
+		}
+	}
+
+	/// Start running event loop of the request-response protocol.
+	pub async fn run(mut self) {
+		loop {
+			tokio::select! {
+				event = self.handle.next() => match event {
+					None => return,
+					Some(RequestResponseEvent::RequestReceived {
+						peer,
+						fallback,
+						request_id,
+						request,
+					}) => self.on_inbound_request(peer, fallback, request_id, request),
+					Some(RequestResponseEvent::ResponseReceived { peer, request_id, fallback, response }) => {
+						self.on_inbound_response(peer, request_id, fallback, response);
+					},
+					Some(RequestResponseEvent::RequestFailed { peer, request_id, error }) => {
+						self.on_request_failed(peer, request_id, error);
+					},
+				},
+				event = self.pending_outbound_responses.next(), if !self.pending_outbound_responses.is_empty() => match event {
+					None => return,
+					Some((peer, request_id, Err(()), _)) => {
+						log::debug!(target: LOG_TARGET, "{}: reject request ({request_id:?}) from {peer:?}", self.protocol);
+
+						self.handle.reject_request(request_id);
+						self.metrics.register_inbound_request_failure("rejected");
+					}
+					Some((peer, request_id, Ok(response), started)) => {
+						self.on_outbound_response(peer, request_id, response, started);
+					}
+				},
+				event = self.request_rx.next() => match event {
+					None => return,
+					Some(outbound_request) => {
+						let OutboundRequest { peer, request, sender, dial_behavior, fallback_request } = outbound_request;
+
+						self.on_send_request(peer, request, fallback_request, sender, dial_behavior).await;
+					}
+				}
+			}
+		}
+	}
+}
diff --git a/substrate/client/network/src/litep2p/shim/request_response/tests.rs b/substrate/client/network/src/litep2p/shim/request_response/tests.rs
new file mode 100644
index 0000000000000000000000000000000000000000..e3e82aa395c58f96d4f8d4844057c39d349541c8
--- /dev/null
+++ b/substrate/client/network/src/litep2p/shim/request_response/tests.rs
@@ -0,0 +1,901 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use crate::{
+	litep2p::{
+		peerstore::peerstore_handle_test,
+		shim::request_response::{OutboundRequest, RequestResponseProtocol},
+	},
+	request_responses::{IfDisconnected, IncomingRequest, OutgoingResponse},
+	ProtocolName, RequestFailure,
+};
+
+use futures::{channel::oneshot, StreamExt};
+use litep2p::{
+	config::ConfigBuilder as Litep2pConfigBuilder,
+	protocol::request_response::{
+		ConfigBuilder, DialOptions, RequestResponseError, RequestResponseEvent,
+		RequestResponseHandle,
+	},
+	transport::tcp::config::Config as TcpConfig,
+	Litep2p, Litep2pEvent,
+};
+
+use sc_network_types::PeerId;
+use sc_utils::mpsc::tracing_unbounded;
+
+use std::{collections::HashMap, sync::Arc, task::Poll};
+
+/// Create `litep2p` for testing.
+async fn make_litep2p() -> (Litep2p, RequestResponseHandle) {
+	let (config, handle) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/1"))
+		.with_max_size(1024)
+		.build();
+
+	(
+		Litep2p::new(
+			Litep2pConfigBuilder::new()
+				.with_request_response_protocol(config)
+				.with_tcp(TcpConfig {
+					listen_addresses: vec![
+						"/ip4/0.0.0.0/tcp/0".parse().unwrap(),
+						"/ip6/::/tcp/0".parse().unwrap(),
+					],
+					..Default::default()
+				})
+				.build(),
+		)
+		.unwrap(),
+		handle,
+	)
+}
+
+// connect two `litep2p` instances together
+async fn connect_peers(litep2p1: &mut Litep2p, litep2p2: &mut Litep2p) {
+	let address = litep2p2.listen_addresses().next().unwrap().clone();
+	litep2p1.dial_address(address).await.unwrap();
+
+	let mut litep2p1_connected = false;
+	let mut litep2p2_connected = false;
+
+	loop {
+		tokio::select! {
+			event = litep2p1.next_event() => match event.unwrap() {
+				Litep2pEvent::ConnectionEstablished { .. } => {
+					litep2p1_connected = true;
+				}
+				_ => {},
+			},
+			event = litep2p2.next_event() => match event.unwrap() {
+				Litep2pEvent::ConnectionEstablished { .. } => {
+					litep2p2_connected = true;
+				}
+				_ => {},
+			}
+		}
+
+		if litep2p1_connected && litep2p2_connected {
+			break
+		}
+	}
+}
+
+#[tokio::test]
+async fn dial_failure() {
+	let (mut litep2p, handle) = make_litep2p().await;
+	let (tx, _rx) = async_channel::bounded(64);
+	let (outbound_tx, outbound_rx) = tracing_unbounded("outbound-request", 1000);
+	let senders = HashMap::from_iter([(ProtocolName::from("/protocol/1"), outbound_tx.clone())]);
+
+	let protocol = RequestResponseProtocol::new(
+		ProtocolName::from("/protocol/1"),
+		handle,
+		Arc::new(peerstore_handle_test()),
+		Some(tx),
+		outbound_rx,
+		senders,
+		None,
+	);
+
+	tokio::spawn(protocol.run());
+	tokio::spawn(async move { while let Some(_) = litep2p.next_event().await {} });
+
+	let peer = PeerId::random();
+	let (result_tx, result_rx) = oneshot::channel();
+
+	outbound_tx
+		.unbounded_send(OutboundRequest {
+			peer,
+			request: vec![1, 2, 3, 4],
+			sender: result_tx,
+			fallback_request: None,
+			dial_behavior: IfDisconnected::TryConnect,
+		})
+		.unwrap();
+
+	assert!(std::matches!(result_rx.await, Ok(Err(RequestFailure::Refused))));
+}
+
+#[tokio::test]
+async fn send_request_to_disconnected_peer() {
+	let (mut litep2p, handle) = make_litep2p().await;
+	let (tx, _rx) = async_channel::bounded(64);
+	let (outbound_tx, outbound_rx) = tracing_unbounded("outbound-request", 1000);
+	let senders = HashMap::from_iter([(ProtocolName::from("/protocol/1"), outbound_tx.clone())]);
+
+	let protocol = RequestResponseProtocol::new(
+		ProtocolName::from("/protocol/1"),
+		handle,
+		Arc::new(peerstore_handle_test()),
+		Some(tx),
+		outbound_rx,
+		senders,
+		None,
+	);
+
+	tokio::spawn(protocol.run());
+	tokio::spawn(async move { while let Some(_) = litep2p.next_event().await {} });
+
+	let peer = PeerId::random();
+	let (result_tx, result_rx) = oneshot::channel();
+
+	outbound_tx
+		.unbounded_send(OutboundRequest {
+			peer,
+			request: vec![1, 2, 3, 4],
+			sender: result_tx,
+			fallback_request: None,
+			dial_behavior: IfDisconnected::ImmediateError,
+		})
+		.unwrap();
+
+	assert!(std::matches!(result_rx.await, Ok(Err(RequestFailure::NotConnected))));
+}
+
+#[tokio::test]
+async fn send_request_to_disconnected_peer_and_dial() {
+	let (mut litep2p1, handle1) = make_litep2p().await;
+	let (mut litep2p2, handle2) = make_litep2p().await;
+
+	let peer1 = *litep2p1.local_peer_id();
+	let peer2 = *litep2p2.local_peer_id();
+
+	litep2p1.add_known_address(
+		peer2,
+		std::iter::once(litep2p2.listen_addresses().next().expect("listen address").clone()),
+	);
+
+	let (outbound_tx1, outbound_rx1) = tracing_unbounded("outbound-request", 1000);
+	let senders = HashMap::from_iter([(ProtocolName::from("/protocol/1"), outbound_tx1.clone())]);
+	let (tx1, _rx1) = async_channel::bounded(64);
+
+	let protocol1 = RequestResponseProtocol::new(
+		ProtocolName::from("/protocol/1"),
+		handle1,
+		Arc::new(peerstore_handle_test()),
+		Some(tx1),
+		outbound_rx1,
+		senders,
+		None,
+	);
+
+	let (outbound_tx2, outbound_rx2) = tracing_unbounded("outbound-request", 1000);
+	let senders = HashMap::from_iter([(ProtocolName::from("/protocol/1"), outbound_tx2)]);
+	let (tx2, rx2) = async_channel::bounded(64);
+
+	let protocol2 = RequestResponseProtocol::new(
+		ProtocolName::from("/protocol/1"),
+		handle2,
+		Arc::new(peerstore_handle_test()),
+		Some(tx2),
+		outbound_rx2,
+		senders,
+		None,
+	);
+
+	tokio::spawn(protocol1.run());
+	tokio::spawn(protocol2.run());
+	tokio::spawn(async move { while let Some(_) = litep2p1.next_event().await {} });
+	tokio::spawn(async move { while let Some(_) = litep2p2.next_event().await {} });
+
+	let (result_tx, _result_rx) = oneshot::channel();
+	outbound_tx1
+		.unbounded_send(OutboundRequest {
+			peer: peer2.into(),
+			request: vec![1, 2, 3, 4],
+			sender: result_tx,
+			fallback_request: None,
+			dial_behavior: IfDisconnected::TryConnect,
+		})
+		.unwrap();
+
+	match rx2.recv().await {
+		Ok(IncomingRequest { peer, payload, .. }) => {
+			assert_eq!(peer, Into::<PeerId>::into(peer1));
+			assert_eq!(payload, vec![1, 2, 3, 4]);
+		},
+		Err(error) => panic!("unexpected error: {error:?}"),
+	}
+}
+
+#[tokio::test]
+async fn too_many_inbound_requests() {
+	let (mut litep2p1, handle1) = make_litep2p().await;
+	let (mut litep2p2, mut handle2) = make_litep2p().await;
+	let peer1 = *litep2p1.local_peer_id();
+
+	connect_peers(&mut litep2p1, &mut litep2p2).await;
+
+	let (outbound_tx, outbound_rx) = tracing_unbounded("outbound-request", 1000);
+	let senders = HashMap::from_iter([(ProtocolName::from("/protocol/1"), outbound_tx)]);
+	let (tx, _rx) = async_channel::bounded(4);
+
+	let protocol = RequestResponseProtocol::new(
+		ProtocolName::from("/protocol/1"),
+		handle1,
+		Arc::new(peerstore_handle_test()),
+		Some(tx),
+		outbound_rx,
+		senders,
+		None,
+	);
+
+	tokio::spawn(protocol.run());
+	tokio::spawn(async move { while let Some(_) = litep2p1.next_event().await {} });
+	tokio::spawn(async move { while let Some(_) = litep2p2.next_event().await {} });
+
+	// send 5 requests and verify that one of the requests will fail
+	for _ in 0..5 {
+		handle2
+			.send_request(peer1, vec![1, 2, 3, 4], DialOptions::Reject)
+			.await
+			.unwrap();
+	}
+
+	// verify that one of the requests is rejected
+	match handle2.next().await {
+		Some(RequestResponseEvent::RequestFailed { peer, error, .. }) => {
+			assert_eq!(peer, peer1);
+			assert_eq!(error, RequestResponseError::Rejected);
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+
+	// verify that no other events are read from the handle
+	futures::future::poll_fn(|cx| match handle2.poll_next_unpin(cx) {
+		Poll::Pending => Poll::Ready(()),
+		event => panic!("invalid event: {event:?}"),
+	})
+	.await;
+}
+
+#[tokio::test]
+async fn feedback_works() {
+	let (mut litep2p1, handle1) = make_litep2p().await;
+	let (mut litep2p2, mut handle2) = make_litep2p().await;
+
+	let peer1 = *litep2p1.local_peer_id();
+	let peer2 = *litep2p2.local_peer_id();
+
+	connect_peers(&mut litep2p1, &mut litep2p2).await;
+
+	let (outbound_tx, outbound_rx) = tracing_unbounded("outbound-request", 1000);
+	let senders = HashMap::from_iter([(ProtocolName::from("/protocol/1"), outbound_tx)]);
+	let (tx, rx) = async_channel::bounded(4);
+
+	let protocol = RequestResponseProtocol::new(
+		ProtocolName::from("/protocol/1"),
+		handle1,
+		Arc::new(peerstore_handle_test()),
+		Some(tx),
+		outbound_rx,
+		senders,
+		None,
+	);
+
+	tokio::spawn(protocol.run());
+	tokio::spawn(async move { while let Some(_) = litep2p1.next_event().await {} });
+	tokio::spawn(async move { while let Some(_) = litep2p2.next_event().await {} });
+
+	let request_id = handle2
+		.send_request(peer1, vec![1, 2, 3, 4], DialOptions::Reject)
+		.await
+		.unwrap();
+
+	let rx = match rx.recv().await {
+		Ok(IncomingRequest { peer, payload, pending_response }) => {
+			assert_eq!(peer, peer2.into());
+			assert_eq!(payload, vec![1, 2, 3, 4]);
+
+			let (tx, rx) = oneshot::channel();
+			pending_response
+				.send(OutgoingResponse {
+					result: Ok(vec![5, 6, 7, 8]),
+					reputation_changes: Vec::new(),
+					sent_feedback: Some(tx),
+				})
+				.unwrap();
+			rx
+		},
+		event => panic!("invalid event: {event:?}"),
+	};
+
+	match handle2.next().await {
+		Some(RequestResponseEvent::ResponseReceived {
+			peer,
+			request_id: received_id,
+			response,
+			..
+		}) => {
+			assert_eq!(peer, peer1);
+			assert_eq!(request_id, received_id);
+			assert_eq!(response, vec![5, 6, 7, 8]);
+			assert!(rx.await.is_ok());
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+}
+
+#[tokio::test]
+async fn fallback_request_compatible_peers() {
+	// `litep2p1` supports both the new and the old protocol
+	let (mut litep2p1, handle1_1, handle1_2) = {
+		let (config1, handle1) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/2"))
+			.with_max_size(1024)
+			.build();
+
+		let (config2, handle2) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/1"))
+			.with_max_size(1024)
+			.build();
+		(
+			Litep2p::new(
+				Litep2pConfigBuilder::new()
+					.with_request_response_protocol(config1)
+					.with_request_response_protocol(config2)
+					.with_tcp(TcpConfig {
+						listen_addresses: vec![
+							"/ip4/0.0.0.0/tcp/0".parse().unwrap(),
+							"/ip6/::/tcp/0".parse().unwrap(),
+						],
+						..Default::default()
+					})
+					.build(),
+			)
+			.unwrap(),
+			handle1,
+			handle2,
+		)
+	};
+
+	// `litep2p2` supports only the new protocol
+	let (config2, handle2) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/2"))
+		.with_max_size(1024)
+		.build();
+
+	let mut litep2p2 = Litep2p::new(
+		Litep2pConfigBuilder::new()
+			.with_request_response_protocol(config2)
+			.with_tcp(TcpConfig {
+				listen_addresses: vec![
+					"/ip4/0.0.0.0/tcp/0".parse().unwrap(),
+					"/ip6/::/tcp/0".parse().unwrap(),
+				],
+				..Default::default()
+			})
+			.build(),
+	)
+	.unwrap();
+
+	let peer1 = *litep2p1.local_peer_id();
+	let peer2 = *litep2p2.local_peer_id();
+
+	connect_peers(&mut litep2p1, &mut litep2p2).await;
+
+	let (outbound_tx1, outbound_rx1) = tracing_unbounded("outbound-request", 1000);
+	let (outbound_tx_fallback, outbound_rx_fallback) = tracing_unbounded("outbound-request", 1000);
+
+	let senders1 = HashMap::from_iter([
+		(ProtocolName::from("/protocol/2"), outbound_tx1.clone()),
+		(ProtocolName::from("/protocol/1"), outbound_tx_fallback),
+	]);
+
+	let (tx1, _rx1) = async_channel::bounded(4);
+	let protocol1 = RequestResponseProtocol::new(
+		ProtocolName::from("/protocol/2"),
+		handle1_1,
+		Arc::new(peerstore_handle_test()),
+		Some(tx1),
+		outbound_rx1,
+		senders1.clone(),
+		None,
+	);
+
+	let (tx_fallback, _rx_fallback) = async_channel::bounded(4);
+	let protocol_fallback = RequestResponseProtocol::new(
+		ProtocolName::from("/protocol/1"),
+		handle1_2,
+		Arc::new(peerstore_handle_test()),
+		Some(tx_fallback),
+		outbound_rx_fallback,
+		senders1,
+		None,
+	);
+
+	let (outbound_tx2, outbound_rx2) = tracing_unbounded("outbound-request", 1000);
+	let senders2 = HashMap::from_iter([(ProtocolName::from("/protocol/2"), outbound_tx2)]);
+
+	let (tx2, rx2) = async_channel::bounded(4);
+	let protocol2 = RequestResponseProtocol::new(
+		ProtocolName::from("/protocol/2"),
+		handle2,
+		Arc::new(peerstore_handle_test()),
+		Some(tx2),
+		outbound_rx2,
+		senders2,
+		None,
+	);
+
+	tokio::spawn(protocol1.run());
+	tokio::spawn(protocol2.run());
+	tokio::spawn(protocol_fallback.run());
+	tokio::spawn(async move { while let Some(_) = litep2p1.next_event().await {} });
+	tokio::spawn(async move { while let Some(_) = litep2p2.next_event().await {} });
+
+	let (result_tx, result_rx) = oneshot::channel();
+	outbound_tx1
+		.unbounded_send(OutboundRequest {
+			peer: peer2.into(),
+			request: vec![1, 2, 3, 4],
+			sender: result_tx,
+			fallback_request: Some((vec![1, 3, 3, 7], ProtocolName::from("/protocol/1"))),
+			dial_behavior: IfDisconnected::ImmediateError,
+		})
+		.unwrap();
+
+	match rx2.recv().await {
+		Ok(IncomingRequest { peer, payload, pending_response }) => {
+			assert_eq!(peer, peer1.into());
+			assert_eq!(payload, vec![1, 2, 3, 4]);
+			pending_response
+				.send(OutgoingResponse {
+					result: Ok(vec![5, 6, 7, 8]),
+					reputation_changes: Vec::new(),
+					sent_feedback: None,
+				})
+				.unwrap();
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+
+	match result_rx.await {
+		Ok(Ok((response, protocol))) => {
+			assert_eq!(response, vec![5, 6, 7, 8]);
+			assert_eq!(protocol, ProtocolName::from("/protocol/2"));
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+}
+
+#[tokio::test]
+async fn fallback_request_old_peer_receives() {
+	sp_tracing::try_init_simple();
+
+	// `litep2p1` supports both the new and the old protocol
+	let (mut litep2p1, handle1_1, handle1_2) = {
+		let (config1, handle1) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/2"))
+			.with_max_size(1024)
+			.build();
+
+		let (config2, handle2) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/1"))
+			.with_max_size(1024)
+			.build();
+		(
+			Litep2p::new(
+				Litep2pConfigBuilder::new()
+					.with_request_response_protocol(config1)
+					.with_request_response_protocol(config2)
+					.with_tcp(TcpConfig {
+						listen_addresses: vec![
+							"/ip4/0.0.0.0/tcp/0".parse().unwrap(),
+							"/ip6/::/tcp/0".parse().unwrap(),
+						],
+						..Default::default()
+					})
+					.build(),
+			)
+			.unwrap(),
+			handle1,
+			handle2,
+		)
+	};
+
+	// `litep2p2` supports only the old protocol
+	let (config2, handle2) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/1"))
+		.with_max_size(1024)
+		.build();
+
+	let mut litep2p2 = Litep2p::new(
+		Litep2pConfigBuilder::new()
+			.with_request_response_protocol(config2)
+			.with_tcp(TcpConfig {
+				listen_addresses: vec![
+					"/ip4/0.0.0.0/tcp/0".parse().unwrap(),
+					"/ip6/::/tcp/0".parse().unwrap(),
+				],
+				..Default::default()
+			})
+			.build(),
+	)
+	.unwrap();
+
+	let peer1 = *litep2p1.local_peer_id();
+	let peer2 = *litep2p2.local_peer_id();
+
+	connect_peers(&mut litep2p1, &mut litep2p2).await;
+
+	let (outbound_tx1, outbound_rx1) = tracing_unbounded("outbound-request", 1000);
+	let (outbound_tx_fallback, outbound_rx_fallback) = tracing_unbounded("outbound-request", 1000);
+
+	let senders1 = HashMap::from_iter([
+		(ProtocolName::from("/protocol/2"), outbound_tx1.clone()),
+		(ProtocolName::from("/protocol/1"), outbound_tx_fallback),
+	]);
+
+	let (tx1, _rx1) = async_channel::bounded(4);
+	let protocol1 = RequestResponseProtocol::new(
+		ProtocolName::from("/protocol/2"),
+		handle1_1,
+		Arc::new(peerstore_handle_test()),
+		Some(tx1),
+		outbound_rx1,
+		senders1.clone(),
+		None,
+	);
+
+	let (tx_fallback, _rx_fallback) = async_channel::bounded(4);
+	let protocol_fallback = RequestResponseProtocol::new(
+		ProtocolName::from("/protocol/1"),
+		handle1_2,
+		Arc::new(peerstore_handle_test()),
+		Some(tx_fallback),
+		outbound_rx_fallback,
+		senders1,
+		None,
+	);
+
+	let (outbound_tx2, outbound_rx2) = tracing_unbounded("outbound-request", 1000);
+	let senders2 = HashMap::from_iter([(ProtocolName::from("/protocol/1"), outbound_tx2)]);
+
+	let (tx2, rx2) = async_channel::bounded(4);
+	let protocol2 = RequestResponseProtocol::new(
+		ProtocolName::from("/protocol/1"),
+		handle2,
+		Arc::new(peerstore_handle_test()),
+		Some(tx2),
+		outbound_rx2,
+		senders2,
+		None,
+	);
+
+	tokio::spawn(protocol1.run());
+	tokio::spawn(protocol2.run());
+	tokio::spawn(protocol_fallback.run());
+	tokio::spawn(async move { while let Some(_) = litep2p1.next_event().await {} });
+	tokio::spawn(async move { while let Some(_) = litep2p2.next_event().await {} });
+
+	let (result_tx, result_rx) = oneshot::channel();
+	outbound_tx1
+		.unbounded_send(OutboundRequest {
+			peer: peer2.into(),
+			request: vec![1, 2, 3, 4],
+			sender: result_tx,
+			fallback_request: Some((vec![1, 3, 3, 7], ProtocolName::from("/protocol/1"))),
+			dial_behavior: IfDisconnected::ImmediateError,
+		})
+		.unwrap();
+
+	match rx2.recv().await {
+		Ok(IncomingRequest { peer, payload, pending_response }) => {
+			assert_eq!(peer, peer1.into());
+			assert_eq!(payload, vec![1, 3, 3, 7]);
+			pending_response
+				.send(OutgoingResponse {
+					result: Ok(vec![1, 3, 3, 8]),
+					reputation_changes: Vec::new(),
+					sent_feedback: None,
+				})
+				.unwrap();
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+
+	match result_rx.await {
+		Ok(Ok((response, protocol))) => {
+			assert_eq!(response, vec![1, 3, 3, 8]);
+			assert_eq!(protocol, ProtocolName::from("/protocol/1"));
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+}
+
+#[tokio::test]
+async fn fallback_request_old_peer_sends() {
+	sp_tracing::try_init_simple();
+
+	// `litep2p1` supports both the new and the old protocol
+	let (mut litep2p1, handle1_1, handle1_2) = {
+		let (config1, handle1) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/2"))
+			.with_max_size(1024)
+			.build();
+
+		let (config2, handle2) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/1"))
+			.with_max_size(1024)
+			.build();
+		(
+			Litep2p::new(
+				Litep2pConfigBuilder::new()
+					.with_request_response_protocol(config1)
+					.with_request_response_protocol(config2)
+					.with_tcp(TcpConfig {
+						listen_addresses: vec![
+							"/ip4/0.0.0.0/tcp/0".parse().unwrap(),
+							"/ip6/::/tcp/0".parse().unwrap(),
+						],
+						..Default::default()
+					})
+					.build(),
+			)
+			.unwrap(),
+			handle1,
+			handle2,
+		)
+	};
+
+	// `litep2p2` supports only the old protocol
+	let (config2, handle2) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/1"))
+		.with_max_size(1024)
+		.build();
+
+	let mut litep2p2 = Litep2p::new(
+		Litep2pConfigBuilder::new()
+			.with_request_response_protocol(config2)
+			.with_tcp(TcpConfig {
+				listen_addresses: vec![
+					"/ip4/0.0.0.0/tcp/0".parse().unwrap(),
+					"/ip6/::/tcp/0".parse().unwrap(),
+				],
+				..Default::default()
+			})
+			.build(),
+	)
+	.unwrap();
+
+	let peer1 = *litep2p1.local_peer_id();
+	let peer2 = *litep2p2.local_peer_id();
+
+	connect_peers(&mut litep2p1, &mut litep2p2).await;
+
+	let (outbound_tx1, outbound_rx1) = tracing_unbounded("outbound-request", 1000);
+	let (outbound_tx_fallback, outbound_rx_fallback) = tracing_unbounded("outbound-request", 1000);
+
+	let senders1 = HashMap::from_iter([
+		(ProtocolName::from("/protocol/2"), outbound_tx1.clone()),
+		(ProtocolName::from("/protocol/1"), outbound_tx_fallback),
+	]);
+
+	let (tx1, _rx1) = async_channel::bounded(4);
+	let protocol1 = RequestResponseProtocol::new(
+		ProtocolName::from("/protocol/2"),
+		handle1_1,
+		Arc::new(peerstore_handle_test()),
+		Some(tx1),
+		outbound_rx1,
+		senders1.clone(),
+		None,
+	);
+
+	let (tx_fallback, rx_fallback) = async_channel::bounded(4);
+	let protocol_fallback = RequestResponseProtocol::new(
+		ProtocolName::from("/protocol/1"),
+		handle1_2,
+		Arc::new(peerstore_handle_test()),
+		Some(tx_fallback),
+		outbound_rx_fallback,
+		senders1,
+		None,
+	);
+
+	let (outbound_tx2, outbound_rx2) = tracing_unbounded("outbound-request", 1000);
+	let senders2 = HashMap::from_iter([(ProtocolName::from("/protocol/1"), outbound_tx2.clone())]);
+
+	let (tx2, _rx2) = async_channel::bounded(4);
+	let protocol2 = RequestResponseProtocol::new(
+		ProtocolName::from("/protocol/1"),
+		handle2,
+		Arc::new(peerstore_handle_test()),
+		Some(tx2),
+		outbound_rx2,
+		senders2,
+		None,
+	);
+
+	tokio::spawn(protocol1.run());
+	tokio::spawn(protocol2.run());
+	tokio::spawn(protocol_fallback.run());
+	tokio::spawn(async move { while let Some(_) = litep2p1.next_event().await {} });
+	tokio::spawn(async move { while let Some(_) = litep2p2.next_event().await {} });
+
+	let (result_tx, result_rx) = oneshot::channel();
+	outbound_tx2
+		.unbounded_send(OutboundRequest {
+			peer: peer1.into(),
+			request: vec![1, 2, 3, 4],
+			sender: result_tx,
+			fallback_request: None,
+			dial_behavior: IfDisconnected::ImmediateError,
+		})
+		.unwrap();
+
+	match rx_fallback.recv().await {
+		Ok(IncomingRequest { peer, payload, pending_response }) => {
+			assert_eq!(peer, peer2.into());
+			assert_eq!(payload, vec![1, 2, 3, 4]);
+			pending_response
+				.send(OutgoingResponse {
+					result: Ok(vec![1, 3, 3, 8]),
+					reputation_changes: Vec::new(),
+					sent_feedback: None,
+				})
+				.unwrap();
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+
+	match result_rx.await {
+		Ok(Ok((response, protocol))) => {
+			assert_eq!(response, vec![1, 3, 3, 8]);
+			assert_eq!(protocol, ProtocolName::from("/protocol/1"));
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+}
+
+#[tokio::test]
+async fn old_protocol_supported_but_no_fallback_provided() {
+	sp_tracing::try_init_simple();
+
+	// `litep2p1` supports both the new and the old protocol
+	let (mut litep2p1, handle1_1, handle1_2) = {
+		let (config1, handle1) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/2"))
+			.with_max_size(1024)
+			.build();
+
+		let (config2, handle2) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/1"))
+			.with_max_size(1024)
+			.build();
+		(
+			Litep2p::new(
+				Litep2pConfigBuilder::new()
+					.with_request_response_protocol(config1)
+					.with_request_response_protocol(config2)
+					.with_tcp(TcpConfig {
+						listen_addresses: vec![
+							"/ip4/0.0.0.0/tcp/0".parse().unwrap(),
+							"/ip6/::/tcp/0".parse().unwrap(),
+						],
+						..Default::default()
+					})
+					.build(),
+			)
+			.unwrap(),
+			handle1,
+			handle2,
+		)
+	};
+
+	// `litep2p2` supports only the old protocol
+	let (config2, handle2) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/1"))
+		.with_max_size(1024)
+		.build();
+
+	let mut litep2p2 = Litep2p::new(
+		Litep2pConfigBuilder::new()
+			.with_request_response_protocol(config2)
+			.with_tcp(TcpConfig {
+				listen_addresses: vec![
+					"/ip4/0.0.0.0/tcp/0".parse().unwrap(),
+					"/ip6/::/tcp/0".parse().unwrap(),
+				],
+				..Default::default()
+			})
+			.build(),
+	)
+	.unwrap();
+
+	let peer2 = *litep2p2.local_peer_id();
+
+	connect_peers(&mut litep2p1, &mut litep2p2).await;
+
+	let (outbound_tx1, outbound_rx1) = tracing_unbounded("outbound-request", 1000);
+	let (outbound_tx_fallback, outbound_rx_fallback) = tracing_unbounded("outbound-request", 1000);
+
+	let senders1 = HashMap::from_iter([
+		(ProtocolName::from("/protocol/2"), outbound_tx1.clone()),
+		(ProtocolName::from("/protocol/1"), outbound_tx_fallback),
+	]);
+
+	let (tx1, _rx1) = async_channel::bounded(4);
+	let protocol1 = RequestResponseProtocol::new(
+		ProtocolName::from("/protocol/2"),
+		handle1_1,
+		Arc::new(peerstore_handle_test()),
+		Some(tx1),
+		outbound_rx1,
+		senders1.clone(),
+		None,
+	);
+
+	let (tx_fallback, _rx_fallback) = async_channel::bounded(4);
+	let protocol_fallback = RequestResponseProtocol::new(
+		ProtocolName::from("/protocol/1"),
+		handle1_2,
+		Arc::new(peerstore_handle_test()),
+		Some(tx_fallback),
+		outbound_rx_fallback,
+		senders1,
+		None,
+	);
+
+	let (outbound_tx2, outbound_rx2) = tracing_unbounded("outbound-request", 1000);
+	let senders2 = HashMap::from_iter([(ProtocolName::from("/protocol/1"), outbound_tx2)]);
+
+	let (tx2, _rx2) = async_channel::bounded(4);
+	let protocol2 = RequestResponseProtocol::new(
+		ProtocolName::from("/protocol/1"),
+		handle2,
+		Arc::new(peerstore_handle_test()),
+		Some(tx2),
+		outbound_rx2,
+		senders2,
+		None,
+	);
+
+	tokio::spawn(protocol1.run());
+	tokio::spawn(protocol2.run());
+	tokio::spawn(protocol_fallback.run());
+	tokio::spawn(async move { while let Some(_) = litep2p1.next_event().await {} });
+	tokio::spawn(async move { while let Some(_) = litep2p2.next_event().await {} });
+
+	let (result_tx, result_rx) = oneshot::channel();
+	outbound_tx1
+		.unbounded_send(OutboundRequest {
+			peer: peer2.into(),
+			request: vec![1, 2, 3, 4],
+			sender: result_tx,
+			fallback_request: None,
+			dial_behavior: IfDisconnected::ImmediateError,
+		})
+		.unwrap();
+
+	match result_rx.await {
+		Ok(Err(error)) => {
+			assert!(std::matches!(error, RequestFailure::Refused));
+		},
+		event => panic!("invalid event: {event:?}"),
+	}
+}
diff --git a/substrate/client/network/src/mock.rs b/substrate/client/network/src/mock.rs
index 534b81189707163340f8887d16afb401a045dd24..f6c69227fda15241a84903328be03ef9f5ac4bc3 100644
--- a/substrate/client/network/src/mock.rs
+++ b/substrate/client/network/src/mock.rs
@@ -18,10 +18,15 @@
 
 //! Mocked components for tests.
 
-use crate::{peer_store::PeerStoreProvider, protocol_controller::ProtocolHandle, ReputationChange};
-use libp2p::PeerId;
+use crate::{
+	peer_store::{PeerStoreProvider, ProtocolHandle},
+	ReputationChange,
+};
+
 use sc_network_common::role::ObservedRole;
-use std::collections::HashSet;
+use sc_network_types::PeerId;
+
+use std::{collections::HashSet, sync::Arc};
 
 /// No-op `PeerStore`.
 #[derive(Debug)]
@@ -33,15 +38,15 @@ impl PeerStoreProvider for MockPeerStore {
 		false
 	}
 
-	fn register_protocol(&self, _protocol_handle: ProtocolHandle) {
+	fn register_protocol(&self, _protocol_handle: Arc<dyn ProtocolHandle>) {
 		// Make sure not to fail.
 	}
 
-	fn report_disconnect(&mut self, _peer_id: PeerId) {
+	fn report_disconnect(&self, _peer_id: PeerId) {
 		// Make sure not to fail.
 	}
 
-	fn report_peer(&mut self, _peer_id: PeerId, _change: ReputationChange) {
+	fn report_peer(&self, _peer_id: PeerId, _change: ReputationChange) {
 		// Make sure not to fail.
 	}
 
@@ -54,11 +59,19 @@ impl PeerStoreProvider for MockPeerStore {
 		None
 	}
 
-	fn set_peer_role(&mut self, _peer_id: &PeerId, _role: ObservedRole) {
+	fn set_peer_role(&self, _peer_id: &PeerId, _role: ObservedRole) {
 		unimplemented!();
 	}
 
-	fn outgoing_candidates(&self, _count: usize, _ignored: HashSet<&PeerId>) -> Vec<PeerId> {
+	fn outgoing_candidates(&self, _count: usize, _ignored: HashSet<PeerId>) -> Vec<PeerId> {
+		unimplemented!()
+	}
+
+	fn num_known_peers(&self) -> usize {
+		0usize
+	}
+
+	fn add_known_peer(&self, _peer_id: PeerId) {
 		unimplemented!()
 	}
 }
diff --git a/substrate/client/network/src/peer_store.rs b/substrate/client/network/src/peer_store.rs
index 4b28b8e7544031139f2906df7d109b24fafe9230..15d5b746c2982b2e789f8366fbd9d15b46499ca6 100644
--- a/substrate/client/network/src/peer_store.rs
+++ b/substrate/client/network/src/peer_store.rs
@@ -19,7 +19,8 @@
 //! [`PeerStore`] manages peer reputations and provides connection candidates to
 //! [`crate::protocol_controller::ProtocolController`].
 
-use libp2p::PeerId;
+use crate::{service::traits::PeerStore as PeerStoreT, PeerId};
+
 use log::trace;
 use parking_lot::Mutex;
 use partial_sort::PartialSort;
@@ -33,8 +34,6 @@ use std::{
 };
 use wasm_timer::Delay;
 
-use crate::protocol_controller::ProtocolHandle;
-
 /// Log target for this file.
 pub const LOG_TARGET: &str = "peerset";
 
@@ -52,31 +51,50 @@ const INVERSE_DECREMENT: i32 = 50;
 /// remove it, once the reputation value reaches 0.
 const FORGET_AFTER: Duration = Duration::from_secs(3600);
 
+/// Trait describing the required functionality from a `Peerset` handle.
+pub trait ProtocolHandle: Debug + Send + Sync {
+	/// Disconnect peer.
+	fn disconnect_peer(&self, peer_id: sc_network_types::PeerId);
+}
+
 /// Trait providing peer reputation management and connection candidates.
-pub trait PeerStoreProvider: Debug + Send {
+pub trait PeerStoreProvider: Debug + Send + Sync {
 	/// Check whether the peer is banned.
-	fn is_banned(&self, peer_id: &PeerId) -> bool;
+	fn is_banned(&self, peer_id: &sc_network_types::PeerId) -> bool;
 
 	/// Register a protocol handle to disconnect peers whose reputation drops below the threshold.
-	fn register_protocol(&self, protocol_handle: ProtocolHandle);
+	fn register_protocol(&self, protocol_handle: Arc<dyn ProtocolHandle>);
 
 	/// Report peer disconnection for reputation adjustment.
-	fn report_disconnect(&mut self, peer_id: PeerId);
+	fn report_disconnect(&self, peer_id: sc_network_types::PeerId);
 
 	/// Adjust peer reputation.
-	fn report_peer(&mut self, peer_id: PeerId, change: ReputationChange);
+	fn report_peer(&self, peer_id: sc_network_types::PeerId, change: ReputationChange);
 
 	/// Set peer role.
-	fn set_peer_role(&mut self, peer_id: &PeerId, role: ObservedRole);
+	fn set_peer_role(&self, peer_id: &sc_network_types::PeerId, role: ObservedRole);
 
 	/// Get peer reputation.
-	fn peer_reputation(&self, peer_id: &PeerId) -> i32;
+	fn peer_reputation(&self, peer_id: &sc_network_types::PeerId) -> i32;
 
 	/// Get peer role, if available.
-	fn peer_role(&self, peer_id: &PeerId) -> Option<ObservedRole>;
+	fn peer_role(&self, peer_id: &sc_network_types::PeerId) -> Option<ObservedRole>;
 
 	/// Get candidates with highest reputations for initiating outgoing connections.
-	fn outgoing_candidates(&self, count: usize, ignored: HashSet<&PeerId>) -> Vec<PeerId>;
+	fn outgoing_candidates(
+		&self,
+		count: usize,
+		ignored: HashSet<sc_network_types::PeerId>,
+	) -> Vec<sc_network_types::PeerId>;
+
+	/// Get the number of known peers.
+	///
+	/// This number might not include some connected peers in rare cases when their reputation
+	/// was not updated for one hour, because their entries in [`PeerStore`] were dropped.
+	fn num_known_peers(&self) -> usize;
+
+	/// Add known peer.
+	fn add_known_peer(&self, peer_id: sc_network_types::PeerId);
 }
 
 /// Actual implementation of peer reputations and connection candidates provider.
@@ -86,51 +104,56 @@ pub struct PeerStoreHandle {
 }
 
 impl PeerStoreProvider for PeerStoreHandle {
-	fn is_banned(&self, peer_id: &PeerId) -> bool {
-		self.inner.lock().is_banned(peer_id)
+	fn is_banned(&self, peer_id: &sc_network_types::PeerId) -> bool {
+		self.inner.lock().is_banned(&peer_id.into())
 	}
 
-	fn register_protocol(&self, protocol_handle: ProtocolHandle) {
+	fn register_protocol(&self, protocol_handle: Arc<dyn ProtocolHandle>) {
 		self.inner.lock().register_protocol(protocol_handle);
 	}
 
-	fn report_disconnect(&mut self, peer_id: PeerId) {
-		self.inner.lock().report_disconnect(peer_id)
+	fn report_disconnect(&self, peer_id: sc_network_types::PeerId) {
+		let mut inner = self.inner.lock();
+		inner.report_disconnect(peer_id.into())
 	}
 
-	fn report_peer(&mut self, peer_id: PeerId, change: ReputationChange) {
-		self.inner.lock().report_peer(peer_id, change)
+	fn report_peer(&self, peer_id: sc_network_types::PeerId, change: ReputationChange) {
+		let mut inner = self.inner.lock();
+		inner.report_peer(peer_id.into(), change)
 	}
 
-	fn set_peer_role(&mut self, peer_id: &PeerId, role: ObservedRole) {
-		self.inner.lock().set_peer_role(peer_id, role)
+	fn set_peer_role(&self, peer_id: &sc_network_types::PeerId, role: ObservedRole) {
+		let mut inner = self.inner.lock();
+		inner.set_peer_role(&peer_id.into(), role)
 	}
 
-	fn peer_reputation(&self, peer_id: &PeerId) -> i32 {
-		self.inner.lock().peer_reputation(peer_id)
+	fn peer_reputation(&self, peer_id: &sc_network_types::PeerId) -> i32 {
+		self.inner.lock().peer_reputation(&peer_id.into())
 	}
 
-	fn peer_role(&self, peer_id: &PeerId) -> Option<ObservedRole> {
-		self.inner.lock().peer_role(peer_id)
+	fn peer_role(&self, peer_id: &sc_network_types::PeerId) -> Option<ObservedRole> {
+		self.inner.lock().peer_role(&peer_id.into())
 	}
 
-	fn outgoing_candidates(&self, count: usize, ignored: HashSet<&PeerId>) -> Vec<PeerId> {
-		self.inner.lock().outgoing_candidates(count, ignored)
+	fn outgoing_candidates(
+		&self,
+		count: usize,
+		ignored: HashSet<sc_network_types::PeerId>,
+	) -> Vec<sc_network_types::PeerId> {
+		self.inner
+			.lock()
+			.outgoing_candidates(count, ignored.iter().map(|peer_id| (*peer_id).into()).collect())
+			.iter()
+			.map(|peer_id| peer_id.into())
+			.collect()
 	}
-}
 
-impl PeerStoreHandle {
-	/// Get the number of known peers.
-	///
-	/// This number might not include some connected peers in rare cases when their reputation
-	/// was not updated for one hour, because their entries in [`PeerStore`] were dropped.
-	pub fn num_known_peers(&self) -> usize {
+	fn num_known_peers(&self) -> usize {
 		self.inner.lock().peers.len()
 	}
 
-	/// Add known peer.
-	pub fn add_known_peer(&mut self, peer_id: PeerId) {
-		self.inner.lock().add_known_peer(peer_id);
+	fn add_known_peer(&self, peer_id: sc_network_types::PeerId) {
+		self.inner.lock().add_known_peer(peer_id.into());
 	}
 }
 
@@ -210,7 +233,7 @@ impl PeerInfo {
 #[derive(Debug)]
 struct PeerStoreInner {
 	peers: HashMap<PeerId, PeerInfo>,
-	protocols: Vec<ProtocolHandle>,
+	protocols: Vec<Arc<dyn ProtocolHandle>>,
 }
 
 impl PeerStoreInner {
@@ -218,7 +241,7 @@ impl PeerStoreInner {
 		self.peers.get(peer_id).map_or(false, |info| info.is_banned())
 	}
 
-	fn register_protocol(&mut self, protocol_handle: ProtocolHandle) {
+	fn register_protocol(&mut self, protocol_handle: Arc<dyn ProtocolHandle>) {
 		self.protocols.push(protocol_handle);
 	}
 
@@ -240,7 +263,7 @@ impl PeerStoreInner {
 		peer_info.add_reputation(change.value);
 
 		if peer_info.reputation < BANNED_THRESHOLD {
-			self.protocols.iter().for_each(|handle| handle.disconnect_peer(peer_id));
+			self.protocols.iter().for_each(|handle| handle.disconnect_peer(peer_id.into()));
 
 			log::warn!(
 				target: LOG_TARGET,
@@ -283,7 +306,7 @@ impl PeerStoreInner {
 		self.peers.get(peer_id).map_or(None, |info| info.role)
 	}
 
-	fn outgoing_candidates(&self, count: usize, ignored: HashSet<&PeerId>) -> Vec<PeerId> {
+	fn outgoing_candidates(&self, count: usize, ignored: HashSet<PeerId>) -> Vec<PeerId> {
 		let mut candidates = self
 			.peers
 			.iter()
@@ -378,6 +401,17 @@ impl PeerStore {
 	}
 }
 
+#[async_trait::async_trait]
+impl PeerStoreT for PeerStore {
+	fn handle(&self) -> Arc<dyn PeerStoreProvider> {
+		Arc::new(self.handle())
+	}
+
+	async fn run(self) {
+		self.run().await;
+	}
+}
+
 #[cfg(test)]
 mod tests {
 	use super::PeerInfo;
diff --git a/substrate/client/network/src/protocol.rs b/substrate/client/network/src/protocol.rs
index 73b1cd97279637380a24ebdfddcd3378aad819cb..2e57ff1b6a86f7512acd704c755692daeec51a0f 100644
--- a/substrate/client/network/src/protocol.rs
+++ b/substrate/client/network/src/protocol.rs
@@ -18,9 +18,9 @@
 
 use crate::{
 	config, error,
-	peer_store::{PeerStoreHandle, PeerStoreProvider},
+	peer_store::PeerStoreProvider,
 	protocol_controller::{self, SetId},
-	service::traits::Direction,
+	service::{metrics::NotificationMetrics, traits::Direction},
 	types::ProtocolName,
 };
 
@@ -36,14 +36,13 @@ use libp2p::{
 use log::warn;
 
 use codec::DecodeAll;
-use prometheus_endpoint::Registry;
 use sc_network_common::role::Roles;
 use sc_utils::mpsc::TracingUnboundedReceiver;
 use sp_runtime::traits::Block as BlockT;
 
-use std::{collections::HashSet, iter, task::Poll};
+use std::{collections::HashSet, iter, sync::Arc, task::Poll};
 
-use notifications::{metrics, Notifications, NotificationsOut};
+use notifications::{Notifications, NotificationsOut};
 
 pub(crate) use notifications::ProtocolHandle;
 
@@ -69,7 +68,7 @@ pub struct Protocol<B: BlockT> {
 	/// List of notifications protocols that have been registered.
 	notification_protocols: Vec<ProtocolName>,
 	/// Handle to `PeerStore`.
-	peer_store_handle: PeerStoreHandle,
+	peer_store_handle: Arc<dyn PeerStoreProvider>,
 	/// Streams for peers whose handshake couldn't be determined.
 	bad_handshake_streams: HashSet<PeerId>,
 	sync_handle: ProtocolHandle,
@@ -80,10 +79,10 @@ impl<B: BlockT> Protocol<B> {
 	/// Create a new instance.
 	pub(crate) fn new(
 		roles: Roles,
-		registry: &Option<Registry>,
+		notification_metrics: NotificationMetrics,
 		notification_protocols: Vec<config::NonDefaultSetConfig>,
 		block_announces_protocol: config::NonDefaultSetConfig,
-		peer_store_handle: PeerStoreHandle,
+		peer_store_handle: Arc<dyn PeerStoreProvider>,
 		protocol_controller_handles: Vec<protocol_controller::ProtocolHandle>,
 		from_protocol_controllers: TracingUnboundedReceiver<protocol_controller::Message>,
 	) -> error::Result<(Self, Vec<ProtocolHandle>)> {
@@ -122,16 +121,15 @@ impl<B: BlockT> Protocol<B> {
 			}))
 			.unzip();
 
-			let metrics = registry.as_ref().and_then(|registry| metrics::register(&registry).ok());
 			handles.iter_mut().for_each(|handle| {
-				handle.set_metrics(metrics.clone());
+				handle.set_metrics(notification_metrics.clone());
 			});
 
 			(
 				Notifications::new(
 					protocol_controller_handles,
 					from_protocol_controllers,
-					metrics,
+					notification_metrics,
 					protocol_configs.into_iter(),
 				),
 				installed_protocols,
@@ -179,7 +177,7 @@ impl<B: BlockT> Protocol<B> {
 	fn role_available(&self, peer_id: &PeerId, handshake: &Vec<u8>) -> bool {
 		match Roles::decode_all(&mut &handshake[..]) {
 			Ok(_) => true,
-			Err(_) => self.peer_store_handle.peer_role(&peer_id).is_some(),
+			Err(_) => self.peer_store_handle.peer_role(&((*peer_id).into())).is_some(),
 		}
 	}
 }
diff --git a/substrate/client/network/src/protocol/notifications.rs b/substrate/client/network/src/protocol/notifications.rs
index 8becc1390e7d479b8386d17c03ca73adfc0b127e..10fa329097d1b662350d50aee9ddf77de0d09e21 100644
--- a/substrate/client/network/src/protocol/notifications.rs
+++ b/substrate/client/network/src/protocol/notifications.rs
@@ -25,7 +25,7 @@ pub use self::{
 	service::{notification_service, ProtocolHandlePair},
 };
 
-pub(crate) use self::service::{metrics, ProtocolHandle};
+pub(crate) use self::service::ProtocolHandle;
 
 mod behaviour;
 mod handler;
diff --git a/substrate/client/network/src/protocol/notifications/behaviour.rs b/substrate/client/network/src/protocol/notifications/behaviour.rs
index b945d4bfc6043cccbeb41219d2f25edd33c5d510..03ba437a66726caf6c284fc3863c4aca01118488 100644
--- a/substrate/client/network/src/protocol/notifications/behaviour.rs
+++ b/substrate/client/network/src/protocol/notifications/behaviour.rs
@@ -19,10 +19,13 @@
 use crate::{
 	protocol::notifications::{
 		handler::{self, NotificationsSink, NotifsHandler, NotifsHandlerIn, NotifsHandlerOut},
-		service::{metrics, NotificationCommand, ProtocolHandle, ValidationCallResult},
+		service::{NotificationCommand, ProtocolHandle, ValidationCallResult},
 	},
 	protocol_controller::{self, IncomingIndex, Message, SetId},
-	service::traits::{Direction, ValidationResult},
+	service::{
+		metrics::NotificationMetrics,
+		traits::{Direction, ValidationResult},
+	},
 	types::ProtocolName,
 };
 
@@ -167,7 +170,7 @@ pub struct Notifications {
 	pending_inbound_validations: FuturesUnordered<PendingInboundValidation>,
 
 	/// Metrics for notifications.
-	metrics: Option<metrics::Metrics>,
+	metrics: NotificationMetrics,
 }
 
 /// Configuration for a notifications protocol.
@@ -404,7 +407,7 @@ impl Notifications {
 	pub(crate) fn new(
 		protocol_controller_handles: Vec<protocol_controller::ProtocolHandle>,
 		from_protocol_controllers: TracingUnboundedReceiver<Message>,
-		metrics: Option<metrics::Metrics>,
+		metrics: NotificationMetrics,
 		notif_protocols: impl Iterator<
 			Item = (
 				ProtocolConfig,
@@ -1230,7 +1233,7 @@ impl NetworkBehaviour for Notifications {
 				send_back_addr: remote_addr.clone(),
 			},
 			self.notif_protocols.clone(),
-			self.metrics.clone(),
+			Some(self.metrics.clone()),
 		))
 	}
 
@@ -1245,7 +1248,7 @@ impl NetworkBehaviour for Notifications {
 			peer,
 			ConnectedPoint::Dialer { address: addr.clone(), role_override },
 			self.notif_protocols.clone(),
-			self.metrics.clone(),
+			Some(self.metrics.clone()),
 		))
 	}
 
@@ -2442,7 +2445,7 @@ mod tests {
 				reserved_only: false,
 			},
 			to_notifications,
-			Box::new(MockPeerStore {}),
+			Arc::new(MockPeerStore {}),
 		);
 
 		let (notif_handle, command_stream) = protocol_handle_pair.split();
@@ -2450,7 +2453,7 @@ mod tests {
 			Notifications::new(
 				vec![handle],
 				from_controller,
-				None,
+				NotificationMetrics::new(None),
 				iter::once((
 					ProtocolConfig {
 						name: "/foo".into(),
diff --git a/substrate/client/network/src/protocol/notifications/handler.rs b/substrate/client/network/src/protocol/notifications/handler.rs
index 391252c3ffe71cdf3f7e9b5d1efc0ee5ad0ffcc6..aa047feb87b2fb569b4c6c029e2b158445357527 100644
--- a/substrate/client/network/src/protocol/notifications/handler.rs
+++ b/substrate/client/network/src/protocol/notifications/handler.rs
@@ -58,13 +58,11 @@
 //! [`NotifsHandlerIn::Open`] has gotten an answer.
 
 use crate::{
-	protocol::notifications::{
-		service::metrics,
-		upgrade::{
-			NotificationsIn, NotificationsInSubstream, NotificationsOut, NotificationsOutSubstream,
-			UpgradeCollec,
-		},
+	protocol::notifications::upgrade::{
+		NotificationsIn, NotificationsInSubstream, NotificationsOut, NotificationsOutSubstream,
+		UpgradeCollec,
 	},
+	service::metrics::NotificationMetrics,
 	types::ProtocolName,
 };
 
@@ -131,7 +129,7 @@ pub struct NotifsHandler {
 	>,
 
 	/// Metrics.
-	metrics: Option<Arc<metrics::Metrics>>,
+	metrics: Option<Arc<NotificationMetrics>>,
 }
 
 impl NotifsHandler {
@@ -140,7 +138,7 @@ impl NotifsHandler {
 		peer_id: PeerId,
 		endpoint: ConnectedPoint,
 		protocols: Vec<ProtocolConfig>,
-		metrics: Option<metrics::Metrics>,
+		metrics: Option<NotificationMetrics>,
 	) -> Self {
 		Self {
 			protocols: protocols
@@ -345,7 +343,7 @@ pub enum NotifsHandlerOut {
 #[derive(Debug, Clone)]
 pub struct NotificationsSink {
 	inner: Arc<NotificationsSinkInner>,
-	metrics: Option<Arc<metrics::Metrics>>,
+	metrics: Option<Arc<NotificationMetrics>>,
 }
 
 impl NotificationsSink {
@@ -372,7 +370,7 @@ impl NotificationsSink {
 	}
 
 	/// Get reference to metrics.
-	pub fn metrics(&self) -> &Option<Arc<metrics::Metrics>> {
+	pub fn metrics(&self) -> &Option<Arc<NotificationMetrics>> {
 		&self.metrics
 	}
 }
diff --git a/substrate/client/network/src/protocol/notifications/service/metrics.rs b/substrate/client/network/src/protocol/notifications/service/metrics.rs
index 2a57d57c17576fde27be047d747c94fdd19e64e5..f24f73fc5567cfa64d5ed2d3600598d6d2416a1f 100644
--- a/substrate/client/network/src/protocol/notifications/service/metrics.rs
+++ b/substrate/client/network/src/protocol/notifications/service/metrics.rs
@@ -16,115 +16,40 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-use crate::types::ProtocolName;
-
-use prometheus_endpoint::{
-	self as prometheus, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry,
-	U64,
-};
-
-use std::sync::Arc;
-
-/// Notification metrics.
-#[derive(Debug, Clone)]
-pub struct Metrics {
-	// Total number of opened substreams.
-	pub notifications_streams_opened_total: CounterVec<U64>,
-
-	/// Total number of closed substreams.
-	pub notifications_streams_closed_total: CounterVec<U64>,
-
-	/// In/outbound notification sizes.
-	pub notifications_sizes: HistogramVec,
-}
-
-impl Metrics {
-	fn register(registry: &Registry) -> Result<Self, PrometheusError> {
-		Ok(Self {
-			notifications_sizes: prometheus::register(
-				HistogramVec::new(
-					HistogramOpts {
-						common_opts: Opts::new(
-							"substrate_sub_libp2p_notifications_sizes",
-							"Sizes of the notifications send to and received from all nodes",
-						),
-						buckets: prometheus::exponential_buckets(64.0, 4.0, 8)
-							.expect("parameters are always valid values; qed"),
-					},
-					&["direction", "protocol"],
-				)?,
-				registry,
-			)?,
-			notifications_streams_closed_total: prometheus::register(
-				CounterVec::new(
-					Opts::new(
-						"substrate_sub_libp2p_notifications_streams_closed_total",
-						"Total number of notification substreams that have been closed",
-					),
-					&["protocol"],
-				)?,
-				registry,
-			)?,
-			notifications_streams_opened_total: prometheus::register(
-				CounterVec::new(
-					Opts::new(
-						"substrate_sub_libp2p_notifications_streams_opened_total",
-						"Total number of notification substreams that have been opened",
-					),
-					&["protocol"],
-				)?,
-				registry,
-			)?,
-		})
-	}
-}
-
-/// Register metrics.
-pub fn register(registry: &Registry) -> Result<Metrics, PrometheusError> {
-	Metrics::register(registry)
-}
+use crate::{service::metrics::NotificationMetrics, types::ProtocolName};
 
 /// Register opened substream to Prometheus.
-pub fn register_substream_opened(metrics: &Option<Metrics>, protocol: &ProtocolName) {
+pub fn register_substream_opened(metrics: &Option<NotificationMetrics>, protocol: &ProtocolName) {
 	if let Some(metrics) = metrics {
-		metrics.notifications_streams_opened_total.with_label_values(&[&protocol]).inc();
+		metrics.register_substream_opened(&protocol);
 	}
 }
 
 /// Register closed substream to Prometheus.
-pub fn register_substream_closed(metrics: &Option<Metrics>, protocol: &ProtocolName) {
+pub fn register_substream_closed(metrics: &Option<NotificationMetrics>, protocol: &ProtocolName) {
 	if let Some(metrics) = metrics {
-		metrics
-			.notifications_streams_closed_total
-			.with_label_values(&[&protocol[..]])
-			.inc();
+		metrics.register_substream_closed(&protocol);
 	}
 }
 
 /// Register sent notification to Prometheus.
 pub fn register_notification_sent(
-	metrics: &Option<Arc<Metrics>>,
+	metrics: &Option<std::sync::Arc<NotificationMetrics>>,
 	protocol: &ProtocolName,
 	size: usize,
 ) {
 	if let Some(metrics) = metrics {
-		metrics
-			.notifications_sizes
-			.with_label_values(&["out", protocol])
-			.observe(size as f64);
+		metrics.register_notification_sent(protocol, size);
 	}
 }
 
 /// Register received notification to Prometheus.
 pub fn register_notification_received(
-	metrics: &Option<Metrics>,
+	metrics: &Option<NotificationMetrics>,
 	protocol: &ProtocolName,
 	size: usize,
 ) {
 	if let Some(metrics) = metrics {
-		metrics
-			.notifications_sizes
-			.with_label_values(&["in", protocol])
-			.observe(size as f64);
+		metrics.register_notification_received(protocol, size);
 	}
 }
diff --git a/substrate/client/network/src/protocol/notifications/service/mod.rs b/substrate/client/network/src/protocol/notifications/service/mod.rs
index dfb19daa28efee310d65275d57ac294543e69321..15d289d170ee83e52476d4b5a1c5a10bde3274c8 100644
--- a/substrate/client/network/src/protocol/notifications/service/mod.rs
+++ b/substrate/client/network/src/protocol/notifications/service/mod.rs
@@ -21,17 +21,20 @@
 use crate::{
 	error,
 	protocol::notifications::handler::NotificationsSink,
-	service::traits::{
-		Direction, MessageSink, NotificationEvent, NotificationService, ValidationResult,
+	service::{
+		metrics::NotificationMetrics,
+		traits::{
+			Direction, MessageSink, NotificationEvent, NotificationService, ValidationResult,
+		},
 	},
 	types::ProtocolName,
+	PeerId,
 };
 
 use futures::{
 	stream::{FuturesUnordered, Stream},
 	StreamExt,
 };
-use libp2p::PeerId;
 use parking_lot::Mutex;
 use tokio::sync::{mpsc, oneshot};
 use tokio_stream::wrappers::ReceiverStream;
@@ -66,7 +69,7 @@ impl MessageSink for NotificationSink {
 	fn send_sync_notification(&self, notification: Vec<u8>) {
 		let sink = self.lock();
 
-		metrics::register_notification_sent(&sink.0.metrics(), &sink.1, notification.len());
+		metrics::register_notification_sent(sink.0.metrics(), &sink.1, notification.len());
 		sink.0.send_sync_notification(notification);
 	}
 
@@ -87,7 +90,7 @@ impl MessageSink for NotificationSink {
 			.map_err(|_| error::Error::ConnectionClosed)?;
 
 		permit.send(notification).map_err(|_| error::Error::ChannelClosed).map(|res| {
-			metrics::register_notification_sent(&sink.0.metrics(), &sink.1, notification_len);
+			metrics::register_notification_sent(sink.0.metrics(), &sink.1, notification_len);
 			res
 		})
 	}
@@ -220,20 +223,20 @@ impl NotificationHandle {
 #[async_trait::async_trait]
 impl NotificationService for NotificationHandle {
 	/// Instruct `Notifications` to open a new substream for `peer`.
-	async fn open_substream(&mut self, _peer: PeerId) -> Result<(), ()> {
+	async fn open_substream(&mut self, _peer: sc_network_types::PeerId) -> Result<(), ()> {
 		todo!("support for opening substreams not implemented yet");
 	}
 
 	/// Instruct `Notifications` to close substream for `peer`.
-	async fn close_substream(&mut self, _peer: PeerId) -> Result<(), ()> {
+	async fn close_substream(&mut self, _peer: sc_network_types::PeerId) -> Result<(), ()> {
 		todo!("support for closing substreams not implemented yet, call `NetworkService::disconnect_peer()` instead");
 	}
 
 	/// Send synchronous `notification` to `peer`.
-	fn send_sync_notification(&self, peer: &PeerId, notification: Vec<u8>) {
-		if let Some(info) = self.peers.get(&peer) {
+	fn send_sync_notification(&mut self, peer: &sc_network_types::PeerId, notification: Vec<u8>) {
+		if let Some(info) = self.peers.get(&((*peer).into())) {
 			metrics::register_notification_sent(
-				&info.sink.metrics(),
+				info.sink.metrics(),
 				&self.protocol,
 				notification.len(),
 			);
@@ -244,12 +247,16 @@ impl NotificationService for NotificationHandle {
 
 	/// Send asynchronous `notification` to `peer`, allowing sender to exercise backpressure.
 	async fn send_async_notification(
-		&self,
-		peer: &PeerId,
+		&mut self,
+		peer: &sc_network_types::PeerId,
 		notification: Vec<u8>,
 	) -> Result<(), error::Error> {
 		let notification_len = notification.len();
-		let sink = &self.peers.get(&peer).ok_or_else(|| error::Error::PeerDoesntExist(*peer))?.sink;
+		let sink = &self
+			.peers
+			.get(&peer.into())
+			.ok_or_else(|| error::Error::PeerDoesntExist((*peer).into()))?
+			.sink;
 
 		sink.reserve_notification()
 			.await
@@ -258,7 +265,7 @@ impl NotificationService for NotificationHandle {
 			.map_err(|_| error::Error::ChannelClosed)
 			.map(|res| {
 				metrics::register_notification_sent(
-					&sink.metrics(),
+					sink.metrics(),
 					&self.protocol,
 					notification_len,
 				);
@@ -288,7 +295,7 @@ impl NotificationService for NotificationHandle {
 			match self.rx.next().await? {
 				InnerNotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx } =>
 					return Some(NotificationEvent::ValidateInboundSubstream {
-						peer,
+						peer: peer.into(),
 						handshake,
 						result_tx,
 					}),
@@ -307,7 +314,7 @@ impl NotificationService for NotificationHandle {
 						},
 					);
 					return Some(NotificationEvent::NotificationStreamOpened {
-						peer,
+						peer: peer.into(),
 						handshake,
 						direction,
 						negotiated_fallback,
@@ -315,10 +322,13 @@ impl NotificationService for NotificationHandle {
 				},
 				InnerNotificationEvent::NotificationStreamClosed { peer } => {
 					self.peers.remove(&peer);
-					return Some(NotificationEvent::NotificationStreamClosed { peer })
+					return Some(NotificationEvent::NotificationStreamClosed { peer: peer.into() })
 				},
 				InnerNotificationEvent::NotificationReceived { peer, notification } =>
-					return Some(NotificationEvent::NotificationReceived { peer, notification }),
+					return Some(NotificationEvent::NotificationReceived {
+						peer: peer.into(),
+						notification,
+					}),
 				InnerNotificationEvent::NotificationSinkReplaced { peer, sink } => {
 					match self.peers.get_mut(&peer) {
 						None => log::error!(
@@ -357,8 +367,8 @@ impl NotificationService for NotificationHandle {
 	}
 
 	/// Get message sink of the peer.
-	fn message_sink(&self, peer: &PeerId) -> Option<Box<dyn MessageSink>> {
-		match self.peers.get(peer) {
+	fn message_sink(&self, peer: &sc_network_types::PeerId) -> Option<Box<dyn MessageSink>> {
+		match self.peers.get(&peer.into()) {
 			Some(context) => Some(Box::new(context.shared_sink.clone())),
 			None => None,
 		}
@@ -417,7 +427,7 @@ pub(crate) struct ProtocolHandle {
 	delegate_to_peerset: bool,
 
 	/// Prometheus metrics.
-	metrics: Option<metrics::Metrics>,
+	metrics: Option<NotificationMetrics>,
 }
 
 pub(crate) enum ValidationCallResult {
@@ -432,8 +442,8 @@ impl ProtocolHandle {
 	}
 
 	/// Set metrics.
-	pub fn set_metrics(&mut self, metrics: Option<metrics::Metrics>) {
-		self.metrics = metrics;
+	pub fn set_metrics(&mut self, metrics: NotificationMetrics) {
+		self.metrics = Some(metrics);
 	}
 
 	/// Delegate validation to `Peerset`.
diff --git a/substrate/client/network/src/protocol/notifications/service/tests.rs b/substrate/client/network/src/protocol/notifications/service/tests.rs
index 238e0ccf566898d9c19431a0d00bc14284d449e4..f0157f6d28dd10d7382427b7464e4f478d912c03 100644
--- a/substrate/client/network/src/protocol/notifications/service/tests.rs
+++ b/substrate/client/network/src/protocol/notifications/service/tests.rs
@@ -38,7 +38,7 @@ async fn validate_and_accept_substream() {
 	if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
 		notif.next_event().await
 	{
-		assert_eq!(peer_id, peer);
+		assert_eq!(peer_id, peer.into());
 		assert_eq!(handshake, vec![1, 3, 3, 7]);
 		let _ = result_tx.send(ValidationResult::Accept).unwrap();
 	} else {
@@ -66,7 +66,7 @@ async fn substream_opened() {
 		direction,
 	}) = notif.next_event().await
 	{
-		assert_eq!(peer_id, peer);
+		assert_eq!(peer_id, peer.into());
 		assert_eq!(negotiated_fallback, None);
 		assert_eq!(handshake, vec![1, 3, 3, 7]);
 		assert_eq!(direction, Direction::Inbound);
@@ -92,7 +92,7 @@ async fn send_sync_notification() {
 	if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
 		notif.next_event().await
 	{
-		assert_eq!(peer_id, peer);
+		assert_eq!(peer_id, peer.into());
 		assert_eq!(handshake, vec![1, 3, 3, 7]);
 		let _ = result_tx.send(ValidationResult::Accept).unwrap();
 	} else {
@@ -112,7 +112,7 @@ async fn send_sync_notification() {
 		direction,
 	}) = notif.next_event().await
 	{
-		assert_eq!(peer_id, peer);
+		assert_eq!(peer_id, peer.into());
 		assert_eq!(negotiated_fallback, None);
 		assert_eq!(handshake, vec![1, 3, 3, 7]);
 		assert_eq!(direction, Direction::Inbound);
@@ -120,7 +120,7 @@ async fn send_sync_notification() {
 		panic!("invalid event received");
 	}
 
-	notif.send_sync_notification(&peer_id, vec![1, 3, 3, 8]);
+	notif.send_sync_notification(&peer_id.into(), vec![1, 3, 3, 8]);
 	assert_eq!(
 		sync_rx.next().await,
 		Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 8] })
@@ -144,7 +144,7 @@ async fn send_async_notification() {
 	if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
 		notif.next_event().await
 	{
-		assert_eq!(peer_id, peer);
+		assert_eq!(peer_id, peer.into());
 		assert_eq!(handshake, vec![1, 3, 3, 7]);
 		let _ = result_tx.send(ValidationResult::Accept).unwrap();
 	} else {
@@ -164,7 +164,7 @@ async fn send_async_notification() {
 		direction,
 	}) = notif.next_event().await
 	{
-		assert_eq!(peer_id, peer);
+		assert_eq!(peer_id, peer.into());
 		assert_eq!(negotiated_fallback, None);
 		assert_eq!(handshake, vec![1, 3, 3, 7]);
 		assert_eq!(direction, Direction::Inbound);
@@ -172,7 +172,7 @@ async fn send_async_notification() {
 		panic!("invalid event received");
 	}
 
-	notif.send_async_notification(&peer_id, vec![1, 3, 3, 9]).await.unwrap();
+	notif.send_async_notification(&peer_id.into(), vec![1, 3, 3, 9]).await.unwrap();
 	assert_eq!(
 		async_rx.next().await,
 		Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 9] })
@@ -181,24 +181,24 @@ async fn send_async_notification() {
 
 #[tokio::test]
 async fn send_sync_notification_to_non_existent_peer() {
-	let (proto, notif) = notification_service("/proto/1".into());
+	let (proto, mut notif) = notification_service("/proto/1".into());
 	let (_sink, _, _sync_rx) = NotificationsSink::new(PeerId::random());
 	let (_handle, _stream) = proto.split();
 	let peer = PeerId::random();
 
 	// as per the original implementation, the call doesn't fail
-	notif.send_sync_notification(&peer, vec![1, 3, 3, 7])
+	notif.send_sync_notification(&peer.into(), vec![1, 3, 3, 7])
 }
 
 #[tokio::test]
 async fn send_async_notification_to_non_existent_peer() {
-	let (proto, notif) = notification_service("/proto/1".into());
+	let (proto, mut notif) = notification_service("/proto/1".into());
 	let (_sink, _, _sync_rx) = NotificationsSink::new(PeerId::random());
 	let (_handle, _stream) = proto.split();
 	let peer = PeerId::random();
 
 	if let Err(error::Error::PeerDoesntExist(peer_id)) =
-		notif.send_async_notification(&peer, vec![1, 3, 3, 7]).await
+		notif.send_async_notification(&peer.into(), vec![1, 3, 3, 7]).await
 	{
 		assert_eq!(peer, peer_id);
 	} else {
@@ -223,7 +223,7 @@ async fn receive_notification() {
 	if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
 		notif.next_event().await
 	{
-		assert_eq!(peer_id, peer);
+		assert_eq!(peer_id, peer.into());
 		assert_eq!(handshake, vec![1, 3, 3, 7]);
 		let _ = result_tx.send(ValidationResult::Accept).unwrap();
 	} else {
@@ -243,7 +243,7 @@ async fn receive_notification() {
 		direction,
 	}) = notif.next_event().await
 	{
-		assert_eq!(peer_id, peer);
+		assert_eq!(peer_id, peer.into());
 		assert_eq!(negotiated_fallback, None);
 		assert_eq!(handshake, vec![1, 3, 3, 7]);
 		assert_eq!(direction, Direction::Inbound);
@@ -257,7 +257,7 @@ async fn receive_notification() {
 	if let Some(NotificationEvent::NotificationReceived { peer, notification }) =
 		notif.next_event().await
 	{
-		assert_eq!(peer_id, peer);
+		assert_eq!(peer_id, peer.into());
 		assert_eq!(notification, vec![1, 3, 3, 8]);
 	} else {
 		panic!("invalid event received");
@@ -281,7 +281,7 @@ async fn backpressure_works() {
 	if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
 		notif.next_event().await
 	{
-		assert_eq!(peer_id, peer);
+		assert_eq!(peer_id, peer.into());
 		assert_eq!(handshake, vec![1, 3, 3, 7]);
 		let _ = result_tx.send(ValidationResult::Accept).unwrap();
 	} else {
@@ -301,7 +301,7 @@ async fn backpressure_works() {
 		direction,
 	}) = notif.next_event().await
 	{
-		assert_eq!(peer_id, peer);
+		assert_eq!(peer_id, peer.into());
 		assert_eq!(negotiated_fallback, None);
 		assert_eq!(handshake, vec![1, 3, 3, 7]);
 		assert_eq!(direction, Direction::Inbound);
@@ -311,12 +311,15 @@ async fn backpressure_works() {
 
 	// fill the message buffer with messages
 	for i in 0..=ASYNC_NOTIFICATIONS_BUFFER_SIZE {
-		assert!(futures::poll!(notif.send_async_notification(&peer_id, vec![1, 3, 3, i as u8]))
-			.is_ready());
+		assert!(futures::poll!(
+			notif.send_async_notification(&peer_id.into(), vec![1, 3, 3, i as u8])
+		)
+		.is_ready());
 	}
 
 	// try to send one more message and verify that the call blocks
-	assert!(futures::poll!(notif.send_async_notification(&peer_id, vec![1, 3, 3, 9])).is_pending());
+	assert!(futures::poll!(notif.send_async_notification(&peer_id.into(), vec![1, 3, 3, 9]))
+		.is_pending());
 
 	// release one slot from the buffer for new message
 	assert_eq!(
@@ -325,7 +328,9 @@ async fn backpressure_works() {
 	);
 
 	// verify that a message can be sent
-	assert!(futures::poll!(notif.send_async_notification(&peer_id, vec![1, 3, 3, 9])).is_ready());
+	assert!(
+		futures::poll!(notif.send_async_notification(&peer_id.into(), vec![1, 3, 3, 9])).is_ready()
+	);
 }
 
 #[tokio::test]
@@ -345,7 +350,7 @@ async fn peer_disconnects_then_sync_notification_is_sent() {
 	if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
 		notif.next_event().await
 	{
-		assert_eq!(peer_id, peer);
+		assert_eq!(peer_id, peer.into());
 		assert_eq!(handshake, vec![1, 3, 3, 7]);
 		let _ = result_tx.send(ValidationResult::Accept).unwrap();
 	} else {
@@ -365,7 +370,7 @@ async fn peer_disconnects_then_sync_notification_is_sent() {
 		direction,
 	}) = notif.next_event().await
 	{
-		assert_eq!(peer_id, peer);
+		assert_eq!(peer_id, peer.into());
 		assert_eq!(negotiated_fallback, None);
 		assert_eq!(handshake, vec![1, 3, 3, 7]);
 		assert_eq!(direction, Direction::Inbound);
@@ -379,7 +384,7 @@ async fn peer_disconnects_then_sync_notification_is_sent() {
 	drop(sync_rx);
 
 	// as per documentation, error is not reported but the notification is silently dropped
-	notif.send_sync_notification(&peer_id, vec![1, 3, 3, 7]);
+	notif.send_sync_notification(&peer_id.into(), vec![1, 3, 3, 7]);
 }
 
 #[tokio::test]
@@ -399,7 +404,7 @@ async fn peer_disconnects_then_async_notification_is_sent() {
 	if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
 		notif.next_event().await
 	{
-		assert_eq!(peer_id, peer);
+		assert_eq!(peer_id, peer.into());
 		assert_eq!(handshake, vec![1, 3, 3, 7]);
 		let _ = result_tx.send(ValidationResult::Accept).unwrap();
 	} else {
@@ -419,7 +424,7 @@ async fn peer_disconnects_then_async_notification_is_sent() {
 		direction,
 	}) = notif.next_event().await
 	{
-		assert_eq!(peer_id, peer);
+		assert_eq!(peer_id, peer.into());
 		assert_eq!(negotiated_fallback, None);
 		assert_eq!(handshake, vec![1, 3, 3, 7]);
 		assert_eq!(direction, Direction::Inbound);
@@ -434,7 +439,7 @@ async fn peer_disconnects_then_async_notification_is_sent() {
 
 	// as per documentation, error is not reported but the notification is silently dropped
 	if let Err(error::Error::ConnectionClosed) =
-		notif.send_async_notification(&peer_id, vec![1, 3, 3, 7]).await
+		notif.send_async_notification(&peer_id.into(), vec![1, 3, 3, 7]).await
 	{
 	} else {
 		panic!("invalid state after calling `send_async_notification()` on closed connection")
@@ -460,7 +465,7 @@ async fn cloned_service_opening_substream_works() {
 	if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
 		notif1.next_event().await
 	{
-		assert_eq!(peer_id, peer);
+		assert_eq!(peer_id, peer.into());
 		assert_eq!(handshake, vec![1, 3, 3, 7]);
 		let _ = result_tx.send(ValidationResult::Accept).unwrap();
 	} else {
@@ -475,7 +480,7 @@ async fn cloned_service_opening_substream_works() {
 	if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
 		notif2.next_event().await
 	{
-		assert_eq!(peer_id, peer);
+		assert_eq!(peer_id, peer.into());
 		assert_eq!(handshake, vec![1, 3, 3, 7]);
 		result_tx.send(ValidationResult::Accept).unwrap();
 	} else {
@@ -505,7 +510,7 @@ async fn cloned_service_one_service_rejects_substream() {
 		if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
 			notif.next_event().await
 		{
-			assert_eq!(peer_id, peer);
+			assert_eq!(peer_id, peer.into());
 			assert_eq!(handshake, vec![1, 3, 3, 7]);
 			let _ = result_tx.send(ValidationResult::Accept).unwrap();
 		} else {
@@ -519,7 +524,7 @@ async fn cloned_service_one_service_rejects_substream() {
 	if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
 		notif3.next_event().await
 	{
-		assert_eq!(peer_id, peer);
+		assert_eq!(peer_id, peer.into());
 		assert_eq!(handshake, vec![1, 3, 3, 7]);
 		let _ = result_tx.send(ValidationResult::Reject).unwrap();
 	} else {
@@ -549,7 +554,7 @@ async fn cloned_service_opening_substream_sending_and_receiving_notifications_wo
 		if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
 			notif.next_event().await
 		{
-			assert_eq!(peer_id, peer);
+			assert_eq!(peer_id, peer.into());
 			assert_eq!(handshake, vec![1, 3, 3, 7]);
 			let _ = result_tx.send(ValidationResult::Accept).unwrap();
 		} else {
@@ -571,7 +576,7 @@ async fn cloned_service_opening_substream_sending_and_receiving_notifications_wo
 			direction,
 		}) = notif.next_event().await
 		{
-			assert_eq!(peer_id, peer);
+			assert_eq!(peer_id, peer.into());
 			assert_eq!(negotiated_fallback, None);
 			assert_eq!(handshake, vec![1, 3, 3, 7]);
 			assert_eq!(direction, Direction::Inbound);
@@ -586,16 +591,16 @@ async fn cloned_service_opening_substream_sending_and_receiving_notifications_wo
 		if let Some(NotificationEvent::NotificationReceived { peer, notification }) =
 			notif.next_event().await
 		{
-			assert_eq!(peer_id, peer);
+			assert_eq!(peer_id, peer.into());
 			assert_eq!(notification, vec![1, 3, 3, 8]);
 		} else {
 			panic!("invalid event received");
 		}
 	}
 
-	for (i, notif) in vec![&mut notif1, &mut notif2, &mut notif3].iter().enumerate() {
+	for (i, notif) in vec![&mut notif1, &mut notif2, &mut notif3].iter_mut().enumerate() {
 		// send notification from each service and verify peer receives it
-		notif.send_sync_notification(&peer_id, vec![1, 3, 3, i as u8]);
+		notif.send_sync_notification(&peer_id.into(), vec![1, 3, 3, i as u8]);
 		assert_eq!(
 			sync_rx.next().await,
 			Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, i as u8] })
@@ -608,7 +613,7 @@ async fn cloned_service_opening_substream_sending_and_receiving_notifications_wo
 	for notif in vec![&mut notif1, &mut notif2, &mut notif3] {
 		if let Some(NotificationEvent::NotificationStreamClosed { peer }) = notif.next_event().await
 		{
-			assert_eq!(peer_id, peer);
+			assert_eq!(peer_id, peer.into());
 		} else {
 			panic!("invalid event received");
 		}
@@ -632,7 +637,7 @@ async fn sending_notifications_using_notifications_sink_works() {
 	if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
 		notif.next_event().await
 	{
-		assert_eq!(peer_id, peer);
+		assert_eq!(peer_id, peer.into());
 		assert_eq!(handshake, vec![1, 3, 3, 7]);
 		let _ = result_tx.send(ValidationResult::Accept).unwrap();
 	} else {
@@ -652,7 +657,7 @@ async fn sending_notifications_using_notifications_sink_works() {
 		direction,
 	}) = notif.next_event().await
 	{
-		assert_eq!(peer_id, peer);
+		assert_eq!(peer_id, peer.into());
 		assert_eq!(negotiated_fallback, None);
 		assert_eq!(handshake, vec![1, 3, 3, 7]);
 		assert_eq!(direction, Direction::Inbound);
@@ -661,7 +666,7 @@ async fn sending_notifications_using_notifications_sink_works() {
 	}
 
 	// get a copy of the notification sink and send a synchronous notification using.
-	let sink = notif.message_sink(&peer_id).unwrap();
+	let sink = notif.message_sink(&peer_id.into()).unwrap();
 	sink.send_sync_notification(vec![1, 3, 3, 6]);
 
 	// send an asynchronous notification using the acquired notifications sink.
@@ -677,8 +682,8 @@ async fn sending_notifications_using_notifications_sink_works() {
 	);
 
 	// send notifications using the stored notification sink as well.
-	notif.send_sync_notification(&peer_id, vec![1, 3, 3, 8]);
-	notif.send_async_notification(&peer_id, vec![1, 3, 3, 9]).await.unwrap();
+	notif.send_sync_notification(&peer_id.into(), vec![1, 3, 3, 8]);
+	notif.send_async_notification(&peer_id.into(), vec![1, 3, 3, 9]).await.unwrap();
 
 	assert_eq!(
 		sync_rx.next().await,
@@ -693,7 +698,7 @@ async fn sending_notifications_using_notifications_sink_works() {
 #[test]
 fn try_to_get_notifications_sink_for_non_existent_peer() {
 	let (_proto, notif) = notification_service("/proto/1".into());
-	assert!(notif.message_sink(&PeerId::random()).is_none());
+	assert!(notif.message_sink(&sc_network_types::PeerId::random()).is_none());
 }
 
 #[tokio::test]
@@ -713,7 +718,7 @@ async fn notification_sink_replaced() {
 	if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
 		notif.next_event().await
 	{
-		assert_eq!(peer_id, peer);
+		assert_eq!(peer_id, peer.into());
 		assert_eq!(handshake, vec![1, 3, 3, 7]);
 		let _ = result_tx.send(ValidationResult::Accept).unwrap();
 	} else {
@@ -733,7 +738,7 @@ async fn notification_sink_replaced() {
 		direction,
 	}) = notif.next_event().await
 	{
-		assert_eq!(peer_id, peer);
+		assert_eq!(peer_id, peer.into());
 		assert_eq!(negotiated_fallback, None);
 		assert_eq!(handshake, vec![1, 3, 3, 7]);
 		assert_eq!(direction, Direction::Inbound);
@@ -742,7 +747,7 @@ async fn notification_sink_replaced() {
 	}
 
 	// get a copy of the notification sink and send a synchronous notification using.
-	let sink = notif.message_sink(&peer_id).unwrap();
+	let sink = notif.message_sink(&peer_id.into()).unwrap();
 	sink.send_sync_notification(vec![1, 3, 3, 6]);
 
 	// send an asynchronous notification using the acquired notifications sink.
@@ -758,8 +763,8 @@ async fn notification_sink_replaced() {
 	);
 
 	// send notifications using the stored notification sink as well.
-	notif.send_sync_notification(&peer_id, vec![1, 3, 3, 8]);
-	notif.send_async_notification(&peer_id, vec![1, 3, 3, 9]).await.unwrap();
+	notif.send_sync_notification(&peer_id.into(), vec![1, 3, 3, 8]);
+	notif.send_async_notification(&peer_id.into(), vec![1, 3, 3, 9]).await.unwrap();
 
 	assert_eq!(
 		sync_rx.next().await,
@@ -788,8 +793,8 @@ async fn notification_sink_replaced() {
 
 	// verify that using the `NotificationService` API automatically results in using the correct
 	// sink
-	notif.send_sync_notification(&peer_id, vec![1, 3, 3, 8]);
-	notif.send_async_notification(&peer_id, vec![1, 3, 3, 9]).await.unwrap();
+	notif.send_sync_notification(&peer_id.into(), vec![1, 3, 3, 8]);
+	notif.send_async_notification(&peer_id.into(), vec![1, 3, 3, 9]).await.unwrap();
 
 	assert_eq!(
 		new_sync_rx.next().await,
diff --git a/substrate/client/network/src/protocol/notifications/tests.rs b/substrate/client/network/src/protocol/notifications/tests.rs
index 0178bd75e8b7984e90b4408275a35abe1a4d2acc..a72b5b4a6748f0754279cef3f37dd37f601263dd 100644
--- a/substrate/client/network/src/protocol/notifications/tests.rs
+++ b/substrate/client/network/src/protocol/notifications/tests.rs
@@ -22,7 +22,10 @@ use crate::{
 	peer_store::PeerStore,
 	protocol::notifications::{Notifications, NotificationsOut, ProtocolConfig},
 	protocol_controller::{ProtoSetConfig, ProtocolController, SetId},
-	service::traits::{NotificationEvent, ValidationResult},
+	service::{
+		metrics::NotificationMetrics,
+		traits::{NotificationEvent, ValidationResult},
+	},
 };
 
 use futures::{future::BoxFuture, prelude::*};
@@ -40,6 +43,7 @@ use sc_utils::mpsc::tracing_unbounded;
 use std::{
 	iter,
 	pin::Pin,
+	sync::Arc,
 	task::{Context, Poll},
 	time::Duration,
 };
@@ -91,7 +95,7 @@ fn build_nodes() -> (Swarm<CustomProtoWithAddr>, Swarm<CustomProtoWithAddr>) {
 				reserved_only: false,
 			},
 			to_notifications,
-			Box::new(peer_store.handle()),
+			Arc::new(peer_store.handle()),
 		);
 
 		let (notif_handle, command_stream) = protocol_handle_pair.split();
@@ -99,7 +103,7 @@ fn build_nodes() -> (Swarm<CustomProtoWithAddr>, Swarm<CustomProtoWithAddr>) {
 			inner: Notifications::new(
 				vec![controller_handle],
 				from_controller,
-				None,
+				NotificationMetrics::new(None),
 				iter::once((
 					ProtocolConfig {
 						name: "/foo".into(),
diff --git a/substrate/client/network/src/protocol_controller.rs b/substrate/client/network/src/protocol_controller.rs
index 7f851fd8e9c99c10ff4b93e2f3ca6836ffc6de80..2c3e6744e328f3900790e747a3a8d998f4e6964e 100644
--- a/substrate/client/network/src/protocol_controller.rs
+++ b/substrate/client/network/src/protocol_controller.rs
@@ -41,19 +41,22 @@
 //! Even though this does not guarantee that `ProtocolController` and `Notifications` have the same
 //! view of the peers' states at any given moment, the eventual consistency is maintained.
 
+use crate::{
+	peer_store::{PeerStoreProvider, ProtocolHandle as ProtocolHandleT},
+	PeerId,
+};
+
 use futures::{channel::oneshot, future::Either, FutureExt, StreamExt};
-use libp2p::PeerId;
 use log::{debug, error, trace, warn};
 use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender};
 use sp_arithmetic::traits::SaturatedConversion;
 use std::{
 	collections::{HashMap, HashSet},
+	sync::Arc,
 	time::{Duration, Instant},
 };
 use wasm_timer::Delay;
 
-use crate::peer_store::PeerStoreProvider;
-
 /// Log target for this file.
 pub const LOG_TARGET: &str = "peerset";
 
@@ -230,6 +233,12 @@ impl ProtocolHandle {
 	}
 }
 
+impl ProtocolHandleT for ProtocolHandle {
+	fn disconnect_peer(&self, peer_id: sc_network_types::PeerId) {
+		let _ = self.actions_tx.unbounded_send(Action::DisconnectPeer(peer_id.into()));
+	}
+}
+
 /// Direction of a connection
 #[derive(Clone, Copy, Debug)]
 enum Direction {
@@ -289,7 +298,7 @@ pub struct ProtocolController {
 	to_notifications: TracingUnboundedSender<Message>,
 	/// `PeerStore` handle for checking peer reputation values and getting connection candidates
 	/// with highest reputation.
-	peer_store: Box<dyn PeerStoreProvider>,
+	peer_store: Arc<dyn PeerStoreProvider>,
 }
 
 impl ProtocolController {
@@ -298,12 +307,12 @@ impl ProtocolController {
 		set_id: SetId,
 		config: ProtoSetConfig,
 		to_notifications: TracingUnboundedSender<Message>,
-		peer_store: Box<dyn PeerStoreProvider>,
+		peer_store: Arc<dyn PeerStoreProvider>,
 	) -> (ProtocolHandle, ProtocolController) {
 		let (actions_tx, actions_rx) = tracing_unbounded("mpsc_api_protocol", 10_000);
 		let (events_tx, events_rx) = tracing_unbounded("mpsc_notifications_protocol", 10_000);
 		let handle = ProtocolHandle { actions_tx, events_tx };
-		peer_store.register_protocol(handle.clone());
+		peer_store.register_protocol(Arc::new(handle.clone()));
 		let reserved_nodes =
 			config.reserved_nodes.iter().map(|p| (*p, PeerState::NotConnected)).collect();
 		let controller = ProtocolController {
@@ -445,12 +454,12 @@ impl ProtocolController {
 	/// Report peer disconnect event to `PeerStore` for it to update peer's reputation accordingly.
 	/// Should only be called if the remote node disconnected us, not the other way around.
 	fn report_disconnect(&mut self, peer_id: PeerId) {
-		self.peer_store.report_disconnect(peer_id);
+		self.peer_store.report_disconnect(peer_id.into());
 	}
 
 	/// Ask `Peerset` if the peer has a reputation value not sufficient for connection with it.
 	fn is_banned(&self, peer_id: &PeerId) -> bool {
-		self.peer_store.is_banned(peer_id)
+		self.peer_store.is_banned(&peer_id.into())
 	}
 
 	/// Add the peer to the set of reserved peers. [`ProtocolController`] will try to always
@@ -665,7 +674,7 @@ impl ProtocolController {
 					self.accept_connection(peer_id, incoming_index);
 				},
 				PeerState::NotConnected =>
-					if self.peer_store.is_banned(&peer_id) {
+					if self.peer_store.is_banned(&peer_id.into()) {
 						self.reject_connection(peer_id, incoming_index);
 					} else {
 						*state = PeerState::Connected(Direction::Inbound);
@@ -778,7 +787,7 @@ impl ProtocolController {
 		self.reserved_nodes
 			.iter_mut()
 			.filter_map(|(peer_id, state)| {
-				(!state.is_connected() && !self.peer_store.is_banned(peer_id)).then(|| {
+				(!state.is_connected() && !self.peer_store.is_banned(&peer_id.into())).then(|| {
 					*state = PeerState::Connected(Direction::Outbound);
 					peer_id
 				})
@@ -803,8 +812,11 @@ impl ProtocolController {
 		let ignored = self
 			.reserved_nodes
 			.keys()
-			.collect::<HashSet<&PeerId>>()
-			.union(&self.nodes.keys().collect::<HashSet<&PeerId>>())
+			.map(From::from)
+			.collect::<HashSet<sc_network_types::PeerId>>()
+			.union(
+				&self.nodes.keys().map(From::from).collect::<HashSet<sc_network_types::PeerId>>(),
+			)
 			.cloned()
 			.collect();
 
@@ -813,16 +825,17 @@ impl ProtocolController {
 			.outgoing_candidates(available_slots, ignored)
 			.into_iter()
 			.filter_map(|peer_id| {
-				(!self.reserved_nodes.contains_key(&peer_id) && !self.nodes.contains_key(&peer_id))
-					.then_some(peer_id)
-					.or_else(|| {
-						error!(
-							target: LOG_TARGET,
-							"`PeerStore` returned a node we asked to ignore: {peer_id}.",
-						);
-						debug_assert!(false, "`PeerStore` returned a node we asked to ignore.");
-						None
-					})
+				(!self.reserved_nodes.contains_key(&peer_id.into()) &&
+					!self.nodes.contains_key(&peer_id.into()))
+				.then_some(peer_id)
+				.or_else(|| {
+					error!(
+						target: LOG_TARGET,
+						"`PeerStore` returned a node we asked to ignore: {peer_id}.",
+					);
+					debug_assert!(false, "`PeerStore` returned a node we asked to ignore.");
+					None
+				})
 			})
 			.collect::<Vec<_>>();
 
@@ -836,8 +849,8 @@ impl ProtocolController {
 
 		candidates.into_iter().take(available_slots).for_each(|peer_id| {
 			self.num_out += 1;
-			self.nodes.insert(peer_id, Direction::Outbound);
-			self.start_connection(peer_id);
+			self.nodes.insert(peer_id.into(), Direction::Outbound);
+			self.start_connection(peer_id.into());
 		})
 	}
 }
@@ -845,8 +858,10 @@ impl ProtocolController {
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use crate::{peer_store::PeerStoreProvider, ReputationChange};
-	use libp2p::PeerId;
+	use crate::{
+		peer_store::{PeerStoreProvider, ProtocolHandle as ProtocolHandleT},
+		PeerId, ReputationChange,
+	};
 	use sc_network_common::role::ObservedRole;
 	use sc_utils::mpsc::{tracing_unbounded, TryRecvError};
 	use std::collections::HashSet;
@@ -856,14 +871,16 @@ mod tests {
 		pub PeerStoreHandle {}
 
 		impl PeerStoreProvider for PeerStoreHandle {
-			fn is_banned(&self, peer_id: &PeerId) -> bool;
-			fn register_protocol(&self, protocol_handle: ProtocolHandle);
-			fn report_disconnect(&mut self, peer_id: PeerId);
-			fn set_peer_role(&mut self, peer_id: &PeerId, role: ObservedRole);
-			fn report_peer(&mut self, peer_id: PeerId, change: ReputationChange);
-			fn peer_reputation(&self, peer_id: &PeerId) -> i32;
-			fn peer_role(&self, peer_id: &PeerId) -> Option<ObservedRole>;
-			fn outgoing_candidates<'a>(&self, count: usize, ignored: HashSet<&'a PeerId>) -> Vec<PeerId>;
+			fn is_banned(&self, peer_id: &sc_network_types::PeerId) -> bool;
+			fn register_protocol(&self, protocol_handle: Arc<dyn ProtocolHandleT>);
+			fn report_disconnect(&self, peer_id: sc_network_types::PeerId);
+			fn set_peer_role(&self, peer_id: &sc_network_types::PeerId, role: ObservedRole);
+			fn report_peer(&self, peer_id: sc_network_types::PeerId, change: ReputationChange);
+			fn peer_reputation(&self, peer_id: &sc_network_types::PeerId) -> i32;
+			fn peer_role(&self, peer_id: &sc_network_types::PeerId) -> Option<ObservedRole>;
+			fn outgoing_candidates(&self, count: usize, ignored: HashSet<sc_network_types::PeerId>) -> Vec<sc_network_types::PeerId>;
+			fn num_known_peers(&self) -> usize;
+			fn add_known_peer(&self, peer_id: sc_network_types::PeerId);
 		}
 	}
 
@@ -887,7 +904,7 @@ mod tests {
 		peer_store.expect_report_disconnect().times(2).return_const(());
 
 		let (_handle, mut controller) =
-			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+			ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
 
 		// Add second reserved node at runtime (this currently calls `alloc_slots` internally).
 		controller.on_add_reserved_peer(reserved2);
@@ -948,7 +965,7 @@ mod tests {
 		peer_store.expect_is_banned().times(6).return_const(true);
 
 		let (_handle, mut controller) =
-			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+			ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
 
 		// Add second reserved node at runtime (this currently calls `alloc_slots` internally).
 		controller.on_add_reserved_peer(reserved2);
@@ -1000,7 +1017,7 @@ mod tests {
 		peer_store.expect_report_disconnect().times(2).return_const(());
 
 		let (_handle, mut controller) =
-			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+			ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
 
 		// Add second reserved node at runtime (this calls `alloc_slots` internally).
 		controller.on_add_reserved_peer(reserved2);
@@ -1042,7 +1059,7 @@ mod tests {
 	fn nodes_supplied_by_peer_store_are_connected() {
 		let peer1 = PeerId::random();
 		let peer2 = PeerId::random();
-		let candidates = vec![peer1, peer2];
+		let candidates = vec![peer1.into(), peer2.into()];
 
 		let config = ProtoSetConfig {
 			in_peers: 0,
@@ -1058,7 +1075,7 @@ mod tests {
 		peer_store.expect_outgoing_candidates().once().return_const(candidates);
 
 		let (_handle, mut controller) =
-			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+			ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
 
 		// Initiate connections.
 		controller.alloc_slots();
@@ -1092,7 +1109,7 @@ mod tests {
 		let reserved2 = PeerId::random();
 		let regular1 = PeerId::random();
 		let regular2 = PeerId::random();
-		let outgoing_candidates = vec![regular1, regular2];
+		let outgoing_candidates = vec![regular1.into(), regular2.into()];
 		let reserved_nodes = [reserved1, reserved2].iter().cloned().collect();
 
 		let config =
@@ -1105,7 +1122,7 @@ mod tests {
 		peer_store.expect_outgoing_candidates().once().return_const(outgoing_candidates);
 
 		let (_handle, mut controller) =
-			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+			ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
 
 		// Initiate connections.
 		controller.alloc_slots();
@@ -1128,8 +1145,8 @@ mod tests {
 		let peer1 = PeerId::random();
 		let peer2 = PeerId::random();
 		let peer3 = PeerId::random();
-		let candidates1 = vec![peer1, peer2];
-		let candidates2 = vec![peer3];
+		let candidates1 = vec![peer1.into(), peer2.into()];
+		let candidates2 = vec![peer3.into()];
 
 		let config = ProtoSetConfig {
 			in_peers: 0,
@@ -1147,7 +1164,7 @@ mod tests {
 		peer_store.expect_report_disconnect().times(2).return_const(());
 
 		let (_handle, mut controller) =
-			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+			ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
 
 		// Initiate connections.
 		controller.alloc_slots();
@@ -1214,7 +1231,7 @@ mod tests {
 		peer_store.expect_register_protocol().once().return_const(());
 
 		let (_handle, mut controller) =
-			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+			ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
 
 		// Initiate connections.
 		controller.alloc_slots();
@@ -1240,7 +1257,7 @@ mod tests {
 		peer_store.expect_register_protocol().once().return_const(());
 
 		let (_handle, mut controller) =
-			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+			ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
 
 		let peer = PeerId::random();
 		let incoming_index = IncomingIndex(1);
@@ -1262,7 +1279,7 @@ mod tests {
 	fn disabling_reserved_only_mode_allows_to_connect_to_peers() {
 		let peer1 = PeerId::random();
 		let peer2 = PeerId::random();
-		let candidates = vec![peer1, peer2];
+		let candidates = vec![peer1.into(), peer2.into()];
 
 		let config = ProtoSetConfig {
 			in_peers: 0,
@@ -1278,7 +1295,7 @@ mod tests {
 		peer_store.expect_outgoing_candidates().once().return_const(candidates);
 
 		let (_handle, mut controller) =
-			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+			ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
 
 		// Initiate connections.
 		controller.alloc_slots();
@@ -1309,7 +1326,7 @@ mod tests {
 		let reserved2 = PeerId::random();
 		let regular1 = PeerId::random();
 		let regular2 = PeerId::random();
-		let outgoing_candidates = vec![regular1];
+		let outgoing_candidates = vec![regular1.into()];
 
 		let config = ProtoSetConfig {
 			in_peers: 10,
@@ -1325,7 +1342,7 @@ mod tests {
 		peer_store.expect_outgoing_candidates().once().return_const(outgoing_candidates);
 
 		let (_handle, mut controller) =
-			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+			ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
 		assert_eq!(controller.num_out, 0);
 		assert_eq!(controller.num_in, 0);
 
@@ -1383,7 +1400,7 @@ mod tests {
 		peer_store.expect_register_protocol().once().return_const(());
 
 		let (_handle, mut controller) =
-			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+			ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
 		assert_eq!(controller.reserved_nodes.len(), 2);
 		assert_eq!(controller.nodes.len(), 0);
 		assert_eq!(controller.num_out, 0);
@@ -1416,7 +1433,7 @@ mod tests {
 		peer_store.expect_is_banned().times(2).return_const(false);
 
 		let (_handle, mut controller) =
-			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+			ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
 
 		// Initiate connections.
 		controller.alloc_slots();
@@ -1460,10 +1477,13 @@ mod tests {
 		let mut peer_store = MockPeerStoreHandle::new();
 		peer_store.expect_register_protocol().once().return_const(());
 		peer_store.expect_is_banned().times(2).return_const(false);
-		peer_store.expect_outgoing_candidates().once().return_const(Vec::new());
+		peer_store
+			.expect_outgoing_candidates()
+			.once()
+			.return_const(Vec::<sc_network_types::PeerId>::new());
 
 		let (_handle, mut controller) =
-			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+			ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
 
 		// Connect `peer1` as inbound, `peer2` as outbound.
 		controller.on_incoming_connection(peer1, IncomingIndex(1));
@@ -1493,7 +1513,7 @@ mod tests {
 	fn regular_nodes_stop_occupying_slots_when_become_reserved() {
 		let peer1 = PeerId::random();
 		let peer2 = PeerId::random();
-		let outgoing_candidates = vec![peer1];
+		let outgoing_candidates = vec![peer1.into()];
 
 		let config = ProtoSetConfig {
 			in_peers: 10,
@@ -1509,7 +1529,7 @@ mod tests {
 		peer_store.expect_outgoing_candidates().once().return_const(outgoing_candidates);
 
 		let (_handle, mut controller) =
-			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+			ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
 
 		// Connect `peer1` as outbound & `peer2` as inbound.
 		controller.alloc_slots();
@@ -1535,7 +1555,7 @@ mod tests {
 	fn disconnecting_regular_peers_work() {
 		let peer1 = PeerId::random();
 		let peer2 = PeerId::random();
-		let outgoing_candidates = vec![peer1];
+		let outgoing_candidates = vec![peer1.into()];
 
 		let config = ProtoSetConfig {
 			in_peers: 10,
@@ -1551,7 +1571,7 @@ mod tests {
 		peer_store.expect_outgoing_candidates().once().return_const(outgoing_candidates);
 
 		let (_handle, mut controller) =
-			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+			ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
 
 		// Connect `peer1` as outbound & `peer2` as inbound.
 		controller.alloc_slots();
@@ -1610,7 +1630,7 @@ mod tests {
 		peer_store.expect_outgoing_candidates().once().return_const(Vec::new());
 
 		let (_handle, mut controller) =
-			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+			ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
 
 		// Connect `reserved1` as inbound & `reserved2` as outbound.
 		controller.on_incoming_connection(reserved1, IncomingIndex(1));
@@ -1650,7 +1670,7 @@ mod tests {
 	fn dropping_regular_peers_work() {
 		let peer1 = PeerId::random();
 		let peer2 = PeerId::random();
-		let outgoing_candidates = vec![peer1];
+		let outgoing_candidates = vec![peer1.into()];
 
 		let config = ProtoSetConfig {
 			in_peers: 10,
@@ -1667,7 +1687,7 @@ mod tests {
 		peer_store.expect_report_disconnect().times(2).return_const(());
 
 		let (_handle, mut controller) =
-			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+			ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
 
 		// Connect `peer1` as outbound & `peer2` as inbound.
 		controller.alloc_slots();
@@ -1718,7 +1738,7 @@ mod tests {
 		peer_store.expect_outgoing_candidates().once().return_const(Vec::new());
 
 		let (_handle, mut controller) =
-			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+			ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
 
 		// Connect `reserved1` as inbound & `reserved2` as outbound.
 		controller.on_incoming_connection(reserved1, IncomingIndex(1));
@@ -1762,7 +1782,7 @@ mod tests {
 	fn incoming_request_for_connected_regular_node_switches_it_to_inbound() {
 		let regular1 = PeerId::random();
 		let regular2 = PeerId::random();
-		let outgoing_candidates = vec![regular1];
+		let outgoing_candidates = vec![regular1.into()];
 
 		let config = ProtoSetConfig {
 			in_peers: 10,
@@ -1778,7 +1798,7 @@ mod tests {
 		peer_store.expect_outgoing_candidates().once().return_const(outgoing_candidates);
 
 		let (_handle, mut controller) =
-			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+			ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
 		assert_eq!(controller.num_out, 0);
 		assert_eq!(controller.num_in, 0);
 
@@ -1814,7 +1834,7 @@ mod tests {
 	fn incoming_request_for_connected_node_is_rejected_if_its_banned() {
 		let regular1 = PeerId::random();
 		let regular2 = PeerId::random();
-		let outgoing_candidates = vec![regular1];
+		let outgoing_candidates = vec![regular1.into()];
 
 		let config = ProtoSetConfig {
 			in_peers: 10,
@@ -1831,7 +1851,7 @@ mod tests {
 		peer_store.expect_outgoing_candidates().once().return_const(outgoing_candidates);
 
 		let (_handle, mut controller) =
-			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+			ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
 		assert_eq!(controller.num_out, 0);
 		assert_eq!(controller.num_in, 0);
 
@@ -1867,7 +1887,7 @@ mod tests {
 	fn incoming_request_for_connected_node_is_rejected_if_no_slots_available() {
 		let regular1 = PeerId::random();
 		let regular2 = PeerId::random();
-		let outgoing_candidates = vec![regular1];
+		let outgoing_candidates = vec![regular1.into()];
 
 		let config = ProtoSetConfig {
 			in_peers: 1,
@@ -1883,7 +1903,7 @@ mod tests {
 		peer_store.expect_outgoing_candidates().once().return_const(outgoing_candidates);
 
 		let (_handle, mut controller) =
-			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+			ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
 		assert_eq!(controller.num_out, 0);
 		assert_eq!(controller.num_in, 0);
 
@@ -1935,7 +1955,7 @@ mod tests {
 		peer_store.expect_is_banned().once().return_const(false);
 
 		let (_handle, mut controller) =
-			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+			ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
 
 		// Connect `peer1` as inbound.
 		controller.on_incoming_connection(peer1, IncomingIndex(1));
@@ -1965,7 +1985,7 @@ mod tests {
 		peer_store.expect_is_banned().once().return_const(true);
 
 		let (_handle, mut controller) =
-			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+			ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
 
 		// Incoming request.
 		controller.on_incoming_connection(peer1, IncomingIndex(1));
@@ -1990,7 +2010,7 @@ mod tests {
 		peer_store.expect_is_banned().once().return_const(true);
 
 		let (_handle, mut controller) =
-			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+			ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
 		assert!(controller.reserved_nodes.contains_key(&reserved1));
 
 		// Incoming request.
@@ -2017,7 +2037,7 @@ mod tests {
 		peer_store.expect_outgoing_candidates().once().return_const(Vec::new());
 
 		let (_handle, mut controller) =
-			ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
+			ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
 		assert!(matches!(controller.reserved_nodes.get(&reserved1), Some(PeerState::NotConnected)));
 
 		// Initiate connections
diff --git a/substrate/client/network/src/request_responses.rs b/substrate/client/network/src/request_responses.rs
index 0cd1cf06bb33e5a04abe14c1ad9c2229baea82ab..fbf050a65713d1d66c361a1cde4e4a698089473d 100644
--- a/substrate/client/network/src/request_responses.rs
+++ b/substrate/client/network/src/request_responses.rs
@@ -36,6 +36,7 @@
 
 use crate::{
 	peer_store::{PeerStoreProvider, BANNED_THRESHOLD},
+	service::traits::RequestResponseConfig as RequestResponseConfigT,
 	types::ProtocolName,
 	ReputationChange,
 };
@@ -58,6 +59,7 @@ use std::{
 	io, iter,
 	ops::Deref,
 	pin::Pin,
+	sync::Arc,
 	task::{Context, Poll},
 	time::{Duration, Instant},
 };
@@ -129,11 +131,17 @@ pub struct ProtocolConfig {
 	pub inbound_queue: Option<async_channel::Sender<IncomingRequest>>,
 }
 
+impl RequestResponseConfigT for ProtocolConfig {
+	fn protocol_name(&self) -> &ProtocolName {
+		&self.name
+	}
+}
+
 /// A single request received by a peer on a request-response protocol.
 #[derive(Debug)]
 pub struct IncomingRequest {
 	/// Who sent the request.
-	pub peer: PeerId,
+	pub peer: sc_network_types::PeerId,
 
 	/// Request sent by the remote. Will always be smaller than
 	/// [`ProtocolConfig::max_request_size`].
@@ -288,7 +296,7 @@ pub struct RequestResponsesBehaviour {
 	send_feedback: HashMap<ProtocolRequestId, oneshot::Sender<()>>,
 
 	/// Primarily used to get a reputation of a node.
-	peer_store: Box<dyn PeerStoreProvider>,
+	peer_store: Arc<dyn PeerStoreProvider>,
 }
 
 /// Generated by the response builder and waiting to be processed.
@@ -305,7 +313,7 @@ impl RequestResponsesBehaviour {
 	/// the same protocol is passed twice.
 	pub fn new(
 		list: impl Iterator<Item = ProtocolConfig>,
-		peer_store: Box<dyn PeerStoreProvider>,
+		peer_store: Arc<dyn PeerStoreProvider>,
 	) -> Result<Self, RegisterError> {
 		let mut protocols = HashMap::new();
 		for protocol in list {
@@ -670,7 +678,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 							self.pending_responses_arrival_time
 								.insert((protocol.clone(), request_id).into(), Instant::now());
 
-							let reputation = self.peer_store.peer_reputation(&peer);
+							let reputation = self.peer_store.peer_reputation(&peer.into());
 
 							if reputation < BANNED_THRESHOLD {
 								log::debug!(
@@ -694,7 +702,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 								// because the latter allocates an extra slot for every cloned
 								// sender.
 								let _ = resp_builder.try_send(IncomingRequest {
-									peer,
+									peer: peer.into(),
 									payload: request,
 									pending_response: tx,
 								});
@@ -1093,7 +1101,7 @@ mod tests {
 			.multiplex(libp2p::yamux::Config::default())
 			.boxed();
 
-		let behaviour = RequestResponsesBehaviour::new(list, Box::new(MockPeerStore {})).unwrap();
+		let behaviour = RequestResponsesBehaviour::new(list, Arc::new(MockPeerStore {})).unwrap();
 
 		let runtime = tokio::runtime::Runtime::new().unwrap();
 		let mut swarm = SwarmBuilder::with_executor(
diff --git a/substrate/client/network/bitswap/src/schema/bitswap.v1.2.0.proto b/substrate/client/network/src/schema/bitswap.v1.2.0.proto
similarity index 100%
rename from substrate/client/network/bitswap/src/schema/bitswap.v1.2.0.proto
rename to substrate/client/network/src/schema/bitswap.v1.2.0.proto
diff --git a/substrate/client/network/src/service.rs b/substrate/client/network/src/service.rs
index 47e23337633ba39d2aaf78bafa5920448a211b78..807c5b5a80afa682ef07b9ba2326a528b5d1c8f5 100644
--- a/substrate/client/network/src/service.rs
+++ b/substrate/client/network/src/service.rs
@@ -29,29 +29,33 @@
 
 use crate::{
 	behaviour::{self, Behaviour, BehaviourOut},
-	config::{parse_addr, FullNetworkConfiguration, MultiaddrWithPeerId, Params, TransportConfig},
+	bitswap::BitswapRequestHandler,
+	config::{
+		parse_addr, FullNetworkConfiguration, IncomingRequest, MultiaddrWithPeerId,
+		NonDefaultSetConfig, NotificationHandshake, Params, SetConfig, TransportConfig,
+	},
 	discovery::DiscoveryConfig,
 	error::Error,
 	event::{DhtEvent, Event},
 	network_state::{
 		NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer,
 	},
-	peer_store::{PeerStoreHandle, PeerStoreProvider},
+	peer_store::{PeerStore, PeerStoreProvider},
 	protocol::{self, NotifsHandlerError, Protocol, Ready},
 	protocol_controller::{self, ProtoSetConfig, ProtocolController, SetId},
-	request_responses::{IfDisconnected, RequestFailure},
+	request_responses::{IfDisconnected, ProtocolConfig as RequestResponseConfig, RequestFailure},
 	service::{
 		signature::{Signature, SigningError},
 		traits::{
-			NetworkDHTProvider, NetworkEventStream, NetworkNotification, NetworkPeers,
-			NetworkRequest, NetworkSigner, NetworkStateInfo, NetworkStatus, NetworkStatusProvider,
-			NotificationSender as NotificationSenderT, NotificationSenderError,
-			NotificationSenderReady as NotificationSenderReadyT,
+			BandwidthSink, NetworkBackend, NetworkDHTProvider, NetworkEventStream, NetworkPeers,
+			NetworkRequest, NetworkService as NetworkServiceT, NetworkSigner, NetworkStateInfo,
+			NetworkStatus, NetworkStatusProvider, NotificationSender as NotificationSenderT,
+			NotificationSenderError, NotificationSenderReady as NotificationSenderReadyT,
 		},
 	},
 	transport,
 	types::ProtocolName,
-	ReputationChange,
+	Multiaddr, NotificationService, PeerId, ReputationChange,
 };
 
 use codec::DecodeAll;
@@ -69,12 +73,13 @@ use libp2p::{
 		AddressScore, ConnectionError, ConnectionId, ConnectionLimits, DialError, Executor,
 		ListenError, NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent, THandlerErr,
 	},
-	Multiaddr, PeerId,
 };
 use log::{debug, error, info, trace, warn};
 use metrics::{Histogram, MetricSources, Metrics};
 use parking_lot::Mutex;
+use prometheus_endpoint::Registry;
 
+use sc_client_api::BlockBackend;
 use sc_network_common::{
 	role::{ObservedRole, Roles},
 	ExHashT,
@@ -94,18 +99,34 @@ use std::{
 		atomic::{AtomicUsize, Ordering},
 		Arc,
 	},
+	time::Duration,
 };
 
 pub use behaviour::{InboundFailure, OutboundFailure, ResponseFailure};
 pub use libp2p::identity::{DecodingError, Keypair, PublicKey};
+pub use metrics::NotificationMetrics;
 pub use protocol::NotificationsSink;
 
-mod metrics;
-mod out_events;
+pub(crate) mod metrics;
+pub(crate) mod out_events;
 
 pub mod signature;
 pub mod traits;
 
+struct Libp2pBandwidthSink {
+	sink: Arc<transport::BandwidthSinks>,
+}
+
+impl BandwidthSink for Libp2pBandwidthSink {
+	fn total_inbound(&self) -> u64 {
+		self.sink.total_inbound()
+	}
+
+	fn total_outbound(&self) -> u64 {
+		self.sink.total_outbound()
+	}
+}
+
 /// Substrate network service. Handles network IO and manages connectivity.
 pub struct NetworkService<B: BlockT + 'static, H: ExHashT> {
 	/// Number of peers we're connected to.
@@ -119,9 +140,7 @@ pub struct NetworkService<B: BlockT + 'static, H: ExHashT> {
 	/// The `KeyPair` that defines the `PeerId` of the local node.
 	local_identity: Keypair,
 	/// Bandwidth logging system. Can be queried to know the average bandwidth consumed.
-	bandwidth: Arc<transport::BandwidthSinks>,
-	/// Used to query and report reputation changes.
-	peer_store_handle: PeerStoreHandle,
+	bandwidth: Arc<dyn BandwidthSink>,
 	/// Channel that sends messages to the actual worker.
 	to_worker: TracingUnboundedSender<ServiceToWorkerMsg>,
 	/// Protocol name -> `SetId` mapping for notification protocols. The map never changes after
@@ -132,6 +151,8 @@ pub struct NetworkService<B: BlockT + 'static, H: ExHashT> {
 	protocol_handles: Vec<protocol_controller::ProtocolHandle>,
 	/// Shortcut to sync protocol handle (`protocol_handles[0]`).
 	sync_protocol_handle: protocol_controller::ProtocolHandle,
+	/// Handle to `PeerStore`.
+	peer_store_handle: Arc<dyn PeerStoreProvider>,
 	/// Marker to pin the `H` generic. Serves no purpose except to not break backwards
 	/// compatibility.
 	_marker: PhantomData<H>,
@@ -139,6 +160,91 @@ pub struct NetworkService<B: BlockT + 'static, H: ExHashT> {
 	_block: PhantomData<B>,
 }
 
+#[async_trait::async_trait]
+impl<B, H> NetworkBackend<B, H> for NetworkWorker<B, H>
+where
+	B: BlockT + 'static,
+	H: ExHashT,
+{
+	type NotificationProtocolConfig = NonDefaultSetConfig;
+	type RequestResponseProtocolConfig = RequestResponseConfig;
+	type NetworkService<Block, Hash> = Arc<NetworkService<B, H>>;
+	type PeerStore = PeerStore;
+	type BitswapConfig = RequestResponseConfig;
+
+	fn new(params: Params<B, H, Self>) -> Result<Self, Error>
+	where
+		Self: Sized,
+	{
+		NetworkWorker::new(params)
+	}
+
+	/// Get handle to `NetworkService` of the `NetworkBackend`.
+	fn network_service(&self) -> Arc<dyn NetworkServiceT> {
+		self.service.clone()
+	}
+
+	/// Create `PeerStore`.
+	fn peer_store(bootnodes: Vec<sc_network_types::PeerId>) -> Self::PeerStore {
+		PeerStore::new(bootnodes.into_iter().map(From::from).collect())
+	}
+
+	fn register_notification_metrics(registry: Option<&Registry>) -> NotificationMetrics {
+		NotificationMetrics::new(registry)
+	}
+
+	fn bitswap_server(
+		client: Arc<dyn BlockBackend<B> + Send + Sync>,
+	) -> (Pin<Box<dyn Future<Output = ()> + Send>>, Self::BitswapConfig) {
+		let (handler, protocol_config) = BitswapRequestHandler::new(client.clone());
+
+		(Box::pin(async move { handler.run().await }), protocol_config)
+	}
+
+	/// Create notification protocol configuration.
+	fn notification_config(
+		protocol_name: ProtocolName,
+		fallback_names: Vec<ProtocolName>,
+		max_notification_size: u64,
+		handshake: Option<NotificationHandshake>,
+		set_config: SetConfig,
+		_metrics: NotificationMetrics,
+		_peerstore_handle: Arc<dyn PeerStoreProvider>,
+	) -> (Self::NotificationProtocolConfig, Box<dyn NotificationService>) {
+		NonDefaultSetConfig::new(
+			protocol_name,
+			fallback_names,
+			max_notification_size,
+			handshake,
+			set_config,
+		)
+	}
+
+	/// Create request-response protocol configuration.
+	fn request_response_config(
+		protocol_name: ProtocolName,
+		fallback_names: Vec<ProtocolName>,
+		max_request_size: u64,
+		max_response_size: u64,
+		request_timeout: Duration,
+		inbound_queue: Option<async_channel::Sender<IncomingRequest>>,
+	) -> Self::RequestResponseProtocolConfig {
+		Self::RequestResponseProtocolConfig {
+			name: protocol_name,
+			fallback_names,
+			max_request_size,
+			max_response_size,
+			request_timeout,
+			inbound_queue,
+		}
+	}
+
+	/// Start [`NetworkBackend`] event loop.
+	async fn run(mut self) {
+		self.run().await
+	}
+}
+
 impl<B, H> NetworkWorker<B, H>
 where
 	B: BlockT + 'static,
@@ -149,11 +255,13 @@ where
 	/// Returns a `NetworkWorker` that implements `Future` and must be regularly polled in order
 	/// for the network processing to advance. From it, you can extract a `NetworkService` using
 	/// `worker.service()`. The `NetworkService` can be shared through the codebase.
-	pub fn new(params: Params<B>) -> Result<Self, Error> {
+	pub fn new(params: Params<B, H, Self>) -> Result<Self, Error> {
+		let peer_store_handle = params.network_config.peer_store_handle();
 		let FullNetworkConfiguration {
 			notification_protocols,
 			request_response_protocols,
 			mut network_config,
+			..
 		} = params.network_config;
 
 		// Private and public keys configuration.
@@ -164,14 +272,14 @@ where
 		network_config.boot_nodes = network_config
 			.boot_nodes
 			.into_iter()
-			.filter(|boot_node| boot_node.peer_id != local_peer_id)
+			.filter(|boot_node| boot_node.peer_id != local_peer_id.into())
 			.collect();
 		network_config.default_peers_set.reserved_nodes = network_config
 			.default_peers_set
 			.reserved_nodes
 			.into_iter()
 			.filter(|reserved_node| {
-				if reserved_node.peer_id == local_peer_id {
+				if reserved_node.peer_id == local_peer_id.into() {
 					warn!(
 						target: "sub-libp2p",
 						"Local peer ID used in reserved node, ignoring: {}",
@@ -219,6 +327,7 @@ where
 			"🏷  Local node identity is: {}",
 			local_peer_id.to_base58(),
 		);
+		log::info!(target: "sub-libp2p", "Running libp2p network backend");
 
 		let (transport, bandwidth) = {
 			let config_mem = match network_config.transport {
@@ -284,7 +393,7 @@ where
 					reserved_nodes: set_config
 						.reserved_nodes
 						.iter()
-						.map(|node| node.peer_id)
+						.map(|node| node.peer_id.into())
 						.collect(),
 					reserved_only: set_config.non_reserved_mode.is_reserved_only(),
 				};
@@ -293,7 +402,7 @@ where
 					SetId::from(set_id),
 					proto_set_config,
 					to_notifications.clone(),
-					Box::new(params.peer_store.clone()),
+					Arc::clone(&peer_store_handle),
 				)
 			})
 			.unzip();
@@ -354,8 +463,8 @@ where
 			{
 				Err(Error::DuplicateBootnode {
 					address: bootnode.multiaddr.clone(),
-					first_id: bootnode.peer_id,
-					second_id: other.peer_id,
+					first_id: bootnode.peer_id.into(),
+					second_id: other.peer_id.into(),
 				})
 			} else {
 				Ok(())
@@ -367,7 +476,7 @@ where
 
 		for bootnode in network_config.boot_nodes.iter() {
 			boot_node_ids
-				.entry(bootnode.peer_id)
+				.entry(bootnode.peer_id.into())
 				.or_default()
 				.push(bootnode.multiaddr.clone());
 		}
@@ -379,10 +488,10 @@ where
 
 		let (protocol, notif_protocol_handles) = Protocol::new(
 			From::from(&params.role),
-			&params.metrics_registry,
+			params.notification_metrics,
 			notification_protocols,
 			params.block_announce_config,
-			params.peer_store.clone(),
+			Arc::clone(&peer_store_handle),
 			protocol_handles.clone(),
 			from_protocol_controllers,
 		)?;
@@ -394,7 +503,12 @@ where
 
 			let discovery_config = {
 				let mut config = DiscoveryConfig::new(local_public.to_peer_id());
-				config.with_permanent_addresses(known_addresses);
+				config.with_permanent_addresses(
+					known_addresses
+						.iter()
+						.map(|(peer, address)| (peer.into(), address.clone()))
+						.collect::<Vec<_>>(),
+				);
 				config.discovery_limit(u64::from(network_config.default_peers_set.out_peers) + 15);
 				config.with_kademlia(
 					params.genesis_hash,
@@ -433,7 +547,7 @@ where
 					local_public,
 					discovery_config,
 					request_response_protocols,
-					params.peer_store.clone(),
+					Arc::clone(&peer_store_handle),
 					external_addresses.clone(),
 				);
 
@@ -474,7 +588,7 @@ where
 				.per_connection_event_buffer_size(24)
 				.max_negotiating_inbound_streams(2048);
 
-			(builder.build(), bandwidth)
+			(builder.build(), Arc::new(Libp2pBandwidthSink { sink: bandwidth }))
 		};
 
 		// Initialize the metrics.
@@ -518,7 +632,7 @@ where
 			notification_protocol_ids,
 			protocol_handles,
 			sync_protocol_handle,
-			peer_store_handle: params.peer_store.clone(),
+			peer_store_handle: Arc::clone(&peer_store_handle),
 			_marker: PhantomData,
 			_block: Default::default(),
 		});
@@ -533,7 +647,7 @@ where
 			metrics,
 			boot_node_ids,
 			reported_invalid_boot_nodes: Default::default(),
-			peer_store_handle: params.peer_store,
+			peer_store_handle: Arc::clone(&peer_store_handle),
 			notif_protocol_handles,
 			_marker: Default::default(),
 			_block: Default::default(),
@@ -701,7 +815,7 @@ where
 
 	/// Removes a `PeerId` from the list of reserved peers.
 	pub fn remove_reserved_peer(&self, peer: PeerId) {
-		self.service.remove_reserved_peer(peer);
+		self.service.remove_reserved_peer(peer.into());
 	}
 
 	/// Adds a `PeerId` and its `Multiaddr` as reserved.
@@ -731,18 +845,6 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkService<B, H> {
 		}
 	}
 
-	/// Get the list of reserved peers.
-	///
-	/// Returns an error if the `NetworkWorker` is no longer running.
-	pub async fn reserved_peers(&self) -> Result<Vec<PeerId>, ()> {
-		let (tx, rx) = oneshot::channel();
-
-		self.sync_protocol_handle.reserved_peers(tx);
-
-		// The channel can only be closed if `ProtocolController` no longer exists.
-		rx.await.map_err(|_| ())
-	}
-
 	/// Utility function to extract `PeerId` from each `Multiaddr` for peer set updates.
 	///
 	/// Returns an `Err` if one of the given addresses is invalid or contains an
@@ -788,8 +890,8 @@ where
 	}
 
 	/// Returns the local Peer ID.
-	fn local_peer_id(&self) -> PeerId {
-		self.local_peer_id
+	fn local_peer_id(&self) -> sc_network_types::PeerId {
+		self.local_peer_id.into()
 	}
 }
 
@@ -798,8 +900,29 @@ where
 	B: sp_runtime::traits::Block,
 	H: ExHashT,
 {
-	fn sign_with_local_identity(&self, msg: impl AsRef<[u8]>) -> Result<Signature, SigningError> {
-		Signature::sign_message(msg.as_ref(), &self.local_identity)
+	fn sign_with_local_identity(&self, msg: Vec<u8>) -> Result<Signature, SigningError> {
+		let public_key = self.local_identity.public();
+		let bytes = self.local_identity.sign(msg.as_ref())?;
+
+		Ok(Signature {
+			public_key: crate::service::signature::PublicKey::Libp2p(public_key),
+			bytes,
+		})
+	}
+
+	fn verify(
+		&self,
+		peer_id: sc_network_types::PeerId,
+		public_key: &Vec<u8>,
+		signature: &Vec<u8>,
+		message: &Vec<u8>,
+	) -> Result<bool, String> {
+		let public_key =
+			PublicKey::try_decode_protobuf(&public_key).map_err(|error| error.to_string())?;
+		let peer_id: PeerId = peer_id.into();
+		let remote: libp2p::PeerId = public_key.to_peer_id();
+
+		Ok(peer_id == remote && public_key.verify(message, signature))
 	}
 }
 
@@ -844,39 +967,55 @@ where
 			Err(_) => Err(()),
 		}
 	}
+
+	async fn network_state(&self) -> Result<NetworkState, ()> {
+		let (tx, rx) = oneshot::channel();
+
+		let _ = self
+			.to_worker
+			.unbounded_send(ServiceToWorkerMsg::NetworkState { pending_response: tx });
+
+		match rx.await {
+			Ok(v) => v.map_err(|_| ()),
+			// The channel can only be closed if the network worker no longer exists.
+			Err(_) => Err(()),
+		}
+	}
 }
 
+#[async_trait::async_trait]
 impl<B, H> NetworkPeers for NetworkService<B, H>
 where
 	B: BlockT + 'static,
 	H: ExHashT,
 {
-	fn set_authorized_peers(&self, peers: HashSet<PeerId>) {
-		self.sync_protocol_handle.set_reserved_peers(peers);
+	fn set_authorized_peers(&self, peers: HashSet<sc_network_types::PeerId>) {
+		self.sync_protocol_handle
+			.set_reserved_peers(peers.iter().map(|peer| (*peer).into()).collect());
 	}
 
 	fn set_authorized_only(&self, reserved_only: bool) {
 		self.sync_protocol_handle.set_reserved_only(reserved_only);
 	}
 
-	fn add_known_address(&self, peer_id: PeerId, addr: Multiaddr) {
+	fn add_known_address(&self, peer_id: sc_network_types::PeerId, addr: Multiaddr) {
 		let _ = self
 			.to_worker
-			.unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr));
+			.unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id.into(), addr));
 	}
 
-	fn report_peer(&self, peer_id: PeerId, cost_benefit: ReputationChange) {
+	fn report_peer(&self, peer_id: sc_network_types::PeerId, cost_benefit: ReputationChange) {
 		self.peer_store_handle.clone().report_peer(peer_id, cost_benefit);
 	}
 
-	fn peer_reputation(&self, peer_id: &PeerId) -> i32 {
+	fn peer_reputation(&self, peer_id: &sc_network_types::PeerId) -> i32 {
 		self.peer_store_handle.peer_reputation(peer_id)
 	}
 
-	fn disconnect_peer(&self, peer_id: PeerId, protocol: ProtocolName) {
+	fn disconnect_peer(&self, peer_id: sc_network_types::PeerId, protocol: ProtocolName) {
 		let _ = self
 			.to_worker
-			.unbounded_send(ServiceToWorkerMsg::DisconnectPeer(peer_id, protocol));
+			.unbounded_send(ServiceToWorkerMsg::DisconnectPeer(peer_id.into(), protocol));
 	}
 
 	fn accept_unreserved_peers(&self) {
@@ -889,19 +1028,21 @@ where
 
 	fn add_reserved_peer(&self, peer: MultiaddrWithPeerId) -> Result<(), String> {
 		// Make sure the local peer ID is never added as a reserved peer.
-		if peer.peer_id == self.local_peer_id {
+		if peer.peer_id == self.local_peer_id.into() {
 			return Err("Local peer ID cannot be added as a reserved peer.".to_string())
 		}
 
-		let _ = self
-			.to_worker
-			.unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer.peer_id, peer.multiaddr));
-		self.sync_protocol_handle.add_reserved_peer(peer.peer_id);
+		let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AddKnownAddress(
+			peer.peer_id.into(),
+			peer.multiaddr,
+		));
+		self.sync_protocol_handle.add_reserved_peer(peer.peer_id.into());
+
 		Ok(())
 	}
 
-	fn remove_reserved_peer(&self, peer_id: PeerId) {
-		self.sync_protocol_handle.remove_reserved_peer(peer_id);
+	fn remove_reserved_peer(&self, peer_id: sc_network_types::PeerId) {
+		self.sync_protocol_handle.remove_reserved_peer(peer_id.into());
 	}
 
 	fn set_reserved_peers(
@@ -915,7 +1056,8 @@ where
 
 		let peers_addrs = self.split_multiaddr_and_peer_id(peers)?;
 
-		let mut peers: HashSet<PeerId> = HashSet::with_capacity(peers_addrs.len());
+		let mut peers: HashSet<sc_network_types::PeerId> =
+			HashSet::with_capacity(peers_addrs.len());
 
 		for (peer_id, addr) in peers_addrs.into_iter() {
 			// Make sure the local peer ID is never added to the PSM.
@@ -923,7 +1065,7 @@ where
 				return Err("Local peer ID cannot be added as a reserved peer.".to_string())
 			}
 
-			peers.insert(peer_id);
+			peers.insert(peer_id.into());
 
 			if !addr.is_empty() {
 				let _ = self
@@ -932,7 +1074,8 @@ where
 			}
 		}
 
-		self.protocol_handles[usize::from(*set_id)].set_reserved_peers(peers);
+		self.protocol_handles[usize::from(*set_id)]
+			.set_reserved_peers(peers.iter().map(|peer| (*peer).into()).collect());
 
 		Ok(())
 	}
@@ -972,7 +1115,7 @@ where
 	fn remove_peers_from_reserved_set(
 		&self,
 		protocol: ProtocolName,
-		peers: Vec<PeerId>,
+		peers: Vec<sc_network_types::PeerId>,
 	) -> Result<(), String> {
 		let Some(set_id) = self.notification_protocol_ids.get(&protocol) else {
 			return Err(format!(
@@ -982,7 +1125,7 @@ where
 		};
 
 		for peer_id in peers.into_iter() {
-			self.protocol_handles[usize::from(*set_id)].remove_reserved_peer(peer_id);
+			self.protocol_handles[usize::from(*set_id)].remove_reserved_peer(peer_id.into());
 		}
 
 		Ok(())
@@ -992,15 +1135,33 @@ where
 		self.num_connected.load(Ordering::Relaxed)
 	}
 
-	fn peer_role(&self, peer_id: PeerId, handshake: Vec<u8>) -> Option<ObservedRole> {
+	fn peer_role(
+		&self,
+		peer_id: sc_network_types::PeerId,
+		handshake: Vec<u8>,
+	) -> Option<ObservedRole> {
 		match Roles::decode_all(&mut &handshake[..]) {
 			Ok(role) => Some(role.into()),
 			Err(_) => {
 				log::debug!(target: "sub-libp2p", "handshake doesn't contain peer role: {handshake:?}");
-				self.peer_store_handle.peer_role(&peer_id)
+				self.peer_store_handle.peer_role(&(peer_id.into()))
 			},
 		}
 	}
+
+	/// Get the list of reserved peers.
+	///
+	/// Returns an error if the `NetworkWorker` is no longer running.
+	async fn reserved_peers(&self) -> Result<Vec<sc_network_types::PeerId>, ()> {
+		let (tx, rx) = oneshot::channel();
+
+		self.sync_protocol_handle.reserved_peers(tx);
+
+		// The channel can only be closed if `ProtocolController` no longer exists.
+		rx.await
+			.map(|peers| peers.into_iter().map(From::from).collect())
+			.map_err(|_| ())
+	}
 }
 
 impl<B, H> NetworkEventStream for NetworkService<B, H>
@@ -1015,28 +1176,6 @@ where
 	}
 }
 
-impl<B, H> NetworkNotification for NetworkService<B, H>
-where
-	B: BlockT + 'static,
-	H: ExHashT,
-{
-	fn write_notification(&self, _target: PeerId, _protocol: ProtocolName, _message: Vec<u8>) {
-		unimplemented!();
-	}
-
-	fn notification_sender(
-		&self,
-		_target: PeerId,
-		_protocol: ProtocolName,
-	) -> Result<Box<dyn NotificationSenderT>, NotificationSenderError> {
-		unimplemented!();
-	}
-
-	fn set_notification_handshake(&self, _protocol: ProtocolName, _handshake: Vec<u8>) {
-		unimplemented!();
-	}
-}
-
 #[async_trait::async_trait]
 impl<B, H> NetworkRequest for NetworkService<B, H>
 where
@@ -1045,7 +1184,7 @@ where
 {
 	async fn request(
 		&self,
-		target: PeerId,
+		target: sc_network_types::PeerId,
 		protocol: ProtocolName,
 		request: Vec<u8>,
 		fallback_request: Option<(Vec<u8>, ProtocolName)>,
@@ -1053,7 +1192,7 @@ where
 	) -> Result<(Vec<u8>, ProtocolName), RequestFailure> {
 		let (tx, rx) = oneshot::channel();
 
-		self.start_request(target, protocol, request, fallback_request, tx, connect);
+		self.start_request(target.into(), protocol, request, fallback_request, tx, connect);
 
 		match rx.await {
 			Ok(v) => v,
@@ -1066,7 +1205,7 @@ where
 
 	fn start_request(
 		&self,
-		target: PeerId,
+		target: sc_network_types::PeerId,
 		protocol: ProtocolName,
 		request: Vec<u8>,
 		fallback_request: Option<(Vec<u8>, ProtocolName)>,
@@ -1074,7 +1213,7 @@ where
 		connect: IfDisconnected,
 	) {
 		let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::Request {
-			target,
+			target: target.into(),
 			protocol: protocol.into(),
 			request,
 			fallback_request,
@@ -1204,7 +1343,7 @@ where
 	/// Boot nodes that we already have reported as invalid.
 	reported_invalid_boot_nodes: HashSet<PeerId>,
 	/// Peer reputation store handle.
-	peer_store_handle: PeerStoreHandle,
+	peer_store_handle: Arc<dyn PeerStoreProvider>,
 	/// Notification protocol handles.
 	notif_protocol_handles: Vec<protocol::ProtocolHandle>,
 	/// Marker to pin the `H` generic. Serves no purpose except to not break backwards
@@ -1394,7 +1533,7 @@ where
 				},
 			SwarmEvent::Behaviour(BehaviourOut::ReputationChanges { peer, changes }) => {
 				for change in changes {
-					self.peer_store_handle.report_peer(peer, change);
+					self.peer_store_handle.report_peer(peer.into(), change);
 				}
 			},
 			SwarmEvent::Behaviour(BehaviourOut::PeerIdentify {
@@ -1417,10 +1556,10 @@ where
 						.behaviour_mut()
 						.add_self_reported_address_to_dht(&peer_id, &protocols, addr);
 				}
-				self.peer_store_handle.add_known_peer(peer_id);
+				self.peer_store_handle.add_known_peer(peer_id.into());
 			},
 			SwarmEvent::Behaviour(BehaviourOut::Discovered(peer_id)) => {
-				self.peer_store_handle.add_known_peer(peer_id);
+				self.peer_store_handle.add_known_peer(peer_id.into());
 			},
 			SwarmEvent::Behaviour(BehaviourOut::RandomKademliaStarted) => {
 				if let Some(metrics) = self.metrics.as_ref() {
@@ -1720,7 +1859,7 @@ where
 {
 }
 
-fn ensure_addresses_consistent_with_transport<'a>(
+pub(crate) fn ensure_addresses_consistent_with_transport<'a>(
 	addresses: impl Iterator<Item = &'a Multiaddr>,
 	transport: &TransportConfig,
 ) -> Result<(), Error> {
diff --git a/substrate/client/network/src/service/metrics.rs b/substrate/client/network/src/service/metrics.rs
index c349fd98c76b02e9e7a60142c242061f8fec72fe..3b15b3e81d9c7adb5f337b2a67adf371b215e282 100644
--- a/substrate/client/network/src/service/metrics.rs
+++ b/substrate/client/network/src/service/metrics.rs
@@ -16,11 +16,13 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-use crate::transport::BandwidthSinks;
+use crate::{service::traits::BandwidthSink, ProtocolName};
+
 use prometheus_endpoint::{
 	self as prometheus, Counter, CounterVec, Gauge, GaugeVec, HistogramOpts, MetricSource, Opts,
 	PrometheusError, Registry, SourcedCounter, SourcedGauge, U64,
 };
+
 use std::{
 	str,
 	sync::{
@@ -38,13 +40,30 @@ pub fn register(registry: &Registry, sources: MetricSources) -> Result<Metrics,
 	Metrics::register(registry)
 }
 
+// Register `sc-network` metrics without bandwidth/connected peer sources.
+pub fn register_without_sources(registry: &Registry) -> Result<Metrics, PrometheusError> {
+	Metrics::register(registry)
+}
+
 /// Predefined metric sources that are fed directly into prometheus.
 pub struct MetricSources {
-	pub bandwidth: Arc<BandwidthSinks>,
+	pub bandwidth: Arc<dyn BandwidthSink>,
 	pub connected_peers: Arc<AtomicUsize>,
 }
 
+impl MetricSources {
+	pub fn register(
+		registry: &Registry,
+		bandwidth: Arc<dyn BandwidthSink>,
+		connected_peers: Arc<AtomicUsize>,
+	) -> Result<(), PrometheusError> {
+		BandwidthCounters::register(registry, bandwidth)?;
+		NumConnectedGauge::register(registry, connected_peers)
+	}
+}
+
 /// Dedicated metrics.
+#[derive(Clone)]
 pub struct Metrics {
 	// This list is ordered alphabetically
 	pub connections_closed_total: CounterVec<U64>,
@@ -208,12 +227,12 @@ impl Metrics {
 
 /// The bandwidth counter metric.
 #[derive(Clone)]
-pub struct BandwidthCounters(Arc<BandwidthSinks>);
+pub struct BandwidthCounters(Arc<dyn BandwidthSink>);
 
 impl BandwidthCounters {
 	/// Registers the `BandwidthCounters` metric whose values are
 	/// obtained from the given sinks.
-	fn register(registry: &Registry, sinks: Arc<BandwidthSinks>) -> Result<(), PrometheusError> {
+	fn register(registry: &Registry, sinks: Arc<dyn BandwidthSink>) -> Result<(), PrometheusError> {
 		prometheus::register(
 			SourcedCounter::new(
 				&Opts::new("substrate_sub_libp2p_network_bytes_total", "Total bandwidth usage")
@@ -263,3 +282,115 @@ impl MetricSource for NumConnectedGauge {
 		set(&[], self.0.load(Ordering::Relaxed) as u64);
 	}
 }
+
+/// Notification metrics.
+///
+/// Wrapper over `Option<InnerNotificationMetrics>` to make metrics reporting code cleaner.
+#[derive(Debug, Clone)]
+pub struct NotificationMetrics {
+	/// Metrics, if enabled.
+	metrics: Option<InnerNotificationMetrics>,
+}
+
+impl NotificationMetrics {
+	/// Create new [`NotificationMetrics`].
+	pub fn new(registry: Option<&Registry>) -> NotificationMetrics {
+		let metrics = match registry {
+			Some(registry) => InnerNotificationMetrics::register(registry).ok(),
+			None => None,
+		};
+
+		Self { metrics }
+	}
+
+	/// Register opened substream to Prometheus.
+	pub fn register_substream_opened(&self, protocol: &ProtocolName) {
+		if let Some(metrics) = &self.metrics {
+			metrics.notifications_streams_opened_total.with_label_values(&[&protocol]).inc();
+		}
+	}
+
+	/// Register closed substream to Prometheus.
+	pub fn register_substream_closed(&self, protocol: &ProtocolName) {
+		if let Some(metrics) = &self.metrics {
+			metrics
+				.notifications_streams_closed_total
+				.with_label_values(&[&protocol[..]])
+				.inc();
+		}
+	}
+
+	/// Register sent notification to Prometheus.
+	pub fn register_notification_sent(&self, protocol: &ProtocolName, size: usize) {
+		if let Some(metrics) = &self.metrics {
+			metrics
+				.notifications_sizes
+				.with_label_values(&["out", protocol])
+				.observe(size as f64);
+		}
+	}
+
+	/// Register received notification to Prometheus.
+	pub fn register_notification_received(&self, protocol: &ProtocolName, size: usize) {
+		if let Some(metrics) = &self.metrics {
+			metrics
+				.notifications_sizes
+				.with_label_values(&["in", protocol])
+				.observe(size as f64);
+		}
+	}
+}
+
+/// Notification metrics.
+#[derive(Debug, Clone)]
+struct InnerNotificationMetrics {
+	// Total number of opened substreams.
+	pub notifications_streams_opened_total: CounterVec<U64>,
+
+	/// Total number of closed substreams.
+	pub notifications_streams_closed_total: CounterVec<U64>,
+
+	/// In/outbound notification sizes.
+	pub notifications_sizes: HistogramVec,
+}
+
+impl InnerNotificationMetrics {
+	fn register(registry: &Registry) -> Result<Self, PrometheusError> {
+		Ok(Self {
+			notifications_sizes: prometheus::register(
+				HistogramVec::new(
+					HistogramOpts {
+						common_opts: Opts::new(
+							"substrate_sub_libp2p_notifications_sizes",
+							"Sizes of the notifications send to and received from all nodes",
+						),
+						buckets: prometheus::exponential_buckets(64.0, 4.0, 8)
+							.expect("parameters are always valid values; qed"),
+					},
+					&["direction", "protocol"],
+				)?,
+				registry,
+			)?,
+			notifications_streams_closed_total: prometheus::register(
+				CounterVec::new(
+					Opts::new(
+						"substrate_sub_libp2p_notifications_streams_closed_total",
+						"Total number of notification substreams that have been closed",
+					),
+					&["protocol"],
+				)?,
+				registry,
+			)?,
+			notifications_streams_opened_total: prometheus::register(
+				CounterVec::new(
+					Opts::new(
+						"substrate_sub_libp2p_notifications_streams_opened_total",
+						"Total number of notification substreams that have been opened",
+					),
+					&["protocol"],
+				)?,
+				registry,
+			)?,
+		})
+	}
+}
diff --git a/substrate/client/network/src/service/signature.rs b/substrate/client/network/src/service/signature.rs
index 5b2ba6be8cf8dec5516af04375816a860f9abf9a..f673102514630b0f00a8431825e82e08fc32dea3 100644
--- a/substrate/client/network/src/service/signature.rs
+++ b/substrate/client/network/src/service/signature.rs
@@ -20,38 +20,94 @@
 
 //! Signature-related code
 
-use libp2p::{
-	identity::{Keypair, PublicKey},
-	PeerId,
-};
-
 pub use libp2p::identity::SigningError;
 
+/// Public key.
+pub enum PublicKey {
+	/// Libp2p public key.
+	Libp2p(libp2p::identity::PublicKey),
+
+	/// Litep2p public key.
+	Litep2p(litep2p::crypto::PublicKey),
+}
+
+impl PublicKey {
+	/// Protobuf-encode [`PublicKey`].
+	pub fn encode_protobuf(&self) -> Vec<u8> {
+		match self {
+			Self::Libp2p(public) => public.encode_protobuf(),
+			Self::Litep2p(public) => public.to_protobuf_encoding(),
+		}
+	}
+
+	/// Get `PeerId` of the [`PublicKey`].
+	pub fn to_peer_id(&self) -> sc_network_types::PeerId {
+		match self {
+			Self::Libp2p(public) => public.to_peer_id().into(),
+			Self::Litep2p(public) => public.to_peer_id().into(),
+		}
+	}
+}
+
+/// Keypair.
+pub enum Keypair {
+	/// Libp2p keypair.
+	Libp2p(libp2p::identity::Keypair),
+
+	/// Litep2p keypair.
+	Litep2p(litep2p::crypto::ed25519::Keypair),
+}
+
+impl Keypair {
+	/// Generate ed25519 keypair.
+	pub fn generate_ed25519() -> Self {
+		Keypair::Litep2p(litep2p::crypto::ed25519::Keypair::generate())
+	}
+
+	/// Get [`Keypair`]'s public key.
+	pub fn public(&self) -> PublicKey {
+		match self {
+			Keypair::Libp2p(keypair) => PublicKey::Libp2p(keypair.public()),
+			Keypair::Litep2p(keypair) => PublicKey::Litep2p(keypair.public().into()),
+		}
+	}
+}
+
 /// A result of signing a message with a network identity. Since `PeerId` is potentially a hash of a
 /// `PublicKey`, you need to reveal the `PublicKey` next to the signature, so the verifier can check
 /// if the signature was made by the entity that controls a given `PeerId`.
 pub struct Signature {
 	/// The public key derived from the network identity that signed the message.
 	pub public_key: PublicKey,
+
 	/// The actual signature made for the message signed.
 	pub bytes: Vec<u8>,
 }
 
 impl Signature {
+	/// Create new [`Signature`].
+	pub fn new(public_key: PublicKey, bytes: Vec<u8>) -> Self {
+		Self { public_key, bytes }
+	}
+
 	/// Create a signature for a message with a given network identity.
 	pub fn sign_message(
 		message: impl AsRef<[u8]>,
 		keypair: &Keypair,
 	) -> Result<Self, SigningError> {
-		let public_key = keypair.public();
-		let bytes = keypair.sign(message.as_ref())?;
-		Ok(Self { public_key, bytes })
-	}
+		match keypair {
+			Keypair::Libp2p(keypair) => {
+				let public_key = keypair.public();
+				let bytes = keypair.sign(message.as_ref())?;
+
+				Ok(Signature { public_key: PublicKey::Libp2p(public_key), bytes })
+			},
+			Keypair::Litep2p(keypair) => {
+				let public_key = keypair.public();
+				let bytes = keypair.sign(message.as_ref());
 
-	/// Verify whether the signature was made for the given message by the entity that controls the
-	/// given `PeerId`.
-	pub fn verify(&self, message: impl AsRef<[u8]>, peer_id: &PeerId) -> bool {
-		*peer_id == self.public_key.to_peer_id() &&
-			self.public_key.verify(message.as_ref(), &self.bytes)
+				Ok(Signature { public_key: PublicKey::Litep2p(public_key.into()), bytes })
+			},
+		}
 	}
 }
diff --git a/substrate/client/network/src/service/traits.rs b/substrate/client/network/src/service/traits.rs
index 74ddb986c247aa08c5227edc25164b035ac7b820..9bbaeb1026f9304d43d347893de5795fd0bb93b4 100644
--- a/substrate/client/network/src/service/traits.rs
+++ b/substrate/client/network/src/service/traits.rs
@@ -21,28 +21,165 @@
 //! Traits defined by `sc-network`.
 
 use crate::{
-	config::MultiaddrWithPeerId,
-	error,
+	config::{IncomingRequest, MultiaddrWithPeerId, NotificationHandshake, Params, SetConfig},
+	error::{self, Error},
 	event::Event,
+	network_state::NetworkState,
 	request_responses::{IfDisconnected, RequestFailure},
-	service::signature::Signature,
+	service::{metrics::NotificationMetrics, signature::Signature, PeerStoreProvider},
 	types::ProtocolName,
-	ReputationChange,
+	Multiaddr, ReputationChange,
 };
 
 use futures::{channel::oneshot, Stream};
-use libp2p::{Multiaddr, PeerId};
+use prometheus_endpoint::Registry;
 
-use sc_network_common::role::ObservedRole;
+use sc_client_api::BlockBackend;
+use sc_network_common::{role::ObservedRole, ExHashT};
+use sc_network_types::PeerId;
+use sp_runtime::traits::Block as BlockT;
 
-use std::{collections::HashSet, fmt::Debug, future::Future, pin::Pin, sync::Arc};
+use std::{collections::HashSet, fmt::Debug, future::Future, pin::Pin, sync::Arc, time::Duration};
 
 pub use libp2p::{identity::SigningError, kad::record::Key as KademliaKey};
 
+/// Supertrait defining the services provided by [`NetworkBackend`] service handle.
+pub trait NetworkService:
+	NetworkSigner
+	+ NetworkDHTProvider
+	+ NetworkStatusProvider
+	+ NetworkPeers
+	+ NetworkEventStream
+	+ NetworkStateInfo
+	+ NetworkRequest
+	+ Send
+	+ Sync
+	+ 'static
+{
+}
+
+impl<T> NetworkService for T where
+	T: NetworkSigner
+		+ NetworkDHTProvider
+		+ NetworkStatusProvider
+		+ NetworkPeers
+		+ NetworkEventStream
+		+ NetworkStateInfo
+		+ NetworkRequest
+		+ Send
+		+ Sync
+		+ 'static
+{
+}
+
+/// Trait defining the required functionality from a notification protocol configuration.
+pub trait NotificationConfig: Debug {
+	/// Get access to the `SetConfig` of the notification protocol.
+	fn set_config(&self) -> &SetConfig;
+
+	/// Get protocol name.
+	fn protocol_name(&self) -> &ProtocolName;
+}
+
+/// Trait defining the required functionality from a request-response protocol configuration.
+pub trait RequestResponseConfig: Debug {
+	/// Get protocol name.
+	fn protocol_name(&self) -> &ProtocolName;
+}
+
+/// Trait defining required functionality from `PeerStore`.
+#[async_trait::async_trait]
+pub trait PeerStore {
+	/// Get handle to `PeerStore`.
+	fn handle(&self) -> Arc<dyn PeerStoreProvider>;
+
+	/// Start running `PeerStore` event loop.
+	async fn run(self);
+}
+
+/// Networking backend.
+#[async_trait::async_trait]
+pub trait NetworkBackend<B: BlockT + 'static, H: ExHashT>: Send + 'static {
+	/// Type representing notification protocol-related configuration.
+	type NotificationProtocolConfig: NotificationConfig;
+
+	/// Type representing request-response protocol-related configuration.
+	type RequestResponseProtocolConfig: RequestResponseConfig;
+
+	/// Type implementing `NetworkService` for the networking backend.
+	///
+	/// `NetworkService` allows other subsystems of the blockchain to interact with `sc-network`
+	/// using `NetworkService`.
+	type NetworkService<Block, Hash>: NetworkService + Clone;
+
+	/// Type implementing [`PeerStore`].
+	type PeerStore: PeerStore;
+
+	/// Bitswap config.
+	type BitswapConfig;
+
+	/// Create new `NetworkBackend`.
+	fn new(params: Params<B, H, Self>) -> Result<Self, Error>
+	where
+		Self: Sized;
+
+	/// Get handle to `NetworkService` of the `NetworkBackend`.
+	fn network_service(&self) -> Arc<dyn NetworkService>;
+
+	/// Create [`PeerStore`].
+	fn peer_store(bootnodes: Vec<PeerId>) -> Self::PeerStore;
+
+	/// Register metrics that are used by the notification protocols.
+	fn register_notification_metrics(registry: Option<&Registry>) -> NotificationMetrics;
+
+	/// Create Bitswap server.
+	fn bitswap_server(
+		client: Arc<dyn BlockBackend<B> + Send + Sync>,
+	) -> (Pin<Box<dyn Future<Output = ()> + Send>>, Self::BitswapConfig);
+
+	/// Create notification protocol configuration and an associated `NotificationService`
+	/// for the protocol.
+	fn notification_config(
+		protocol_name: ProtocolName,
+		fallback_names: Vec<ProtocolName>,
+		max_notification_size: u64,
+		handshake: Option<NotificationHandshake>,
+		set_config: SetConfig,
+		metrics: NotificationMetrics,
+		peerstore_handle: Arc<dyn PeerStoreProvider>,
+	) -> (Self::NotificationProtocolConfig, Box<dyn NotificationService>);
+
+	/// Create request-response protocol configuration.
+	fn request_response_config(
+		protocol_name: ProtocolName,
+		fallback_names: Vec<ProtocolName>,
+		max_request_size: u64,
+		max_response_size: u64,
+		request_timeout: Duration,
+		inbound_queue: Option<async_channel::Sender<IncomingRequest>>,
+	) -> Self::RequestResponseProtocolConfig;
+
+	/// Start [`NetworkBackend`] event loop.
+	async fn run(mut self);
+}
+
 /// Signer with network identity
 pub trait NetworkSigner {
 	/// Signs the message with the `KeyPair` that defines the local [`PeerId`].
-	fn sign_with_local_identity(&self, msg: impl AsRef<[u8]>) -> Result<Signature, SigningError>;
+	fn sign_with_local_identity(&self, msg: Vec<u8>) -> Result<Signature, SigningError>;
+
+	/// Verify signature using peer's public key.
+	///
+	/// `public_key` must be a Protobuf-encoded ed25519 public key.
+	///
+	/// Returns an error if `public_key` cannot be parsed into a valid ed25519 public key.
+	fn verify(
+		&self,
+		peer_id: sc_network_types::PeerId,
+		public_key: &Vec<u8>,
+		signature: &Vec<u8>,
+		message: &Vec<u8>,
+	) -> Result<bool, String>;
 }
 
 impl<T> NetworkSigner for Arc<T>
@@ -50,9 +187,19 @@ where
 	T: ?Sized,
 	T: NetworkSigner,
 {
-	fn sign_with_local_identity(&self, msg: impl AsRef<[u8]>) -> Result<Signature, SigningError> {
+	fn sign_with_local_identity(&self, msg: Vec<u8>) -> Result<Signature, SigningError> {
 		T::sign_with_local_identity(self, msg)
 	}
+
+	fn verify(
+		&self,
+		peer_id: sc_network_types::PeerId,
+		public_key: &Vec<u8>,
+		signature: &Vec<u8>,
+		message: &Vec<u8>,
+	) -> Result<bool, String> {
+		T::verify(self, peer_id, public_key, signature, message)
+	}
 }
 
 /// Provides access to the networking DHT.
@@ -117,6 +264,11 @@ pub trait NetworkStatusProvider {
 	///
 	/// Returns an error if the `NetworkWorker` is no longer running.
 	async fn status(&self) -> Result<NetworkStatus, ()>;
+
+	/// Get the network state.
+	///
+	/// Returns an error if the `NetworkWorker` is no longer running.
+	async fn network_state(&self) -> Result<NetworkState, ()>;
 }
 
 // Manual implementation to avoid extra boxing here
@@ -134,9 +286,20 @@ where
 	{
 		T::status(self)
 	}
+
+	fn network_state<'life0, 'async_trait>(
+		&'life0 self,
+	) -> Pin<Box<dyn Future<Output = Result<NetworkState, ()>> + Send + 'async_trait>>
+	where
+		'life0: 'async_trait,
+		Self: 'async_trait,
+	{
+		T::network_state(self)
+	}
 }
 
 /// Provides low-level API for manipulating network peers.
+#[async_trait::async_trait]
 pub trait NetworkPeers {
 	/// Set authorized peers.
 	///
@@ -237,9 +400,15 @@ pub trait NetworkPeers {
 	/// decoded into a role, the role queried from `PeerStore` and if the role is not stored
 	/// there either, `None` is returned and the peer should be discarded.
 	fn peer_role(&self, peer_id: PeerId, handshake: Vec<u8>) -> Option<ObservedRole>;
+
+	/// Get the list of reserved peers.
+	///
+	/// Returns an error if the `NetworkWorker` is no longer running.
+	async fn reserved_peers(&self) -> Result<Vec<PeerId>, ()>;
 }
 
 // Manual implementation to avoid extra boxing here
+#[async_trait::async_trait]
 impl<T> NetworkPeers for Arc<T>
 where
 	T: ?Sized,
@@ -316,6 +485,16 @@ where
 	fn peer_role(&self, peer_id: PeerId, handshake: Vec<u8>) -> Option<ObservedRole> {
 		T::peer_role(self, peer_id, handshake)
 	}
+
+	fn reserved_peers<'life0, 'async_trait>(
+		&'life0 self,
+	) -> Pin<Box<dyn Future<Output = Result<Vec<PeerId>, ()>> + Send + 'async_trait>>
+	where
+		'life0: 'async_trait,
+		Self: 'async_trait,
+	{
+		T::reserved_peers(self)
+	}
 }
 
 /// Provides access to network-level event stream.
@@ -389,15 +568,15 @@ pub trait NotificationSender: Send + Sync + 'static {
 		-> Result<Box<dyn NotificationSenderReady + '_>, NotificationSenderError>;
 }
 
-/// Error returned by [`NetworkNotification::notification_sender`].
+/// Error returned by the notification sink.
 #[derive(Debug, thiserror::Error)]
 pub enum NotificationSenderError {
 	/// The notification receiver has been closed, usually because the underlying connection
 	/// closed.
 	///
 	/// Some of the notifications most recently sent may not have been received. However,
-	/// the peer may still be connected and a new `NotificationSender` for the same
-	/// protocol obtained from [`NetworkNotification::notification_sender`].
+	/// the peer may still be connected and a new notification sink for the same
+	/// protocol obtained from [`NotificationService::message_sink()`].
 	#[error("The notification receiver has been closed")]
 	Closed,
 	/// Protocol name hasn't been registered.
@@ -405,127 +584,6 @@ pub enum NotificationSenderError {
 	BadProtocol,
 }
 
-/// Provides ability to send network notifications.
-pub trait NetworkNotification {
-	/// Appends a notification to the buffer of pending outgoing notifications with the given peer.
-	/// Has no effect if the notifications channel with this protocol name is not open.
-	///
-	/// If the buffer of pending outgoing notifications with that peer is full, the notification
-	/// is silently dropped and the connection to the remote will start being shut down. This
-	/// happens if you call this method at a higher rate than the rate at which the peer processes
-	/// these notifications, or if the available network bandwidth is too low.
-	///
-	/// For this reason, this method is considered soft-deprecated. You are encouraged to use
-	/// [`NetworkNotification::notification_sender`] instead.
-	///
-	/// > **Note**: The reason why this is a no-op in the situation where we have no channel is
-	/// >			that we don't guarantee message delivery anyway. Networking issues can cause
-	/// >			connections to drop at any time, and higher-level logic shouldn't differentiate
-	/// >			between the remote voluntarily closing a substream or a network error
-	/// >			preventing the message from being delivered.
-	///
-	/// The protocol must have been registered with
-	/// `crate::config::NetworkConfiguration::notifications_protocols`.
-	fn write_notification(&self, target: PeerId, protocol: ProtocolName, message: Vec<u8>);
-
-	/// Obtains a [`NotificationSender`] for a connected peer, if it exists.
-	///
-	/// A `NotificationSender` is scoped to a particular connection to the peer that holds
-	/// a receiver. With a `NotificationSender` at hand, sending a notification is done in two
-	/// steps:
-	///
-	/// 1. [`NotificationSender::ready`] is used to wait for the sender to become ready
-	/// for another notification, yielding a [`NotificationSenderReady`] token.
-	/// 2. [`NotificationSenderReady::send`] enqueues the notification for sending. This operation
-	/// can only fail if the underlying notification substream or connection has suddenly closed.
-	///
-	/// An error is returned by [`NotificationSenderReady::send`] if there exists no open
-	/// notifications substream with that combination of peer and protocol, or if the remote
-	/// has asked to close the notifications substream. If that happens, it is guaranteed that an
-	/// [`Event::NotificationStreamClosed`] has been generated on the stream returned by
-	/// [`NetworkEventStream::event_stream`].
-	///
-	/// If the remote requests to close the notifications substream, all notifications successfully
-	/// enqueued using [`NotificationSenderReady::send`] will finish being sent out before the
-	/// substream actually gets closed, but attempting to enqueue more notifications will now
-	/// return an error. It is however possible for the entire connection to be abruptly closed,
-	/// in which case enqueued notifications will be lost.
-	///
-	/// The protocol must have been registered with
-	/// `crate::config::NetworkConfiguration::notifications_protocols`.
-	///
-	/// # Usage
-	///
-	/// This method returns a struct that allows waiting until there is space available in the
-	/// buffer of messages towards the given peer. If the peer processes notifications at a slower
-	/// rate than we send them, this buffer will quickly fill up.
-	///
-	/// As such, you should never do something like this:
-	///
-	/// ```ignore
-	/// // Do NOT do this
-	/// for peer in peers {
-	/// 	if let Ok(n) = network.notification_sender(peer, ...) {
-	/// 			if let Ok(s) = n.ready().await {
-	/// 				let _ = s.send(...);
-	/// 			}
-	/// 	}
-	/// }
-	/// ```
-	///
-	/// Doing so would slow down all peers to the rate of the slowest one. A malicious or
-	/// malfunctioning peer could intentionally process notifications at a very slow rate.
-	///
-	/// Instead, you are encouraged to maintain your own buffer of notifications on top of the one
-	/// maintained by `sc-network`, and use `notification_sender` to progressively send out
-	/// elements from your buffer. If this additional buffer is full (which will happen at some
-	/// point if the peer is too slow to process notifications), appropriate measures can be taken,
-	/// such as removing non-critical notifications from the buffer or disconnecting the peer
-	/// using [`NetworkPeers::disconnect_peer`].
-	///
-	///
-	/// Notifications              Per-peer buffer
-	///   broadcast    +------->   of notifications   +-->  `notification_sender`  +-->  Internet
-	///                    ^       (not covered by
-	///                    |         sc-network)
-	///                    +
-	///      Notifications should be dropped
-	///             if buffer is full
-	///
-	///
-	/// See also the `sc-network-gossip` crate for a higher-level way to send notifications.
-	fn notification_sender(
-		&self,
-		target: PeerId,
-		protocol: ProtocolName,
-	) -> Result<Box<dyn NotificationSender>, NotificationSenderError>;
-
-	/// Set handshake for the notification protocol.
-	fn set_notification_handshake(&self, protocol: ProtocolName, handshake: Vec<u8>);
-}
-
-impl<T> NetworkNotification for Arc<T>
-where
-	T: ?Sized,
-	T: NetworkNotification,
-{
-	fn write_notification(&self, target: PeerId, protocol: ProtocolName, message: Vec<u8>) {
-		T::write_notification(self, target, protocol, message)
-	}
-
-	fn notification_sender(
-		&self,
-		target: PeerId,
-		protocol: ProtocolName,
-	) -> Result<Box<dyn NotificationSender>, NotificationSenderError> {
-		T::notification_sender(self, target, protocol)
-	}
-
-	fn set_notification_handshake(&self, protocol: ProtocolName, handshake: Vec<u8>) {
-		T::set_notification_handshake(self, protocol, handshake)
-	}
-}
-
 /// Provides ability to send network requests.
 #[async_trait::async_trait]
 pub trait NetworkRequest {
@@ -662,6 +720,15 @@ pub enum Direction {
 	Outbound,
 }
 
+impl From<litep2p::protocol::notification::Direction> for Direction {
+	fn from(direction: litep2p::protocol::notification::Direction) -> Self {
+		match direction {
+			litep2p::protocol::notification::Direction::Inbound => Direction::Inbound,
+			litep2p::protocol::notification::Direction::Outbound => Direction::Outbound,
+		}
+	}
+}
+
 impl Direction {
 	/// Is the direction inbound.
 	pub fn is_inbound(&self) -> bool {
@@ -771,13 +838,13 @@ pub trait NotificationService: Debug + Send {
 	async fn close_substream(&mut self, peer: PeerId) -> Result<(), ()>;
 
 	/// Send synchronous `notification` to `peer`.
-	fn send_sync_notification(&self, peer: &PeerId, notification: Vec<u8>);
+	fn send_sync_notification(&mut self, peer: &PeerId, notification: Vec<u8>);
 
 	/// Send asynchronous `notification` to `peer`, allowing sender to exercise backpressure.
 	///
 	/// Returns an error if the peer doesn't exist.
 	async fn send_async_notification(
-		&self,
+		&mut self,
 		peer: &PeerId,
 		notification: Vec<u8>,
 	) -> Result<(), error::Error>;
@@ -827,3 +894,12 @@ pub trait MessageSink: Send + Sync {
 	/// Returns an error if the peer does not exist.
 	async fn send_async_notification(&self, notification: Vec<u8>) -> Result<(), error::Error>;
 }
+
+/// Trait defining the behavior of a bandwidth sink.
+pub trait BandwidthSink: Send + Sync {
+	/// Get the number of bytes received.
+	fn total_inbound(&self) -> u64;
+
+	/// Get the number of bytes sent.
+	fn total_outbound(&self) -> u64;
+}
diff --git a/substrate/client/network/src/types.rs b/substrate/client/network/src/types.rs
index b0e32ae109149e2485c62d469c1b14624989ac03..25517599469e66294563575f1fbe69cc21240ceb 100644
--- a/substrate/client/network/src/types.rs
+++ b/substrate/client/network/src/types.rs
@@ -28,6 +28,8 @@ use std::{
 	sync::Arc,
 };
 
+pub use libp2p::{multiaddr, Multiaddr, PeerId};
+
 /// The protocol name transmitted on the wire.
 #[derive(Debug, Clone)]
 pub enum ProtocolName {
@@ -98,6 +100,24 @@ impl upgrade::ProtocolName for ProtocolName {
 	}
 }
 
+impl From<ProtocolName> for litep2p::ProtocolName {
+	fn from(protocol: ProtocolName) -> Self {
+		match protocol {
+			ProtocolName::Static(inner) => litep2p::ProtocolName::from(inner),
+			ProtocolName::OnHeap(inner) => litep2p::ProtocolName::from(inner),
+		}
+	}
+}
+
+impl From<litep2p::ProtocolName> for ProtocolName {
+	fn from(protocol: litep2p::ProtocolName) -> Self {
+		match protocol {
+			litep2p::ProtocolName::Static(protocol) => ProtocolName::from(protocol),
+			litep2p::ProtocolName::Allocated(protocol) => ProtocolName::from(protocol),
+		}
+	}
+}
+
 #[cfg(test)]
 mod tests {
 	use super::ProtocolName;
diff --git a/substrate/client/network/statement/Cargo.toml b/substrate/client/network/statement/Cargo.toml
index 635cfc5d0d5ecf21268822e39a88961b85190a4e..4ffe6d6e3aedf9154c073f63fa2ddbf4dda12c01 100644
--- a/substrate/client/network/statement/Cargo.toml
+++ b/substrate/client/network/statement/Cargo.toml
@@ -25,6 +25,8 @@ log = { workspace = true, default-features = true }
 prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" }
 sc-network-common = { path = "../common" }
 sc-network-sync = { path = "../sync" }
+sc-network-types = { path = "../types" }
 sc-network = { path = ".." }
 sp-consensus = { path = "../../../primitives/consensus/common" }
+sp-runtime = { path = "../../../primitives/runtime" }
 sp-statement-store = { path = "../../../primitives/statement-store" }
diff --git a/substrate/client/network/statement/src/lib.rs b/substrate/client/network/statement/src/lib.rs
index 5187e681d83c48b09bea2476f8cfe8c8b6806a3c..df93788696e381b18e80cff73646bb1594636386 100644
--- a/substrate/client/network/statement/src/lib.rs
+++ b/substrate/client/network/statement/src/lib.rs
@@ -30,18 +30,23 @@ use crate::config::*;
 
 use codec::{Decode, Encode};
 use futures::{channel::oneshot, prelude::*, stream::FuturesUnordered, FutureExt};
-use libp2p::{multiaddr, PeerId};
 use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64};
 use sc_network::{
-	config::{NonDefaultSetConfig, NonReservedPeerMode, SetConfig},
-	error,
-	service::traits::{NotificationEvent, NotificationService, ValidationResult},
+	config::{NonReservedPeerMode, SetConfig},
+	error, multiaddr,
+	peer_store::PeerStoreProvider,
+	service::{
+		traits::{NotificationEvent, NotificationService, ValidationResult},
+		NotificationMetrics,
+	},
 	types::ProtocolName,
 	utils::{interval, LruHashSet},
-	NetworkEventStream, NetworkNotification, NetworkPeers,
+	NetworkBackend, NetworkEventStream, NetworkPeers,
 };
 use sc_network_common::role::ObservedRole;
 use sc_network_sync::{SyncEvent, SyncEventStream};
+use sc_network_types::PeerId;
+use sp_runtime::traits::Block as BlockT;
 use sp_statement_store::{
 	Hash, NetworkPriority, Statement, StatementSource, StatementStore, SubmitResult,
 };
@@ -107,17 +112,23 @@ pub struct StatementHandlerPrototype {
 
 impl StatementHandlerPrototype {
 	/// Create a new instance.
-	pub fn new<Hash: AsRef<[u8]>>(
+	pub fn new<
+		Hash: AsRef<[u8]>,
+		Block: BlockT,
+		Net: NetworkBackend<Block, <Block as BlockT>::Hash>,
+	>(
 		genesis_hash: Hash,
 		fork_id: Option<&str>,
-	) -> (Self, NonDefaultSetConfig) {
+		metrics: NotificationMetrics,
+		peer_store_handle: Arc<dyn PeerStoreProvider>,
+	) -> (Self, Net::NotificationProtocolConfig) {
 		let genesis_hash = genesis_hash.as_ref();
 		let protocol_name = if let Some(fork_id) = fork_id {
 			format!("/{}/{}/statement/1", array_bytes::bytes2hex("", genesis_hash), fork_id)
 		} else {
 			format!("/{}/statement/1", array_bytes::bytes2hex("", genesis_hash))
 		};
-		let (config, notification_service) = NonDefaultSetConfig::new(
+		let (config, notification_service) = Net::notification_config(
 			protocol_name.clone().into(),
 			Vec::new(),
 			MAX_STATEMENT_SIZE,
@@ -128,6 +139,8 @@ impl StatementHandlerPrototype {
 				reserved_nodes: Vec::new(),
 				non_reserved_mode: NonReservedPeerMode::Deny,
 			},
+			metrics,
+			peer_store_handle,
 		);
 
 		(Self { protocol_name: protocol_name.into(), notification_service }, config)
@@ -138,7 +151,7 @@ impl StatementHandlerPrototype {
 	/// Important: the statements handler is initially disabled and doesn't gossip statements.
 	/// Gossiping is enabled when major syncing is done.
 	pub fn build<
-		N: NetworkPeers + NetworkEventStream + NetworkNotification,
+		N: NetworkPeers + NetworkEventStream,
 		S: SyncEventStream + sp_consensus::SyncOracle,
 	>(
 		self,
@@ -201,7 +214,7 @@ impl StatementHandlerPrototype {
 
 /// Handler for statements. Call [`StatementHandler::run`] to start the processing.
 pub struct StatementHandler<
-	N: NetworkPeers + NetworkEventStream + NetworkNotification,
+	N: NetworkPeers + NetworkEventStream,
 	S: SyncEventStream + sp_consensus::SyncOracle,
 > {
 	protocol_name: ProtocolName,
@@ -241,7 +254,7 @@ struct Peer {
 
 impl<N, S> StatementHandler<N, S>
 where
-	N: NetworkPeers + NetworkEventStream + NetworkNotification,
+	N: NetworkPeers + NetworkEventStream,
 	S: SyncEventStream + sp_consensus::SyncOracle,
 {
 	/// Turns the [`StatementHandler`] into a future that should run forever and not be
@@ -459,8 +472,7 @@ where
 
 			if !to_send.is_empty() {
 				log::trace!(target: LOG_TARGET, "Sending {} statements to {}", to_send.len(), who);
-				self.network
-					.write_notification(*who, self.protocol_name.clone(), to_send.encode());
+				self.notification_service.send_sync_notification(who, to_send.encode());
 			}
 		}
 
diff --git a/substrate/client/network/sync/Cargo.toml b/substrate/client/network/sync/Cargo.toml
index 6b46d67a3cae7a32bec276108ccef1782de0943a..eb79973c2739a3bfd11477b4bbf0979249a741e6 100644
--- a/substrate/client/network/sync/Cargo.toml
+++ b/substrate/client/network/sync/Cargo.toml
@@ -40,6 +40,7 @@ sc-client-api = { path = "../../api" }
 sc-consensus = { path = "../../consensus/common" }
 sc-network = { path = ".." }
 sc-network-common = { path = "../common" }
+sc-network-types = { path = "../types" }
 sc-utils = { path = "../../utils" }
 sp-arithmetic = { path = "../../../primitives/arithmetic" }
 sp-blockchain = { path = "../../../primitives/blockchain" }
diff --git a/substrate/client/network/sync/src/block_announce_validator.rs b/substrate/client/network/sync/src/block_announce_validator.rs
index 62c0d1c16e213736bc1abc0b45c24bce43701970..3c994dd69442a0a7b0f29820b8044f0308436c48 100644
--- a/substrate/client/network/sync/src/block_announce_validator.rs
+++ b/substrate/client/network/sync/src/block_announce_validator.rs
@@ -21,9 +21,9 @@
 
 use crate::{futures_stream::FuturesStream, LOG_TARGET};
 use futures::{stream::FusedStream, Future, FutureExt, Stream, StreamExt};
-use libp2p::PeerId;
 use log::{debug, error, trace, warn};
 use sc_network_common::sync::message::BlockAnnounce;
+use sc_network_types::PeerId;
 use sp_consensus::block_validation::Validation;
 use sp_runtime::traits::{Block as BlockT, Header, Zero};
 use std::{
@@ -309,7 +309,7 @@ impl<B: BlockT> FusedStream for BlockAnnounceValidator<B> {
 mod tests {
 	use super::*;
 	use crate::block_announce_validator::AllocateSlotForBlockAnnounceValidation;
-	use libp2p::PeerId;
+	use sc_network_types::PeerId;
 	use sp_consensus::block_validation::DefaultBlockAnnounceValidator;
 	use substrate_test_runtime_client::runtime::Block;
 
diff --git a/substrate/client/network/sync/src/block_relay_protocol.rs b/substrate/client/network/sync/src/block_relay_protocol.rs
index b4ef72a10c6b8fc54532ef38903eea70fcbfdae8..3c5b3739e8222395b896003af8bc72e9ea68b9e7 100644
--- a/substrate/client/network/sync/src/block_relay_protocol.rs
+++ b/substrate/client/network/sync/src/block_relay_protocol.rs
@@ -17,12 +17,9 @@
 //! Block relay protocol related definitions.
 
 use futures::channel::oneshot;
-use libp2p::PeerId;
-use sc_network::{
-	request_responses::{ProtocolConfig, RequestFailure},
-	ProtocolName,
-};
+use sc_network::{request_responses::RequestFailure, NetworkBackend, ProtocolName};
 use sc_network_common::sync::message::{BlockData, BlockRequest};
+use sc_network_types::PeerId;
 use sp_runtime::traits::Block as BlockT;
 use std::sync::Arc;
 
@@ -68,8 +65,8 @@ pub enum BlockResponseError {
 
 /// Block relay specific params for network creation, specified in
 /// ['sc_service::BuildNetworkParams'].
-pub struct BlockRelayParams<Block: BlockT> {
+pub struct BlockRelayParams<Block: BlockT, N: NetworkBackend<Block, <Block as BlockT>::Hash>> {
 	pub server: Box<dyn BlockServer<Block>>,
 	pub downloader: Arc<dyn BlockDownloader<Block>>,
-	pub request_response_config: ProtocolConfig,
+	pub request_response_config: N::RequestResponseProtocolConfig,
 }
diff --git a/substrate/client/network/sync/src/block_request_handler.rs b/substrate/client/network/sync/src/block_request_handler.rs
index 7dfa76278b8d11688cb1b8c167d19ce105ef610f..5aa374057a4a231144d39fc2c48818e906b5a245 100644
--- a/substrate/client/network/sync/src/block_request_handler.rs
+++ b/substrate/client/network/sync/src/block_request_handler.rs
@@ -29,24 +29,26 @@ use crate::{
 
 use codec::{Decode, DecodeAll, Encode};
 use futures::{channel::oneshot, stream::StreamExt};
-use libp2p::PeerId;
 use log::debug;
 use prost::Message;
+use schnellru::{ByLength, LruMap};
+
 use sc_client_api::BlockBackend;
 use sc_network::{
 	config::ProtocolId,
-	request_responses::{
-		IfDisconnected, IncomingRequest, OutgoingResponse, ProtocolConfig, RequestFailure,
-	},
+	request_responses::{IfDisconnected, IncomingRequest, OutgoingResponse, RequestFailure},
+	service::traits::RequestResponseConfig,
 	types::ProtocolName,
+	NetworkBackend,
 };
 use sc_network_common::sync::message::{BlockAttributes, BlockData, BlockRequest, FromBlock};
-use schnellru::{ByLength, LruMap};
+use sc_network_types::PeerId;
 use sp_blockchain::HeaderBackend;
 use sp_runtime::{
 	generic::BlockId,
 	traits::{Block as BlockT, Header, One, Zero},
 };
+
 use std::{
 	cmp::min,
 	hash::{Hash, Hasher},
@@ -71,21 +73,26 @@ mod rep {
 		Rep::new(-(1 << 10), "same small block request multiple times");
 }
 
-/// Generates a [`ProtocolConfig`] for the block request protocol, refusing incoming requests.
-pub fn generate_protocol_config<Hash: AsRef<[u8]>>(
+/// Generates a `RequestResponseProtocolConfig` for the block request protocol,
+/// refusing incoming requests.
+pub fn generate_protocol_config<
+	Hash: AsRef<[u8]>,
+	B: BlockT,
+	N: NetworkBackend<B, <B as BlockT>::Hash>,
+>(
 	protocol_id: &ProtocolId,
 	genesis_hash: Hash,
 	fork_id: Option<&str>,
-) -> ProtocolConfig {
-	ProtocolConfig {
-		name: generate_protocol_name(genesis_hash, fork_id).into(),
-		fallback_names: std::iter::once(generate_legacy_protocol_name(protocol_id).into())
-			.collect(),
-		max_request_size: 1024 * 1024,
-		max_response_size: 16 * 1024 * 1024,
-		request_timeout: Duration::from_secs(20),
-		inbound_queue: None,
-	}
+	inbound_queue: async_channel::Sender<IncomingRequest>,
+) -> N::RequestResponseProtocolConfig {
+	N::request_response_config(
+		generate_protocol_name(genesis_hash, fork_id).into(),
+		std::iter::once(generate_legacy_protocol_name(protocol_id).into()).collect(),
+		1024 * 1024,
+		16 * 1024 * 1024,
+		Duration::from_secs(20),
+		Some(inbound_queue),
+	)
 }
 
 /// Generate the block protocol name from the genesis hash and fork id.
@@ -154,19 +161,19 @@ where
 	Client: HeaderBackend<B> + BlockBackend<B> + Send + Sync + 'static,
 {
 	/// Create a new [`BlockRequestHandler`].
-	pub fn new(
+	pub fn new<N: NetworkBackend<B, <B as BlockT>::Hash>>(
 		network: NetworkServiceHandle,
 		protocol_id: &ProtocolId,
 		fork_id: Option<&str>,
 		client: Arc<Client>,
 		num_peer_hint: usize,
-	) -> BlockRelayParams<B> {
+	) -> BlockRelayParams<B, N> {
 		// Reserve enough request slots for one request per peer when we are at the maximum
 		// number of peers.
 		let capacity = std::cmp::max(num_peer_hint, 1);
 		let (tx, request_receiver) = async_channel::bounded(capacity);
 
-		let mut protocol_config = generate_protocol_config(
+		let protocol_config = generate_protocol_config::<_, B, N>(
 			protocol_id,
 			client
 				.block_hash(0u32.into())
@@ -174,15 +181,18 @@ where
 				.flatten()
 				.expect("Genesis block exists; qed"),
 			fork_id,
+			tx,
 		);
-		protocol_config.inbound_queue = Some(tx);
 
 		let capacity = ByLength::new(num_peer_hint.max(1) as u32 * 2);
 		let seen_requests = LruMap::new(capacity);
 
 		BlockRelayParams {
 			server: Box::new(Self { client, request_receiver, seen_requests }),
-			downloader: Arc::new(FullBlockDownloader::new(protocol_config.name.clone(), network)),
+			downloader: Arc::new(FullBlockDownloader::new(
+				protocol_config.protocol_name().clone(),
+				network,
+			)),
 			request_response_config: protocol_config,
 		}
 	}
diff --git a/substrate/client/network/sync/src/blocks.rs b/substrate/client/network/sync/src/blocks.rs
index 4988045a4786720771ad84d9bd3b3cb0aa96592a..af88c5245dcb0a4cddac6087abd07c073e97d241 100644
--- a/substrate/client/network/sync/src/blocks.rs
+++ b/substrate/client/network/sync/src/blocks.rs
@@ -17,9 +17,9 @@
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 use crate::LOG_TARGET;
-use libp2p::PeerId;
 use log::trace;
 use sc_network_common::sync::message;
+use sc_network_types::PeerId;
 use sp_runtime::traits::{Block as BlockT, NumberFor, One};
 use std::{
 	cmp,
@@ -262,8 +262,8 @@ impl<B: BlockT> BlockCollection<B> {
 #[cfg(test)]
 mod test {
 	use super::{BlockCollection, BlockData, BlockRangeState};
-	use libp2p::PeerId;
 	use sc_network_common::sync::message;
+	use sc_network_types::PeerId;
 	use sp_core::H256;
 	use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper};
 
diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs
index ff40ae95624e198c4eb5af64c5af85f9cd180c9a..0ef0b092754c04ccfe5b9fe322f82fc68227dc34 100644
--- a/substrate/client/network/sync/src/engine.rs
+++ b/substrate/client/network/sync/src/engine.rs
@@ -47,7 +47,7 @@ use futures::{
 	future::{BoxFuture, Fuse},
 	FutureExt, StreamExt,
 };
-use libp2p::{request_response::OutboundFailure, PeerId};
+use libp2p::request_response::OutboundFailure;
 use log::{debug, error, trace, warn};
 use prometheus_endpoint::{
 	register, Counter, Gauge, MetricSource, Opts, PrometheusError, Registry, SourcedGauge, U64,
@@ -59,21 +59,22 @@ use tokio::time::{Interval, MissedTickBehavior};
 use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider};
 use sc_consensus::{import_queue::ImportQueueService, IncomingBlock};
 use sc_network::{
-	config::{
-		FullNetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake,
-		ProtocolId, SetConfig,
-	},
-	peer_store::{PeerStoreHandle, PeerStoreProvider},
+	config::{FullNetworkConfiguration, NotificationHandshake, ProtocolId, SetConfig},
+	peer_store::PeerStoreProvider,
 	request_responses::{IfDisconnected, RequestFailure},
-	service::traits::{Direction, NotificationEvent, ValidationResult},
+	service::{
+		traits::{Direction, NotificationConfig, NotificationEvent, ValidationResult},
+		NotificationMetrics,
+	},
 	types::ProtocolName,
 	utils::LruHashSet,
-	NotificationService, ReputationChange,
+	NetworkBackend, NotificationService, ReputationChange,
 };
 use sc_network_common::{
 	role::Roles,
 	sync::message::{BlockAnnounce, BlockAnnouncesHandshake, BlockRequest, BlockState},
 };
+use sc_network_types::PeerId;
 use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender};
 use sp_blockchain::{Error as ClientError, HeaderMetadata};
 use sp_consensus::{block_validation::BlockAnnounceValidator, BlockOrigin};
@@ -296,7 +297,7 @@ pub struct SyncingEngine<B: BlockT, Client> {
 	syncing_started: Option<Instant>,
 
 	/// Handle to `PeerStore`.
-	peer_store_handle: PeerStoreHandle,
+	peer_store_handle: Arc<dyn PeerStoreProvider>,
 
 	/// Instant when the last notification was sent or received.
 	last_notification_io: Instant,
@@ -328,11 +329,12 @@ where
 		+ Sync
 		+ 'static,
 {
-	pub fn new(
+	pub fn new<N>(
 		roles: Roles,
 		client: Arc<Client>,
 		metrics_registry: Option<&Registry>,
-		net_config: &FullNetworkConfiguration,
+		network_metrics: NotificationMetrics,
+		net_config: &FullNetworkConfiguration<B, <B as BlockT>::Hash, N>,
 		protocol_id: ProtocolId,
 		fork_id: &Option<String>,
 		block_announce_validator: Box<dyn BlockAnnounceValidator<B> + Send>,
@@ -342,8 +344,11 @@ where
 		block_downloader: Arc<dyn BlockDownloader<B>>,
 		state_request_protocol_name: ProtocolName,
 		warp_sync_protocol_name: Option<ProtocolName>,
-		peer_store_handle: PeerStoreHandle,
-	) -> Result<(Self, SyncingService<B>, NonDefaultSetConfig), ClientError> {
+		peer_store_handle: Arc<dyn PeerStoreProvider>,
+	) -> Result<(Self, SyncingService<B>, N::NotificationProtocolConfig), ClientError>
+	where
+		N: NetworkBackend<B, <B as BlockT>::Hash>,
+	{
 		let mode = net_config.network_config.sync_mode;
 		let max_parallel_downloads = net_config.network_config.max_parallel_downloads;
 		let max_blocks_per_request =
@@ -411,18 +416,22 @@ where
 			total.saturating_sub(net_config.network_config.default_peers_set_num_full) as usize
 		};
 
-		let (block_announce_config, notification_service) = Self::get_block_announce_proto_config(
-			protocol_id,
-			fork_id,
-			roles,
-			client.info().best_number,
-			client.info().best_hash,
-			client
-				.block_hash(Zero::zero())
-				.ok()
-				.flatten()
-				.expect("Genesis block exists; qed"),
-		);
+		let (block_announce_config, notification_service) =
+			Self::get_block_announce_proto_config::<N>(
+				protocol_id,
+				fork_id,
+				roles,
+				client.info().best_number,
+				client.info().best_hash,
+				client
+					.block_hash(Zero::zero())
+					.ok()
+					.flatten()
+					.expect("Genesis block exists; qed"),
+				&net_config.network_config.default_peers_set,
+				network_metrics,
+				Arc::clone(&peer_store_handle),
+			);
 
 		// Split warp sync params into warp sync config and a channel to retrieve target block
 		// header.
@@ -1385,14 +1394,17 @@ where
 	}
 
 	/// Get config for the block announcement protocol
-	fn get_block_announce_proto_config(
+	fn get_block_announce_proto_config<N: NetworkBackend<B, <B as BlockT>::Hash>>(
 		protocol_id: ProtocolId,
 		fork_id: &Option<String>,
 		roles: Roles,
 		best_number: NumberFor<B>,
 		best_hash: B::Hash,
 		genesis_hash: B::Hash,
-	) -> (NonDefaultSetConfig, Box<dyn NotificationService>) {
+		set_config: &SetConfig,
+		metrics: NotificationMetrics,
+		peer_store_handle: Arc<dyn PeerStoreProvider>,
+	) -> (N::NotificationProtocolConfig, Box<dyn NotificationService>) {
 		let block_announces_protocol = {
 			let genesis_hash = genesis_hash.as_ref();
 			if let Some(ref fork_id) = fork_id {
@@ -1406,7 +1418,7 @@ where
 			}
 		};
 
-		NonDefaultSetConfig::new(
+		N::notification_config(
 			block_announces_protocol.into(),
 			iter::once(format!("/{}/block-announces/1", protocol_id.as_ref()).into()).collect(),
 			MAX_BLOCK_ANNOUNCE_SIZE,
@@ -1416,14 +1428,9 @@ where
 				best_hash,
 				genesis_hash,
 			))),
-			// NOTE: `set_config` will be ignored by `protocol.rs` as the block announcement
-			// protocol is still hardcoded into the peerset.
-			SetConfig {
-				in_peers: 0,
-				out_peers: 0,
-				reserved_nodes: Vec::new(),
-				non_reserved_mode: NonReservedPeerMode::Deny,
-			},
+			set_config.clone(),
+			metrics,
+			peer_store_handle,
 		)
 	}
 
diff --git a/substrate/client/network/sync/src/justification_requests.rs b/substrate/client/network/sync/src/justification_requests.rs
index 799b6df5831a5783038de15fab2b2eff091d64d9..2b50c85602d78bcbb04ce5ecb23882f81e184d81 100644
--- a/substrate/client/network/sync/src/justification_requests.rs
+++ b/substrate/client/network/sync/src/justification_requests.rs
@@ -26,8 +26,8 @@ use crate::{
 	LOG_TARGET,
 };
 use fork_tree::ForkTree;
-use libp2p::PeerId;
 use log::{debug, trace, warn};
+use sc_network_types::PeerId;
 use sp_blockchain::Error as ClientError;
 use sp_runtime::traits::{Block as BlockT, NumberFor, Zero};
 use std::{
diff --git a/substrate/client/network/sync/src/mock.rs b/substrate/client/network/sync/src/mock.rs
index a4f5eb564c2cd341145d8623564f8cb04725bfbf..741fa7139583f0453d97810cba071fb37ee6e5ee 100644
--- a/substrate/client/network/sync/src/mock.rs
+++ b/substrate/client/network/sync/src/mock.rs
@@ -21,9 +21,9 @@
 use crate::block_relay_protocol::{BlockDownloader as BlockDownloaderT, BlockResponseError};
 
 use futures::channel::oneshot;
-use libp2p::PeerId;
 use sc_network::{ProtocolName, RequestFailure};
 use sc_network_common::sync::message::{BlockData, BlockRequest};
+use sc_network_types::PeerId;
 use sp_runtime::traits::Block as BlockT;
 
 mockall::mock! {
diff --git a/substrate/client/network/sync/src/pending_responses.rs b/substrate/client/network/sync/src/pending_responses.rs
index 602c69df7ff96b80c9f176dd7646d843f2c63937..7d2d598a2e061b5daa252dd12076cc0534bb44c8 100644
--- a/substrate/client/network/sync/src/pending_responses.rs
+++ b/substrate/client/network/sync/src/pending_responses.rs
@@ -26,9 +26,10 @@ use futures::{
 	stream::{BoxStream, FusedStream, Stream},
 	FutureExt, StreamExt,
 };
-use libp2p::PeerId;
 use log::error;
+
 use sc_network::{request_responses::RequestFailure, types::ProtocolName};
+use sc_network_types::PeerId;
 use sp_runtime::traits::Block as BlockT;
 use std::task::{Context, Poll, Waker};
 use tokio_stream::StreamMap;
diff --git a/substrate/client/network/sync/src/service/mock.rs b/substrate/client/network/sync/src/service/mock.rs
index 420de8cd5fdcf8184e76c29f6beb16492021150e..2e7e12af53d5373f4335361a108cd3980c7c3dcb 100644
--- a/substrate/client/network/sync/src/service/mock.rs
+++ b/substrate/client/network/sync/src/service/mock.rs
@@ -17,17 +17,16 @@
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 use futures::channel::oneshot;
-use libp2p::{Multiaddr, PeerId};
 
 use sc_consensus::{BlockImportError, BlockImportStatus};
 use sc_network::{
 	config::MultiaddrWithPeerId,
 	request_responses::{IfDisconnected, RequestFailure},
 	types::ProtocolName,
-	NetworkNotification, NetworkPeers, NetworkRequest, NetworkSyncForkRequest,
-	NotificationSenderError, NotificationSenderT, ReputationChange,
+	Multiaddr, NetworkPeers, NetworkRequest, NetworkSyncForkRequest, ReputationChange,
 };
 use sc_network_common::role::ObservedRole;
+use sc_network_types::PeerId;
 use sp_runtime::traits::{Block as BlockT, NumberFor};
 
 use std::collections::HashSet;
@@ -80,6 +79,7 @@ mockall::mock! {
 mockall::mock! {
 	pub Network {}
 
+	#[async_trait::async_trait]
 	impl NetworkPeers for Network {
 		fn set_authorized_peers(&self, peers: HashSet<PeerId>);
 		fn set_authorized_only(&self, reserved_only: bool);
@@ -108,6 +108,7 @@ mockall::mock! {
 		) -> Result<(), String>;
 		fn sync_num_connected(&self) -> usize;
 		fn peer_role(&self, peer_id: PeerId, handshake: Vec<u8>) -> Option<ObservedRole>;
+		async fn reserved_peers(&self) -> Result<Vec<sc_network_types::PeerId>, ()>;
 	}
 
 	#[async_trait::async_trait]
@@ -130,14 +131,4 @@ mockall::mock! {
 			connect: IfDisconnected,
 		);
 	}
-
-	impl NetworkNotification for Network {
-		fn write_notification(&self, target: PeerId, protocol: ProtocolName, message: Vec<u8>);
-		fn notification_sender(
-			&self,
-			target: PeerId,
-			protocol: ProtocolName,
-		) -> Result<Box<dyn NotificationSenderT>, NotificationSenderError>;
-		fn set_notification_handshake(&self, protocol: ProtocolName, handshake: Vec<u8>);
-	}
 }
diff --git a/substrate/client/network/sync/src/service/network.rs b/substrate/client/network/sync/src/service/network.rs
index 07f28519afb2bb988931d71a5df63acf92a360c0..e848b5f62c1b8ab9309bf71029862f0548432097 100644
--- a/substrate/client/network/sync/src/service/network.rs
+++ b/substrate/client/network/sync/src/service/network.rs
@@ -17,21 +17,21 @@
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 use futures::{channel::oneshot, StreamExt};
-use libp2p::PeerId;
+use sc_network_types::PeerId;
 
 use sc_network::{
 	request_responses::{IfDisconnected, RequestFailure},
 	types::ProtocolName,
-	NetworkNotification, NetworkPeers, NetworkRequest, ReputationChange,
+	NetworkPeers, NetworkRequest, ReputationChange,
 };
 use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender};
 
 use std::sync::Arc;
 
 /// Network-related services required by `sc-network-sync`
-pub trait Network: NetworkPeers + NetworkRequest + NetworkNotification {}
+pub trait Network: NetworkPeers + NetworkRequest {}
 
-impl<T> Network for T where T: NetworkPeers + NetworkRequest + NetworkNotification {}
+impl<T> Network for T where T: NetworkPeers + NetworkRequest {}
 
 /// Network service provider for `ChainSync`
 ///
@@ -57,12 +57,6 @@ pub enum ToServiceCommand {
 		oneshot::Sender<Result<(Vec<u8>, ProtocolName), RequestFailure>>,
 		IfDisconnected,
 	),
-
-	/// Call `NetworkNotification::write_notification()`
-	WriteNotification(PeerId, ProtocolName, Vec<u8>),
-
-	/// Call `NetworkNotification::set_notification_handshake()`
-	SetNotificationHandshake(ProtocolName, Vec<u8>),
 }
 
 /// Handle that is (temporarily) passed to `ChainSync` so it can
@@ -101,20 +95,6 @@ impl NetworkServiceHandle {
 			.tx
 			.unbounded_send(ToServiceCommand::StartRequest(who, protocol, request, tx, connect));
 	}
-
-	/// Send notification to peer
-	pub fn write_notification(&self, who: PeerId, protocol: ProtocolName, message: Vec<u8>) {
-		let _ = self
-			.tx
-			.unbounded_send(ToServiceCommand::WriteNotification(who, protocol, message));
-	}
-
-	/// Set handshake for the notification protocol.
-	pub fn set_notification_handshake(&self, protocol: ProtocolName, handshake: Vec<u8>) {
-		let _ = self
-			.tx
-			.unbounded_send(ToServiceCommand::SetNotificationHandshake(protocol, handshake));
-	}
 }
 
 impl NetworkServiceProvider {
@@ -135,10 +115,6 @@ impl NetworkServiceProvider {
 					service.report_peer(peer, reputation_change),
 				ToServiceCommand::StartRequest(peer, protocol, request, tx, connect) =>
 					service.start_request(peer, protocol, request, None, tx, connect),
-				ToServiceCommand::WriteNotification(peer, protocol, message) =>
-					service.write_notification(peer, protocol, message),
-				ToServiceCommand::SetNotificationHandshake(protocol, handshake) =>
-					service.set_notification_handshake(protocol, handshake),
 			}
 		}
 	}
diff --git a/substrate/client/network/sync/src/service/syncing_service.rs b/substrate/client/network/sync/src/service/syncing_service.rs
index 92d649d65dc3a5e5fac70c5a1cc16ed4c825d400..f4bc58afd4fda16a825acba25d64fa319460a25b 100644
--- a/substrate/client/network/sync/src/service/syncing_service.rs
+++ b/substrate/client/network/sync/src/service/syncing_service.rs
@@ -19,7 +19,7 @@
 use crate::types::{ExtendedPeerInfo, SyncEvent, SyncEventStream, SyncStatus, SyncStatusProvider};
 
 use futures::{channel::oneshot, Stream};
-use libp2p::PeerId;
+use sc_network_types::PeerId;
 
 use sc_consensus::{BlockImportError, BlockImportStatus, JustificationSyncLink, Link};
 use sc_network::{NetworkBlock, NetworkSyncForkRequest};
diff --git a/substrate/client/network/sync/src/state_request_handler.rs b/substrate/client/network/sync/src/state_request_handler.rs
index 6bd2389fb5d1b78b071fc8cad2b6d5a566369a05..0e713626ecaa3433d371ac762a62def0f0d6af03 100644
--- a/substrate/client/network/sync/src/state_request_handler.rs
+++ b/substrate/client/network/sync/src/state_request_handler.rs
@@ -24,15 +24,16 @@ use crate::{
 
 use codec::{Decode, Encode};
 use futures::{channel::oneshot, stream::StreamExt};
-use libp2p::PeerId;
 use log::{debug, trace};
 use prost::Message;
+use sc_network_types::PeerId;
 use schnellru::{ByLength, LruMap};
 
 use sc_client_api::{BlockBackend, ProofProvider};
 use sc_network::{
 	config::ProtocolId,
-	request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig},
+	request_responses::{IncomingRequest, OutgoingResponse},
+	NetworkBackend,
 };
 use sp_runtime::traits::Block as BlockT;
 
@@ -52,21 +53,26 @@ mod rep {
 	pub const SAME_REQUEST: Rep = Rep::new(i32::MIN, "Same state request multiple times");
 }
 
-/// Generates a [`ProtocolConfig`] for the state request protocol, refusing incoming requests.
-pub fn generate_protocol_config<Hash: AsRef<[u8]>>(
+/// Generates a `RequestResponseProtocolConfig` for the state request protocol, refusing incoming
+/// requests.
+pub fn generate_protocol_config<
+	Hash: AsRef<[u8]>,
+	B: BlockT,
+	N: NetworkBackend<B, <B as BlockT>::Hash>,
+>(
 	protocol_id: &ProtocolId,
 	genesis_hash: Hash,
 	fork_id: Option<&str>,
-) -> ProtocolConfig {
-	ProtocolConfig {
-		name: generate_protocol_name(genesis_hash, fork_id).into(),
-		fallback_names: std::iter::once(generate_legacy_protocol_name(protocol_id).into())
-			.collect(),
-		max_request_size: 1024 * 1024,
-		max_response_size: 16 * 1024 * 1024,
-		request_timeout: Duration::from_secs(40),
-		inbound_queue: None,
-	}
+	inbound_queue: async_channel::Sender<IncomingRequest>,
+) -> N::RequestResponseProtocolConfig {
+	N::request_response_config(
+		generate_protocol_name(genesis_hash, fork_id).into(),
+		std::iter::once(generate_legacy_protocol_name(protocol_id).into()).collect(),
+		1024 * 1024,
+		16 * 1024 * 1024,
+		Duration::from_secs(40),
+		Some(inbound_queue),
+	)
 }
 
 /// Generate the state protocol name from the genesis hash and fork id.
@@ -125,18 +131,18 @@ where
 	Client: BlockBackend<B> + ProofProvider<B> + Send + Sync + 'static,
 {
 	/// Create a new [`StateRequestHandler`].
-	pub fn new(
+	pub fn new<N: NetworkBackend<B, <B as BlockT>::Hash>>(
 		protocol_id: &ProtocolId,
 		fork_id: Option<&str>,
 		client: Arc<Client>,
 		num_peer_hint: usize,
-	) -> (Self, ProtocolConfig) {
+	) -> (Self, N::RequestResponseProtocolConfig) {
 		// Reserve enough request slots for one request per peer when we are at the maximum
 		// number of peers.
 		let capacity = std::cmp::max(num_peer_hint, 1);
 		let (tx, request_receiver) = async_channel::bounded(capacity);
 
-		let mut protocol_config = generate_protocol_config(
+		let protocol_config = generate_protocol_config::<_, B, N>(
 			protocol_id,
 			client
 				.block_hash(0u32.into())
@@ -144,8 +150,8 @@ where
 				.flatten()
 				.expect("Genesis block exists; qed"),
 			fork_id,
+			tx,
 		);
-		protocol_config.inbound_queue = Some(tx);
 
 		let capacity = ByLength::new(num_peer_hint.max(1) as u32 * 2);
 		let seen_requests = LruMap::new(capacity);
diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs
index 610fd7c65606b18d7eb9dc018636f1fac3ff35f5..b7afcbdb3a789c7813b8fcd60374e11a2fc4de74 100644
--- a/substrate/client/network/sync/src/strategy.rs
+++ b/substrate/client/network/sync/src/strategy.rs
@@ -29,7 +29,6 @@ use crate::{
 	LOG_TARGET,
 };
 use chain_sync::{ChainSync, ChainSyncAction, ChainSyncMode};
-use libp2p::PeerId;
 use log::{debug, error, info, warn};
 use prometheus_endpoint::Registry;
 use sc_client_api::{BlockBackend, ProofProvider};
@@ -38,6 +37,7 @@ use sc_network_common::sync::{
 	message::{BlockAnnounce, BlockData, BlockRequest},
 	SyncMode,
 };
+use sc_network_types::PeerId;
 use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata};
 use sp_consensus::BlockOrigin;
 use sp_runtime::{
diff --git a/substrate/client/network/sync/src/strategy/chain_sync.rs b/substrate/client/network/sync/src/strategy/chain_sync.rs
index da04bbbeccc889250f57818e9a14e4f25400093d..1a7a18f2ea796146e1ec010668868b0fd3709146 100644
--- a/substrate/client/network/sync/src/strategy/chain_sync.rs
+++ b/substrate/client/network/sync/src/strategy/chain_sync.rs
@@ -41,7 +41,6 @@ use crate::{
 };
 
 use codec::Encode;
-use libp2p::PeerId;
 use log::{debug, error, info, trace, warn};
 use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64};
 use sc_client_api::{BlockBackend, ProofProvider};
@@ -49,6 +48,7 @@ use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock};
 use sc_network_common::sync::message::{
 	BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, Direction, FromBlock,
 };
+use sc_network_types::PeerId;
 use sp_arithmetic::traits::Saturating;
 use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata};
 use sp_consensus::{BlockOrigin, BlockStatus};
diff --git a/substrate/client/network/sync/src/strategy/state.rs b/substrate/client/network/sync/src/strategy/state.rs
index 6d3b215f7f3210073c2ab61a2e57b0abf9652150..c21cb22e40bb10a3179ad159f4709742aaf777cc 100644
--- a/substrate/client/network/sync/src/strategy/state.rs
+++ b/substrate/client/network/sync/src/strategy/state.rs
@@ -24,11 +24,11 @@ use crate::{
 	types::{BadPeer, OpaqueStateRequest, OpaqueStateResponse, SyncState, SyncStatus},
 	LOG_TARGET,
 };
-use libp2p::PeerId;
 use log::{debug, error, trace};
 use sc_client_api::ProofProvider;
 use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock};
 use sc_network_common::sync::message::BlockAnnounce;
+use sc_network_types::PeerId;
 use sp_consensus::BlockOrigin;
 use sp_runtime::{
 	traits::{Block as BlockT, Header, NumberFor},
diff --git a/substrate/client/network/sync/src/strategy/warp.rs b/substrate/client/network/sync/src/strategy/warp.rs
index c7b79228efeef37c5579462bc3fd9ba1978b78cb..754f1f52bfd2109e57455c32ba2ff303320b9b33 100644
--- a/substrate/client/network/sync/src/strategy/warp.rs
+++ b/substrate/client/network/sync/src/strategy/warp.rs
@@ -27,11 +27,11 @@ use crate::{
 };
 use codec::{Decode, Encode};
 use futures::channel::oneshot;
-use libp2p::PeerId;
 use log::{debug, error, trace};
 use sc_network_common::sync::message::{
 	BlockAnnounce, BlockAttributes, BlockData, BlockRequest, Direction, FromBlock,
 };
+use sc_network_types::PeerId;
 use sp_blockchain::HeaderBackend;
 use sp_runtime::{
 	traits::{Block as BlockT, Header, NumberFor, Zero},
diff --git a/substrate/client/network/sync/src/types.rs b/substrate/client/network/sync/src/types.rs
index 4074b33eee1a90bdb2f5596ef82e76b254cf8751..e8b8c89003609e009f75b1e06bdaa87d900d24a2 100644
--- a/substrate/client/network/sync/src/types.rs
+++ b/substrate/client/network/sync/src/types.rs
@@ -21,10 +21,10 @@
 use futures::Stream;
 use sc_network_common::{role::Roles, types::ReputationChange};
 
-use libp2p::PeerId;
-
 use crate::strategy::{state_sync::StateSyncProgress, warp::WarpSyncProgress};
+
 use sc_network_common::sync::message::BlockRequest;
+use sc_network_types::PeerId;
 use sp_runtime::traits::{Block as BlockT, NumberFor};
 
 use std::{any::Any, fmt, fmt::Formatter, pin::Pin, sync::Arc};
diff --git a/substrate/client/network/sync/src/warp_request_handler.rs b/substrate/client/network/sync/src/warp_request_handler.rs
index eda67cac95ffae2f423ba062308192371c1a2f20..371b04ec9e4d50a009d1a62e372ebf471ed0e243 100644
--- a/substrate/client/network/sync/src/warp_request_handler.rs
+++ b/substrate/client/network/sync/src/warp_request_handler.rs
@@ -26,9 +26,8 @@ use crate::{
 };
 use sc_network::{
 	config::ProtocolId,
-	request_responses::{
-		IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig,
-	},
+	request_responses::{IncomingRequest, OutgoingResponse},
+	NetworkBackend,
 };
 use sp_runtime::traits::Block as BlockT;
 
@@ -39,22 +38,26 @@ const MAX_RESPONSE_SIZE: u64 = 16 * 1024 * 1024;
 /// Incoming warp requests bounded queue size.
 const MAX_WARP_REQUEST_QUEUE: usize = 20;
 
-/// Generates a [`RequestResponseConfig`] for the grandpa warp sync request protocol, refusing
+/// Generates a `RequestResponseProtocolConfig` for the grandpa warp sync request protocol, refusing
 /// incoming requests.
-pub fn generate_request_response_config<Hash: AsRef<[u8]>>(
+pub fn generate_request_response_config<
+	Hash: AsRef<[u8]>,
+	B: BlockT,
+	N: NetworkBackend<B, <B as BlockT>::Hash>,
+>(
 	protocol_id: ProtocolId,
 	genesis_hash: Hash,
 	fork_id: Option<&str>,
-) -> RequestResponseConfig {
-	RequestResponseConfig {
-		name: generate_protocol_name(genesis_hash, fork_id).into(),
-		fallback_names: std::iter::once(generate_legacy_protocol_name(protocol_id).into())
-			.collect(),
-		max_request_size: 32,
-		max_response_size: MAX_RESPONSE_SIZE,
-		request_timeout: Duration::from_secs(10),
-		inbound_queue: None,
-	}
+	inbound_queue: async_channel::Sender<IncomingRequest>,
+) -> N::RequestResponseProtocolConfig {
+	N::request_response_config(
+		generate_protocol_name(genesis_hash, fork_id).into(),
+		std::iter::once(generate_legacy_protocol_name(protocol_id).into()).collect(),
+		32,
+		MAX_RESPONSE_SIZE,
+		Duration::from_secs(10),
+		Some(inbound_queue),
+	)
 }
 
 /// Generate the grandpa warp sync protocol name from the genesis hash and fork id.
@@ -80,17 +83,20 @@ pub struct RequestHandler<TBlock: BlockT> {
 
 impl<TBlock: BlockT> RequestHandler<TBlock> {
 	/// Create a new [`RequestHandler`].
-	pub fn new<Hash: AsRef<[u8]>>(
+	pub fn new<Hash: AsRef<[u8]>, N: NetworkBackend<TBlock, <TBlock as BlockT>::Hash>>(
 		protocol_id: ProtocolId,
 		genesis_hash: Hash,
 		fork_id: Option<&str>,
 		backend: Arc<dyn WarpSyncProvider<TBlock>>,
-	) -> (Self, RequestResponseConfig) {
+	) -> (Self, N::RequestResponseProtocolConfig) {
 		let (tx, request_receiver) = async_channel::bounded(MAX_WARP_REQUEST_QUEUE);
 
-		let mut request_response_config =
-			generate_request_response_config(protocol_id, genesis_hash, fork_id);
-		request_response_config.inbound_queue = Some(tx);
+		let request_response_config = generate_request_response_config::<_, TBlock, N>(
+			protocol_id,
+			genesis_hash,
+			fork_id,
+			tx,
+		);
 
 		(Self { backend, request_receiver }, request_response_config)
 	}
diff --git a/substrate/client/network/test/Cargo.toml b/substrate/client/network/test/Cargo.toml
index 56fc89e1b2b95d9a5570ad4c833db01bb22f15c3..f70e4847f59f32ee7418b370cb60339d3ff12ef7 100644
--- a/substrate/client/network/test/Cargo.toml
+++ b/substrate/client/network/test/Cargo.toml
@@ -29,6 +29,7 @@ sc-client-api = { path = "../../api" }
 sc-consensus = { path = "../../consensus/common" }
 sc-network = { path = ".." }
 sc-network-common = { path = "../common" }
+sc-network-types = { path = "../types" }
 sc-utils = { path = "../../utils" }
 sc-network-light = { path = "../light" }
 sc-network-sync = { path = "../sync" }
diff --git a/substrate/client/network/test/src/block_import.rs b/substrate/client/network/test/src/block_import.rs
index 35795432b37f4d47904c50f6507a7e555ca798a4..690a579e0272bce6fc0123deb66e528635afb9a2 100644
--- a/substrate/client/network/test/src/block_import.rs
+++ b/substrate/client/network/test/src/block_import.rs
@@ -58,7 +58,7 @@ fn prepare_good_block() -> (TestClient, Hash, u64, PeerId, IncomingBlock<Block>)
 			body: Some(Vec::new()),
 			indexed_body: None,
 			justifications,
-			origin: Some(peer_id),
+			origin: Some(peer_id.into()),
 			allow_missing_state: false,
 			import_existing: false,
 			state: None,
@@ -81,7 +81,7 @@ fn import_single_good_block_works() {
 		&mut PassThroughVerifier::new(true),
 	)) {
 		Ok(BlockImportStatus::ImportedUnknown(ref num, ref aux, ref org))
-			if *num == number && *aux == expected_aux && *org == Some(peer_id) => {},
+			if *num == number && *aux == expected_aux && *org == Some(peer_id.into()) => {},
 		r @ _ => panic!("{:?}", r),
 	}
 }
@@ -110,7 +110,7 @@ fn import_single_good_block_without_header_fails() {
 		block,
 		&mut PassThroughVerifier::new(true),
 	)) {
-		Err(BlockImportError::IncompleteHeader(ref org)) if *org == Some(peer_id) => {},
+		Err(BlockImportError::IncompleteHeader(ref org)) if *org == Some(peer_id.into()) => {},
 		_ => panic!(),
 	}
 }
diff --git a/substrate/client/network/test/src/fuzz.rs b/substrate/client/network/test/src/fuzz.rs
index 2e288accd80bc4f43214db8d4d7fc74611b82a2e..69d08d47d26a9ea4368ffdfea456711345d15d8b 100644
--- a/substrate/client/network/test/src/fuzz.rs
+++ b/substrate/client/network/test/src/fuzz.rs
@@ -20,7 +20,6 @@
 //! and `PeerStore` to discover possible inconsistencies in peer management.
 
 use futures::prelude::*;
-use libp2p::PeerId;
 use rand::{
 	distributions::{Distribution, Uniform, WeightedIndex},
 	seq::IteratorRandom,
@@ -28,10 +27,13 @@ use rand::{
 use sc_network::{
 	peer_store::{PeerStore, PeerStoreProvider},
 	protocol_controller::{IncomingIndex, Message, ProtoSetConfig, ProtocolController, SetId},
-	ReputationChange,
+	PeerId, ReputationChange,
 };
 use sc_utils::mpsc::tracing_unbounded;
-use std::collections::{HashMap, HashSet};
+use std::{
+	collections::{HashMap, HashSet},
+	sync::Arc,
+};
 
 /// Peer events as observed by `Notifications` / fuzz test.
 #[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
@@ -141,7 +143,7 @@ async fn test_once() {
 		.collect();
 
 	let peer_store = PeerStore::new(bootnodes);
-	let mut peer_store_handle = peer_store.handle();
+	let peer_store_handle = peer_store.handle();
 
 	let (to_notifications, mut from_controller) =
 		tracing_unbounded("test_to_notifications", 10_000);
@@ -163,7 +165,7 @@ async fn test_once() {
 			reserved_only: Uniform::new_inclusive(0, 10).sample(&mut rng) == 0,
 		},
 		to_notifications,
-		Box::new(peer_store_handle.clone()),
+		Arc::new(peer_store_handle.clone()),
 	);
 
 	tokio::spawn(peer_store.run());
@@ -319,14 +321,15 @@ async fn test_once() {
 				1 => {
 					let new_id = PeerId::random();
 					known_nodes.insert(new_id, State::Disconnected);
-					peer_store_handle.add_known_peer(new_id);
+					peer_store_handle.add_known_peer(new_id.into());
 				},
 
 				// If we generate 2, adjust a random reputation.
 				2 =>
 					if let Some(id) = known_nodes.keys().choose(&mut rng) {
 						let val = Uniform::new_inclusive(i32::MIN, i32::MAX).sample(&mut rng);
-						peer_store_handle.report_peer(*id, ReputationChange::new(val, ""));
+						let peer: sc_network_types::PeerId = id.into();
+						peer_store_handle.report_peer(peer, ReputationChange::new(val, ""));
 					},
 
 				// If we generate 3, disconnect from a random node.
@@ -414,5 +417,6 @@ async fn test_once() {
 			}
 		}
 	})
-	.await;
+	.await
+	.unwrap();
 }
diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs
index aeed2985ace4821c4a5a7e54b3e73555e06fb11e..1dfe7d4454e9949f9ba9be08998363d77b5993a6 100644
--- a/substrate/client/network/test/src/lib.rs
+++ b/substrate/client/network/test/src/lib.rs
@@ -58,7 +58,7 @@ use sc_network::{
 	request_responses::ProtocolConfig as RequestResponseConfig,
 	types::ProtocolName,
 	Multiaddr, NetworkBlock, NetworkService, NetworkStateInfo, NetworkSyncForkRequest,
-	NetworkWorker, NotificationService,
+	NetworkWorker, NotificationMetrics, NotificationService,
 };
 use sc_network_common::role::Roles;
 use sc_network_light::light_client_requests::handler::LightClientRequestHandler;
@@ -248,7 +248,7 @@ where
 {
 	/// Get this peer ID.
 	pub fn id(&self) -> PeerId {
-		self.network.service().local_peer_id()
+		self.network.service().local_peer_id().into()
 	}
 
 	/// Returns true if we're major syncing.
@@ -295,7 +295,11 @@ where
 		hash: <Block as BlockT>::Hash,
 		number: NumberFor<Block>,
 	) {
-		self.sync_service.set_sync_fork_request(peers, hash, number);
+		self.sync_service.set_sync_fork_request(
+			peers.into_iter().map(From::from).collect(),
+			hash,
+			number,
+		);
 	}
 
 	/// Add blocks to the peer -- edit the block before adding
@@ -829,7 +833,7 @@ pub trait TestNetFactory: Default + Sized + Send {
 
 		let (chain_sync_network_provider, chain_sync_network_handle) =
 			NetworkServiceProvider::new();
-		let mut block_relay_params = BlockRequestHandler::new(
+		let mut block_relay_params = BlockRequestHandler::new::<NetworkWorker<_, _>>(
 			chain_sync_network_handle.clone(),
 			&protocol_id,
 			None,
@@ -841,18 +845,24 @@ pub trait TestNetFactory: Default + Sized + Send {
 		}));
 
 		let state_request_protocol_config = {
-			let (handler, protocol_config) =
-				StateRequestHandler::new(&protocol_id, None, client.clone(), 50);
+			let (handler, protocol_config) = StateRequestHandler::new::<NetworkWorker<_, _>>(
+				&protocol_id,
+				None,
+				client.clone(),
+				50,
+			);
 			self.spawn_task(handler.run().boxed());
 			protocol_config
 		};
 
-		let light_client_request_protocol_config = {
-			let (handler, protocol_config) =
-				LightClientRequestHandler::new(&protocol_id, None, client.clone());
-			self.spawn_task(handler.run().boxed());
-			protocol_config
-		};
+		let light_client_request_protocol_config =
+			{
+				let (handler, protocol_config) = LightClientRequestHandler::new::<
+					NetworkWorker<_, _>,
+				>(&protocol_id, None, client.clone());
+				self.spawn_task(handler.run().boxed());
+				protocol_config
+			};
 
 		let warp_sync = Arc::new(TestWarpSyncProvider(client.clone()));
 
@@ -866,35 +876,45 @@ pub trait TestNetFactory: Default + Sized + Send {
 		};
 
 		let warp_protocol_config = {
-			let (handler, protocol_config) = warp_request_handler::RequestHandler::new(
-				protocol_id.clone(),
-				client
-					.block_hash(0u32.into())
-					.ok()
-					.flatten()
-					.expect("Genesis block exists; qed"),
-				None,
-				warp_sync.clone(),
-			);
+			let (handler, protocol_config) =
+				warp_request_handler::RequestHandler::new::<_, NetworkWorker<_, _>>(
+					protocol_id.clone(),
+					client
+						.block_hash(0u32.into())
+						.ok()
+						.flatten()
+						.expect("Genesis block exists; qed"),
+					None,
+					warp_sync.clone(),
+				);
 			self.spawn_task(handler.run().boxed());
 			protocol_config
 		};
 
 		let peer_store = PeerStore::new(
-			network_config.boot_nodes.iter().map(|bootnode| bootnode.peer_id).collect(),
+			network_config
+				.boot_nodes
+				.iter()
+				.map(|bootnode| bootnode.peer_id.into())
+				.collect(),
 		);
-		let peer_store_handle = peer_store.handle();
+		let peer_store_handle = Arc::new(peer_store.handle());
 		self.spawn_task(peer_store.run().boxed());
 
 		let block_announce_validator = config
 			.block_announce_validator
 			.unwrap_or_else(|| Box::new(DefaultBlockAnnounceValidator));
+		let metrics = <NetworkWorker<_, _> as sc_network::NetworkBackend<
+			Block,
+			<Block as BlockT>::Hash,
+		>>::register_notification_metrics(None);
 
 		let (engine, sync_service, block_announce_config) =
 			sc_network_sync::engine::SyncingEngine::new(
 				Roles::from(if config.is_authority { &Role::Authority } else { &Role::Full }),
 				client.clone(),
 				None,
+				metrics,
 				&full_net_config,
 				protocol_id.clone(),
 				&fork_id,
@@ -935,12 +955,13 @@ pub trait TestNetFactory: Default + Sized + Send {
 				tokio::spawn(f);
 			}),
 			network_config: full_net_config,
-			peer_store: peer_store_handle,
 			genesis_hash,
 			protocol_id,
 			fork_id,
 			metrics_registry: None,
 			block_announce_config,
+			bitswap_config: None,
+			notification_metrics: NotificationMetrics::new(None),
 		})
 		.unwrap();
 
@@ -961,8 +982,10 @@ pub trait TestNetFactory: Default + Sized + Send {
 
 		self.mut_peers(move |peers| {
 			for peer in peers.iter_mut() {
-				peer.network
-					.add_known_address(network.service().local_peer_id(), listen_addr.clone());
+				peer.network.add_known_address(
+					network.service().local_peer_id().into(),
+					listen_addr.clone(),
+				);
 			}
 
 			let imported_blocks_stream = Box::pin(client.import_notification_stream().fuse());
diff --git a/substrate/client/network/test/src/service.rs b/substrate/client/network/test/src/service.rs
index 800c0d4369c2cf3871e206af0321050375ba2ad9..150c1db7560e6d589e21e9aecb2add06fc57a47c 100644
--- a/substrate/client/network/test/src/service.rs
+++ b/substrate/client/network/test/src/service.rs
@@ -17,16 +17,15 @@
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 use futures::prelude::*;
-use libp2p::{Multiaddr, PeerId};
 
 use sc_consensus::{ImportQueue, Link};
 use sc_network::{
 	config::{self, FullNetworkConfiguration, MultiaddrWithPeerId, ProtocolId, TransportConfig},
 	event::Event,
-	peer_store::PeerStore,
+	peer_store::{PeerStore, PeerStoreProvider},
 	service::traits::{NotificationEvent, ValidationResult},
-	NetworkEventStream, NetworkPeers, NetworkService, NetworkStateInfo, NetworkWorker,
-	NotificationService,
+	Multiaddr, NetworkEventStream, NetworkPeers, NetworkService, NetworkStateInfo, NetworkWorker,
+	NotificationMetrics, NotificationService, PeerId,
 };
 use sc_network_common::role::Roles;
 use sc_network_light::light_client_requests::handler::LightClientRequestHandler;
@@ -159,41 +158,54 @@ impl TestNetworkBuilder {
 
 		let (chain_sync_network_provider, chain_sync_network_handle) =
 			self.chain_sync_network.unwrap_or(NetworkServiceProvider::new());
-		let mut block_relay_params = BlockRequestHandler::new(
-			chain_sync_network_handle.clone(),
-			&protocol_id,
-			None,
-			client.clone(),
-			50,
-		);
+		let mut block_relay_params =
+			BlockRequestHandler::new::<
+				NetworkWorker<
+					substrate_test_runtime_client::runtime::Block,
+					substrate_test_runtime_client::runtime::Hash,
+				>,
+			>(chain_sync_network_handle.clone(), &protocol_id, None, client.clone(), 50);
 		tokio::spawn(Box::pin(async move {
 			block_relay_params.server.run().await;
 		}));
 
 		let state_request_protocol_config = {
-			let (handler, protocol_config) =
-				StateRequestHandler::new(&protocol_id, None, client.clone(), 50);
+			let (handler, protocol_config) = StateRequestHandler::new::<
+				NetworkWorker<
+					substrate_test_runtime_client::runtime::Block,
+					substrate_test_runtime_client::runtime::Hash,
+				>,
+			>(&protocol_id, None, client.clone(), 50);
 			tokio::spawn(handler.run().boxed());
 			protocol_config
 		};
 
 		let light_client_request_protocol_config = {
-			let (handler, protocol_config) =
-				LightClientRequestHandler::new(&protocol_id, None, client.clone());
+			let (handler, protocol_config) = LightClientRequestHandler::new::<
+				NetworkWorker<
+					substrate_test_runtime_client::runtime::Block,
+					substrate_test_runtime_client::runtime::Hash,
+				>,
+			>(&protocol_id, None, client.clone());
 			tokio::spawn(handler.run().boxed());
 			protocol_config
 		};
 
 		let peer_store = PeerStore::new(
-			network_config.boot_nodes.iter().map(|bootnode| bootnode.peer_id).collect(),
+			network_config
+				.boot_nodes
+				.iter()
+				.map(|bootnode| bootnode.peer_id.into())
+				.collect(),
 		);
-		let peer_store_handle = peer_store.handle();
+		let peer_store_handle: Arc<dyn PeerStoreProvider> = Arc::new(peer_store.handle());
 		tokio::spawn(peer_store.run().boxed());
 
 		let (engine, chain_sync_service, block_announce_config) = SyncingEngine::new(
 			Roles::from(&config::Role::Full),
 			client.clone(),
 			None,
+			NotificationMetrics::new(None),
 			&full_net_config,
 			protocol_id.clone(),
 			&None,
@@ -204,7 +216,7 @@ impl TestNetworkBuilder {
 			block_relay_params.downloader,
 			state_request_protocol_config.name.clone(),
 			None,
-			peer_store_handle.clone(),
+			Arc::clone(&peer_store_handle),
 		)
 		.unwrap();
 		let mut link = self.link.unwrap_or(Box::new(chain_sync_service.clone()));
@@ -239,7 +251,11 @@ impl TestNetworkBuilder {
 		let worker = NetworkWorker::<
 			substrate_test_runtime_client::runtime::Block,
 			substrate_test_runtime_client::runtime::Hash,
-		>::new(config::Params::<substrate_test_runtime_client::runtime::Block> {
+		>::new(config::Params::<
+			substrate_test_runtime_client::runtime::Block,
+			substrate_test_runtime_client::runtime::Hash,
+			NetworkWorker<_, _>,
+		> {
 			block_announce_config,
 			role: config::Role::Full,
 			executor: Box::new(|f| {
@@ -247,10 +263,11 @@ impl TestNetworkBuilder {
 			}),
 			genesis_hash,
 			network_config: full_net_config,
-			peer_store: peer_store_handle,
 			protocol_id,
 			fork_id,
 			metrics_registry: None,
+			bitswap_config: None,
+			notification_metrics: NotificationMetrics::new(None),
 		})
 		.unwrap();
 
@@ -670,7 +687,7 @@ async fn ensure_boot_node_addresses_consistent_with_transport_memory() {
 	let listen_addr = config::build_multiaddr![Memory(rand::random::<u64>())];
 	let boot_node = MultiaddrWithPeerId {
 		multiaddr: config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)],
-		peer_id: PeerId::random(),
+		peer_id: PeerId::random().into(),
 	};
 
 	let _ = TestNetworkBuilder::new()
@@ -696,7 +713,7 @@ async fn ensure_boot_node_addresses_consistent_with_transport_not_memory() {
 	let listen_addr = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)];
 	let boot_node = MultiaddrWithPeerId {
 		multiaddr: config::build_multiaddr![Memory(rand::random::<u64>())],
-		peer_id: PeerId::random(),
+		peer_id: PeerId::random().into(),
 	};
 
 	let _ = TestNetworkBuilder::new()
@@ -721,7 +738,7 @@ async fn ensure_reserved_node_addresses_consistent_with_transport_memory() {
 	let listen_addr = config::build_multiaddr![Memory(rand::random::<u64>())];
 	let reserved_node = MultiaddrWithPeerId {
 		multiaddr: config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)],
-		peer_id: PeerId::random(),
+		peer_id: PeerId::random().into(),
 	};
 
 	let _ = TestNetworkBuilder::new()
@@ -750,7 +767,7 @@ async fn ensure_reserved_node_addresses_consistent_with_transport_not_memory() {
 	let listen_addr = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)];
 	let reserved_node = MultiaddrWithPeerId {
 		multiaddr: config::build_multiaddr![Memory(rand::random::<u64>())],
-		peer_id: PeerId::random(),
+		peer_id: PeerId::random().into(),
 	};
 
 	let _ = TestNetworkBuilder::new()
diff --git a/substrate/client/network/transactions/Cargo.toml b/substrate/client/network/transactions/Cargo.toml
index 0ab7386ef21ffd7fb77a440b751330ac84bce75f..d74636d60a7da57edff1a14e003b623b252c94bc 100644
--- a/substrate/client/network/transactions/Cargo.toml
+++ b/substrate/client/network/transactions/Cargo.toml
@@ -25,6 +25,7 @@ prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../..
 sc-network = { path = ".." }
 sc-network-common = { path = "../common" }
 sc-network-sync = { path = "../sync" }
+sc-network-types = { path = "../types" }
 sc-utils = { path = "../../utils" }
 sp-runtime = { path = "../../../primitives/runtime" }
 sp-consensus = { path = "../../../primitives/consensus/common" }
diff --git a/substrate/client/network/transactions/src/lib.rs b/substrate/client/network/transactions/src/lib.rs
index b2299667448ca88ecda779a66b7b773605442357..3384aab5149dcb5b6127d75e744f010db950b5d1 100644
--- a/substrate/client/network/transactions/src/lib.rs
+++ b/substrate/client/network/transactions/src/lib.rs
@@ -30,20 +30,24 @@ use crate::config::*;
 
 use codec::{Decode, Encode};
 use futures::{prelude::*, stream::FuturesUnordered};
-use libp2p::{multiaddr, PeerId};
 use log::{debug, trace, warn};
 
 use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64};
 use sc_network::{
-	config::{NonDefaultSetConfig, NonReservedPeerMode, ProtocolId, SetConfig},
-	error,
-	service::traits::{NotificationEvent, NotificationService, ValidationResult},
+	config::{NonReservedPeerMode, ProtocolId, SetConfig},
+	error, multiaddr,
+	peer_store::PeerStoreProvider,
+	service::{
+		traits::{NotificationEvent, NotificationService, ValidationResult},
+		NotificationMetrics,
+	},
 	types::ProtocolName,
 	utils::{interval, LruHashSet},
-	NetworkEventStream, NetworkNotification, NetworkPeers,
+	NetworkBackend, NetworkEventStream, NetworkPeers,
 };
 use sc_network_common::{role::ObservedRole, ExHashT};
 use sc_network_sync::{SyncEvent, SyncEventStream};
+use sc_network_types::PeerId;
 use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender};
 use sp_runtime::traits::Block as BlockT;
 
@@ -61,6 +65,9 @@ pub mod config;
 /// A set of transactions.
 pub type Transactions<E> = Vec<E>;
 
+/// Logging target for the file.
+const LOG_TARGET: &str = "sync";
+
 mod rep {
 	use sc_network::ReputationChange as Rep;
 	/// Reputation change when a peer sends us any transaction.
@@ -124,11 +131,17 @@ pub struct TransactionsHandlerPrototype {
 
 impl TransactionsHandlerPrototype {
 	/// Create a new instance.
-	pub fn new<Hash: AsRef<[u8]>>(
+	pub fn new<
+		Hash: AsRef<[u8]>,
+		Block: BlockT,
+		Net: NetworkBackend<Block, <Block as BlockT>::Hash>,
+	>(
 		protocol_id: ProtocolId,
 		genesis_hash: Hash,
 		fork_id: Option<&str>,
-	) -> (Self, NonDefaultSetConfig) {
+		metrics: NotificationMetrics,
+		peer_store_handle: Arc<dyn PeerStoreProvider>,
+	) -> (Self, Net::NotificationProtocolConfig) {
 		let genesis_hash = genesis_hash.as_ref();
 		let protocol_name: ProtocolName = if let Some(fork_id) = fork_id {
 			format!("/{}/{}/transactions/1", array_bytes::bytes2hex("", genesis_hash), fork_id)
@@ -136,7 +149,7 @@ impl TransactionsHandlerPrototype {
 			format!("/{}/transactions/1", array_bytes::bytes2hex("", genesis_hash))
 		}
 		.into();
-		let (config, notification_service) = NonDefaultSetConfig::new(
+		let (config, notification_service) = Net::notification_config(
 			protocol_name.clone(),
 			vec![format!("/{}/transactions/1", protocol_id.as_ref()).into()],
 			MAX_TRANSACTIONS_SIZE,
@@ -147,6 +160,8 @@ impl TransactionsHandlerPrototype {
 				reserved_nodes: Vec::new(),
 				non_reserved_mode: NonReservedPeerMode::Deny,
 			},
+			metrics,
+			peer_store_handle,
 		);
 
 		(Self { protocol_name, notification_service }, config)
@@ -160,7 +175,7 @@ impl TransactionsHandlerPrototype {
 	pub fn build<
 		B: BlockT + 'static,
 		H: ExHashT,
-		N: NetworkPeers + NetworkEventStream + NetworkNotification,
+		N: NetworkPeers + NetworkEventStream,
 		S: SyncEventStream + sp_consensus::SyncOracle,
 	>(
 		self,
@@ -231,7 +246,7 @@ enum ToHandler<H: ExHashT> {
 pub struct TransactionsHandler<
 	B: BlockT + 'static,
 	H: ExHashT,
-	N: NetworkPeers + NetworkEventStream + NetworkNotification,
+	N: NetworkPeers + NetworkEventStream,
 	S: SyncEventStream + sp_consensus::SyncOracle,
 > {
 	protocol_name: ProtocolName,
@@ -272,7 +287,7 @@ impl<B, H, N, S> TransactionsHandler<B, H, N, S>
 where
 	B: BlockT + 'static,
 	H: ExHashT,
-	N: NetworkPeers + NetworkEventStream + NetworkNotification,
+	N: NetworkPeers + NetworkEventStream,
 	S: SyncEventStream + sp_consensus::SyncOracle,
 {
 	/// Turns the [`TransactionsHandler`] into a future that should run forever and not be
@@ -369,7 +384,7 @@ where
 					iter::once(addr).collect(),
 				);
 				if let Err(err) = result {
-					log::error!(target: "sync", "Add reserved peer failed: {}", err);
+					log::error!(target: LOG_TARGET, "Add reserved peer failed: {}", err);
 				}
 			},
 			SyncEvent::PeerDisconnected(remote) => {
@@ -378,7 +393,7 @@ where
 					iter::once(remote).collect(),
 				);
 				if let Err(err) = result {
-					log::error!(target: "sync", "Remove reserved peer failed: {}", err);
+					log::error!(target: LOG_TARGET, "Remove reserved peer failed: {}", err);
 				}
 			},
 		}
@@ -388,16 +403,16 @@ where
 	fn on_transactions(&mut self, who: PeerId, transactions: Transactions<B::Extrinsic>) {
 		// Accept transactions only when node is not major syncing
 		if self.sync.is_major_syncing() {
-			trace!(target: "sync", "{} Ignoring transactions while major syncing", who);
+			trace!(target: LOG_TARGET, "{} Ignoring transactions while major syncing", who);
 			return
 		}
 
-		trace!(target: "sync", "Received {} transactions from {}", transactions.len(), who);
+		trace!(target: LOG_TARGET, "Received {} transactions from {}", transactions.len(), who);
 		if let Some(ref mut peer) = self.peers.get_mut(&who) {
 			for t in transactions {
 				if self.pending_transactions.len() > MAX_PENDING_TRANSACTIONS {
 					debug!(
-						target: "sync",
+						target: LOG_TARGET,
 						"Ignoring any further transactions that exceed `MAX_PENDING_TRANSACTIONS`({}) limit",
 						MAX_PENDING_TRANSACTIONS,
 					);
@@ -442,7 +457,7 @@ where
 			return
 		}
 
-		debug!(target: "sync", "Propagating transaction [{:?}]", hash);
+		debug!(target: LOG_TARGET, "Propagating transaction [{:?}]", hash);
 		if let Some(transaction) = self.transaction_pool.transaction(hash) {
 			let propagated_to = self.do_propagate_transactions(&[(hash.clone(), transaction)]);
 			self.transaction_pool.on_broadcasted(propagated_to);
@@ -506,7 +521,7 @@ where
 			return
 		}
 
-		debug!(target: "sync", "Propagating transactions");
+		debug!(target: LOG_TARGET, "Propagating transactions");
 		let transactions = self.transaction_pool.transactions();
 		let propagated_to = self.do_propagate_transactions(&transactions);
 		self.transaction_pool.on_broadcasted(propagated_to);
diff --git a/substrate/client/network/types/Cargo.toml b/substrate/client/network/types/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..d8f03939ab96c303bd4ae862cd76df2ff77e0c91
--- /dev/null
+++ b/substrate/client/network/types/Cargo.toml
@@ -0,0 +1,19 @@
+[package]
+description = "Substrate network types"
+name = "sc-network-types"
+version = "0.10.0-dev"
+license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+authors.workspace = true
+edition.workspace = true
+homepage = "https://substrate.io"
+repository.workspace = true
+documentation = "https://docs.rs/sc-network-types"
+
+[dependencies]
+bs58 = "0.4.0"
+libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] }
+litep2p = { git = "https://github.com/paritytech/litep2p", branch = "master" }
+multiaddr = "0.17.0"
+multihash = { version = "0.17.0", default-features = false, features = ["identity", "multihash-impl", "sha2", "std"] }
+rand = "0.8.5"
+thiserror = "1.0.48"
diff --git a/substrate/client/network/types/src/lib.rs b/substrate/client/network/types/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..9a126c48c7eab66ab39dd55a081a3a938aa16b92
--- /dev/null
+++ b/substrate/client/network/types/src/lib.rs
@@ -0,0 +1,18 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+mod peer_id;
+
+pub use peer_id::PeerId;
diff --git a/substrate/client/network/types/src/peer_id.rs b/substrate/client/network/types/src/peer_id.rs
new file mode 100644
index 0000000000000000000000000000000000000000..44d4fa99252be17325ea8c2010da0518943815c9
--- /dev/null
+++ b/substrate/client/network/types/src/peer_id.rs
@@ -0,0 +1,248 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use multiaddr::{Multiaddr, Protocol};
+use multihash::{Code, Error, Multihash};
+use rand::Rng;
+
+use std::{fmt, hash::Hash, str::FromStr};
+
+/// Public keys with byte-lengths of at most `MAX_INLINE_KEY_LENGTH` will be
+/// automatically used as the peer id using an identity multihash.
+const MAX_INLINE_KEY_LENGTH: usize = 42;
+
+/// Identifier of a peer of the network.
+///
+/// The data is a CIDv0 compatible multihash of the protobuf encoded public key of the peer
+/// as specified in [specs/peer-ids](https://github.com/libp2p/specs/blob/master/peer-ids/peer-ids.md).
+#[derive(Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct PeerId {
+	multihash: Multihash,
+}
+
+impl fmt::Debug for PeerId {
+	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+		f.debug_tuple("PeerId").field(&self.to_base58()).finish()
+	}
+}
+
+impl fmt::Display for PeerId {
+	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+		self.to_base58().fmt(f)
+	}
+}
+
+impl PeerId {
+	/// Generate random peer ID.
+	pub fn random() -> PeerId {
+		let peer = rand::thread_rng().gen::<[u8; 32]>();
+		PeerId {
+			multihash: Multihash::wrap(0x0, &peer).expect("The digest size is never too large"),
+		}
+	}
+
+	/// Try to extract `PeerId` from `Multiaddr`.
+	pub fn try_from_multiaddr(address: &Multiaddr) -> Option<PeerId> {
+		match address.iter().find(|protocol| std::matches!(protocol, Protocol::P2p(_))) {
+			Some(Protocol::P2p(multihash)) => Some(Self { multihash }),
+			_ => None,
+		}
+	}
+
+	/// Tries to turn a `Multihash` into a `PeerId`.
+	///
+	/// If the multihash does not use a valid hashing algorithm for peer IDs,
+	/// or the hash value does not satisfy the constraints for a hashed
+	/// peer ID, it is returned as an `Err`.
+	pub fn from_multihash(multihash: Multihash) -> Result<PeerId, Multihash> {
+		match Code::try_from(multihash.code()) {
+			Ok(Code::Sha2_256) => Ok(PeerId { multihash }),
+			Ok(Code::Identity) if multihash.digest().len() <= MAX_INLINE_KEY_LENGTH =>
+				Ok(PeerId { multihash }),
+			_ => Err(multihash),
+		}
+	}
+
+	/// Parses a `PeerId` from bytes.
+	pub fn from_bytes(data: &[u8]) -> Result<PeerId, Error> {
+		PeerId::from_multihash(Multihash::from_bytes(data)?)
+			.map_err(|mh| Error::UnsupportedCode(mh.code()))
+	}
+
+	/// Returns a raw bytes representation of this `PeerId`.
+	pub fn to_bytes(&self) -> Vec<u8> {
+		self.multihash.to_bytes()
+	}
+
+	/// Returns a base-58 encoded string of this `PeerId`.
+	pub fn to_base58(&self) -> String {
+		bs58::encode(self.to_bytes()).into_string()
+	}
+
+	/// Extract the ed25519 public key bytes from this `PeerId`, if it encodes one inline.
+	pub fn into_ed25519(&self) -> Option<[u8; 32]> {
+		let hash = &self.multihash;
+		// https://www.ietf.org/id/draft-multiformats-multihash-07.html#name-the-multihash-identifier-re
+		if hash.code() != 0 {
+			// Hash is not identity
+			return None
+		}
+
+		let public = libp2p_identity::PublicKey::try_decode_protobuf(hash.digest()).ok()?;
+		public.try_into_ed25519().ok().map(|public| public.to_bytes())
+	}
+
+	/// Get `PeerId` from ed25519 public key bytes.
+	pub fn from_ed25519(bytes: &[u8; 32]) -> Option<PeerId> {
+		let public = libp2p_identity::ed25519::PublicKey::try_from_bytes(bytes).ok()?;
+		let public: libp2p_identity::PublicKey = public.into();
+		let peer_id: libp2p_identity::PeerId = public.into();
+
+		Some(peer_id.into())
+	}
+}
+
+impl AsRef<Multihash> for PeerId {
+	fn as_ref(&self) -> &Multihash {
+		&self.multihash
+	}
+}
+
+impl From<PeerId> for Multihash {
+	fn from(peer_id: PeerId) -> Self {
+		peer_id.multihash
+	}
+}
+
+impl From<libp2p_identity::PeerId> for PeerId {
+	fn from(peer_id: libp2p_identity::PeerId) -> Self {
+		PeerId { multihash: Multihash::from_bytes(&peer_id.to_bytes()).expect("to succeed") }
+	}
+}
+
+impl From<PeerId> for libp2p_identity::PeerId {
+	fn from(peer_id: PeerId) -> Self {
+		libp2p_identity::PeerId::from_bytes(&peer_id.to_bytes()).expect("to succeed")
+	}
+}
+
+impl From<&libp2p_identity::PeerId> for PeerId {
+	fn from(peer_id: &libp2p_identity::PeerId) -> Self {
+		PeerId { multihash: Multihash::from_bytes(&peer_id.to_bytes()).expect("to succeed") }
+	}
+}
+
+impl From<&PeerId> for libp2p_identity::PeerId {
+	fn from(peer_id: &PeerId) -> Self {
+		libp2p_identity::PeerId::from_bytes(&peer_id.to_bytes()).expect("to succeed")
+	}
+}
+
+impl From<litep2p::PeerId> for PeerId {
+	fn from(peer_id: litep2p::PeerId) -> Self {
+		PeerId { multihash: Multihash::from_bytes(&peer_id.to_bytes()).expect("to succeed") }
+	}
+}
+
+impl From<PeerId> for litep2p::PeerId {
+	fn from(peer_id: PeerId) -> Self {
+		litep2p::PeerId::from_bytes(&peer_id.to_bytes()).expect("to succeed")
+	}
+}
+
+impl From<&litep2p::PeerId> for PeerId {
+	fn from(peer_id: &litep2p::PeerId) -> Self {
+		PeerId { multihash: Multihash::from_bytes(&peer_id.to_bytes()).expect("to succeed") }
+	}
+}
+
+impl From<&PeerId> for litep2p::PeerId {
+	fn from(peer_id: &PeerId) -> Self {
+		litep2p::PeerId::from_bytes(&peer_id.to_bytes()).expect("to succeed")
+	}
+}
+
+/// Error when parsing a [`PeerId`] from string or bytes.
+#[derive(Debug, thiserror::Error)]
+pub enum ParseError {
+	#[error("base-58 decode error: {0}")]
+	B58(#[from] bs58::decode::Error),
+	#[error("unsupported multihash code '{0}'")]
+	UnsupportedCode(u64),
+	#[error("invalid multihash")]
+	InvalidMultihash(#[from] multihash::Error),
+}
+
+impl FromStr for PeerId {
+	type Err = ParseError;
+
+	#[inline]
+	fn from_str(s: &str) -> Result<Self, Self::Err> {
+		let bytes = bs58::decode(s).into_vec()?;
+		let peer_id = PeerId::from_bytes(&bytes)?;
+
+		Ok(peer_id)
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+
+	#[test]
+	fn extract_peer_id_from_multiaddr() {
+		{
+			let peer = PeerId::random();
+			let address = "/ip4/198.51.100.19/tcp/30333"
+				.parse::<Multiaddr>()
+				.unwrap()
+				.with(Protocol::P2p(peer.into()));
+
+			assert_eq!(PeerId::try_from_multiaddr(&address), Some(peer));
+		}
+
+		{
+			let peer = PeerId::random();
+			assert_eq!(
+				PeerId::try_from_multiaddr(&Multiaddr::empty().with(Protocol::P2p(peer.into()))),
+				Some(peer)
+			);
+		}
+
+		{
+			assert!(PeerId::try_from_multiaddr(
+				&"/ip4/198.51.100.19/tcp/30333".parse::<Multiaddr>().unwrap()
+			)
+			.is_none());
+		}
+	}
+
+	#[test]
+	fn from_ed25519() {
+		let keypair = litep2p::crypto::ed25519::Keypair::generate();
+		let original_peer_id = litep2p::PeerId::from_public_key(
+			&litep2p::crypto::PublicKey::Ed25519(keypair.public()),
+		);
+
+		let peer_id: PeerId = original_peer_id.into();
+		assert_eq!(original_peer_id.to_bytes(), peer_id.to_bytes());
+
+		let key = peer_id.into_ed25519().unwrap();
+		assert_eq!(PeerId::from_ed25519(&key).unwrap(), original_peer_id.into());
+	}
+}
diff --git a/substrate/client/offchain/Cargo.toml b/substrate/client/offchain/Cargo.toml
index a3a3cfaa8fca1afb74a21c00bdf71abf11ff4cc7..b834241fe5a7e1b92b1683a92689b6d3de2a2995 100644
--- a/substrate/client/offchain/Cargo.toml
+++ b/substrate/client/offchain/Cargo.toml
@@ -34,6 +34,7 @@ tracing = "0.1.29"
 sc-client-api = { path = "../api" }
 sc-network = { path = "../network" }
 sc-network-common = { path = "../network/common" }
+sc-network-types = { path = "../network/types" }
 sc-transaction-pool-api = { path = "../transaction-pool/api" }
 sc-utils = { path = "../utils" }
 sp-api = { path = "../../primitives/api" }
@@ -45,6 +46,7 @@ sp-externalities = { path = "../../primitives/externalities" }
 log = { workspace = true, default-features = true }
 
 [dev-dependencies]
+async-trait = "0.1"
 lazy_static = "1.4.0"
 tokio = "1.37"
 sc-block-builder = { path = "../block-builder" }
diff --git a/substrate/client/offchain/src/api.rs b/substrate/client/offchain/src/api.rs
index 65e2f3ba64dbec30f5d5a944d5cb4f357c76e756..19ccdbcf498f4a7a53cc77cf1936da584238076d 100644
--- a/substrate/client/offchain/src/api.rs
+++ b/substrate/client/offchain/src/api.rs
@@ -22,7 +22,8 @@ use crate::NetworkProvider;
 use codec::{Decode, Encode};
 use futures::Future;
 pub use http::SharedClient;
-use libp2p::{Multiaddr, PeerId};
+use sc_network::Multiaddr;
+use sc_network_types::PeerId;
 use sp_core::{
 	offchain::{
 		self, HttpError, HttpRequestId, HttpRequestStatus, OpaqueMultiaddr, OpaqueNetworkState,
@@ -229,6 +230,7 @@ mod tests {
 
 	pub(super) struct TestNetwork();
 
+	#[async_trait::async_trait]
 	impl NetworkPeers for TestNetwork {
 		fn set_authorized_peers(&self, _peers: HashSet<PeerId>) {
 			unimplemented!();
@@ -301,6 +303,10 @@ mod tests {
 		fn peer_role(&self, _peer_id: PeerId, _handshake: Vec<u8>) -> Option<ObservedRole> {
 			None
 		}
+
+		async fn reserved_peers(&self) -> Result<Vec<PeerId>, ()> {
+			unimplemented!();
+		}
 	}
 
 	impl NetworkStateInfo for TestNetwork {
diff --git a/substrate/client/offchain/src/lib.rs b/substrate/client/offchain/src/lib.rs
index eb3436432f342f4de97a5325c7a14ea4ce167581..48d3b8f1393a430b8f07031775e4d83b063400b2 100644
--- a/substrate/client/offchain/src/lib.rs
+++ b/substrate/client/offchain/src/lib.rs
@@ -327,12 +327,12 @@ where
 mod tests {
 	use super::*;
 	use futures::executor::block_on;
-	use libp2p::{Multiaddr, PeerId};
 	use sc_block_builder::BlockBuilderBuilder;
 	use sc_client_api::Backend as _;
 	use sc_network::{
-		config::MultiaddrWithPeerId, types::ProtocolName, ObservedRole, ReputationChange,
+		config::MultiaddrWithPeerId, types::ProtocolName, Multiaddr, ObservedRole, ReputationChange,
 	};
+	use sc_network_types::PeerId;
 	use sc_transaction_pool::BasicPool;
 	use sc_transaction_pool_api::{InPoolTransaction, TransactionPool};
 	use sp_consensus::BlockOrigin;
@@ -361,6 +361,7 @@ mod tests {
 		}
 	}
 
+	#[async_trait::async_trait]
 	impl NetworkPeers for TestNetwork {
 		fn set_authorized_peers(&self, _peers: HashSet<PeerId>) {
 			unimplemented!();
@@ -433,6 +434,10 @@ mod tests {
 		fn peer_role(&self, _peer_id: PeerId, _handshake: Vec<u8>) -> Option<ObservedRole> {
 			None
 		}
+
+		async fn reserved_peers(&self) -> Result<Vec<PeerId>, ()> {
+			unimplemented!();
+		}
 	}
 
 	#[test]
diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml
index b81f2e2f55ab3ffd69d9e48e2a8a8f13440a6f3f..b93196e86f1d3674b2894a21783e269831a27656 100644
--- a/substrate/client/service/Cargo.toml
+++ b/substrate/client/service/Cargo.toml
@@ -54,10 +54,10 @@ sp-consensus = { path = "../../primitives/consensus/common" }
 sc-consensus = { path = "../consensus/common" }
 sp-storage = { path = "../../primitives/storage" }
 sc-network = { path = "../network" }
-sc-network-bitswap = { path = "../network/bitswap" }
 sc-network-common = { path = "../network/common" }
 sc-network-light = { path = "../network/light" }
 sc-network-sync = { path = "../network/sync" }
+sc-network-types = { path = "../network/types" }
 sc-network-transactions = { path = "../network/transactions" }
 sc-chain-spec = { path = "../chain-spec" }
 sc-client-api = { path = "../api" }
diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs
index e71313428daf5acbe6d9837697191aa4d792a39d..830f9884719dcf1fd1b024906f830b18a154a323 100644
--- a/substrate/client/service/src/builder.rs
+++ b/substrate/client/service/src/builder.rs
@@ -43,10 +43,12 @@ use sc_executor::{
 use sc_keystore::LocalKeystore;
 use sc_network::{
 	config::{FullNetworkConfiguration, SyncMode},
-	peer_store::PeerStore,
-	NetworkService, NetworkStateInfo, NetworkStatusProvider,
+	service::{
+		traits::{PeerStore, RequestResponseConfig},
+		NotificationMetrics,
+	},
+	NetworkBackend, NetworkStateInfo,
 };
-use sc_network_bitswap::BitswapRequestHandler;
 use sc_network_common::role::Roles;
 use sc_network_light::light_client_requests::handler::LightClientRequestHandler;
 use sc_network_sync::{
@@ -342,19 +344,6 @@ where
 	)
 }
 
-/// Shared network instance implementing a set of mandatory traits.
-pub trait SpawnTaskNetwork<Block: BlockT>:
-	NetworkStateInfo + NetworkStatusProvider + Send + Sync + 'static
-{
-}
-
-impl<T, Block> SpawnTaskNetwork<Block> for T
-where
-	Block: BlockT,
-	T: NetworkStateInfo + NetworkStatusProvider + Send + Sync + 'static,
-{
-}
-
 /// Parameters to pass into `build`.
 pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> {
 	/// The service configuration.
@@ -373,7 +362,7 @@ pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> {
 	pub rpc_builder:
 		Box<dyn Fn(DenyUnsafe, SubscriptionTaskExecutor) -> Result<RpcModule<TRpc>, Error>>,
 	/// A shared network instance.
-	pub network: Arc<dyn SpawnTaskNetwork<TBl>>,
+	pub network: Arc<dyn sc_network::service::traits::NetworkService>,
 	/// A Sender for RPC requests.
 	pub system_rpc_tx: TracingUnboundedSender<sc_rpc::system::Request<TBl>>,
 	/// Controller for transactions handlers
@@ -736,11 +725,18 @@ where
 }
 
 /// Parameters to pass into `build_network`.
-pub struct BuildNetworkParams<'a, TBl: BlockT, TExPool, TImpQu, TCl> {
+pub struct BuildNetworkParams<
+	'a,
+	TBl: BlockT,
+	TNet: NetworkBackend<TBl, <TBl as BlockT>::Hash>,
+	TExPool,
+	TImpQu,
+	TCl,
+> {
 	/// The service configuration.
 	pub config: &'a Configuration,
 	/// Full network configuration.
-	pub net_config: FullNetworkConfiguration,
+	pub net_config: FullNetworkConfiguration<TBl, <TBl as BlockT>::Hash, TNet>,
 	/// A shared client returned by `new_full_parts`.
 	pub client: Arc<TCl>,
 	/// A shared transaction pool.
@@ -756,15 +752,17 @@ pub struct BuildNetworkParams<'a, TBl: BlockT, TExPool, TImpQu, TCl> {
 	pub warp_sync_params: Option<WarpSyncParams<TBl>>,
 	/// User specified block relay params. If not specified, the default
 	/// block request handler will be used.
-	pub block_relay: Option<BlockRelayParams<TBl>>,
+	pub block_relay: Option<BlockRelayParams<TBl, TNet>>,
+	/// Metrics.
+	pub metrics: NotificationMetrics,
 }
 
 /// Build the network service, the network status sinks and an RPC sender.
-pub fn build_network<TBl, TExPool, TImpQu, TCl>(
-	params: BuildNetworkParams<TBl, TExPool, TImpQu, TCl>,
+pub fn build_network<TBl, TNet, TExPool, TImpQu, TCl>(
+	params: BuildNetworkParams<TBl, TNet, TExPool, TImpQu, TCl>,
 ) -> Result<
 	(
-		Arc<NetworkService<TBl, <TBl as BlockT>::Hash>>,
+		Arc<dyn sc_network::service::traits::NetworkService>,
 		TracingUnboundedSender<sc_rpc::system::Request<TBl>>,
 		sc_network_transactions::TransactionsHandlerController<<TBl as BlockT>::Hash>,
 		NetworkStarter,
@@ -785,6 +783,7 @@ where
 		+ 'static,
 	TExPool: TransactionPool<Block = TBl, Hash = <TBl as BlockT>::Hash> + 'static,
 	TImpQu: ImportQueue<TBl> + 'static,
+	TNet: NetworkBackend<TBl, <TBl as BlockT>::Hash>,
 {
 	let BuildNetworkParams {
 		config,
@@ -796,6 +795,7 @@ where
 		block_announce_validator_builder,
 		warp_sync_params,
 		block_relay,
+		metrics,
 	} = params;
 
 	if warp_sync_params.is_none() && config.network.sync_mode.is_warp() {
@@ -830,7 +830,7 @@ where
 		None => {
 			// Custom protocol was not specified, use the default block handler.
 			// Allow both outgoing and incoming requests.
-			let params = BlockRequestHandler::new(
+			let params = BlockRequestHandler::new::<TNet>(
 				chain_sync_network_handle.clone(),
 				&protocol_id,
 				config.chain_spec.fork_id(),
@@ -849,13 +849,13 @@ where
 		let num_peer_hint = net_config.network_config.default_peers_set_num_full as usize +
 			net_config.network_config.default_peers_set.reserved_nodes.len();
 		// Allow both outgoing and incoming requests.
-		let (handler, protocol_config) = StateRequestHandler::new(
+		let (handler, protocol_config) = StateRequestHandler::new::<TNet>(
 			&protocol_id,
 			config.chain_spec.fork_id(),
 			client.clone(),
 			num_peer_hint,
 		);
-		let config_name = protocol_config.name.clone();
+		let config_name = protocol_config.protocol_name().clone();
 
 		spawn_handle.spawn("state-request-handler", Some("networking"), handler.run());
 		(protocol_config, config_name)
@@ -864,13 +864,13 @@ where
 	let (warp_sync_protocol_config, warp_request_protocol_name) = match warp_sync_params.as_ref() {
 		Some(WarpSyncParams::WithProvider(warp_with_provider)) => {
 			// Allow both outgoing and incoming requests.
-			let (handler, protocol_config) = WarpSyncRequestHandler::new(
+			let (handler, protocol_config) = WarpSyncRequestHandler::new::<_, TNet>(
 				protocol_id.clone(),
 				genesis_hash,
 				config.chain_spec.fork_id(),
 				warp_with_provider.clone(),
 			);
-			let config_name = protocol_config.name.clone();
+			let config_name = protocol_config.protocol_name().clone();
 
 			spawn_handle.spawn("warp-sync-request-handler", Some("networking"), handler.run());
 			(Some(protocol_config), Some(config_name))
@@ -880,7 +880,7 @@ where
 
 	let light_client_request_protocol_config = {
 		// Allow both outgoing and incoming requests.
-		let (handler, protocol_config) = LightClientRequestHandler::new(
+		let (handler, protocol_config) = LightClientRequestHandler::new::<TNet>(
 			&protocol_id,
 			config.chain_spec.fork_id(),
 			client.clone(),
@@ -898,30 +898,27 @@ where
 		net_config.add_request_response_protocol(config);
 	}
 
-	if config.network.ipfs_server {
-		let (handler, protocol_config) = BitswapRequestHandler::new(client.clone());
-		spawn_handle.spawn("bitswap-request-handler", Some("networking"), handler.run());
-		net_config.add_request_response_protocol(protocol_config);
-	}
+	let bitswap_config = config.network.ipfs_server.then(|| {
+		let (handler, config) = TNet::bitswap_server(client.clone());
+		spawn_handle.spawn("bitswap-request-handler", Some("networking"), handler);
+
+		config
+	});
 
 	// create transactions protocol and add it to the list of supported protocols of
+	let peer_store_handle = net_config.peer_store_handle();
 	let (transactions_handler_proto, transactions_config) =
-		sc_network_transactions::TransactionsHandlerPrototype::new(
+		sc_network_transactions::TransactionsHandlerPrototype::new::<_, TBl, TNet>(
 			protocol_id.clone(),
 			genesis_hash,
 			config.chain_spec.fork_id(),
+			metrics.clone(),
+			Arc::clone(&peer_store_handle),
 		);
 	net_config.add_notification_protocol(transactions_config);
 
-	// Create `PeerStore` and initialize it with bootnode peer ids.
-	let peer_store = PeerStore::new(
-		net_config
-			.network_config
-			.boot_nodes
-			.iter()
-			.map(|bootnode| bootnode.peer_id)
-			.collect(),
-	);
+	// Start task for `PeerStore`
+	let peer_store = net_config.take_peer_store();
 	let peer_store_handle = peer_store.handle();
 	spawn_handle.spawn("peer-store", Some("networking"), peer_store.run());
 
@@ -929,6 +926,7 @@ where
 		Roles::from(&config.role),
 		client.clone(),
 		config.prometheus_config.as_ref().map(|config| config.registry.clone()).as_ref(),
+		metrics.clone(),
 		&net_config,
 		protocol_id.clone(),
 		&config.chain_spec.fork_id().map(ToOwned::to_owned),
@@ -939,13 +937,13 @@ where
 		block_downloader,
 		state_request_protocol_name,
 		warp_request_protocol_name,
-		peer_store_handle.clone(),
+		Arc::clone(&peer_store_handle),
 	)?;
 	let sync_service_import_queue = sync_service.clone();
 	let sync_service = Arc::new(sync_service);
 
 	let genesis_hash = client.hash(Zero::zero()).ok().flatten().expect("Genesis block exists; qed");
-	let network_params = sc_network::config::Params::<TBl> {
+	let network_params = sc_network::config::Params::<TBl, <TBl as BlockT>::Hash, TNet> {
 		role: config.role.clone(),
 		executor: {
 			let spawn_handle = Clone::clone(&spawn_handle);
@@ -954,17 +952,18 @@ where
 			})
 		},
 		network_config: net_config,
-		peer_store: peer_store_handle,
 		genesis_hash,
 		protocol_id: protocol_id.clone(),
 		fork_id: config.chain_spec.fork_id().map(ToOwned::to_owned),
 		metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()),
 		block_announce_config,
+		bitswap_config,
+		notification_metrics: metrics,
 	};
 
 	let has_bootnodes = !network_params.network_config.network_config.boot_nodes.is_empty();
-	let network_mut = sc_network::NetworkWorker::new(network_params)?;
-	let network = network_mut.service().clone();
+	let network_mut = TNet::new(network_params)?;
+	let network = network_mut.network_service().clone();
 
 	let (tx_handler, tx_handler_controller) = transactions_handler_proto.build(
 		network.clone(),
@@ -972,12 +971,16 @@ where
 		Arc::new(TransactionPoolAdapter { pool: transaction_pool, client: client.clone() }),
 		config.prometheus_config.as_ref().map(|config| &config.registry),
 	)?;
-	spawn_handle.spawn("network-transactions-handler", Some("networking"), tx_handler.run());
+	spawn_handle.spawn_blocking(
+		"network-transactions-handler",
+		Some("networking"),
+		tx_handler.run(),
+	);
 
 	spawn_handle.spawn_blocking(
 		"chain-sync-network-service-provider",
 		Some("networking"),
-		chain_sync_network_provider.run(network.clone()),
+		chain_sync_network_provider.run(Arc::new(network.clone())),
 	);
 	spawn_handle.spawn("import-queue", None, import_queue.run(Box::new(sync_service_import_queue)));
 	spawn_handle.spawn_blocking("syncing", None, engine.run());
@@ -986,9 +989,9 @@ where
 	spawn_handle.spawn(
 		"system-rpc-handler",
 		Some("networking"),
-		build_system_rpc_future(
+		build_system_rpc_future::<_, _, <TBl as BlockT>::Hash>(
 			config.role.clone(),
-			network_mut.service().clone(),
+			network_mut.network_service(),
 			sync_service.clone(),
 			client.clone(),
 			system_rpc_rx,
@@ -996,18 +999,22 @@ where
 		),
 	);
 
-	let future =
-		build_network_future(network_mut, client, sync_service.clone(), config.announce_block);
+	let future = build_network_future::<_, _, <TBl as BlockT>::Hash, _>(
+		network_mut,
+		client,
+		sync_service.clone(),
+		config.announce_block,
+	);
 
 	// TODO: Normally, one is supposed to pass a list of notifications protocols supported by the
 	// node through the `NetworkConfiguration` struct. But because this function doesn't know in
 	// advance which components, such as GrandPa or Polkadot, will be plugged on top of the
-	// service, it is unfortunately not possible to do so without some deep refactoring. To bypass
-	// this problem, the `NetworkService` provides a `register_notifications_protocol` method that
-	// can be called even after the network has been initialized. However, we want to avoid the
-	// situation where `register_notifications_protocol` is called *after* the network actually
-	// connects to other peers. For this reason, we delay the process of the network future until
-	// the user calls `NetworkStarter::start_network`.
+	// service, it is unfortunately not possible to do so without some deep refactoring. To
+	// bypass this problem, the `NetworkService` provides a `register_notifications_protocol`
+	// method that can be called even after the network has been initialized. However, we want to
+	// avoid the situation where `register_notifications_protocol` is called *after* the network
+	// actually connects to other peers. For this reason, we delay the process of the network
+	// future until the user calls `NetworkStarter::start_network`.
 	//
 	// This entire hack should eventually be removed in favour of passing the list of protocols
 	// through the configuration.
diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs
index 9480d4a0b07276a5131be17579964c04abda1f1d..e46cfab50a3ebdcb4e6eb8f746ec27751cd77847 100644
--- a/substrate/client/service/src/lib.rs
+++ b/substrate/client/service/src/lib.rs
@@ -42,9 +42,11 @@ use jsonrpsee::RpcModule;
 use log::{debug, error, warn};
 use sc_client_api::{blockchain::HeaderBackend, BlockBackend, BlockchainEvents, ProofProvider};
 use sc_network::{
-	config::MultiaddrWithPeerId, NetworkBlock, NetworkPeers, NetworkStateInfo, PeerId,
+	config::MultiaddrWithPeerId, service::traits::NetworkService, NetworkBackend, NetworkBlock,
+	NetworkPeers, NetworkStateInfo,
 };
 use sc_network_sync::SyncingService;
+use sc_network_types::PeerId;
 use sc_utils::mpsc::TracingUnboundedReceiver;
 use sp_blockchain::HeaderMetadata;
 use sp_consensus::SyncOracle;
@@ -157,8 +159,9 @@ async fn build_network_future<
 		+ Sync
 		+ 'static,
 	H: sc_network_common::ExHashT,
+	N: NetworkBackend<B, <B as BlockT>::Hash>,
 >(
-	network: sc_network::NetworkWorker<B, H>,
+	network: N,
 	client: Arc<C>,
 	sync_service: Arc<SyncingService<B>>,
 	announce_imported_blocks: bool,
@@ -225,7 +228,7 @@ pub async fn build_system_rpc_future<
 	H: sc_network_common::ExHashT,
 >(
 	role: Role,
-	network_service: Arc<sc_network::NetworkService<B, H>>,
+	network_service: Arc<dyn NetworkService>,
 	sync_service: Arc<SyncingService<B>>,
 	client: Arc<C>,
 	mut rpc_rx: TracingUnboundedReceiver<sc_rpc::system::Request<B>>,
@@ -310,14 +313,12 @@ pub async fn build_system_rpc_future<
 				};
 			},
 			sc_rpc::system::Request::NetworkReservedPeers(sender) => {
-				let reserved_peers = network_service.reserved_peers().await;
-				if let Ok(reserved_peers) = reserved_peers {
-					let reserved_peers =
-						reserved_peers.iter().map(|peer_id| peer_id.to_base58()).collect();
-					let _ = sender.send(reserved_peers);
-				} else {
-					break
-				}
+				let Ok(reserved_peers) = network_service.reserved_peers().await else {
+					break;
+				};
+
+				let _ =
+					sender.send(reserved_peers.iter().map(|peer_id| peer_id.to_base58()).collect());
 			},
 			sc_rpc::system::Request::NodeRoles(sender) => {
 				use sc_rpc::system::NodeRole;
diff --git a/substrate/client/service/test/src/lib.rs b/substrate/client/service/test/src/lib.rs
index 349538965ee1fa04bd0acfff42b517ae40452c1c..b9abd8446f7dd6b24f12813b6716d4697f2bdf19 100644
--- a/substrate/client/service/test/src/lib.rs
+++ b/substrate/client/service/test/src/lib.rs
@@ -73,9 +73,7 @@ pub trait TestNetNode: Clone + Future<Output = Result<(), Error>> + Send + 'stat
 
 	fn client(&self) -> Arc<Client<Self::Backend, Self::Executor, Self::Block, Self::RuntimeApi>>;
 	fn transaction_pool(&self) -> Arc<Self::TransactionPool>;
-	fn network(
-		&self,
-	) -> Arc<sc_network::NetworkService<Self::Block, <Self::Block as BlockT>::Hash>>;
+	fn network(&self) -> Arc<dyn sc_network::service::traits::NetworkService>;
 	fn sync(&self) -> &Arc<SyncingService<Self::Block>>;
 	fn spawn_handle(&self) -> SpawnTaskHandle;
 }
@@ -84,7 +82,7 @@ pub struct TestNetComponents<TBl: BlockT, TBackend, TExec, TRtApi, TExPool> {
 	task_manager: Arc<Mutex<TaskManager>>,
 	client: Arc<Client<TBackend, TExec, TBl, TRtApi>>,
 	transaction_pool: Arc<TExPool>,
-	network: Arc<sc_network::NetworkService<TBl, <TBl as BlockT>::Hash>>,
+	network: Arc<dyn sc_network::service::traits::NetworkService>,
 	sync: Arc<SyncingService<TBl>>,
 }
 
@@ -94,7 +92,7 @@ impl<TBl: BlockT, TBackend, TExec, TRtApi, TExPool>
 	pub fn new(
 		task_manager: TaskManager,
 		client: Arc<Client<TBackend, TExec, TBl, TRtApi>>,
-		network: Arc<sc_network::NetworkService<TBl, <TBl as BlockT>::Hash>>,
+		network: Arc<dyn sc_network::service::traits::NetworkService>,
 		sync: Arc<SyncingService<TBl>>,
 		transaction_pool: Arc<TExPool>,
 	) -> Self {
@@ -153,9 +151,7 @@ where
 	fn transaction_pool(&self) -> Arc<Self::TransactionPool> {
 		self.transaction_pool.clone()
 	}
-	fn network(
-		&self,
-	) -> Arc<sc_network::NetworkService<Self::Block, <Self::Block as BlockT>::Hash>> {
+	fn network(&self) -> Arc<dyn sc_network::service::traits::NetworkService> {
 		self.network.clone()
 	}
 	fn sync(&self) -> &Arc<SyncingService<Self::Block>> {
diff --git a/substrate/client/telemetry/Cargo.toml b/substrate/client/telemetry/Cargo.toml
index 9a29a33a591f4e0491673b9ef3d7f86e0546220d..0cce2acf6409c1a1a179d60d31b997469e8fd87e 100644
--- a/substrate/client/telemetry/Cargo.toml
+++ b/substrate/client/telemetry/Cargo.toml
@@ -24,6 +24,7 @@ log = { workspace = true, default-features = true }
 parking_lot = "0.12.1"
 pin-project = "1.0.12"
 sc-utils = { path = "../utils" }
+sc-network = { path = "../network" }
 rand = "0.8.5"
 serde = { features = ["derive"], workspace = true, default-features = true }
 serde_json = { workspace = true, default-features = true }
diff --git a/substrate/client/telemetry/src/endpoints.rs b/substrate/client/telemetry/src/endpoints.rs
index a4f0d0f83d617c2108a0bd3130c2229db7c5b209..c7a60726a5656108dfb588d96c8ede33bb4de391 100644
--- a/substrate/client/telemetry/src/endpoints.rs
+++ b/substrate/client/telemetry/src/endpoints.rs
@@ -16,7 +16,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-use libp2p::Multiaddr;
+use sc_network::{multiaddr, Multiaddr};
 use serde::{Deserialize, Deserializer, Serialize};
 
 /// List of telemetry servers we want to talk to. Contains the URL of the server, and the
@@ -41,8 +41,8 @@ where
 
 impl TelemetryEndpoints {
 	/// Create a `TelemetryEndpoints` based on a list of `(String, u8)`.
-	pub fn new(endpoints: Vec<(String, u8)>) -> Result<Self, libp2p::multiaddr::Error> {
-		let endpoints: Result<Vec<(Multiaddr, u8)>, libp2p::multiaddr::Error> =
+	pub fn new(endpoints: Vec<(String, u8)>) -> Result<Self, multiaddr::Error> {
+		let endpoints: Result<Vec<(Multiaddr, u8)>, multiaddr::Error> =
 			endpoints.iter().map(|e| Ok((url_to_multiaddr(&e.0)?, e.1))).collect();
 		endpoints.map(Self)
 	}
@@ -56,7 +56,7 @@ impl TelemetryEndpoints {
 }
 
 /// Parses a WebSocket URL into a libp2p `Multiaddr`.
-fn url_to_multiaddr(url: &str) -> Result<Multiaddr, libp2p::multiaddr::Error> {
+fn url_to_multiaddr(url: &str) -> Result<Multiaddr, multiaddr::Error> {
 	// First, assume that we have a `Multiaddr`.
 	let parse_error = match url.parse() {
 		Ok(ma) => return Ok(ma),
@@ -64,7 +64,7 @@ fn url_to_multiaddr(url: &str) -> Result<Multiaddr, libp2p::multiaddr::Error> {
 	};
 
 	// If not, try the `ws://path/url` format.
-	if let Ok(ma) = libp2p::multiaddr::from_url(url) {
+	if let Ok(ma) = multiaddr::from_url(url) {
 		return Ok(ma)
 	}
 
@@ -75,8 +75,7 @@ fn url_to_multiaddr(url: &str) -> Result<Multiaddr, libp2p::multiaddr::Error> {
 
 #[cfg(test)]
 mod tests {
-	use super::{url_to_multiaddr, TelemetryEndpoints};
-	use libp2p::Multiaddr;
+	use super::{url_to_multiaddr, Multiaddr, TelemetryEndpoints};
 
 	#[test]
 	fn valid_endpoints() {
diff --git a/substrate/client/telemetry/src/lib.rs b/substrate/client/telemetry/src/lib.rs
index 7e3a4ee8639308bd4678288db583e31c5085af5b..f8a201e7611c2fa50a871e663e5f6c21f76ba07a 100644
--- a/substrate/client/telemetry/src/lib.rs
+++ b/substrate/client/telemetry/src/lib.rs
@@ -37,9 +37,9 @@
 #![warn(missing_docs)]
 
 use futures::{channel::mpsc, prelude::*};
-use libp2p::Multiaddr;
 use log::{error, warn};
 use parking_lot::Mutex;
+use sc_network::Multiaddr;
 use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender};
 use serde::Serialize;
 use std::{
diff --git a/substrate/client/telemetry/src/node.rs b/substrate/client/telemetry/src/node.rs
index 0bbdbfb622ef1ba478b62174d277e49ac836ff64..9b2443799d3deb23967973aa6670c6d92ea0bbfd 100644
--- a/substrate/client/telemetry/src/node.rs
+++ b/substrate/client/telemetry/src/node.rs
@@ -18,8 +18,9 @@
 
 use crate::TelemetryPayload;
 use futures::{channel::mpsc, prelude::*};
-use libp2p::{core::transport::Transport, Multiaddr};
+use libp2p::core::transport::Transport;
 use rand::Rng as _;
+use sc_network::Multiaddr;
 use std::{
 	fmt, mem,
 	pin::Pin,
diff --git a/substrate/scripts/ci/deny.toml b/substrate/scripts/ci/deny.toml
index b1dbf773e31f5297f42da64795a0cb5388d986ef..2e1701f3c60da3d2bebbafdbf1290a8e37bac859 100644
--- a/substrate/scripts/ci/deny.toml
+++ b/substrate/scripts/ci/deny.toml
@@ -69,7 +69,6 @@ exceptions = [
 	{ allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-keystore" },
 	{ allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-mixnet" },
 	{ allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network" },
-	{ allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-bitswap" },
 	{ allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-common" },
 	{ allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-gossip" },
 	{ allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-light" },
diff --git a/templates/minimal/node/src/command.rs b/templates/minimal/node/src/command.rs
index a985370c2d87f4d0e86191a79bd88db484953355..432add922a7b585bba9be071a0a82dcd34e02dd7 100644
--- a/templates/minimal/node/src/command.rs
+++ b/templates/minimal/node/src/command.rs
@@ -119,7 +119,15 @@ pub fn run() -> sc_cli::Result<()> {
 		None => {
 			let runner = cli.create_runner(&cli.run)?;
 			runner.run_node_until_exit(|config| async move {
-				service::new_full(config, cli.consensus).map_err(sc_cli::Error::Service)
+				match config.network.network_backend {
+					sc_network::config::NetworkBackendType::Libp2p =>
+						service::new_full::<sc_network::NetworkWorker<_, _>>(config, cli.consensus)
+							.map_err(sc_cli::Error::Service),
+					sc_network::config::NetworkBackendType::Litep2p => service::new_full::<
+						sc_network::Litep2pNetworkBackend,
+					>(config, cli.consensus)
+					.map_err(sc_cli::Error::Service),
+				}
 			})
 		},
 	}
diff --git a/templates/minimal/node/src/service.rs b/templates/minimal/node/src/service.rs
index 08db8b5936148c45870489ae11b0c9263078a8f6..d84df95dc1924edf8077783aed4f2ce80a803f14 100644
--- a/templates/minimal/node/src/service.rs
+++ b/templates/minimal/node/src/service.rs
@@ -22,6 +22,7 @@ use sc_executor::WasmExecutor;
 use sc_service::{error::Error as ServiceError, Configuration, TaskManager};
 use sc_telemetry::{Telemetry, TelemetryWorker};
 use sc_transaction_pool_api::OffchainTransactionPoolFactory;
+use sp_runtime::traits::Block as BlockT;
 use std::sync::Arc;
 
 use crate::cli::Consensus;
@@ -104,7 +105,10 @@ pub fn new_partial(config: &Configuration) -> Result<Service, ServiceError> {
 }
 
 /// Builds a new service for a full client.
-pub fn new_full(config: Configuration, consensus: Consensus) -> Result<TaskManager, ServiceError> {
+pub fn new_full<Network: sc_network::NetworkBackend<Block, <Block as BlockT>::Hash>>(
+	config: Configuration,
+	consensus: Consensus,
+) -> Result<TaskManager, ServiceError> {
 	let sc_service::PartialComponents {
 		client,
 		backend,
@@ -116,7 +120,14 @@ pub fn new_full(config: Configuration, consensus: Consensus) -> Result<TaskManag
 		other: mut telemetry,
 	} = new_partial(&config)?;
 
-	let net_config = sc_network::config::FullNetworkConfiguration::new(&config.network);
+	let net_config = sc_network::config::FullNetworkConfiguration::<
+		Block,
+		<Block as BlockT>::Hash,
+		Network,
+	>::new(&config.network);
+	let metrics = Network::register_notification_metrics(
+		config.prometheus_config.as_ref().map(|cfg| &cfg.registry),
+	);
 
 	let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) =
 		sc_service::build_network(sc_service::BuildNetworkParams {
@@ -129,6 +140,7 @@ pub fn new_full(config: Configuration, consensus: Consensus) -> Result<TaskManag
 			block_announce_validator_builder: None,
 			warp_sync_params: None,
 			block_relay: None,
+			metrics,
 		})?;
 
 	if config.offchain_worker.enabled {
@@ -143,7 +155,7 @@ pub fn new_full(config: Configuration, consensus: Consensus) -> Result<TaskManag
 				transaction_pool: Some(OffchainTransactionPoolFactory::new(
 					transaction_pool.clone(),
 				)),
-				network_provider: network.clone(),
+				network_provider: Arc::new(network.clone()),
 				enable_http_requests: true,
 				custom_extensions: |_| vec![],
 			})
diff --git a/templates/parachain/node/src/service.rs b/templates/parachain/node/src/service.rs
index 7e7bf1726b5ef1db09c2fa321f1acea5dd0ae1cc..5147eae2ecd302e69c45db2187d2ddd7bf582673 100644
--- a/templates/parachain/node/src/service.rs
+++ b/templates/parachain/node/src/service.rs
@@ -27,7 +27,7 @@ use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE;
 use sc_client_api::Backend;
 use sc_consensus::ImportQueue;
 use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY};
-use sc_network::NetworkBlock;
+use sc_network::{NetworkBackend, NetworkBlock};
 use sc_network_sync::SyncingService;
 use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager};
 use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle};
@@ -131,7 +131,7 @@ pub fn new_partial(config: &Configuration) -> Result<Service, sc_service::Error>
 ///
 /// This is the actual implementation that is abstract over the executor and the runtime api.
 #[sc_tracing::logging::prefix_logs_with("Parachain")]
-async fn start_node_impl(
+async fn start_node_impl<N: NetworkBackend<Block, Hash>>(
 	parachain_config: Configuration,
 	polkadot_config: Configuration,
 	collator_options: CollatorOptions,
@@ -142,7 +142,8 @@ async fn start_node_impl(
 
 	let params = new_partial(&parachain_config)?;
 	let (block_import, mut telemetry, telemetry_worker_handle) = params.other;
-	let net_config = sc_network::config::FullNetworkConfiguration::new(&parachain_config.network);
+	let net_config =
+		sc_network::config::FullNetworkConfiguration::<_, _, N>::new(&parachain_config.network);
 
 	let client = params.client.clone();
 	let backend = params.backend.clone();
@@ -191,7 +192,7 @@ async fn start_node_impl(
 				transaction_pool: Some(OffchainTransactionPoolFactory::new(
 					transaction_pool.clone(),
 				)),
-				network_provider: network.clone(),
+				network_provider: Arc::new(network.clone()),
 				is_validator: parachain_config.role.is_authority(),
 				enable_http_requests: false,
 				custom_extensions: move |_| vec![],
@@ -416,5 +417,24 @@ pub async fn start_parachain_node(
 	para_id: ParaId,
 	hwbench: Option<sc_sysinfo::HwBench>,
 ) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient>)> {
-	start_node_impl(parachain_config, polkadot_config, collator_options, para_id, hwbench).await
+	match polkadot_config.network.network_backend {
+		sc_network::config::NetworkBackendType::Libp2p =>
+			start_node_impl::<sc_network::NetworkWorker<_, _>>(
+				parachain_config,
+				polkadot_config,
+				collator_options,
+				para_id,
+				hwbench,
+			)
+			.await,
+		sc_network::config::NetworkBackendType::Litep2p =>
+			start_node_impl::<sc_network::Litep2pNetworkBackend>(
+				parachain_config,
+				polkadot_config,
+				collator_options,
+				para_id,
+				hwbench,
+			)
+			.await,
+	}
 }
diff --git a/templates/solochain/node/src/command.rs b/templates/solochain/node/src/command.rs
index 1b831f9cbbfd8a437fe003664155401605adccd3..7f6df42fb0f9768d6e116dd6d1285c7169aa3694 100644
--- a/templates/solochain/node/src/command.rs
+++ b/templates/solochain/node/src/command.rs
@@ -183,7 +183,18 @@ pub fn run() -> sc_cli::Result<()> {
 		None => {
 			let runner = cli.create_runner(&cli.run)?;
 			runner.run_node_until_exit(|config| async move {
-				service::new_full(config).map_err(sc_cli::Error::Service)
+				match config.network.network_backend {
+					sc_network::config::NetworkBackendType::Libp2p => service::new_full::<
+						sc_network::NetworkWorker<
+							solochain_template_runtime::opaque::Block,
+							<solochain_template_runtime::opaque::Block as sp_runtime::traits::Block>::Hash,
+						>,
+					>(config)
+					.map_err(sc_cli::Error::Service),
+					sc_network::config::NetworkBackendType::Litep2p =>
+						service::new_full::<sc_network::Litep2pNetworkBackend>(config)
+							.map_err(sc_cli::Error::Service),
+				}
 			})
 		},
 	}
diff --git a/templates/solochain/node/src/service.rs b/templates/solochain/node/src/service.rs
index dc25f7579129fe3ea86d0686732be5e6baec40ba..06d4b8ab7a59b936612eacec1a278e61161ecf3b 100644
--- a/templates/solochain/node/src/service.rs
+++ b/templates/solochain/node/src/service.rs
@@ -124,7 +124,11 @@ pub fn new_partial(config: &Configuration) -> Result<Service, ServiceError> {
 }
 
 /// Builds a new service for a full client.
-pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
+pub fn new_full<
+	N: sc_network::NetworkBackend<Block, <Block as sp_runtime::traits::Block>::Hash>,
+>(
+	config: Configuration,
+) -> Result<TaskManager, ServiceError> {
 	let sc_service::PartialComponents {
 		client,
 		backend,
@@ -136,14 +140,24 @@ pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
 		other: (block_import, grandpa_link, mut telemetry),
 	} = new_partial(&config)?;
 
-	let mut net_config = sc_network::config::FullNetworkConfiguration::new(&config.network);
+	let mut net_config = sc_network::config::FullNetworkConfiguration::<
+		Block,
+		<Block as sp_runtime::traits::Block>::Hash,
+		N,
+	>::new(&config.network);
+	let metrics = N::register_notification_metrics(config.prometheus_registry());
 
+	let peer_store_handle = net_config.peer_store_handle();
 	let grandpa_protocol_name = sc_consensus_grandpa::protocol_standard_name(
 		&client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"),
 		&config.chain_spec,
 	);
 	let (grandpa_protocol_config, grandpa_notification_service) =
-		sc_consensus_grandpa::grandpa_peers_set_config(grandpa_protocol_name.clone());
+		sc_consensus_grandpa::grandpa_peers_set_config::<_, N>(
+			grandpa_protocol_name.clone(),
+			metrics.clone(),
+			peer_store_handle,
+		);
 	net_config.add_notification_protocol(grandpa_protocol_config);
 
 	let warp_sync = Arc::new(sc_consensus_grandpa::warp_proof::NetworkProvider::new(
@@ -163,6 +177,7 @@ pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
 			block_announce_validator_builder: None,
 			warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)),
 			block_relay: None,
+			metrics,
 		})?;
 
 	if config.offchain_worker.enabled {
@@ -177,7 +192,7 @@ pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
 				transaction_pool: Some(OffchainTransactionPoolFactory::new(
 					transaction_pool.clone(),
 				)),
-				network_provider: network.clone(),
+				network_provider: Arc::new(network.clone()),
 				enable_http_requests: true,
 				custom_extensions: |_| vec![],
 			})
@@ -205,7 +220,7 @@ pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
 	};
 
 	let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
-		network: network.clone(),
+		network: Arc::new(network.clone()),
 		client: client.clone(),
 		keystore: keystore_container.keystore(),
 		task_manager: &mut task_manager,