From 1a7f5be07f272155b0347a48d50b6b90ee6264cb Mon Sep 17 00:00:00 2001
From: Aaro Altonen <48052676+altonen@users.noreply.github.com>
Date: Mon, 6 Mar 2023 18:33:38 +0200
Subject: [PATCH] Extract syncing protocol from `sc-network` (#12828)

* Move import queue out of `sc-network`

Add a supplementary asynchronous API for the import queue so that it can
be run as an independent task and communicated with through the
`ImportQueueService`.

This commit removes block and justification imports from `sc-network`
and provides `ChainSync` with a handle to the import queue so it can
import blocks and justifications. Polling of the import queue is moved
completely out of `sc-network`, and `sc_consensus::Link` is implemented
for `ChainSyncInterfaceHandled` so the import queue can still influence
the syncing process.
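
For illustration, a minimal, self-contained sketch of the idea: the
import queue runs as its own task and the syncing code only holds a
small handle to it. The names and shapes below (`ImportQueueHandle`,
`ImportRequest`, plain threads and channels instead of the real async
plumbing) are assumptions made for the sketch, not the actual
`sc_consensus::ImportQueueService` API.

```rust
// Hypothetical, simplified model of an import queue driven as its own
// task and reached only through a cloneable handle.
use std::sync::mpsc::{channel, Sender};
use std::thread;
use std::time::Duration;

enum ImportRequest {
    // Block numbers stand in for real block data in this sketch.
    Blocks(Vec<u64>),
    // Block number whose justification should be imported.
    Justification(u64),
}

#[derive(Clone)]
struct ImportQueueHandle {
    tx: Sender<ImportRequest>,
}

impl ImportQueueHandle {
    fn import_blocks(&self, blocks: Vec<u64>) {
        let _ = self.tx.send(ImportRequest::Blocks(blocks));
    }

    fn import_justification(&self, number: u64) {
        let _ = self.tx.send(ImportRequest::Justification(number));
    }
}

fn spawn_import_queue() -> ImportQueueHandle {
    let (tx, rx) = channel();
    // The queue runs independently; callers never poll it directly.
    thread::spawn(move || {
        while let Ok(request) = rx.recv() {
            match request {
                ImportRequest::Blocks(blocks) =>
                    println!("importing {} block(s)", blocks.len()),
                ImportRequest::Justification(n) =>
                    println!("importing justification for #{n}"),
            }
        }
    });
    ImportQueueHandle { tx }
}

fn main() {
    let handle = spawn_import_queue();
    handle.import_blocks(vec![1, 2, 3]);
    handle.import_justification(3);
    // Give the queue task a moment to drain before the process exits.
    thread::sleep(Duration::from_millis(50));
}
```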

* Move stuff to SyncingEngine

* Move `ChainSync` instantiation to `SyncingEngine`

Some of the tests have to be rewritten

* Move peer hashmap to `SyncingEngine`

* Let `SyncingEngine` implement `ChainSyncInterface`

* Introduce `SyncStatusProvider`

* Move `sync_peer_(connected|disconnected)` to `SyncingEngine`

* Implement `SyncEventStream`

Remove the `SyncConnected`/`SyncDisconnected` events from
`NetworkEventStream` and provide those events through
`ChainSyncInterface` instead.

Modify the BEEFY, GRANDPA and transactions protocols as well as
`NetworkGossip` to take a `SyncEventStream` object, which they listen to
for incoming sync peer events.
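
A rough sketch of the event-stream idea follows; `SyncEventStreams` and
`SyncEvent` are illustrative stand-ins for the real types, and the
channel-based plumbing is an assumption made to keep the example
self-contained.

```rust
// Subsystems subscribe to sync peer events directly from the syncing
// side instead of reading them from the network event stream.
use std::sync::mpsc::{channel, Receiver, Sender};

#[derive(Debug, Clone)]
enum SyncEvent {
    PeerConnected(String),
    PeerDisconnected(String),
}

#[derive(Default)]
struct SyncEventStreams {
    subscribers: Vec<Sender<SyncEvent>>,
}

impl SyncEventStreams {
    /// Called by e.g. GRANDPA, BEEFY or the transactions protocol.
    fn event_stream(&mut self) -> Receiver<SyncEvent> {
        let (tx, rx) = channel();
        self.subscribers.push(tx);
        rx
    }

    /// Called by the syncing side when a sync peer joins or leaves.
    fn notify(&mut self, event: SyncEvent) {
        // Drop subscribers whose receiving end has gone away.
        self.subscribers.retain(|tx| tx.send(event.clone()).is_ok());
    }
}

fn main() {
    let mut streams = SyncEventStreams::default();
    let grandpa_events = streams.event_stream();
    streams.notify(SyncEvent::PeerConnected("peer-1".into()));
    streams.notify(SyncEvent::PeerDisconnected("peer-1".into()));
    while let Ok(event) = grandpa_events.try_recv() {
        println!("received {event:?}");
    }
}
```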

* Introduce `ChainSyncInterface`

This interface provides a set of miscellaneous functions that other
subsystems can use to query, for example, the syncing status.
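
Boiled down, the interface amounts to a small query surface along the
lines of the sketch below; the trait, method and field names are
assumptions for illustration, not the exact items exported by
`sc-network-sync`.

```rust
// Other subsystems (informant, RPC, telemetry, ...) hold a handle that
// answers status queries without touching networking internals.
#[derive(Debug, Clone, Copy)]
struct SyncStatus {
    is_major_syncing: bool,
    best_seen_block: Option<u64>,
    num_peers: usize,
}

trait ChainSyncQueries {
    fn status(&self) -> SyncStatus;
}

struct StubSync;

impl ChainSyncQueries for StubSync {
    fn status(&self) -> SyncStatus {
        SyncStatus { is_major_syncing: false, best_seen_block: Some(42), num_peers: 3 }
    }
}

fn main() {
    let sync = StubSync;
    println!("current sync status: {:?}", sync.status());
}
```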

* Move event stream polling to `SyncingEngine`

Subscribe to `NetworkStreamEvent` and poll the incoming notifications
and substream events from `SyncingEngine`.

The code needs refactoring.

* Make `SyncingEngine` into an asynchronous runner

This commit removes the last hard dependency on syncing from
`sc-network`, meaning the protocol now lives completely outside of
`sc-network`, apart from the hardcoded peerset entry, which will be
addressed in the future.

Code needs a lot of refactoring.
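
A condensed sketch of the runner idea, assuming simplified names and
plain threads/channels in place of the real async machinery: the engine
owns all syncing state and drains its incoming messages in a loop that
runs entirely outside of `sc-network`.

```rust
// The engine is spawned as a background task; `sc-network` only sends
// it messages and never polls syncing state itself.
use std::sync::mpsc::{channel, Receiver, Sender};
use std::thread;

enum ToSyncing {
    NotificationReceived { peer: String, bytes: usize },
    Stop,
}

struct SyncingEngine {
    from_network: Receiver<ToSyncing>,
    peers: Vec<String>,
}

impl SyncingEngine {
    fn run(mut self) {
        while let Ok(message) = self.from_network.recv() {
            match message {
                ToSyncing::NotificationReceived { peer, bytes } => {
                    if !self.peers.contains(&peer) {
                        self.peers.push(peer.clone());
                    }
                    println!("{bytes} byte(s) of sync notifications from {peer}");
                }
                ToSyncing::Stop => break,
            }
        }
    }
}

fn main() {
    let (to_syncing, from_network): (Sender<ToSyncing>, _) = channel();
    let engine = SyncingEngine { from_network, peers: Vec::new() };
    let runner = thread::spawn(move || engine.run());
    to_syncing
        .send(ToSyncing::NotificationReceived { peer: "peer-1".into(), bytes: 128 })
        .unwrap();
    to_syncing.send(ToSyncing::Stop).unwrap();
    runner.join().unwrap();
}
```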

* Fix warnings

* Code refactoring

* Use `SyncingService` for BEEFY

* Use `SyncingService` for GRANDPA

* Remove call delegation from `NetworkService`

* Remove `ChainSyncService`

* Remove `ChainSync` service tests

They were written for the sole purpose of verifying that
`NetworkWorker` continues to function while calls are dispatched to
`ChainSync`.

* Refactor code

* Refactor code

* Update client/finality-grandpa/src/communication/tests.rs

Co-authored-by: Anton <anton.kalyaev@gmail.com>

* Fix warnings

* Apply review comments

* Fix docs

* Fix test

* cargo-fmt

* Update client/network/sync/src/engine.rs

Co-authored-by: Anton <anton.kalyaev@gmail.com>

* Update client/network/sync/src/engine.rs

Co-authored-by: Anton <anton.kalyaev@gmail.com>

* Add missing docs

* Refactor code

---------

Co-authored-by: Anton <anton.kalyaev@gmail.com>
---
 substrate/Cargo.lock                          | 666 ++++++++-----
 .../bin/node-template/node/src/service.rs     |   8 +-
 substrate/bin/node/cli/Cargo.toml             |   1 +
 substrate/bin/node/cli/src/chain_spec.rs      |   3 +-
 substrate/bin/node/cli/src/service.rs         |  26 +-
 substrate/client/cli/src/arg_enums.rs         |  20 +-
 .../client/cli/src/params/network_params.rs   |   7 +-
 .../client/cli/src/params/node_key_params.rs  |  14 +-
 substrate/client/consensus/aura/src/lib.rs    |   5 +
 substrate/client/consensus/babe/src/tests.rs  |   5 +
 substrate/client/consensus/beefy/Cargo.toml   |   1 +
 substrate/client/consensus/beefy/src/lib.rs   |  30 +-
 substrate/client/consensus/beefy/src/tests.rs |   8 +-
 .../client/consensus/beefy/src/worker.rs      |  39 +-
 .../grandpa/src/communication/mod.rs          |  45 +-
 .../grandpa/src/communication/tests.rs        |  40 +-
 .../consensus/grandpa/src/environment.rs      |  31 +-
 substrate/client/consensus/grandpa/src/lib.rs |  35 +-
 .../client/consensus/grandpa/src/observer.rs  |  19 +-
 .../client/consensus/grandpa/src/tests.rs     |  71 +-
 substrate/client/informant/src/display.rs     |  11 +-
 substrate/client/informant/src/lib.rs         |  20 +-
 substrate/client/network-gossip/src/bridge.rs | 123 ++-
 substrate/client/network-gossip/src/lib.rs    |  18 +-
 .../network-gossip/src/state_machine.rs       |   4 +
 substrate/client/network/common/Cargo.toml    |   6 +
 substrate/client/network/common/src/config.rs | 375 ++++++-
 .../network/common/src/protocol/event.rs      |  14 +-
 .../client/network/common/src/service.rs      |  35 +-
 substrate/client/network/common/src/sync.rs   |  61 +-
 substrate/client/network/src/behaviour.rs     |  39 +-
 substrate/client/network/src/config.rs        | 384 +-------
 substrate/client/network/src/lib.rs           |  20 +-
 substrate/client/network/src/protocol.rs      | 766 ++-------------
 .../src/protocol/notifications/behaviour.rs   |  50 +-
 substrate/client/network/src/service.rs       | 266 +----
 .../client/network/src/service/metrics.rs     |  35 +-
 .../client/network/src/service/out_events.rs  |  14 -
 .../network/src/service/tests/chain_sync.rs   | 420 --------
 .../client/network/src/service/tests/mod.rs   | 138 +--
 .../network/src/service/tests/service.rs      |  68 +-
 substrate/client/network/sync/Cargo.toml      |   1 +
 substrate/client/network/sync/src/engine.rs   | 924 ++++++++++++++++++
 substrate/client/network/sync/src/lib.rs      |  71 +-
 .../network/sync/src/service/chain_sync.rs    | 169 +++-
 .../client/network/sync/src/service/mock.rs   |  15 +-
 .../network/sync/src/service/network.rs       |  30 +-
 substrate/client/network/sync/src/tests.rs    |  78 --
 substrate/client/network/test/src/lib.rs      | 215 ++--
 substrate/client/network/test/src/sync.rs     |  27 +-
 .../client/network/transactions/src/lib.rs    |  76 +-
 substrate/client/service/src/builder.rs       |  78 +-
 substrate/client/service/src/config.rs        |   9 +-
 substrate/client/service/src/lib.rs           |  69 +-
 substrate/client/service/src/metrics.rs       |  23 +-
 substrate/client/service/test/Cargo.toml      |   1 +
 substrate/client/service/test/src/lib.rs      |  18 +-
 57 files changed, 2886 insertions(+), 2859 deletions(-)
 delete mode 100644 substrate/client/network/src/service/tests/chain_sync.rs
 create mode 100644 substrate/client/network/sync/src/engine.rs
 delete mode 100644 substrate/client/network/sync/src/tests.rs

diff --git a/substrate/Cargo.lock b/substrate/Cargo.lock
index e9e33471a15..72b5943d48b 100644
--- a/substrate/Cargo.lock
+++ b/substrate/Cargo.lock
@@ -27,7 +27,7 @@ version = "0.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97"
 dependencies = [
- "gimli 0.27.1",
+ "gimli 0.27.2",
 ]
 
 [[package]]
@@ -55,6 +55,16 @@ dependencies = [
  "rand_core 0.6.4",
 ]
 
+[[package]]
+name = "aead"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c192eb8f11fc081b0fe4259ba5af04217d4e0faddd02417310a927911abd7c8"
+dependencies = [
+ "crypto-common",
+ "generic-array 0.14.6",
+]
+
 [[package]]
 name = "aes"
 version = "0.6.0"
@@ -79,17 +89,14 @@ dependencies = [
 ]
 
 [[package]]
-name = "aes-gcm"
-version = "0.8.0"
+name = "aes"
+version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5278b5fabbb9bd46e24aa69b2fdea62c99088e0a950a9be40e3e0101298f88da"
+checksum = "433cfd6710c9986c576a25ca913c39d66a6474107b406f34f91d4a8923395241"
 dependencies = [
- "aead 0.3.2",
- "aes 0.6.0",
- "cipher 0.2.5",
- "ctr 0.6.0",
- "ghash 0.3.1",
- "subtle",
+ "cfg-if",
+ "cipher 0.4.3",
+ "cpufeatures",
 ]
 
 [[package]]
@@ -106,6 +113,20 @@ dependencies = [
  "subtle",
 ]
 
+[[package]]
+name = "aes-gcm"
+version = "0.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "82e1366e0c69c9f927b1fa5ce2c7bf9eafc8f9268c0b9800729e8b267612447c"
+dependencies = [
+ "aead 0.5.1",
+ "aes 0.8.2",
+ "cipher 0.4.3",
+ "ctr 0.9.2",
+ "ghash 0.5.0",
+ "subtle",
+]
+
 [[package]]
 name = "aes-soft"
 version = "0.6.4"
@@ -246,7 +267,7 @@ dependencies = [
  "num-traits",
  "rusticata-macros",
  "thiserror",
- "time 0.3.17",
+ "time 0.3.20",
 ]
 
 [[package]]
@@ -262,7 +283,7 @@ dependencies = [
  "num-traits",
  "rusticata-macros",
  "thiserror",
- "time 0.3.17",
+ "time 0.3.20",
 ]
 
 [[package]]
@@ -369,19 +390,20 @@ dependencies = [
 
 [[package]]
 name = "async-stream"
-version = "0.3.3"
+version = "0.3.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dad5c83079eae9969be7fadefe640a1c566901f05ff91ab221de4b6f68d9507e"
+checksum = "ad445822218ce64be7a341abfb0b1ea43b5c23aa83902542a4542e78309d8e5e"
 dependencies = [
  "async-stream-impl",
  "futures-core",
+ "pin-project-lite 0.2.9",
 ]
 
 [[package]]
 name = "async-stream-impl"
-version = "0.3.3"
+version = "0.3.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27"
+checksum = "e4655ae1a7b0cdf149156f780c5bf3f1352bc53cbd9e0a361a7ef7b22947e965"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -482,9 +504,9 @@ checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a"
 
 [[package]]
 name = "base64ct"
-version = "1.5.3"
+version = "1.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf"
+checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b"
 
 [[package]]
 name = "basic-toml"
@@ -527,9 +549,9 @@ dependencies = [
 
 [[package]]
 name = "bindgen"
-version = "0.60.1"
+version = "0.64.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "062dddbc1ba4aca46de6338e2bf87771414c335f7b2f2036e8f3e9befebf88e6"
+checksum = "c4243e6031260db77ede97ad86c27e501d646a27ab57b59a574f725d98ab1fb4"
 dependencies = [
  "bitflags",
  "cexpr",
@@ -542,6 +564,7 @@ dependencies = [
  "regex",
  "rustc-hash",
  "shlex",
+ "syn",
 ]
 
 [[package]]
@@ -573,24 +596,24 @@ dependencies = [
 
 [[package]]
 name = "blake2b_simd"
-version = "1.0.0"
+version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "72936ee4afc7f8f736d1c38383b56480b5497b4617b4a77bdbf1d2ababc76127"
+checksum = "3c2f0dc9a68c6317d884f97cc36cf5a3d20ba14ce404227df55e1af708ab04bc"
 dependencies = [
  "arrayref",
  "arrayvec 0.7.2",
- "constant_time_eq 0.1.5",
+ "constant_time_eq",
 ]
 
 [[package]]
 name = "blake2s_simd"
-version = "1.0.0"
+version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "db539cc2b5f6003621f1cd9ef92d7ded8ea5232c7de0f9faa2de251cd98730d4"
+checksum = "6637f448b9e61dfadbdcbae9a885fadee1f3eaffb1f8d3c1965d3ade8bdfd44f"
 dependencies = [
  "arrayref",
  "arrayvec 0.7.2",
- "constant_time_eq 0.1.5",
+ "constant_time_eq",
 ]
 
 [[package]]
@@ -603,7 +626,7 @@ dependencies = [
  "arrayvec 0.7.2",
  "cc",
  "cfg-if",
- "constant_time_eq 0.2.4",
+ "constant_time_eq",
 ]
 
 [[package]]
@@ -663,9 +686,9 @@ checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae"
 
 [[package]]
 name = "bounded-collections"
-version = "0.1.4"
+version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "de2aff4807e40f478132150d80b031f2461d88f061851afcab537d7600c24120"
+checksum = "a071c348a5ef6da1d3a87166b408170b46002382b1dda83992b5c2208cefb370"
 dependencies = [
  "log",
  "parity-scale-codec",
@@ -681,9 +704,9 @@ checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3"
 
 [[package]]
 name = "bstr"
-version = "1.2.0"
+version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b7f0778972c64420fdedc63f09919c8a88bda7b25135357fd25a5d9f3257e832"
+checksum = "5ffdb39cb703212f3c11973452c2861b972f757b021158f3516ba10f2fa8b2c1"
 dependencies = [
  "memchr",
  "once_cell",
@@ -749,9 +772,9 @@ dependencies = [
 
 [[package]]
 name = "camino"
-version = "1.1.2"
+version = "1.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c77df041dc383319cc661b428b6961a005db4d6808d5e12536931b1ca9556055"
+checksum = "6031a462f977dd38968b6f23378356512feeace69cef817e1a4475108093cec3"
 dependencies = [
  "serde",
 ]
@@ -865,7 +888,7 @@ name = "chain-spec-builder"
 version = "2.0.0"
 dependencies = [
  "ansi_term",
- "clap 4.1.4",
+ "clap 4.1.8",
  "node-cli",
  "rand 0.8.5",
  "sc-chain-spec",
@@ -924,7 +947,7 @@ checksum = "f6ed9c8b2d17acb8110c46f1da5bf4a696d745e1474a16db0cd2b49cd0249bf2"
 dependencies = [
  "core2",
  "multibase",
- "multihash",
+ "multihash 0.16.3",
  "serde",
  "unsigned-varint",
 ]
@@ -947,6 +970,16 @@ dependencies = [
  "generic-array 0.14.6",
 ]
 
+[[package]]
+name = "cipher"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d1873270f8f7942c191139cb8a40fd228da6c3fd2fc376d7e92d47aa14aeb59e"
+dependencies = [
+ "crypto-common",
+ "inout",
+]
+
 [[package]]
 name = "ckb-merkle-mountain-range"
 version = "0.5.2"
@@ -958,9 +991,9 @@ dependencies = [
 
 [[package]]
 name = "clang-sys"
-version = "1.4.0"
+version = "1.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3"
+checksum = "77ed9a53e5d4d9c573ae844bfac6872b159cb1d1585a83b29e7a64b7eef7332a"
 dependencies = [
  "glob",
  "libc",
@@ -981,13 +1014,13 @@ dependencies = [
 
 [[package]]
 name = "clap"
-version = "4.1.4"
+version = "4.1.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f13b9c79b5d1dd500d20ef541215a6423c75829ef43117e1b4d17fd8af0b5d76"
+checksum = "c3d7ae14b20b94cb02149ed21a86c423859cbe18dc7ed69845cace50e52b40a5"
 dependencies = [
  "bitflags",
  "clap_derive",
- "clap_lex 0.3.1",
+ "clap_lex 0.3.2",
  "is-terminal",
  "once_cell",
  "strsim",
@@ -996,18 +1029,18 @@ dependencies = [
 
 [[package]]
 name = "clap_complete"
-version = "4.1.1"
+version = "4.1.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3d6540eedc41f8a5a76cf3d8d458057dcdf817be4158a55b5f861f7a5483de75"
+checksum = "501ff0a401473ea1d4c3b125ff95506b62c5bc5768d818634195fbb7c4ad5ff4"
 dependencies = [
- "clap 4.1.4",
+ "clap 4.1.8",
 ]
 
 [[package]]
 name = "clap_derive"
-version = "4.1.0"
+version = "4.1.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "684a277d672e91966334af371f1a7b5833f9aa00b07c84e92fbce95e00208ce8"
+checksum = "44bec8e5c9d09e439c4335b1af0abaab56dcf3b94999a936e1bb47b9134288f0"
 dependencies = [
  "heck",
  "proc-macro-error",
@@ -1027,9 +1060,9 @@ dependencies = [
 
 [[package]]
 name = "clap_lex"
-version = "0.3.1"
+version = "0.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "783fe232adfca04f90f56201b26d79682d4cd2625e0bc7290b95123afe558ade"
+checksum = "350b9cf31731f9957399229e9b2adc51eeabdfbe9d71d9a0552275fd12710d09"
 dependencies = [
  "os_str_bytes",
 ]
@@ -1066,15 +1099,9 @@ dependencies = [
 
 [[package]]
 name = "const-oid"
-version = "0.9.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cec318a675afcb6a1ea1d4340e2d377e56e47c266f28043ceccbf4412ddfdd3b"
-
-[[package]]
-name = "constant_time_eq"
-version = "0.1.5"
+version = "0.9.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc"
+checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913"
 
 [[package]]
 name = "constant_time_eq"
@@ -1125,12 +1152,6 @@ dependencies = [
  "libc",
 ]
 
-[[package]]
-name = "cpuid-bool"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba"
-
 [[package]]
 name = "cranelift-bforest"
 version = "0.93.0"
@@ -1294,9 +1315,9 @@ dependencies = [
 
 [[package]]
 name = "crossbeam-channel"
-version = "0.5.6"
+version = "0.5.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521"
+checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c"
 dependencies = [
  "cfg-if",
  "crossbeam-utils",
@@ -1304,9 +1325,9 @@ dependencies = [
 
 [[package]]
 name = "crossbeam-deque"
-version = "0.8.2"
+version = "0.8.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc"
+checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef"
 dependencies = [
  "cfg-if",
  "crossbeam-epoch",
@@ -1315,22 +1336,22 @@ dependencies = [
 
 [[package]]
 name = "crossbeam-epoch"
-version = "0.9.13"
+version = "0.9.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a"
+checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695"
 dependencies = [
  "autocfg",
  "cfg-if",
  "crossbeam-utils",
- "memoffset 0.7.1",
+ "memoffset 0.8.0",
  "scopeguard",
 ]
 
 [[package]]
 name = "crossbeam-utils"
-version = "0.8.14"
+version = "0.8.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f"
+checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b"
 dependencies = [
  "cfg-if",
 ]
@@ -1360,6 +1381,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
 dependencies = [
  "generic-array 0.14.6",
+ "rand_core 0.6.4",
  "typenum",
 ]
 
@@ -1373,16 +1395,6 @@ dependencies = [
  "subtle",
 ]
 
-[[package]]
-name = "crypto-mac"
-version = "0.10.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bff07008ec701e8028e2ceb8f83f0e4274ee62bd2dbdc4fefff2e9a91824081a"
-dependencies = [
- "generic-array 0.14.6",
- "subtle",
-]
-
 [[package]]
 name = "crypto-mac"
 version = "0.11.1"
@@ -1405,20 +1417,20 @@ dependencies = [
 
 [[package]]
 name = "ctr"
-version = "0.6.0"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fb4a30d54f7443bf3d6191dcd486aca19e67cb3c49fa7a06a319966346707e7f"
+checksum = "049bb91fb4aaf0e3c7efa6cd5ef877dbbbd15b39dad06d9948de4ec8a75761ea"
 dependencies = [
- "cipher 0.2.5",
+ "cipher 0.3.0",
 ]
 
 [[package]]
 name = "ctr"
-version = "0.8.0"
+version = "0.9.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "049bb91fb4aaf0e3c7efa6cd5ef877dbbbd15b39dad06d9948de4ec8a75761ea"
+checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835"
 dependencies = [
- "cipher 0.3.0",
+ "cipher 0.4.3",
 ]
 
 [[package]]
@@ -1463,9 +1475,9 @@ dependencies = [
 
 [[package]]
 name = "cxx"
-version = "1.0.89"
+version = "1.0.91"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc831ee6a32dd495436e317595e639a587aa9907bef96fe6e6abc290ab6204e9"
+checksum = "86d3488e7665a7a483b57e25bdd90d0aeb2bc7608c8d0346acf2ad3f1caf1d62"
 dependencies = [
  "cc",
  "cxxbridge-flags",
@@ -1475,9 +1487,9 @@ dependencies = [
 
 [[package]]
 name = "cxx-build"
-version = "1.0.89"
+version = "1.0.91"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94331d54f1b1a8895cd81049f7eaaaef9d05a7dcb4d1fd08bf3ff0806246789d"
+checksum = "48fcaf066a053a41a81dfb14d57d99738b767febb8b735c3016e469fac5da690"
 dependencies = [
  "cc",
  "codespan-reporting",
@@ -1490,15 +1502,15 @@ dependencies = [
 
 [[package]]
 name = "cxxbridge-flags"
-version = "1.0.89"
+version = "1.0.91"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "48dcd35ba14ca9b40d6e4b4b39961f23d835dbb8eed74565ded361d93e1feb8a"
+checksum = "a2ef98b8b717a829ca5603af80e1f9e2e48013ab227b68ef37872ef84ee479bf"
 
 [[package]]
 name = "cxxbridge-macro"
-version = "1.0.89"
+version = "1.0.91"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "81bbeb29798b407ccd82a3324ade1a7286e0d29851475990b612670f6f5124d2"
+checksum = "086c685979a698443656e5cf7856c95c642295a38599f12fb1ff76fb28d19892"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -1804,9 +1816,9 @@ dependencies = [
 
 [[package]]
 name = "dyn-clone"
-version = "1.0.10"
+version = "1.0.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c9b0705efd4599c15a38151f4721f7bc388306f61084d3bfd50bd07fbca5cb60"
+checksum = "68b0cf012f1230e43cd00ebb729c6bb58707ecfa8ad08b52ef3a4ccd2697fc30"
 
 [[package]]
 name = "ecdsa"
@@ -1999,9 +2011,9 @@ checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7"
 
 [[package]]
 name = "fastrand"
-version = "1.8.0"
+version = "1.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499"
+checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be"
 dependencies = [
  "instant",
 ]
@@ -2043,14 +2055,14 @@ dependencies = [
 
 [[package]]
 name = "filetime"
-version = "0.2.19"
+version = "0.2.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4e884668cd0c7480504233e951174ddc3b382f7c2666e3b7310b5c4e7b0c37f9"
+checksum = "8a3de6e8d11b22ff9edc6d916f890800597d60f8b2da1caf2955c274638d6412"
 dependencies = [
  "cfg-if",
  "libc",
  "redox_syscall",
- "windows-sys 0.42.0",
+ "windows-sys 0.45.0",
 ]
 
 [[package]]
@@ -2170,7 +2182,7 @@ dependencies = [
  "Inflector",
  "array-bytes",
  "chrono",
- "clap 4.1.4",
+ "clap 4.1.8",
  "comfy-table",
  "frame-benchmarking",
  "frame-support",
@@ -2261,7 +2273,7 @@ dependencies = [
 name = "frame-election-solution-type-fuzzer"
 version = "2.0.0-alpha.5"
 dependencies = [
- "clap 4.1.4",
+ "clap 4.1.8",
  "frame-election-provider-solution-type",
  "frame-election-provider-support",
  "frame-support",
@@ -2725,22 +2737,22 @@ dependencies = [
 
 [[package]]
 name = "ghash"
-version = "0.3.1"
+version = "0.4.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "97304e4cd182c3846f7575ced3890c53012ce534ad9114046b0a9e00bb30a375"
+checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99"
 dependencies = [
  "opaque-debug 0.3.0",
- "polyval 0.4.5",
+ "polyval 0.5.3",
 ]
 
 [[package]]
 name = "ghash"
-version = "0.4.4"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99"
+checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40"
 dependencies = [
  "opaque-debug 0.3.0",
- "polyval 0.5.3",
+ "polyval 0.6.0",
 ]
 
 [[package]]
@@ -2756,9 +2768,9 @@ dependencies = [
 
 [[package]]
 name = "gimli"
-version = "0.27.1"
+version = "0.27.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "221996f774192f0f718773def8201c4ae31f02616a54ccfc2d358bb0e5cefdec"
+checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4"
 
 [[package]]
 name = "git2"
@@ -2805,9 +2817,9 @@ dependencies = [
 
 [[package]]
 name = "h2"
-version = "0.3.15"
+version = "0.3.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4"
+checksum = "5be7b54589b581f624f566bf5d8eb2bab1db736c51528720b6bd36b96b55924d"
 dependencies = [
  "bytes",
  "fnv",
@@ -2901,9 +2913,9 @@ dependencies = [
 
 [[package]]
 name = "hermit-abi"
-version = "0.3.0"
+version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "856b5cb0902c2b6d65d5fd97dfa30f9b70c7538e770b98eab5ed52d8db923e01"
+checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286"
 
 [[package]]
 name = "hex"
@@ -2930,16 +2942,6 @@ dependencies = [
  "digest 0.9.0",
 ]
 
-[[package]]
-name = "hmac"
-version = "0.10.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15"
-dependencies = [
- "crypto-mac 0.10.1",
- "digest 0.9.0",
-]
-
 [[package]]
 name = "hmac"
 version = "0.11.0"
@@ -2995,9 +2997,9 @@ dependencies = [
 
 [[package]]
 name = "http"
-version = "0.2.8"
+version = "0.2.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399"
+checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482"
 dependencies = [
  "bytes",
  "fnv",
@@ -3204,6 +3206,15 @@ version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8e04e2fd2b8188ea827b32ef11de88377086d690286ab35747ef7f9bf3ccb590"
 
+[[package]]
+name = "inout"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5"
+dependencies = [
+ "generic-array 0.14.6",
+]
+
 [[package]]
 name = "instant"
 version = "0.1.12"
@@ -3283,11 +3294,11 @@ checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146"
 
 [[package]]
 name = "is-terminal"
-version = "0.4.3"
+version = "0.4.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "22e18b0a45d56fe973d6db23972bf5bc46f988a4a2385deac9cc29572f09daef"
+checksum = "21b6b32576413a8e69b90e952e4a026476040d81017b80445deda5f2d3921857"
 dependencies = [
- "hermit-abi 0.3.0",
+ "hermit-abi 0.3.1",
  "io-lifetimes 1.0.5",
  "rustix 0.36.8",
  "windows-sys 0.45.0",
@@ -3685,14 +3696,14 @@ dependencies = [
  "futures-timer",
  "getrandom 0.2.8",
  "instant",
- "libp2p-core",
+ "libp2p-core 0.38.0",
  "libp2p-dns",
  "libp2p-identify",
  "libp2p-kad",
  "libp2p-mdns",
  "libp2p-metrics",
  "libp2p-mplex",
- "libp2p-noise",
+ "libp2p-noise 0.41.0",
  "libp2p-ping",
  "libp2p-quic",
  "libp2p-request-response",
@@ -3702,7 +3713,7 @@ dependencies = [
  "libp2p-webrtc",
  "libp2p-websocket",
  "libp2p-yamux",
- "multiaddr",
+ "multiaddr 0.16.0",
  "parking_lot 0.12.1",
  "pin-project",
  "smallvec",
@@ -3723,8 +3734,42 @@ dependencies = [
  "futures-timer",
  "instant",
  "log",
- "multiaddr",
- "multihash",
+ "multiaddr 0.16.0",
+ "multihash 0.16.3",
+ "multistream-select",
+ "once_cell",
+ "parking_lot 0.12.1",
+ "pin-project",
+ "prost",
+ "prost-build",
+ "rand 0.8.5",
+ "rw-stream-sink",
+ "sec1",
+ "sha2 0.10.6",
+ "smallvec",
+ "thiserror",
+ "unsigned-varint",
+ "void",
+ "zeroize",
+]
+
+[[package]]
+name = "libp2p-core"
+version = "0.39.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "881d9a54e97d97cdaa4125d48269d97ca8c40e5fefec6b85b30440dc60cc551f"
+dependencies = [
+ "asn1_der",
+ "bs58",
+ "ed25519-dalek",
+ "either",
+ "fnv",
+ "futures",
+ "futures-timer",
+ "instant",
+ "log",
+ "multiaddr 0.17.0",
+ "multihash 0.17.0",
  "multistream-select",
  "once_cell",
  "parking_lot 0.12.1",
@@ -3749,7 +3794,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8e42a271c1b49f789b92f7fc87749fa79ce5c7bdc88cbdfacb818a4bca47fec5"
 dependencies = [
  "futures",
- "libp2p-core",
+ "libp2p-core 0.38.0",
  "log",
  "parking_lot 0.12.1",
  "smallvec",
@@ -3765,7 +3810,7 @@ dependencies = [
  "asynchronous-codec",
  "futures",
  "futures-timer",
- "libp2p-core",
+ "libp2p-core 0.38.0",
  "libp2p-swarm",
  "log",
  "lru",
@@ -3791,7 +3836,7 @@ dependencies = [
  "futures",
  "futures-timer",
  "instant",
- "libp2p-core",
+ "libp2p-core 0.38.0",
  "libp2p-swarm",
  "log",
  "prost",
@@ -3814,7 +3859,7 @@ dependencies = [
  "data-encoding",
  "futures",
  "if-watch",
- "libp2p-core",
+ "libp2p-core 0.38.0",
  "libp2p-swarm",
  "log",
  "rand 0.8.5",
@@ -3831,7 +3876,7 @@ version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5ad8a64f29da86005c86a4d2728b8a0719e9b192f4092b609fd8790acb9dec55"
 dependencies = [
- "libp2p-core",
+ "libp2p-core 0.38.0",
  "libp2p-identify",
  "libp2p-kad",
  "libp2p-ping",
@@ -3848,7 +3893,7 @@ dependencies = [
  "asynchronous-codec",
  "bytes",
  "futures",
- "libp2p-core",
+ "libp2p-core 0.38.0",
  "log",
  "nohash-hasher",
  "parking_lot 0.12.1",
@@ -3866,7 +3911,30 @@ dependencies = [
  "bytes",
  "curve25519-dalek 3.2.0",
  "futures",
- "libp2p-core",
+ "libp2p-core 0.38.0",
+ "log",
+ "once_cell",
+ "prost",
+ "prost-build",
+ "rand 0.8.5",
+ "sha2 0.10.6",
+ "snow",
+ "static_assertions",
+ "thiserror",
+ "x25519-dalek 1.1.1",
+ "zeroize",
+]
+
+[[package]]
+name = "libp2p-noise"
+version = "0.42.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1216f9ec823ac7a2289b954674c54cbce81c9e45920b4fcf173018ede4295246"
+dependencies = [
+ "bytes",
+ "curve25519-dalek 3.2.0",
+ "futures",
+ "libp2p-core 0.39.0",
  "log",
  "once_cell",
  "prost",
@@ -3889,7 +3957,7 @@ dependencies = [
  "futures",
  "futures-timer",
  "instant",
- "libp2p-core",
+ "libp2p-core 0.38.0",
  "libp2p-swarm",
  "log",
  "rand 0.8.5",
@@ -3898,15 +3966,15 @@ dependencies = [
 
 [[package]]
 name = "libp2p-quic"
-version = "0.7.0-alpha"
+version = "0.7.0-alpha.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01e7c867e95c8130667b24409d236d37598270e6da69b3baf54213ba31ffca59"
+checksum = "5971f629ff7519f4d4889a7c981f0dc09c6ad493423cd8a13ee442de241bc8c8"
 dependencies = [
  "bytes",
  "futures",
  "futures-timer",
  "if-watch",
- "libp2p-core",
+ "libp2p-core 0.39.0",
  "libp2p-tls",
  "log",
  "parking_lot 0.12.1",
@@ -3927,7 +3995,7 @@ dependencies = [
  "bytes",
  "futures",
  "instant",
- "libp2p-core",
+ "libp2p-core 0.38.0",
  "libp2p-swarm",
  "log",
  "rand 0.8.5",
@@ -3946,7 +4014,7 @@ dependencies = [
  "futures",
  "futures-timer",
  "instant",
- "libp2p-core",
+ "libp2p-core 0.38.0",
  "libp2p-swarm-derive",
  "log",
  "pin-project",
@@ -3978,7 +4046,7 @@ dependencies = [
  "futures-timer",
  "if-watch",
  "libc",
- "libp2p-core",
+ "libp2p-core 0.38.0",
  "log",
  "socket2",
  "tokio",
@@ -3986,13 +4054,13 @@ dependencies = [
 
 [[package]]
 name = "libp2p-tls"
-version = "0.1.0-alpha"
+version = "0.1.0-alpha.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f7905ce0d040576634e8a3229a7587cc8beab83f79db6023800f1792895defa8"
+checksum = "e9baf6f6292149e124ee737d9a79dbee783f29473fc368c7faad9d157841078a"
 dependencies = [
  "futures",
  "futures-rustls",
- "libp2p-core",
+ "libp2p-core 0.39.0",
  "rcgen 0.10.0",
  "ring",
  "rustls 0.20.8",
@@ -4010,7 +4078,7 @@ checksum = "1bb1a35299860e0d4b3c02a3e74e3b293ad35ae0cee8a056363b0c862d082069"
 dependencies = [
  "futures",
  "js-sys",
- "libp2p-core",
+ "libp2p-core 0.38.0",
  "parity-send-wrapper",
  "wasm-bindgen",
  "wasm-bindgen-futures",
@@ -4018,9 +4086,9 @@ dependencies = [
 
 [[package]]
 name = "libp2p-webrtc"
-version = "0.4.0-alpha"
+version = "0.4.0-alpha.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cdb6cd86dd68cba72308ea05de1cebf3ba0ae6e187c40548167955d4e3970f6a"
+checksum = "db4401ec550d36f413310ba5d4bf564bb21f89fb1601cadb32b2300f8bc1eb5b"
 dependencies = [
  "async-trait",
  "asynchronous-codec",
@@ -4029,10 +4097,10 @@ dependencies = [
  "futures-timer",
  "hex",
  "if-watch",
- "libp2p-core",
- "libp2p-noise",
+ "libp2p-core 0.39.0",
+ "libp2p-noise 0.42.0",
  "log",
- "multihash",
+ "multihash 0.17.0",
  "prost",
  "prost-build",
  "prost-codec",
@@ -4056,7 +4124,7 @@ dependencies = [
  "either",
  "futures",
  "futures-rustls",
- "libp2p-core",
+ "libp2p-core 0.38.0",
  "log",
  "parking_lot 0.12.1",
  "quicksink",
@@ -4073,7 +4141,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4f63594a0aa818642d9d4915c791945053877253f08a3626f13416b5cd928a29"
 dependencies = [
  "futures",
- "libp2p-core",
+ "libp2p-core 0.38.0",
  "log",
  "parking_lot 0.12.1",
  "thiserror",
@@ -4082,9 +4150,9 @@ dependencies = [
 
 [[package]]
 name = "librocksdb-sys"
-version = "0.8.0+7.4.4"
+version = "0.8.3+7.4.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "611804e4666a25136fcc5f8cf425ab4d26c7f74ea245ffe92ea23b85b6420b5d"
+checksum = "557b255ff04123fcc176162f56ed0c9cd42d8f357cf55b3fabeb60f7413741b3"
 dependencies = [
  "bindgen",
  "bzip2-sys",
@@ -4374,6 +4442,15 @@ dependencies = [
  "autocfg",
 ]
 
+[[package]]
+name = "memoffset"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1"
+dependencies = [
+ "autocfg",
+]
+
 [[package]]
 name = "memory-db"
 version = "0.31.0"
@@ -4419,14 +4496,14 @@ dependencies = [
 
 [[package]]
 name = "mio"
-version = "0.8.5"
+version = "0.8.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de"
+checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9"
 dependencies = [
  "libc",
  "log",
  "wasi 0.11.0+wasi-snapshot-preview1",
- "windows-sys 0.42.0",
+ "windows-sys 0.45.0",
 ]
 
 [[package]]
@@ -4505,7 +4582,25 @@ dependencies = [
  "byteorder",
  "data-encoding",
  "multibase",
- "multihash",
+ "multihash 0.16.3",
+ "percent-encoding",
+ "serde",
+ "static_assertions",
+ "unsigned-varint",
+ "url",
+]
+
+[[package]]
+name = "multiaddr"
+version = "0.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b53e0cc5907a5c216ba6584bf74be8ab47d6d6289f72793b2dddbf15dc3bf8c"
+dependencies = [
+ "arrayref",
+ "byteorder",
+ "data-encoding",
+ "multibase",
+ "multihash 0.17.0",
  "percent-encoding",
  "serde",
  "static_assertions",
@@ -4541,6 +4636,19 @@ dependencies = [
  "unsigned-varint",
 ]
 
+[[package]]
+name = "multihash"
+version = "0.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "835d6ff01d610179fbce3de1694d007e500bf33a7f29689838941d6bf783ae40"
+dependencies = [
+ "core2",
+ "digest 0.10.6",
+ "multihash-derive",
+ "sha2 0.10.6",
+ "unsigned-varint",
+]
+
 [[package]]
 name = "multihash-derive"
 version = "0.8.1"
@@ -4708,7 +4816,7 @@ name = "node-bench"
 version = "0.9.0-dev"
 dependencies = [
  "array-bytes",
- "clap 4.1.4",
+ "clap 4.1.8",
  "derive_more",
  "fs_extra",
  "futures",
@@ -4745,7 +4853,7 @@ version = "3.0.0-dev"
 dependencies = [
  "array-bytes",
  "assert_cmd",
- "clap 4.1.4",
+ "clap 4.1.8",
  "clap_complete",
  "criterion",
  "frame-benchmarking-cli",
@@ -4786,6 +4894,7 @@ dependencies = [
  "sc-keystore",
  "sc-network",
  "sc-network-common",
+ "sc-network-sync",
  "sc-rpc",
  "sc-service",
  "sc-service-test",
@@ -4865,7 +4974,7 @@ dependencies = [
 name = "node-inspect"
 version = "0.9.0-dev"
 dependencies = [
- "clap 4.1.4",
+ "clap 4.1.8",
  "parity-scale-codec",
  "sc-cli",
  "sc-client-api",
@@ -4924,7 +5033,7 @@ dependencies = [
 name = "node-runtime-generate-bags"
 version = "3.0.0"
 dependencies = [
- "clap 4.1.4",
+ "clap 4.1.8",
  "generate-bags",
  "kitchensink-runtime",
 ]
@@ -4933,7 +5042,7 @@ dependencies = [
 name = "node-template"
 version = "4.0.0-dev"
 dependencies = [
- "clap 4.1.4",
+ "clap 4.1.8",
  "frame-benchmarking",
  "frame-benchmarking-cli",
  "frame-system",
@@ -5190,9 +5299,9 @@ dependencies = [
 
 [[package]]
 name = "once_cell"
-version = "1.17.0"
+version = "1.17.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66"
+checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3"
 
 [[package]]
 name = "oorandom"
@@ -6799,9 +6908,9 @@ dependencies = [
 
 [[package]]
 name = "parity-scale-codec"
-version = "3.3.0"
+version = "3.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3840933452adf7b3b9145e27086a5a3376c619dca1a21b1e5a5af0d54979bed"
+checksum = "637935964ff85a605d114591d4d2c13c5d1ba2806dae97cea6bf180238a749ac"
 dependencies = [
  "arrayvec 0.7.2",
  "bitvec",
@@ -6946,9 +7055,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"
 
 [[package]]
 name = "pest"
-version = "2.5.4"
+version = "2.5.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4ab62d2fa33726dbe6321cc97ef96d8cde531e3eeaf858a058de53a8a6d40d8f"
+checksum = "028accff104c4e513bad663bbcd2ad7cfd5304144404c31ed0a77ac103d00660"
 dependencies = [
  "thiserror",
  "ucd-trie",
@@ -6956,9 +7065,9 @@ dependencies = [
 
 [[package]]
 name = "pest_derive"
-version = "2.5.4"
+version = "2.5.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8bf026e2d0581559db66d837fe5242320f525d85c76283c61f4d51a1238d65ea"
+checksum = "2ac3922aac69a40733080f53c1ce7f91dcf57e1a5f6c52f421fadec7fbdc4b69"
 dependencies = [
  "pest",
  "pest_generator",
@@ -6966,9 +7075,9 @@ dependencies = [
 
 [[package]]
 name = "pest_generator"
-version = "2.5.4"
+version = "2.5.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2b27bd18aa01d91c8ed2b61ea23406a676b42d82609c6e2581fba42f0c15f17f"
+checksum = "d06646e185566b5961b4058dd107e0a7f56e77c3f484549fb119867773c0f202"
 dependencies = [
  "pest",
  "pest_meta",
@@ -6979,9 +7088,9 @@ dependencies = [
 
 [[package]]
 name = "pest_meta"
-version = "2.5.4"
+version = "2.5.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9f02b677c1859756359fc9983c2e56a0237f18624a3789528804406b7e915e5d"
+checksum = "e6f60b2ba541577e2a0c307c8f39d1439108120eb7903adeb6497fa880c59616"
 dependencies = [
  "once_cell",
  "pest",
@@ -7114,30 +7223,31 @@ checksum = "048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede"
 dependencies = [
  "cpufeatures",
  "opaque-debug 0.3.0",
- "universal-hash",
+ "universal-hash 0.4.1",
 ]
 
 [[package]]
 name = "polyval"
-version = "0.4.5"
+version = "0.5.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eebcc4aa140b9abd2bc40d9c3f7ccec842679cd79045ac3a7ac698c1a064b7cd"
+checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1"
 dependencies = [
- "cpuid-bool",
+ "cfg-if",
+ "cpufeatures",
  "opaque-debug 0.3.0",
- "universal-hash",
+ "universal-hash 0.4.1",
 ]
 
 [[package]]
 name = "polyval"
-version = "0.5.3"
+version = "0.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1"
+checksum = "7ef234e08c11dfcb2e56f79fd70f6f2eb7f025c0ce2333e82f4f0518ecad30c6"
 dependencies = [
  "cfg-if",
  "cpufeatures",
  "opaque-debug 0.3.0",
- "universal-hash",
+ "universal-hash 0.5.0",
 ]
 
 [[package]]
@@ -7293,9 +7403,9 @@ dependencies = [
 
 [[package]]
 name = "prost"
-version = "0.11.6"
+version = "0.11.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "21dc42e00223fc37204bd4aa177e69420c604ca4a183209a8f9de30c6d934698"
+checksum = "e48e50df39172a3e7eb17e14642445da64996989bc212b583015435d39a58537"
 dependencies = [
  "bytes",
  "prost-derive",
@@ -7303,9 +7413,9 @@ dependencies = [
 
 [[package]]
 name = "prost-build"
-version = "0.11.6"
+version = "0.11.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a3f8ad728fb08fe212df3c05169e940fbb6d9d16a877ddde14644a983ba2012e"
+checksum = "2c828f93f5ca4826f97fedcbd3f9a536c16b12cff3dbbb4a007f932bbad95b12"
 dependencies = [
  "bytes",
  "heck",
@@ -7338,9 +7448,9 @@ dependencies = [
 
 [[package]]
 name = "prost-derive"
-version = "0.11.6"
+version = "0.11.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8bda8c0881ea9f722eb9629376db3d0b903b462477c1aafcb0566610ac28ac5d"
+checksum = "4ea9b0f8cbe5e15a8a042d030bd96668db28ecb567ec37d691971ff5731d2b1b"
 dependencies = [
  "anyhow",
  "itertools",
@@ -7351,11 +7461,10 @@ dependencies = [
 
 [[package]]
 name = "prost-types"
-version = "0.11.6"
+version = "0.11.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a5e0526209433e96d83d750dd81a99118edbc55739e7e61a46764fd2ad537788"
+checksum = "379119666929a1afd7a043aa6cf96fa67a6dce9af60c88095a4686dbce4c9c88"
 dependencies = [
- "bytes",
  "prost",
 ]
 
@@ -7553,7 +7662,7 @@ checksum = "6413f3de1edee53342e6138e75b56d32e7bc6e332b3bd62d497b1929d4cfbcdd"
 dependencies = [
  "pem",
  "ring",
- "time 0.3.17",
+ "time 0.3.20",
  "x509-parser 0.13.2",
  "yasna",
 ]
@@ -7566,7 +7675,7 @@ checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b"
 dependencies = [
  "pem",
  "ring",
- "time 0.3.17",
+ "time 0.3.20",
  "yasna",
 ]
 
@@ -7660,15 +7769,6 @@ dependencies = [
  "winapi",
 ]
 
-[[package]]
-name = "remove_dir_all"
-version = "0.5.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
-dependencies = [
- "winapi",
-]
-
 [[package]]
 name = "resolv-conf"
 version = "0.7.0"
@@ -8070,7 +8170,7 @@ version = "0.10.0-dev"
 dependencies = [
  "array-bytes",
  "chrono",
- "clap 4.1.4",
+ "clap 4.1.8",
  "fdlimit",
  "futures",
  "futures-timer",
@@ -8323,6 +8423,7 @@ dependencies = [
  "sc-network",
  "sc-network-common",
  "sc-network-gossip",
+ "sc-network-sync",
  "sc-network-test",
  "sc-utils",
  "serde",
@@ -8734,6 +8835,7 @@ dependencies = [
 name = "sc-network-common"
 version = "0.10.0-dev"
 dependencies = [
+ "array-bytes",
  "async-trait",
  "bitflags",
  "bytes",
@@ -8745,6 +8847,7 @@ dependencies = [
  "prost-build",
  "sc-consensus",
  "sc-peerset",
+ "sc-utils",
  "serde",
  "smallvec",
  "sp-blockchain",
@@ -8752,7 +8855,9 @@ dependencies = [
  "sp-consensus-grandpa",
  "sp-runtime",
  "substrate-prometheus-endpoint",
+ "tempfile",
  "thiserror",
+ "zeroize",
 ]
 
 [[package]]
@@ -8803,6 +8908,7 @@ dependencies = [
  "async-trait",
  "fork-tree",
  "futures",
+ "futures-timer",
  "libp2p",
  "log",
  "lru",
@@ -9137,6 +9243,7 @@ dependencies = [
  "sc-executor",
  "sc-network",
  "sc-network-common",
+ "sc-network-sync",
  "sc-service",
  "sc-transaction-pool-api",
  "sp-api",
@@ -9169,7 +9276,7 @@ dependencies = [
 name = "sc-storage-monitor"
 version = "0.1.0"
 dependencies = [
- "clap 4.1.4",
+ "clap 4.1.8",
  "fs4",
  "futures",
  "log",
@@ -9565,9 +9672,9 @@ dependencies = [
 
 [[package]]
 name = "serde_json"
-version = "1.0.92"
+version = "1.0.93"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7434af0dc1cbd59268aa98b4c22c131c0584d2232f6fb166efb993e2832e896a"
+checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76"
 dependencies = [
  "itoa",
  "ryu",
@@ -9587,6 +9694,17 @@ dependencies = [
  "opaque-debug 0.3.0",
 ]
 
+[[package]]
+name = "sha1"
+version = "0.10.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3"
+dependencies = [
+ "cfg-if",
+ "cpufeatures",
+ "digest 0.10.6",
+]
+
 [[package]]
 name = "sha2"
 version = "0.8.2"
@@ -9650,9 +9768,9 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3"
 
 [[package]]
 name = "signal-hook-registry"
-version = "1.4.0"
+version = "1.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0"
+checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1"
 dependencies = [
  "libc",
 ]
@@ -9688,9 +9806,9 @@ checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de"
 
 [[package]]
 name = "slab"
-version = "0.4.7"
+version = "0.4.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef"
+checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d"
 dependencies = [
  "autocfg",
 ]
@@ -10231,7 +10349,7 @@ dependencies = [
 name = "sp-npos-elections-fuzzer"
 version = "2.0.0-alpha.5"
 dependencies = [
- "clap 4.1.4",
+ "clap 4.1.8",
  "honggfuzz",
  "parity-scale-codec",
  "rand 0.8.5",
@@ -10588,9 +10706,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"
 
 [[package]]
 name = "spin"
-version = "0.9.4"
+version = "0.9.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f6002a767bff9e83f8eeecf883ecb8011875a21ae8da43bffb817a57e78cc09"
+checksum = "7dccf47db1b41fa1573ed27ccf5e08e3ca771cb994f776668c5ebda893b248fc"
 
 [[package]]
 name = "spki"
@@ -10604,9 +10722,9 @@ dependencies = [
 
 [[package]]
 name = "ss58-registry"
-version = "1.38.0"
+version = "1.39.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e40c020d72bc0a9c5660bb71e4a6fdef081493583062c474740a7d59f55f0e7b"
+checksum = "ecf0bd63593ef78eca595a7fc25e9a443ca46fe69fd472f8f09f5245cdcd769d"
 dependencies = [
  "Inflector",
  "num-format",
@@ -10708,7 +10826,7 @@ dependencies = [
 name = "subkey"
 version = "3.0.0"
 dependencies = [
- "clap 4.1.4",
+ "clap 4.1.8",
  "sc-cli",
 ]
 
@@ -10736,7 +10854,7 @@ dependencies = [
 name = "substrate-frame-cli"
 version = "4.0.0-dev"
 dependencies = [
- "clap 4.1.4",
+ "clap 4.1.8",
  "frame-support",
  "frame-system",
  "sc-cli",
@@ -10997,9 +11115,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
 
 [[package]]
 name = "syn"
-version = "1.0.107"
+version = "1.0.109"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5"
+checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -11047,22 +11165,21 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
 
 [[package]]
 name = "target-lexicon"
-version = "0.12.5"
+version = "0.12.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9410d0f6853b1d94f0e519fb95df60f29d2c1eff2d921ffdf01a4c8a3b54f12d"
+checksum = "8ae9980cab1db3fceee2f6c6f643d5d8de2997c58ee8d25fb0cc8a9e9e7348e5"
 
 [[package]]
 name = "tempfile"
-version = "3.3.0"
+version = "3.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4"
+checksum = "af18f7ae1acd354b992402e9ec5864359d693cd8a79dcbef59f76891701c1e95"
 dependencies = [
  "cfg-if",
  "fastrand",
- "libc",
  "redox_syscall",
- "remove_dir_all",
- "winapi",
+ "rustix 0.36.8",
+ "windows-sys 0.42.0",
 ]
 
 [[package]]
@@ -11114,10 +11231,11 @@ checksum = "3bf63baf9f5039dadc247375c29eb13706706cfde997d0330d05aa63a77d8820"
 
 [[package]]
 name = "thread_local"
-version = "1.1.4"
+version = "1.1.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180"
+checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152"
 dependencies = [
+ "cfg-if",
  "once_cell",
 ]
 
@@ -11153,9 +11271,9 @@ dependencies = [
 
 [[package]]
 name = "time"
-version = "0.3.17"
+version = "0.3.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376"
+checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890"
 dependencies = [
  "itoa",
  "serde",
@@ -11171,9 +11289,9 @@ checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd"
 
 [[package]]
 name = "time-macros"
-version = "0.2.6"
+version = "0.2.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d967f99f534ca7e495c575c62638eebc2898a8c84c119b89e250477bc4ba16b2"
+checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36"
 dependencies = [
  "time-core",
 ]
@@ -11275,9 +11393,9 @@ dependencies = [
 
 [[package]]
 name = "tokio-stream"
-version = "0.1.11"
+version = "0.1.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce"
+checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313"
 dependencies = [
  "futures-core",
  "pin-project-lite 0.2.9",
@@ -11300,9 +11418,9 @@ dependencies = [
 
 [[package]]
 name = "tokio-util"
-version = "0.7.4"
+version = "0.7.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740"
+checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2"
 dependencies = [
  "bytes",
  "futures-core",
@@ -11574,7 +11692,7 @@ name = "try-runtime-cli"
 version = "0.10.0-dev"
 dependencies = [
  "async-trait",
- "clap 4.1.4",
+ "clap 4.1.8",
  "frame-remote-externalities",
  "frame-try-runtime",
  "hex",
@@ -11726,6 +11844,16 @@ dependencies = [
  "subtle",
 ]
 
+[[package]]
+name = "universal-hash"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7d3160b73c9a19f7e2939a2fdad446c57c1bbbbf4d919d3213ff1267a580d8b5"
+dependencies = [
+ "crypto-common",
+ "subtle",
+]
+
 [[package]]
 name = "unsigned-varint"
 version = "0.7.1"
@@ -11919,9 +12047,9 @@ checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d"
 
 [[package]]
 name = "wasm-encoder"
-version = "0.22.1"
+version = "0.24.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9a584273ccc2d9311f1dd19dc3fb26054661fa3e373d53ede5d1144ba07a9acd"
+checksum = "68f7d56227d910901ce12dfd19acc40c12687994dfb3f57c90690f80be946ec5"
 dependencies = [
  "leb128",
 ]
@@ -12017,7 +12145,7 @@ version = "0.20.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "01bf50edb2ea9d922aa75a7bf3c15e26a6c9e2d18c56e862b49737a582901729"
 dependencies = [
- "spin 0.9.4",
+ "spin 0.9.5",
  "wasmi_arena",
  "wasmi_core 0.5.0",
  "wasmparser-nostd",
@@ -12263,9 +12391,9 @@ dependencies = [
 
 [[package]]
 name = "wast"
-version = "52.0.3"
+version = "54.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "15942180f265280eede7bc38b239e9770031d1821c02d905284216c645316430"
+checksum = "3d48d9d731d835f4f8dacbb8de7d47be068812cb9877f5c60d408858778d8d2a"
 dependencies = [
  "leb128",
  "memchr",
@@ -12275,9 +12403,9 @@ dependencies = [
 
 [[package]]
 name = "wat"
-version = "1.0.57"
+version = "1.0.60"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37212100d4cbe6f0f6ff6e707f1e5a5b5b675f0451231ed9e4235e234e127ed3"
+checksum = "d1db2e3ed05ea31243761439194bec3af6efbbaf87c4c8667fb879e4f23791a0"
 dependencies = [
  "wast",
 ]
@@ -12347,7 +12475,7 @@ dependencies = [
  "sha2 0.10.6",
  "stun",
  "thiserror",
- "time 0.3.17",
+ "time 0.3.20",
  "tokio",
  "turn",
  "url",
@@ -12379,12 +12507,12 @@ dependencies = [
 
 [[package]]
 name = "webrtc-dtls"
-version = "0.7.0"
+version = "0.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7021987ae0a2ed6c8cd33f68e98e49bb6e74ffe9543310267b48a1bbe3900e5f"
+checksum = "942be5bd85f072c3128396f6e5a9bfb93ca8c1939ded735d177b7bcba9a13d05"
 dependencies = [
  "aes 0.6.0",
- "aes-gcm 0.8.0",
+ "aes-gcm 0.10.1",
  "async-trait",
  "bincode",
  "block-modes",
@@ -12394,7 +12522,7 @@ dependencies = [
  "der-parser 8.1.0",
  "elliptic-curve",
  "hkdf",
- "hmac 0.10.1",
+ "hmac 0.12.1",
  "log",
  "oid-registry 0.6.1",
  "p256",
@@ -12406,8 +12534,8 @@ dependencies = [
  "rustls 0.19.1",
  "sec1",
  "serde",
- "sha-1",
- "sha2 0.9.9",
+ "sha1",
+ "sha2 0.10.6",
  "signature",
  "subtle",
  "thiserror",
@@ -12420,9 +12548,9 @@ dependencies = [
 
 [[package]]
 name = "webrtc-ice"
-version = "0.9.0"
+version = "0.9.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "494483fbb2f5492620871fdc78b084aed8807377f6e3fe88b2e49f0a9c9c41d7"
+checksum = "465a03cc11e9a7d7b4f9f99870558fe37a102b65b93f8045392fef7c67b39e80"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -12555,9 +12683,9 @@ dependencies = [
 
 [[package]]
 name = "wide"
-version = "0.7.6"
+version = "0.7.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "feff0a412894d67223777b6cc8d68c0dab06d52d95e9890d5f2d47f10dd9366c"
+checksum = "b689b6c49d6549434bf944e6b0f39238cf63693cb7a147e9d887507fffa3b223"
 dependencies = [
  "bytemuck",
  "safe_arch",
@@ -12780,7 +12908,7 @@ dependencies = [
  "ring",
  "rusticata-macros",
  "thiserror",
- "time 0.3.17",
+ "time 0.3.20",
 ]
 
 [[package]]
@@ -12798,7 +12926,7 @@ dependencies = [
  "oid-registry 0.6.1",
  "rusticata-macros",
  "thiserror",
- "time 0.3.17",
+ "time 0.3.20",
 ]
 
 [[package]]
@@ -12827,7 +12955,7 @@ version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "aed2e7a52e3744ab4d0c05c20aa065258e84c49fd4226f5191b2ed29712710b4"
 dependencies = [
- "time 0.3.17",
+ "time 0.3.20",
 ]
 
 [[package]]
@@ -12872,9 +13000,9 @@ dependencies = [
 
 [[package]]
 name = "zstd-sys"
-version = "2.0.6+zstd.1.5.2"
+version = "2.0.7+zstd.1.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "68a3f9792c0c3dc6c165840a75f47ae1f4da402c2d006881129579f6597e801b"
+checksum = "94509c3ba2fe55294d752b79842c530ccfab760192521df74a081a78d2b3c7f5"
 dependencies = [
  "cc",
  "libc",
diff --git a/substrate/bin/node-template/node/src/service.rs b/substrate/bin/node-template/node/src/service.rs
index 98485a7ad67..34e4e566d92 100644
--- a/substrate/bin/node-template/node/src/service.rs
+++ b/substrate/bin/node-template/node/src/service.rs
@@ -192,7 +192,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 		Vec::default(),
 	));
 
-	let (network, system_rpc_tx, tx_handler_controller, network_starter) =
+	let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) =
 		sc_service::build_network(sc_service::BuildNetworkParams {
 			config: &config,
 			client: client.clone(),
@@ -240,6 +240,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 		backend,
 		system_rpc_tx,
 		tx_handler_controller,
+		sync_service: sync_service.clone(),
 		config,
 		telemetry: telemetry.as_mut(),
 	})?;
@@ -276,8 +277,8 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 				force_authoring,
 				backoff_authoring_blocks,
 				keystore: keystore_container.sync_keystore(),
-				sync_oracle: network.clone(),
-				justification_sync_link: network.clone(),
+				sync_oracle: sync_service.clone(),
+				justification_sync_link: sync_service.clone(),
 				block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32),
 				max_block_proposal_slot_portion: None,
 				telemetry: telemetry.as_ref().map(|x| x.handle()),
@@ -320,6 +321,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 			config: grandpa_config,
 			link: grandpa_link,
 			network,
+			sync: Arc::new(sync_service),
 			voting_rule: sc_consensus_grandpa::VotingRulesBuilder::default().build(),
 			prometheus_registry,
 			shared_voter_state: SharedVoterState::empty(),
diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml
index b93019b7cc2..4451935c360 100644
--- a/substrate/bin/node/cli/Cargo.toml
+++ b/substrate/bin/node/cli/Cargo.toml
@@ -68,6 +68,7 @@ sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transacti
 sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" }
 sc-network = { version = "0.10.0-dev", path = "../../../client/network" }
 sc-network-common = { version = "0.10.0-dev", path = "../../../client/network/common" }
+sc-network-sync = { version = "0.10.0-dev", path = "../../../client/network/sync" }
 sc-consensus-slots = { version = "0.10.0-dev", path = "../../../client/consensus/slots" }
 sc-consensus-babe = { version = "0.10.0-dev", path = "../../../client/consensus/babe" }
 grandpa = { version = "0.10.0-dev", package = "sc-consensus-grandpa", path = "../../../client/consensus/grandpa" }
diff --git a/substrate/bin/node/cli/src/chain_spec.rs b/substrate/bin/node/cli/src/chain_spec.rs
index 178b97823fb..4732e12f9c7 100644
--- a/substrate/bin/node/cli/src/chain_spec.rs
+++ b/substrate/bin/node/cli/src/chain_spec.rs
@@ -479,12 +479,13 @@ pub(crate) mod tests {
 		sp_tracing::try_init_simple();
 
 		sc_service_test::connectivity(integration_test_config_with_two_authorities(), |config| {
-			let NewFullBase { task_manager, client, network, transaction_pool, .. } =
+			let NewFullBase { task_manager, client, network, sync, transaction_pool, .. } =
 				new_full_base(config, false, |_, _| ())?;
 			Ok(sc_service_test::TestNetComponents::new(
 				task_manager,
 				client,
 				network,
+				sync,
 				transaction_pool,
 			))
 		});
diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs
index 268a170a821..6e000a4755e 100644
--- a/substrate/bin/node/cli/src/service.rs
+++ b/substrate/bin/node/cli/src/service.rs
@@ -35,6 +35,7 @@ use sc_network::NetworkService;
 use sc_network_common::{
 	protocol::event::Event, service::NetworkEventStream, sync::warp::WarpSyncParams,
 };
+use sc_network_sync::SyncingService;
 use sc_service::{config::Configuration, error::Error as ServiceError, RpcHandlers, TaskManager};
 use sc_telemetry::{Telemetry, TelemetryWorker};
 use sp_api::ProvideRuntimeApi;
@@ -303,6 +304,8 @@ pub struct NewFullBase {
 	pub client: Arc<FullClient>,
 	/// The networking service of the node.
 	pub network: Arc<NetworkService<Block, <Block as BlockT>::Hash>>,
+	/// The syncing service of the node.
+	pub sync: Arc<SyncingService<Block>>,
 	/// The transaction pool of the node.
 	pub transaction_pool: Arc<TransactionPool>,
 	/// The rpc handlers of the node.
@@ -353,7 +356,7 @@ pub fn new_full_base(
 		Vec::default(),
 	));
 
-	let (network, system_rpc_tx, tx_handler_controller, network_starter) =
+	let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) =
 		sc_service::build_network(sc_service::BuildNetworkParams {
 			config: &config,
 			client: client.clone(),
@@ -392,6 +395,7 @@ pub fn new_full_base(
 		task_manager: &mut task_manager,
 		system_rpc_tx,
 		tx_handler_controller,
+		sync_service: sync_service.clone(),
 		telemetry: telemetry.as_mut(),
 	})?;
 
@@ -434,8 +438,8 @@ pub fn new_full_base(
 			select_chain,
 			env: proposer,
 			block_import,
-			sync_oracle: network.clone(),
-			justification_sync_link: network.clone(),
+			sync_oracle: sync_service.clone(),
+			justification_sync_link: sync_service.clone(),
 			create_inherent_data_providers: move |parent, ()| {
 				let client_clone = client_clone.clone();
 				async move {
@@ -531,6 +535,7 @@ pub fn new_full_base(
 			config,
 			link: grandpa_link,
 			network: network.clone(),
+			sync: Arc::new(sync_service.clone()),
 			telemetry: telemetry.as_ref().map(|x| x.handle()),
 			voting_rule: grandpa::VotingRulesBuilder::default().build(),
 			prometheus_registry,
@@ -547,7 +552,14 @@ pub fn new_full_base(
 	}
 
 	network_starter.start_network();
-	Ok(NewFullBase { task_manager, client, network, transaction_pool, rpc_handlers })
+	Ok(NewFullBase {
+		task_manager,
+		client,
+		network,
+		sync: sync_service,
+		transaction_pool,
+		rpc_handlers,
+	})
 }
 
 /// Builds a new service for a full client.
@@ -627,7 +639,7 @@ mod tests {
 			chain_spec,
 			|config| {
 				let mut setup_handles = None;
-				let NewFullBase { task_manager, client, network, transaction_pool, .. } =
+				let NewFullBase { task_manager, client, network, sync, transaction_pool, .. } =
 					new_full_base(
 						config,
 						false,
@@ -641,6 +653,7 @@ mod tests {
 					task_manager,
 					client,
 					network,
+					sync,
 					transaction_pool,
 				);
 				Ok((node, setup_handles.unwrap()))
@@ -807,12 +820,13 @@ mod tests {
 		sc_service_test::consensus(
 			crate::chain_spec::tests::integration_test_config_with_two_authorities(),
 			|config| {
-				let NewFullBase { task_manager, client, network, transaction_pool, .. } =
+				let NewFullBase { task_manager, client, network, sync, transaction_pool, .. } =
 					new_full_base(config, false, |_, _| ())?;
 				Ok(sc_service_test::TestNetComponents::new(
 					task_manager,
 					client,
 					network,
+					sync,
 					transaction_pool,
 				))
 			},
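
The hunks above have `build_network` returning the syncing service alongside the network handle, and clones of that one handle are passed to BABE (as sync oracle and justification sync link), to GRANDPA and to the service builder. A minimal self-contained sketch of that sharing pattern, using toy types rather than the real `SyncingService` or consensus APIs:

use std::sync::Arc;

/// Stand-in for the syncing service handle returned by `build_network`
/// (a hypothetical toy type, not the real `SyncingService`).
struct SyncHandle;

impl SyncHandle {
    fn is_major_syncing(&self) -> bool {
        false
    }
}

/// Toy consumers standing in for the block-authoring and finality tasks.
struct Authoring { sync_oracle: Arc<SyncHandle> }
struct Finality { sync: Arc<SyncHandle> }

fn main() {
    // One handle, created where the network is built...
    let sync_service = Arc::new(SyncHandle);

    // ...and cloned into every subsystem that previously leaned on the
    // network service for sync information.
    let authoring = Authoring { sync_oracle: sync_service.clone() };
    let finality = Finality { sync: sync_service.clone() };

    assert!(!authoring.sync_oracle.is_major_syncing());
    assert!(!finality.sync.is_major_syncing());
}

Each consumer only holds an `Arc` clone, so the single syncing task remains the one source of truth for sync state.
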
diff --git a/substrate/client/cli/src/arg_enums.rs b/substrate/client/cli/src/arg_enums.rs
index c3399a89680..472c1722f9e 100644
--- a/substrate/client/cli/src/arg_enums.rs
+++ b/substrate/client/cli/src/arg_enums.rs
@@ -251,15 +251,19 @@ pub enum SyncMode {
 	Warp,
 }
 
-impl Into<sc_network::config::SyncMode> for SyncMode {
-	fn into(self) -> sc_network::config::SyncMode {
+impl Into<sc_network_common::config::SyncMode> for SyncMode {
+	fn into(self) -> sc_network_common::config::SyncMode {
 		match self {
-			SyncMode::Full => sc_network::config::SyncMode::Full,
-			SyncMode::Fast =>
-				sc_network::config::SyncMode::Fast { skip_proofs: false, storage_chain_mode: false },
-			SyncMode::FastUnsafe =>
-				sc_network::config::SyncMode::Fast { skip_proofs: true, storage_chain_mode: false },
-			SyncMode::Warp => sc_network::config::SyncMode::Warp,
+			SyncMode::Full => sc_network_common::config::SyncMode::Full,
+			SyncMode::Fast => sc_network_common::config::SyncMode::Fast {
+				skip_proofs: false,
+				storage_chain_mode: false,
+			},
+			SyncMode::FastUnsafe => sc_network_common::config::SyncMode::Fast {
+				skip_proofs: true,
+				storage_chain_mode: false,
+			},
+			SyncMode::Warp => sc_network_common::config::SyncMode::Warp,
 		}
 	}
 }
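
The CLI enum now converts into `sc_network_common::config::SyncMode`; the two fast variants differ only in whether proofs are skipped. A tiny stand-alone replica of that mapping with local enums (hypothetical names, shown only to illustrate the conversion):

// Toy replica of the mapping above; both enums are local stand-ins, not the
// real `sc-cli` / `sc-network-common` types.
#[allow(dead_code)]
enum CliSyncMode { Full, Fast, FastUnsafe, Warp }

#[derive(Debug)]
enum ConfigSyncMode {
    Full,
    Fast { skip_proofs: bool, storage_chain_mode: bool },
    Warp,
}

impl From<CliSyncMode> for ConfigSyncMode {
    fn from(mode: CliSyncMode) -> Self {
        match mode {
            CliSyncMode::Full => ConfigSyncMode::Full,
            CliSyncMode::Fast =>
                ConfigSyncMode::Fast { skip_proofs: false, storage_chain_mode: false },
            CliSyncMode::FastUnsafe =>
                ConfigSyncMode::Fast { skip_proofs: true, storage_chain_mode: false },
            CliSyncMode::Warp => ConfigSyncMode::Warp,
        }
    }
}

fn main() {
    // `fast-unsafe` skips proof verification but is otherwise plain fast sync.
    println!("{:?}", ConfigSyncMode::from(CliSyncMode::FastUnsafe));
}
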
diff --git a/substrate/client/cli/src/params/network_params.rs b/substrate/client/cli/src/params/network_params.rs
index 3aebee778d3..31b761938df 100644
--- a/substrate/client/cli/src/params/network_params.rs
+++ b/substrate/client/cli/src/params/network_params.rs
@@ -18,11 +18,8 @@
 
 use crate::{arg_enums::SyncMode, params::node_key_params::NodeKeyParams};
 use clap::Args;
-use sc_network::{
-	config::{NetworkConfiguration, NodeKeyConfig},
-	multiaddr::Protocol,
-};
-use sc_network_common::config::{NonReservedPeerMode, SetConfig, TransportConfig};
+use sc_network::{config::NetworkConfiguration, multiaddr::Protocol};
+use sc_network_common::config::{NodeKeyConfig, NonReservedPeerMode, SetConfig, TransportConfig};
 use sc_service::{
 	config::{Multiaddr, MultiaddrWithPeerId},
 	ChainSpec, ChainType,
diff --git a/substrate/client/cli/src/params/node_key_params.rs b/substrate/client/cli/src/params/node_key_params.rs
index 074b95bea0f..d470ef1fad1 100644
--- a/substrate/client/cli/src/params/node_key_params.rs
+++ b/substrate/client/cli/src/params/node_key_params.rs
@@ -17,7 +17,7 @@
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 use clap::Args;
-use sc_network::config::{identity::ed25519, NodeKeyConfig};
+use sc_network_common::config::{identity::ed25519, NodeKeyConfig};
 use sp_core::H256;
 use std::{path::PathBuf, str::FromStr};
 
@@ -92,7 +92,7 @@ impl NodeKeyParams {
 				let secret = if let Some(node_key) = self.node_key.as_ref() {
 					parse_ed25519_secret(node_key)?
 				} else {
-					sc_network::config::Secret::File(
+					sc_network_common::config::Secret::File(
 						self.node_key_file
 							.clone()
 							.unwrap_or_else(|| net_config_dir.join(NODE_KEY_ED25519_FILE)),
@@ -111,10 +111,10 @@ fn invalid_node_key(e: impl std::fmt::Display) -> error::Error {
 }
 
 /// Parse a Ed25519 secret key from a hex string into a `sc_network::Secret`.
-fn parse_ed25519_secret(hex: &str) -> error::Result<sc_network::config::Ed25519Secret> {
+fn parse_ed25519_secret(hex: &str) -> error::Result<sc_network_common::config::Ed25519Secret> {
 	H256::from_str(hex).map_err(invalid_node_key).and_then(|bytes| {
 		ed25519::SecretKey::from_bytes(bytes)
-			.map(sc_network::config::Secret::Input)
+			.map(sc_network_common::config::Secret::Input)
 			.map_err(invalid_node_key)
 	})
 }
@@ -123,7 +123,7 @@ fn parse_ed25519_secret(hex: &str) -> error::Result<sc_network::config::Ed25519S
 mod tests {
 	use super::*;
 	use clap::ValueEnum;
-	use sc_network::config::identity::{ed25519, Keypair};
+	use sc_network_common::config::identity::{ed25519, Keypair};
 	use std::fs;
 
 	#[test]
@@ -140,7 +140,7 @@ mod tests {
 					node_key_file: None,
 				};
 				params.node_key(net_config_dir).and_then(|c| match c {
-					NodeKeyConfig::Ed25519(sc_network::config::Secret::Input(ref ski))
+					NodeKeyConfig::Ed25519(sc_network_common::config::Secret::Input(ref ski))
 						if node_key_type == NodeKeyType::Ed25519 && &sk[..] == ski.as_ref() =>
 						Ok(()),
 					_ => Err(error::Error::Input("Unexpected node key config".into())),
@@ -200,7 +200,7 @@ mod tests {
 				let dir = PathBuf::from(net_config_dir.clone());
 				let typ = params.node_key_type;
 				params.node_key(net_config_dir).and_then(move |c| match c {
-					NodeKeyConfig::Ed25519(sc_network::config::Secret::File(ref f))
+					NodeKeyConfig::Ed25519(sc_network_common::config::Secret::File(ref f))
 						if typ == NodeKeyType::Ed25519 && f == &dir.join(NODE_KEY_ED25519_FILE) =>
 						Ok(()),
 					_ => Err(error::Error::Input("Unexpected node key config".into())),
diff --git a/substrate/client/consensus/aura/src/lib.rs b/substrate/client/consensus/aura/src/lib.rs
index f5f70857c0f..e90494ddfc1 100644
--- a/substrate/client/consensus/aura/src/lib.rs
+++ b/substrate/client/consensus/aura/src/lib.rs
@@ -766,6 +766,11 @@ mod tests {
 		fn peers(&self) -> &Vec<AuraPeer> {
 			&self.peers
 		}
+
+		fn peers_mut(&mut self) -> &mut Vec<AuraPeer> {
+			&mut self.peers
+		}
+
 		fn mut_peers<F: FnOnce(&mut Vec<AuraPeer>)>(&mut self, closure: F) {
 			closure(&mut self.peers);
 		}
diff --git a/substrate/client/consensus/babe/src/tests.rs b/substrate/client/consensus/babe/src/tests.rs
index 42b8730cdd9..fcdada94c45 100644
--- a/substrate/client/consensus/babe/src/tests.rs
+++ b/substrate/client/consensus/babe/src/tests.rs
@@ -349,6 +349,11 @@ impl TestNetFactory for BabeTestNet {
 		&self.peers
 	}
 
+	fn peers_mut(&mut self) -> &mut Vec<BabePeer> {
+		trace!(target: "babe", "Retrieving peers, mutable");
+		&mut self.peers
+	}
+
 	fn mut_peers<F: FnOnce(&mut Vec<BabePeer>)>(&mut self, closure: F) {
 		closure(&mut self.peers);
 	}
diff --git a/substrate/client/consensus/beefy/Cargo.toml b/substrate/client/consensus/beefy/Cargo.toml
index d88c07df261..161d53777eb 100644
--- a/substrate/client/consensus/beefy/Cargo.toml
+++ b/substrate/client/consensus/beefy/Cargo.toml
@@ -25,6 +25,7 @@ sc-keystore = { version = "4.0.0-dev", path = "../../keystore" }
 sc-network = { version = "0.10.0-dev", path = "../../network" }
 sc-network-common = { version = "0.10.0-dev", path = "../../network/common" }
 sc-network-gossip = { version = "0.10.0-dev", path = "../../network-gossip" }
+sc-network-sync = { version = "0.10.0-dev", path = "../../network/sync" }
 sc-utils = { version = "4.0.0-dev", path = "../../utils" }
 sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" }
 sp-application-crypto = { version = "7.0.0", path = "../../../primitives/application-crypto" }
diff --git a/substrate/client/consensus/beefy/src/lib.rs b/substrate/client/consensus/beefy/src/lib.rs
index 9b627e3ff8f..67758a4979b 100644
--- a/substrate/client/consensus/beefy/src/lib.rs
+++ b/substrate/client/consensus/beefy/src/lib.rs
@@ -40,7 +40,7 @@ use sc_client_api::{Backend, BlockBackend, BlockchainEvents, FinalityNotificatio
 use sc_consensus::BlockImport;
 use sc_network::ProtocolName;
 use sc_network_common::service::NetworkRequest;
-use sc_network_gossip::{GossipEngine, Network as GossipNetwork};
+use sc_network_gossip::{GossipEngine, Network as GossipNetwork, Syncing as GossipSyncing};
 use sp_api::{HeaderT, NumberFor, ProvideRuntimeApi};
 use sp_blockchain::{
 	Backend as BlockchainBackend, Error as ClientError, HeaderBackend, Result as ClientResult,
@@ -172,9 +172,11 @@ where
 }
 
 /// BEEFY gadget network parameters.
-pub struct BeefyNetworkParams<B: Block, N> {
+pub struct BeefyNetworkParams<B: Block, N, S> {
 	/// Network implementing gossip, requests and sync-oracle.
 	pub network: Arc<N>,
+	/// Syncing service implementing a sync oracle and an event stream for peers.
+	pub sync: Arc<S>,
 	/// Chain specific BEEFY gossip protocol name. See
 	/// [`communication::beefy_protocol_name::gossip_protocol_name`].
 	pub gossip_protocol_name: ProtocolName,
@@ -186,7 +188,7 @@ pub struct BeefyNetworkParams<B: Block, N> {
 }
 
 /// BEEFY gadget initialization parameters.
-pub struct BeefyParams<B: Block, BE, C, N, P, R> {
+pub struct BeefyParams<B: Block, BE, C, N, P, R, S> {
 	/// BEEFY client
 	pub client: Arc<C>,
 	/// Client Backend
@@ -198,7 +200,7 @@ pub struct BeefyParams<B: Block, BE, C, N, P, R> {
 	/// Local key store
 	pub key_store: Option<SyncCryptoStorePtr>,
 	/// BEEFY voter network params
-	pub network_params: BeefyNetworkParams<B, N>,
+	pub network_params: BeefyNetworkParams<B, N, S>,
 	/// Minimal delta between blocks, BEEFY should vote for
 	pub min_block_delta: u32,
 	/// Prometheus metric registry
@@ -212,15 +214,17 @@ pub struct BeefyParams<B: Block, BE, C, N, P, R> {
 /// Start the BEEFY gadget.
 ///
 /// This is a thin shim around running and awaiting a BEEFY worker.
-pub async fn start_beefy_gadget<B, BE, C, N, P, R>(beefy_params: BeefyParams<B, BE, C, N, P, R>)
-where
+pub async fn start_beefy_gadget<B, BE, C, N, P, R, S>(
+	beefy_params: BeefyParams<B, BE, C, N, P, R, S>,
+) where
 	B: Block,
 	BE: Backend<B>,
 	C: Client<B, BE> + BlockBackend<B>,
 	P: PayloadProvider<B>,
 	R: ProvideRuntimeApi<B>,
 	R::Api: BeefyApi<B> + MmrApi<B, MmrRootHash, NumberFor<B>>,
-	N: GossipNetwork<B> + NetworkRequest + SyncOracle + Send + Sync + 'static,
+	N: GossipNetwork<B> + NetworkRequest + Send + Sync + 'static,
+	S: GossipSyncing<B> + SyncOracle + 'static,
 {
 	let BeefyParams {
 		client,
@@ -235,14 +239,20 @@ where
 		on_demand_justifications_handler,
 	} = beefy_params;
 
-	let BeefyNetworkParams { network, gossip_protocol_name, justifications_protocol_name, .. } =
-		network_params;
+	let BeefyNetworkParams {
+		network,
+		sync,
+		gossip_protocol_name,
+		justifications_protocol_name,
+		..
+	} = network_params;
 
 	let known_peers = Arc::new(Mutex::new(KnownPeers::new()));
 	let gossip_validator =
 		Arc::new(communication::gossip::GossipValidator::new(known_peers.clone()));
 	let mut gossip_engine = sc_network_gossip::GossipEngine::new(
 		network.clone(),
+		sync.clone(),
 		gossip_protocol_name,
 		gossip_validator.clone(),
 		None,
@@ -280,7 +290,7 @@ where
 		backend,
 		payload_provider,
 		runtime,
-		network,
+		sync,
 		key_store: key_store.into(),
 		gossip_engine,
 		gossip_validator,
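
`start_beefy_gadget` is now generic over two parameters: `N` keeps the gossip and request-response duties, while `S` supplies the sync oracle and peer events. A minimal sketch of that split with stand-in traits (none of these names are the real BEEFY or `sc-network-gossip` types):

use std::sync::Arc;

// Stand-in traits: `GossipLike` keeps gossip/request duties, `SyncOracleLike`
// supplies the sync oracle.
trait GossipLike { fn gossip(&self, msg: &str); }
trait SyncOracleLike { fn is_major_syncing(&self) -> bool; }

// Mirrors the shape of `BeefyNetworkParams<B, N, S>`: one handle per concern.
struct NetworkParams<N, S> {
    network: Arc<N>,
    sync: Arc<S>,
}

fn start_gadget<N: GossipLike, S: SyncOracleLike>(params: NetworkParams<N, S>) {
    // Same guard the worker uses: stay quiet while a major sync is ongoing.
    if !params.sync.is_major_syncing() {
        params.network.gossip("vote");
    }
}

struct DummyNet;
impl GossipLike for DummyNet {
    fn gossip(&self, msg: &str) { println!("gossip: {msg}"); }
}

struct DummySync;
impl SyncOracleLike for DummySync {
    fn is_major_syncing(&self) -> bool { false }
}

fn main() {
    start_gadget(NetworkParams { network: Arc::new(DummyNet), sync: Arc::new(DummySync) });
}

Splitting the bounds is what lets the network parameter drop `SyncOracle` entirely, as the hunk above does for `N`.
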
diff --git a/substrate/client/consensus/beefy/src/tests.rs b/substrate/client/consensus/beefy/src/tests.rs
index e4fe16a2acf..fb1c45b90a8 100644
--- a/substrate/client/consensus/beefy/src/tests.rs
+++ b/substrate/client/consensus/beefy/src/tests.rs
@@ -223,6 +223,10 @@ impl TestNetFactory for BeefyTestNet {
 		&self.peers
 	}
 
+	fn peers_mut(&mut self) -> &mut Vec<BeefyPeer> {
+		&mut self.peers
+	}
+
 	fn mut_peers<F: FnOnce(&mut Vec<BeefyPeer>)>(&mut self, closure: F) {
 		closure(&mut self.peers);
 	}
@@ -353,6 +357,7 @@ async fn voter_init_setup(
 		Arc::new(crate::communication::gossip::GossipValidator::new(known_peers));
 	let mut gossip_engine = sc_network_gossip::GossipEngine::new(
 		net.peer(0).network_service().clone(),
+		net.peer(0).sync_service().clone(),
 		"/beefy/whatever",
 		gossip_validator,
 		None,
@@ -389,6 +394,7 @@ where
 
 		let network_params = crate::BeefyNetworkParams {
 			network: peer.network_service().clone(),
+			sync: peer.sync_service().clone(),
 			gossip_protocol_name: beefy_gossip_proto_name(),
 			justifications_protocol_name: on_demand_justif_handler.protocol_name(),
 			_phantom: PhantomData,
@@ -407,7 +413,7 @@ where
 			prometheus_registry: None,
 			on_demand_justifications_handler: on_demand_justif_handler,
 		};
-		let task = crate::start_beefy_gadget::<_, _, _, _, _, _>(beefy_params);
+		let task = crate::start_beefy_gadget::<_, _, _, _, _, _, _>(beefy_params);
 
 		fn assert_send<T: Send>(_: &T) {}
 		assert_send(&task);
diff --git a/substrate/client/consensus/beefy/src/worker.rs b/substrate/client/consensus/beefy/src/worker.rs
index 7f311495864..3f29dc7abee 100644
--- a/substrate/client/consensus/beefy/src/worker.rs
+++ b/substrate/client/consensus/beefy/src/worker.rs
@@ -33,7 +33,6 @@ use codec::{Codec, Decode, Encode};
 use futures::{stream::Fuse, FutureExt, StreamExt};
 use log::{debug, error, info, log_enabled, trace, warn};
 use sc_client_api::{Backend, FinalityNotification, FinalityNotifications, HeaderBackend};
-use sc_network_common::service::{NetworkEventStream, NetworkRequest};
 use sc_network_gossip::GossipEngine;
 use sc_utils::notification::NotificationReceiver;
 use sp_api::{BlockId, ProvideRuntimeApi};
@@ -244,11 +243,11 @@ impl<B: Block> VoterOracle<B> {
 	}
 }
 
-pub(crate) struct WorkerParams<B: Block, BE, P, R, N> {
+pub(crate) struct WorkerParams<B: Block, BE, P, R, S> {
 	pub backend: Arc<BE>,
 	pub payload_provider: P,
 	pub runtime: Arc<R>,
-	pub network: N,
+	pub sync: Arc<S>,
 	pub key_store: BeefyKeystore,
 	pub gossip_engine: GossipEngine<B>,
 	pub gossip_validator: Arc<GossipValidator<B>>,
@@ -296,12 +295,12 @@ impl<B: Block> PersistedState<B> {
 }
 
 /// A BEEFY worker plays the BEEFY protocol
-pub(crate) struct BeefyWorker<B: Block, BE, P, RuntimeApi, N> {
+pub(crate) struct BeefyWorker<B: Block, BE, P, RuntimeApi, S> {
 	// utilities
 	backend: Arc<BE>,
 	payload_provider: P,
 	runtime: Arc<RuntimeApi>,
-	network: N,
+	sync: Arc<S>,
 	key_store: BeefyKeystore,
 
 	// communication
@@ -330,14 +329,14 @@ pub(crate) struct BeefyWorker<B: Block, BE, P, RuntimeApi, N> {
 	persisted_state: PersistedState<B>,
 }
 
-impl<B, BE, P, R, N> BeefyWorker<B, BE, P, R, N>
+impl<B, BE, P, R, S> BeefyWorker<B, BE, P, R, S>
 where
 	B: Block + Codec,
 	BE: Backend<B>,
 	P: PayloadProvider<B>,
+	S: SyncOracle,
 	R: ProvideRuntimeApi<B>,
 	R::Api: BeefyApi<B>,
-	N: NetworkEventStream + NetworkRequest + SyncOracle + Send + Sync + Clone + 'static,
 {
 	/// Return a new BEEFY worker instance.
 	///
@@ -345,13 +344,13 @@ where
 	/// BEEFY pallet has been deployed on-chain.
 	///
 	/// The BEEFY pallet is needed in order to keep track of the BEEFY authority set.
-	pub(crate) fn new(worker_params: WorkerParams<B, BE, P, R, N>) -> Self {
+	pub(crate) fn new(worker_params: WorkerParams<B, BE, P, R, S>) -> Self {
 		let WorkerParams {
 			backend,
 			payload_provider,
 			runtime,
 			key_store,
-			network,
+			sync,
 			gossip_engine,
 			gossip_validator,
 			on_demand_justifications,
@@ -364,7 +363,7 @@ where
 			backend,
 			payload_provider,
 			runtime,
-			network,
+			sync,
 			key_store,
 			gossip_engine,
 			gossip_validator,
@@ -836,7 +835,7 @@ where
 		}
 
 		// Don't bother voting or requesting justifications during major sync.
-		if !self.network.is_major_syncing() {
+		if !self.sync.is_major_syncing() {
 			// There were external events, 'state' is changed, author a vote if needed/possible.
 			if let Err(err) = self.try_to_vote() {
 				debug!(target: LOG_TARGET, "🥩 {}", err);
@@ -1065,7 +1064,7 @@ pub(crate) mod tests {
 	use futures::{future::poll_fn, task::Poll};
 	use parking_lot::Mutex;
 	use sc_client_api::{Backend as BackendT, HeaderBackend};
-	use sc_network::NetworkService;
+	use sc_network_sync::SyncingService;
 	use sc_network_test::TestNetFactory;
 	use sp_api::HeaderT;
 	use sp_blockchain::Backend as BlockchainBackendT;
@@ -1075,7 +1074,7 @@ pub(crate) mod tests {
 	};
 	use sp_runtime::traits::One;
 	use substrate_test_runtime_client::{
-		runtime::{Block, Digest, DigestItem, Header, H256},
+		runtime::{Block, Digest, DigestItem, Header},
 		Backend,
 	};
 
@@ -1113,7 +1112,7 @@ pub(crate) mod tests {
 		Backend,
 		MmrRootProvider<Block, TestApi>,
 		TestApi,
-		Arc<NetworkService<Block, H256>>,
+		Arc<SyncingService<Block>>,
 	> {
 		let keystore = create_beefy_keystore(*key);
 
@@ -1137,10 +1136,16 @@ pub(crate) mod tests {
 		let backend = peer.client().as_backend();
 		let api = Arc::new(TestApi::with_validator_set(&genesis_validator_set));
 		let network = peer.network_service().clone();
+		let sync = peer.sync_service().clone();
 		let known_peers = Arc::new(Mutex::new(KnownPeers::new()));
 		let gossip_validator = Arc::new(GossipValidator::new(known_peers.clone()));
-		let gossip_engine =
-			GossipEngine::new(network.clone(), "/beefy/1", gossip_validator.clone(), None);
+		let gossip_engine = GossipEngine::new(
+			network.clone(),
+			sync.clone(),
+			"/beefy/1",
+			gossip_validator.clone(),
+			None,
+		);
 		let metrics = None;
 		let on_demand_justifications = OnDemandJustificationsEngine::new(
 			network.clone(),
@@ -1169,7 +1174,7 @@ pub(crate) mod tests {
 			gossip_engine,
 			gossip_validator,
 			metrics,
-			network,
+			sync: Arc::new(sync),
 			on_demand_justifications,
 			persisted_state,
 		};
diff --git a/substrate/client/consensus/grandpa/src/communication/mod.rs b/substrate/client/consensus/grandpa/src/communication/mod.rs
index d3ade4bf3d4..6c7e3ea4651 100644
--- a/substrate/client/consensus/grandpa/src/communication/mod.rs
+++ b/substrate/client/consensus/grandpa/src/communication/mod.rs
@@ -59,7 +59,10 @@ use crate::{
 use gossip::{
 	FullCatchUpMessage, FullCommitMessage, GossipMessage, GossipValidator, PeerReport, VoteMessage,
 };
-use sc_network_common::service::{NetworkBlock, NetworkSyncForkRequest};
+use sc_network_common::{
+	service::{NetworkBlock, NetworkSyncForkRequest},
+	sync::SyncEventStream,
+};
 use sc_utils::mpsc::TracingUnboundedReceiver;
 use sp_consensus_grandpa::{AuthorityId, AuthoritySignature, RoundNumber, SetId as SetIdNumber};
 
@@ -163,24 +166,35 @@ const TELEMETRY_VOTERS_LIMIT: usize = 10;
 
 /// A handle to the network.
 ///
-/// Something that provides both the capabilities needed for the `gossip_network::Network` trait as
-/// well as the ability to set a fork sync request for a particular block.
-pub trait Network<Block: BlockT>:
+/// Something that provides the capabilities needed for the `gossip_network::Network` trait.
+pub trait Network<Block: BlockT>: GossipNetwork<Block> + Clone + Send + 'static {}
+
+impl<Block, T> Network<Block> for T
+where
+	Block: BlockT,
+	T: GossipNetwork<Block> + Clone + Send + 'static,
+{
+}
+
+/// A handle to syncing-related services.
+///
+/// Something that provides the ability to set a fork sync request for a particular block.
+pub trait Syncing<Block: BlockT>:
 	NetworkSyncForkRequest<Block::Hash, NumberFor<Block>>
 	+ NetworkBlock<Block::Hash, NumberFor<Block>>
-	+ GossipNetwork<Block>
+	+ SyncEventStream
 	+ Clone
 	+ Send
 	+ 'static
 {
 }
 
-impl<Block, T> Network<Block> for T
+impl<Block, T> Syncing<Block> for T
 where
 	Block: BlockT,
 	T: NetworkSyncForkRequest<Block::Hash, NumberFor<Block>>
 		+ NetworkBlock<Block::Hash, NumberFor<Block>>
-		+ GossipNetwork<Block>
+		+ SyncEventStream
 		+ Clone
 		+ Send
 		+ 'static,
@@ -198,8 +212,9 @@ pub(crate) fn global_topic<B: BlockT>(set_id: SetIdNumber) -> B::Hash {
 }
 
 /// Bridge between the underlying network service, gossiping consensus messages and Grandpa
-pub(crate) struct NetworkBridge<B: BlockT, N: Network<B>> {
+pub(crate) struct NetworkBridge<B: BlockT, N: Network<B>, S: Syncing<B>> {
 	service: N,
+	sync: S,
 	gossip_engine: Arc<Mutex<GossipEngine<B>>>,
 	validator: Arc<GossipValidator<B>>,
 
@@ -225,15 +240,16 @@ pub(crate) struct NetworkBridge<B: BlockT, N: Network<B>> {
 	telemetry: Option<TelemetryHandle>,
 }
 
-impl<B: BlockT, N: Network<B>> Unpin for NetworkBridge<B, N> {}
+impl<B: BlockT, N: Network<B>, S: Syncing<B>> Unpin for NetworkBridge<B, N, S> {}
 
-impl<B: BlockT, N: Network<B>> NetworkBridge<B, N> {
+impl<B: BlockT, N: Network<B>, S: Syncing<B>> NetworkBridge<B, N, S> {
 	/// Create a new NetworkBridge to the given NetworkService. Returns the service
 	/// handle.
 	/// On creation it will register previous rounds' votes with the gossip
 	/// service taken from the VoterSetState.
 	pub(crate) fn new(
 		service: N,
+		sync: S,
 		config: crate::Config,
 		set_state: crate::environment::SharedVoterSetState<B>,
 		prometheus_registry: Option<&Registry>,
@@ -246,6 +262,7 @@ impl<B: BlockT, N: Network<B>> NetworkBridge<B, N> {
 		let validator = Arc::new(validator);
 		let gossip_engine = Arc::new(Mutex::new(GossipEngine::new(
 			service.clone(),
+			sync.clone(),
 			protocol,
 			validator.clone(),
 			prometheus_registry,
@@ -290,6 +307,7 @@ impl<B: BlockT, N: Network<B>> NetworkBridge<B, N> {
 
 		NetworkBridge {
 			service,
+			sync,
 			gossip_engine,
 			validator,
 			neighbor_sender: neighbor_packet_sender,
@@ -475,11 +493,11 @@ impl<B: BlockT, N: Network<B>> NetworkBridge<B, N> {
 		hash: B::Hash,
 		number: NumberFor<B>,
 	) {
-		self.service.set_sync_fork_request(peers, hash, number)
+		self.sync.set_sync_fork_request(peers, hash, number)
 	}
 }
 
-impl<B: BlockT, N: Network<B>> Future for NetworkBridge<B, N> {
+impl<B: BlockT, N: Network<B>, S: Syncing<B>> Future for NetworkBridge<B, N, S> {
 	type Output = Result<(), Error>;
 
 	fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
@@ -661,10 +679,11 @@ fn incoming_global<B: BlockT>(
 		})
 }
 
-impl<B: BlockT, N: Network<B>> Clone for NetworkBridge<B, N> {
+impl<B: BlockT, N: Network<B>, S: Syncing<B>> Clone for NetworkBridge<B, N, S> {
 	fn clone(&self) -> Self {
 		NetworkBridge {
 			service: self.service.clone(),
+			sync: self.sync.clone(),
 			gossip_engine: self.gossip_engine.clone(),
 			validator: Arc::clone(&self.validator),
 			neighbor_sender: self.neighbor_sender.clone(),
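
`NetworkBridge` now keeps a gossip handle and a syncing handle side by side, and fork-sync requests are delegated to the latter. A compact stand-alone sketch of that shape (toy traits, not the real `Network`/`Syncing` traits):

// Stand-in traits and types for illustration only.
trait GossipNet: Clone {
    fn write_notification(&self, msg: &str);
}

trait SyncRequests: Clone {
    fn set_sync_fork_request(&self, hash: &str, number: u64);
}

// The bridge keeps the two handles side by side instead of funnelling
// everything through the network service.
#[derive(Clone)]
struct Bridge<N: GossipNet, S: SyncRequests> {
    service: N,
    sync: S,
}

impl<N: GossipNet, S: SyncRequests> Bridge<N, S> {
    fn set_sync_fork_request(&self, hash: &str, number: u64) {
        // Delegates to the syncing handle, as `NetworkBridge` now does.
        self.sync.set_sync_fork_request(hash, number);
    }
}

#[derive(Clone)]
struct DummyNet;
impl GossipNet for DummyNet {
    fn write_notification(&self, msg: &str) { println!("notify: {msg}"); }
}

#[derive(Clone)]
struct DummySync;
impl SyncRequests for DummySync {
    fn set_sync_fork_request(&self, hash: &str, number: u64) {
        println!("fork request: {hash} at #{number}");
    }
}

fn main() {
    let bridge = Bridge { service: DummyNet, sync: DummySync };
    bridge.service.write_notification("neighbor packet");
    bridge.set_sync_fork_request("0xabcd", 10);
}
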
diff --git a/substrate/client/consensus/grandpa/src/communication/tests.rs b/substrate/client/consensus/grandpa/src/communication/tests.rs
index 21e2e978c87..843e5467910 100644
--- a/substrate/client/consensus/grandpa/src/communication/tests.rs
+++ b/substrate/client/consensus/grandpa/src/communication/tests.rs
@@ -33,6 +33,7 @@ use sc_network_common::{
 		NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers,
 		NetworkSyncForkRequest, NotificationSender, NotificationSenderError,
 	},
+	sync::{SyncEvent as SyncStreamEvent, SyncEventStream},
 };
 use sc_network_gossip::Validator;
 use sc_network_test::{Block, Hash};
@@ -153,6 +154,10 @@ impl NetworkNotification for TestNetwork {
 	) -> Result<Box<dyn NotificationSender>, NotificationSenderError> {
 		unimplemented!();
 	}
+
+	fn set_notification_handshake(&self, _protocol: ProtocolName, _handshake: Vec<u8>) {
+		unimplemented!();
+	}
 }
 
 impl NetworkBlock<Hash, NumberFor<Block>> for TestNetwork {
@@ -186,8 +191,34 @@ impl sc_network_gossip::ValidatorContext<Block> for TestNetwork {
 	fn send_topic(&mut self, _: &PeerId, _: Hash, _: bool) {}
 }
 
+#[derive(Clone)]
+pub(crate) struct TestSync;
+
+impl SyncEventStream for TestSync {
+	fn event_stream(
+		&self,
+		_name: &'static str,
+	) -> Pin<Box<dyn Stream<Item = SyncStreamEvent> + Send>> {
+		Box::pin(futures::stream::pending())
+	}
+}
+
+impl NetworkBlock<Hash, NumberFor<Block>> for TestSync {
+	fn announce_block(&self, _hash: Hash, _data: Option<Vec<u8>>) {
+		unimplemented!();
+	}
+
+	fn new_best_block_imported(&self, _hash: Hash, _number: NumberFor<Block>) {
+		unimplemented!();
+	}
+}
+
+impl NetworkSyncForkRequest<Hash, NumberFor<Block>> for TestSync {
+	fn set_sync_fork_request(&self, _peers: Vec<PeerId>, _hash: Hash, _number: NumberFor<Block>) {}
+}
+
 pub(crate) struct Tester {
-	pub(crate) net_handle: super::NetworkBridge<Block, TestNetwork>,
+	pub(crate) net_handle: super::NetworkBridge<Block, TestNetwork, TestSync>,
 	gossip_validator: Arc<GossipValidator<Block>>,
 	pub(crate) events: TracingUnboundedReceiver<Event>,
 }
@@ -255,6 +286,7 @@ fn voter_set_state() -> SharedVoterSetState<Block> {
 pub(crate) fn make_test_network() -> (impl Future<Output = Tester>, TestNetwork) {
 	let (tx, rx) = tracing_unbounded("test", 100_000);
 	let net = TestNetwork { sender: tx };
+	let sync = TestSync {};
 
 	#[derive(Clone)]
 	struct Exit;
@@ -267,7 +299,8 @@ pub(crate) fn make_test_network() -> (impl Future<Output = Tester>, TestNetwork)
 		}
 	}
 
-	let bridge = super::NetworkBridge::new(net.clone(), config(), voter_set_state(), None, None);
+	let bridge =
+		super::NetworkBridge::new(net.clone(), sync, config(), voter_set_state(), None, None);
 
 	(
 		futures::future::ready(Tester {
@@ -370,6 +403,7 @@ fn good_commit_leads_to_relay() {
 						protocol: grandpa_protocol_name::NAME.into(),
 						negotiated_fallback: None,
 						role: ObservedRole::Full,
+						received_handshake: vec![],
 					});
 
 					let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived {
@@ -387,6 +421,7 @@ fn good_commit_leads_to_relay() {
 						protocol: grandpa_protocol_name::NAME.into(),
 						negotiated_fallback: None,
 						role: ObservedRole::Full,
+						received_handshake: vec![],
 					});
 
 					// Announce its local set has been on the current set id through a neighbor
@@ -519,6 +554,7 @@ fn bad_commit_leads_to_report() {
 						protocol: grandpa_protocol_name::NAME.into(),
 						negotiated_fallback: None,
 						role: ObservedRole::Full,
+						received_handshake: vec![],
 					});
 					let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived {
 						remote: sender_id,
diff --git a/substrate/client/consensus/grandpa/src/environment.rs b/substrate/client/consensus/grandpa/src/environment.rs
index 34254266171..67820a59cc9 100644
--- a/substrate/client/consensus/grandpa/src/environment.rs
+++ b/substrate/client/consensus/grandpa/src/environment.rs
@@ -50,7 +50,7 @@ use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero};
 
 use crate::{
 	authorities::{AuthoritySet, SharedAuthoritySet},
-	communication::Network as NetworkT,
+	communication::{Network as NetworkT, Syncing as SyncingT},
 	justification::GrandpaJustification,
 	local_authority_id,
 	notification::GrandpaJustificationSender,
@@ -423,13 +423,21 @@ impl Metrics {
 }
 
 /// The environment we run GRANDPA in.
-pub(crate) struct Environment<Backend, Block: BlockT, C, N: NetworkT<Block>, SC, VR> {
+pub(crate) struct Environment<
+	Backend,
+	Block: BlockT,
+	C,
+	N: NetworkT<Block>,
+	S: SyncingT<Block>,
+	SC,
+	VR,
+> {
 	pub(crate) client: Arc<C>,
 	pub(crate) select_chain: SC,
 	pub(crate) voters: Arc<VoterSet<AuthorityId>>,
 	pub(crate) config: Config,
 	pub(crate) authority_set: SharedAuthoritySet<Block::Hash, NumberFor<Block>>,
-	pub(crate) network: crate::communication::NetworkBridge<Block, N>,
+	pub(crate) network: crate::communication::NetworkBridge<Block, N, S>,
 	pub(crate) set_id: SetId,
 	pub(crate) voter_set_state: SharedVoterSetState<Block>,
 	pub(crate) voting_rule: VR,
@@ -439,7 +447,9 @@ pub(crate) struct Environment<Backend, Block: BlockT, C, N: NetworkT<Block>, SC,
 	pub(crate) _phantom: PhantomData<Backend>,
 }
 
-impl<BE, Block: BlockT, C, N: NetworkT<Block>, SC, VR> Environment<BE, Block, C, N, SC, VR> {
+impl<BE, Block: BlockT, C, N: NetworkT<Block>, S: SyncingT<Block>, SC, VR>
+	Environment<BE, Block, C, N, S, SC, VR>
+{
 	/// Updates the voter set state using the given closure. The write lock is
 	/// held during evaluation of the closure and the environment's voter set
 	/// state is set to its result if successful.
@@ -469,13 +479,14 @@ impl<BE, Block: BlockT, C, N: NetworkT<Block>, SC, VR> Environment<BE, Block, C,
 	}
 }
 
-impl<BE, Block, C, N, SC, VR> Environment<BE, Block, C, N, SC, VR>
+impl<BE, Block, C, N, S, SC, VR> Environment<BE, Block, C, N, S, SC, VR>
 where
 	Block: BlockT,
 	BE: BackendT<Block>,
 	C: ClientForGrandpa<Block, BE>,
 	C::Api: GrandpaApi<Block>,
 	N: NetworkT<Block>,
+	S: SyncingT<Block>,
 	SC: SelectChainT<Block>,
 {
 	/// Report the given equivocation to the GRANDPA runtime module. This method
@@ -572,13 +583,14 @@ where
 	}
 }
 
-impl<BE, Block, C, N, SC, VR> finality_grandpa::Chain<Block::Hash, NumberFor<Block>>
-	for Environment<BE, Block, C, N, SC, VR>
+impl<BE, Block, C, N, S, SC, VR> finality_grandpa::Chain<Block::Hash, NumberFor<Block>>
+	for Environment<BE, Block, C, N, S, SC, VR>
 where
 	Block: BlockT,
 	BE: BackendT<Block>,
 	C: ClientForGrandpa<Block, BE>,
 	N: NetworkT<Block>,
+	S: SyncingT<Block>,
 	SC: SelectChainT<Block>,
 	VR: VotingRuleT<Block, C>,
 	NumberFor<Block>: BlockNumberOps,
@@ -630,14 +642,15 @@ where
 	Ok(tree_route.retracted().iter().skip(1).map(|e| e.hash).collect())
 }
 
-impl<B, Block, C, N, SC, VR> voter::Environment<Block::Hash, NumberFor<Block>>
-	for Environment<B, Block, C, N, SC, VR>
+impl<B, Block, C, N, S, SC, VR> voter::Environment<Block::Hash, NumberFor<Block>>
+	for Environment<B, Block, C, N, S, SC, VR>
 where
 	Block: BlockT,
 	B: BackendT<Block>,
 	C: ClientForGrandpa<Block, B> + 'static,
 	C::Api: GrandpaApi<Block>,
 	N: NetworkT<Block>,
+	S: SyncingT<Block>,
 	SC: SelectChainT<Block> + 'static,
 	VR: VotingRuleT<Block, C> + Clone + 'static,
 	NumberFor<Block>: BlockNumberOps,
diff --git a/substrate/client/consensus/grandpa/src/lib.rs b/substrate/client/consensus/grandpa/src/lib.rs
index ef4beb51b4f..7bf48a498fa 100644
--- a/substrate/client/consensus/grandpa/src/lib.rs
+++ b/substrate/client/consensus/grandpa/src/lib.rs
@@ -141,7 +141,7 @@ pub use voting_rule::{
 };
 
 use aux_schema::PersistentData;
-use communication::{Network as NetworkT, NetworkBridge};
+use communication::{Network as NetworkT, NetworkBridge, Syncing as SyncingT};
 use environment::{Environment, VoterSetState};
 use until_imported::UntilGlobalMessageBlocksImported;
 
@@ -349,10 +349,11 @@ pub(crate) trait BlockSyncRequester<Block: BlockT> {
 	);
 }
 
-impl<Block, Network> BlockSyncRequester<Block> for NetworkBridge<Block, Network>
+impl<Block, Network, Syncing> BlockSyncRequester<Block> for NetworkBridge<Block, Network, Syncing>
 where
 	Block: BlockT,
 	Network: NetworkT<Block>,
+	Syncing: SyncingT<Block>,
 {
 	fn set_sync_fork_request(
 		&self,
@@ -617,11 +618,11 @@ where
 	))
 }
 
-fn global_communication<BE, Block: BlockT, C, N>(
+fn global_communication<BE, Block: BlockT, C, N, S>(
 	set_id: SetId,
 	voters: &Arc<VoterSet<AuthorityId>>,
 	client: Arc<C>,
-	network: &NetworkBridge<Block, N>,
+	network: &NetworkBridge<Block, N, S>,
 	keystore: Option<&SyncCryptoStorePtr>,
 	metrics: Option<until_imported::Metrics>,
 ) -> (
@@ -640,6 +641,7 @@ where
 	BE: Backend<Block> + 'static,
 	C: ClientForGrandpa<Block, BE> + 'static,
 	N: NetworkT<Block>,
+	S: SyncingT<Block>,
 	NumberFor<Block>: BlockNumberOps,
 {
 	let is_voter = local_authority_id(voters, keystore).is_some();
@@ -665,7 +667,7 @@ where
 }
 
 /// Parameters used to run Grandpa.
-pub struct GrandpaParams<Block: BlockT, C, N, SC, VR> {
+pub struct GrandpaParams<Block: BlockT, C, N, S, SC, VR> {
 	/// Configuration for the GRANDPA service.
 	pub config: Config,
 	/// A link to the block import worker.
@@ -676,6 +678,8 @@ pub struct GrandpaParams<Block: BlockT, C, N, SC, VR> {
 	/// `sc_network` crate, it is assumed that the Grandpa notifications protocol has been passed
 	/// to the configuration of the networking. See [`grandpa_peers_set_config`].
 	pub network: N,
+	/// Event stream for syncing-related events.
+	pub sync: S,
 	/// A voting rule used to potentially restrict target votes.
 	pub voting_rule: VR,
 	/// The prometheus metrics registry.
@@ -710,13 +714,14 @@ pub fn grandpa_peers_set_config(
 
 /// Run a GRANDPA voter as a task. Provide configuration and a link to a
 /// block import worker that has already been instantiated with `block_import`.
-pub fn run_grandpa_voter<Block: BlockT, BE: 'static, C, N, SC, VR>(
-	grandpa_params: GrandpaParams<Block, C, N, SC, VR>,
+pub fn run_grandpa_voter<Block: BlockT, BE: 'static, C, N, S, SC, VR>(
+	grandpa_params: GrandpaParams<Block, C, N, S, SC, VR>,
 ) -> sp_blockchain::Result<impl Future<Output = ()> + Send>
 where
 	Block::Hash: Ord,
 	BE: Backend<Block> + 'static,
 	N: NetworkT<Block> + Sync + 'static,
+	S: SyncingT<Block> + Sync + 'static,
 	SC: SelectChain<Block> + 'static,
 	VR: VotingRule<Block, C> + Clone + 'static,
 	NumberFor<Block>: BlockNumberOps,
@@ -727,6 +732,7 @@ where
 		mut config,
 		link,
 		network,
+		sync,
 		voting_rule,
 		prometheus_registry,
 		shared_voter_state,
@@ -751,6 +757,7 @@ where
 
 	let network = NetworkBridge::new(
 		network,
+		sync,
 		config.clone(),
 		persistent_data.set_state.clone(),
 		prometheus_registry.as_ref(),
@@ -836,26 +843,27 @@ impl Metrics {
 
 /// Future that powers the voter.
 #[must_use]
-struct VoterWork<B, Block: BlockT, C, N: NetworkT<Block>, SC, VR> {
+struct VoterWork<B, Block: BlockT, C, N: NetworkT<Block>, S: SyncingT<Block>, SC, VR> {
 	voter: Pin<
 		Box<dyn Future<Output = Result<(), CommandOrError<Block::Hash, NumberFor<Block>>>> + Send>,
 	>,
 	shared_voter_state: SharedVoterState,
-	env: Arc<Environment<B, Block, C, N, SC, VR>>,
+	env: Arc<Environment<B, Block, C, N, S, SC, VR>>,
 	voter_commands_rx: TracingUnboundedReceiver<VoterCommand<Block::Hash, NumberFor<Block>>>,
-	network: NetworkBridge<Block, N>,
+	network: NetworkBridge<Block, N, S>,
 	telemetry: Option<TelemetryHandle>,
 	/// Prometheus metrics.
 	metrics: Option<Metrics>,
 }
 
-impl<B, Block, C, N, SC, VR> VoterWork<B, Block, C, N, SC, VR>
+impl<B, Block, C, N, S, SC, VR> VoterWork<B, Block, C, N, S, SC, VR>
 where
 	Block: BlockT,
 	B: Backend<Block> + 'static,
 	C: ClientForGrandpa<Block, B> + 'static,
 	C::Api: GrandpaApi<Block>,
 	N: NetworkT<Block> + Sync,
+	S: SyncingT<Block> + Sync,
 	NumberFor<Block>: BlockNumberOps,
 	SC: SelectChain<Block> + 'static,
 	VR: VotingRule<Block, C> + Clone + 'static,
@@ -863,7 +871,7 @@ where
 	fn new(
 		client: Arc<C>,
 		config: Config,
-		network: NetworkBridge<Block, N>,
+		network: NetworkBridge<Block, N, S>,
 		select_chain: SC,
 		voting_rule: VR,
 		persistent_data: PersistentData<Block>,
@@ -1072,11 +1080,12 @@ where
 	}
 }
 
-impl<B, Block, C, N, SC, VR> Future for VoterWork<B, Block, C, N, SC, VR>
+impl<B, Block, C, N, S, SC, VR> Future for VoterWork<B, Block, C, N, S, SC, VR>
 where
 	Block: BlockT,
 	B: Backend<Block> + 'static,
 	N: NetworkT<Block> + Sync,
+	S: SyncingT<Block> + Sync,
 	NumberFor<Block>: BlockNumberOps,
 	SC: SelectChain<Block> + 'static,
 	C: ClientForGrandpa<Block, B> + 'static,
diff --git a/substrate/client/consensus/grandpa/src/observer.rs b/substrate/client/consensus/grandpa/src/observer.rs
index b382430ef3d..53672c1f022 100644
--- a/substrate/client/consensus/grandpa/src/observer.rs
+++ b/substrate/client/consensus/grandpa/src/observer.rs
@@ -39,7 +39,7 @@ use sp_runtime::traits::{Block as BlockT, NumberFor};
 use crate::{
 	authorities::SharedAuthoritySet,
 	aux_schema::PersistentData,
-	communication::{Network as NetworkT, NetworkBridge},
+	communication::{Network as NetworkT, NetworkBridge, Syncing as SyncingT},
 	environment, global_communication,
 	notification::GrandpaJustificationSender,
 	ClientForGrandpa, CommandOrError, CommunicationIn, Config, Error, LinkHalf, VoterCommand,
@@ -163,14 +163,16 @@ where
 /// already been instantiated with `block_import`.
 /// NOTE: this is currently not part of the crate's public API since we don't consider
 /// it stable enough to use on a live network.
-pub fn run_grandpa_observer<BE, Block: BlockT, Client, N, SC>(
+pub fn run_grandpa_observer<BE, Block: BlockT, Client, N, S, SC>(
 	config: Config,
 	link: LinkHalf<Block, Client, SC>,
 	network: N,
+	sync: S,
 ) -> sp_blockchain::Result<impl Future<Output = ()> + Send>
 where
 	BE: Backend<Block> + Unpin + 'static,
 	N: NetworkT<Block>,
+	S: SyncingT<Block>,
 	SC: SelectChain<Block>,
 	NumberFor<Block>: BlockNumberOps,
 	Client: ClientForGrandpa<Block, BE> + 'static,
@@ -186,6 +188,7 @@ where
 
 	let network = NetworkBridge::new(
 		network,
+		sync,
 		config.clone(),
 		persistent_data.set_state.clone(),
 		None,
@@ -211,11 +214,11 @@ where
 
 /// Future that powers the observer.
 #[must_use]
-struct ObserverWork<B: BlockT, BE, Client, N: NetworkT<B>> {
+struct ObserverWork<B: BlockT, BE, Client, N: NetworkT<B>, S: SyncingT<B>> {
 	observer:
 		Pin<Box<dyn Future<Output = Result<(), CommandOrError<B::Hash, NumberFor<B>>>> + Send>>,
 	client: Arc<Client>,
-	network: NetworkBridge<B, N>,
+	network: NetworkBridge<B, N, S>,
 	persistent_data: PersistentData<B>,
 	keystore: Option<SyncCryptoStorePtr>,
 	voter_commands_rx: TracingUnboundedReceiver<VoterCommand<B::Hash, NumberFor<B>>>,
@@ -224,17 +227,18 @@ struct ObserverWork<B: BlockT, BE, Client, N: NetworkT<B>> {
 	_phantom: PhantomData<BE>,
 }
 
-impl<B, BE, Client, Network> ObserverWork<B, BE, Client, Network>
+impl<B, BE, Client, Network, Syncing> ObserverWork<B, BE, Client, Network, Syncing>
 where
 	B: BlockT,
 	BE: Backend<B> + 'static,
 	Client: ClientForGrandpa<B, BE> + 'static,
 	Network: NetworkT<B>,
+	Syncing: SyncingT<B>,
 	NumberFor<B>: BlockNumberOps,
 {
 	fn new(
 		client: Arc<Client>,
-		network: NetworkBridge<B, Network>,
+		network: NetworkBridge<B, Network, Syncing>,
 		persistent_data: PersistentData<B>,
 		keystore: Option<SyncCryptoStorePtr>,
 		voter_commands_rx: TracingUnboundedReceiver<VoterCommand<B::Hash, NumberFor<B>>>,
@@ -347,12 +351,13 @@ where
 	}
 }
 
-impl<B, BE, C, N> Future for ObserverWork<B, BE, C, N>
+impl<B, BE, C, N, S> Future for ObserverWork<B, BE, C, N, S>
 where
 	B: BlockT,
 	BE: Backend<B> + Unpin + 'static,
 	C: ClientForGrandpa<B, BE> + 'static,
 	N: NetworkT<B>,
+	S: SyncingT<B>,
 	NumberFor<B>: BlockNumberOps,
 {
 	type Output = Result<(), Error>;
diff --git a/substrate/client/consensus/grandpa/src/tests.rs b/substrate/client/consensus/grandpa/src/tests.rs
index 7cc5e75fa14..f7747a20301 100644
--- a/substrate/client/consensus/grandpa/src/tests.rs
+++ b/substrate/client/consensus/grandpa/src/tests.rs
@@ -145,6 +145,10 @@ impl TestNetFactory for GrandpaTestNet {
 		&self.peers
 	}
 
+	fn peers_mut(&mut self) -> &mut Vec<GrandpaPeer> {
+		&mut self.peers
+	}
+
 	fn mut_peers<F: FnOnce(&mut Vec<GrandpaPeer>)>(&mut self, closure: F) {
 		closure(&mut self.peers);
 	}
@@ -310,6 +314,7 @@ fn initialize_grandpa(
 				net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed");
 			(net.peers[peer_id].network_service().clone(), link)
 		};
+		let sync = net.peers[peer_id].sync_service().clone();
 
 		let grandpa_params = GrandpaParams {
 			config: Config {
@@ -324,6 +329,7 @@ fn initialize_grandpa(
 			},
 			link,
 			network: net_service,
+			sync,
 			voting_rule: (),
 			prometheus_registry: None,
 			shared_voter_state: SharedVoterState::empty(),
@@ -451,6 +457,7 @@ async fn finalize_3_voters_1_full_observer() {
 	tokio::spawn({
 		let peer_id = 3;
 		let net_service = net.peers[peer_id].network_service().clone();
+		let sync = net.peers[peer_id].sync_service().clone();
 		let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed");
 
 		let grandpa_params = GrandpaParams {
@@ -466,6 +473,7 @@ async fn finalize_3_voters_1_full_observer() {
 			},
 			link,
 			network: net_service,
+			sync,
 			voting_rule: (),
 			prometheus_registry: None,
 			shared_voter_state: SharedVoterState::empty(),
@@ -533,11 +541,15 @@ async fn transition_3_voters_twice_1_full_observer() {
 	for (peer_id, local_key) in all_peers.clone().into_iter().enumerate() {
 		let keystore = create_keystore(local_key);
 
-		let (net_service, link) = {
+		let (net_service, link, sync) = {
 			let net = net.lock();
 			let link =
 				net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed");
-			(net.peers[peer_id].network_service().clone(), link)
+			(
+				net.peers[peer_id].network_service().clone(),
+				link,
+				net.peers[peer_id].sync_service().clone(),
+			)
 		};
 
 		let grandpa_params = GrandpaParams {
@@ -553,6 +565,7 @@ async fn transition_3_voters_twice_1_full_observer() {
 			},
 			link,
 			network: net_service,
+			sync,
 			voting_rule: (),
 			prometheus_registry: None,
 			shared_voter_state: SharedVoterState::empty(),
@@ -999,6 +1012,7 @@ async fn voter_persists_its_votes() {
 
 		communication::NetworkBridge::new(
 			net.peers[1].network_service().clone(),
+			net.peers[1].sync_service().clone(),
 			config.clone(),
 			set_state,
 			None,
@@ -1016,6 +1030,7 @@ async fn voter_persists_its_votes() {
 			let link = net.peers[0].data.lock().take().expect("link initialized at startup; qed");
 			(net.peers[0].network_service().clone(), link)
 		};
+		let sync = net.peers[0].sync_service().clone();
 
 		let grandpa_params = GrandpaParams {
 			config: Config {
@@ -1030,6 +1045,7 @@ async fn voter_persists_its_votes() {
 			},
 			link,
 			network: net_service,
+			sync,
 			voting_rule: VotingRulesBuilder::default().build(),
 			prometheus_registry: None,
 			shared_voter_state: SharedVoterState::empty(),
@@ -1050,6 +1066,7 @@ async fn voter_persists_its_votes() {
 		// the network service of this new peer
 		net.add_authority_peer();
 		let net_service = net.peers[2].network_service().clone();
+		let sync = net.peers[2].sync_service().clone();
 		// but we'll reuse the client from the first peer (alice_voter1)
 		// since we want to share the same database, so that we can
 		// read the persisted state after aborting alice_voter1.
@@ -1071,6 +1088,7 @@ async fn voter_persists_its_votes() {
 			},
 			link,
 			network: net_service,
+			sync,
 			voting_rule: VotingRulesBuilder::default().build(),
 			prometheus_registry: None,
 			shared_voter_state: SharedVoterState::empty(),
@@ -1232,6 +1250,7 @@ async fn finalize_3_voters_1_light_observer() {
 		},
 		net.peers[3].data.lock().take().expect("link initialized at startup; qed"),
 		net.peers[3].network_service().clone(),
+		net.peers[3].sync_service().clone(),
 	)
 	.unwrap();
 	net.peer(0).push_blocks(20, false);
@@ -1265,6 +1284,7 @@ async fn voter_catches_up_to_latest_round_when_behind() {
 	             link,
 	             net: Arc<Mutex<GrandpaTestNet>>|
 	 -> Pin<Box<dyn Future<Output = ()> + Send>> {
+		let mut net = net.lock();
 		let grandpa_params = GrandpaParams {
 			config: Config {
 				gossip_duration: TEST_GOSSIP_DURATION,
@@ -1277,7 +1297,8 @@ async fn voter_catches_up_to_latest_round_when_behind() {
 				protocol_name: grandpa_protocol_name::NAME.into(),
 			},
 			link,
-			network: net.lock().peer(peer_id).network_service().clone(),
+			network: net.peer(peer_id).network_service().clone(),
+			sync: net.peer(peer_id).sync_service().clone(),
 			voting_rule: (),
 			prometheus_registry: None,
 			shared_voter_state: SharedVoterState::empty(),
@@ -1359,18 +1380,20 @@ async fn voter_catches_up_to_latest_round_when_behind() {
 	future::select(test, drive_to_completion).await;
 }
 
-type TestEnvironment<N, SC, VR> =
-	Environment<substrate_test_runtime_client::Backend, Block, TestClient, N, SC, VR>;
+type TestEnvironment<N, S, SC, VR> =
+	Environment<substrate_test_runtime_client::Backend, Block, TestClient, N, S, SC, VR>;
 
-fn test_environment_with_select_chain<N, VR, SC>(
+fn test_environment_with_select_chain<N, S, VR, SC>(
 	link: &TestLinkHalf,
 	keystore: Option<SyncCryptoStorePtr>,
 	network_service: N,
+	sync_service: S,
 	select_chain: SC,
 	voting_rule: VR,
-) -> TestEnvironment<N, SC, VR>
+) -> TestEnvironment<N, S, SC, VR>
 where
 	N: NetworkT<Block>,
+	S: SyncingT<Block>,
 	VR: VotingRule<Block, TestClient>,
 {
 	let PersistentData { ref authority_set, ref set_state, .. } = link.persistent_data;
@@ -1386,8 +1409,14 @@ where
 		protocol_name: grandpa_protocol_name::NAME.into(),
 	};
 
-	let network =
-		NetworkBridge::new(network_service.clone(), config.clone(), set_state.clone(), None, None);
+	let network = NetworkBridge::new(
+		network_service.clone(),
+		sync_service,
+		config.clone(),
+		set_state.clone(),
+		None,
+		None,
+	);
 
 	Environment {
 		authority_set: authority_set.clone(),
@@ -1406,20 +1435,23 @@ where
 	}
 }
 
-fn test_environment<N, VR>(
+fn test_environment<N, S, VR>(
 	link: &TestLinkHalf,
 	keystore: Option<SyncCryptoStorePtr>,
 	network_service: N,
+	sync_service: S,
 	voting_rule: VR,
-) -> TestEnvironment<N, LongestChain<substrate_test_runtime_client::Backend, Block>, VR>
+) -> TestEnvironment<N, S, LongestChain<substrate_test_runtime_client::Backend, Block>, VR>
 where
 	N: NetworkT<Block>,
+	S: SyncingT<Block>,
 	VR: VotingRule<Block, TestClient>,
 {
 	test_environment_with_select_chain(
 		link,
 		keystore,
 		network_service,
+		sync_service,
 		link.select_chain.clone(),
 		voting_rule,
 	)
@@ -1435,19 +1467,22 @@ async fn grandpa_environment_respects_voting_rules() {
 	let mut net = GrandpaTestNet::new(TestApi::new(voters), 1, 0);
 	let peer = net.peer(0);
 	let network_service = peer.network_service().clone();
+	let sync_service = peer.sync_service().clone();
 	let link = peer.data.lock().take().unwrap();
 
 	// add 21 blocks
 	let hashes = peer.push_blocks(21, false);
 
 	// create an environment with no voting rule restrictions
-	let unrestricted_env = test_environment(&link, None, network_service.clone(), ());
+	let unrestricted_env =
+		test_environment(&link, None, network_service.clone(), sync_service.clone(), ());
 
 	// another with 3/4 unfinalized chain voting rule restriction
 	let three_quarters_env = test_environment(
 		&link,
 		None,
 		network_service.clone(),
+		sync_service.clone(),
 		voting_rule::ThreeQuartersOfTheUnfinalizedChain,
 	);
 
@@ -1457,6 +1492,7 @@ async fn grandpa_environment_respects_voting_rules() {
 		&link,
 		None,
 		network_service.clone(),
+		sync_service,
 		VotingRulesBuilder::default().build(),
 	);
 
@@ -1549,6 +1585,7 @@ async fn grandpa_environment_passes_actual_best_block_to_voting_rules() {
 	let mut net = GrandpaTestNet::new(TestApi::new(voters), 1, 0);
 	let peer = net.peer(0);
 	let network_service = peer.network_service().clone();
+	let sync_service = peer.sync_service().clone();
 	let link = peer.data.lock().take().unwrap();
 	let client = peer.client().as_client().clone();
 	let select_chain = MockSelectChain::default();
@@ -1562,6 +1599,7 @@ async fn grandpa_environment_passes_actual_best_block_to_voting_rules() {
 		&link,
 		None,
 		network_service.clone(),
+		sync_service,
 		select_chain.clone(),
 		voting_rule::BeforeBestBlockBy(5),
 	);
@@ -1607,6 +1645,7 @@ async fn grandpa_environment_checks_if_best_block_is_descendent_of_finality_targ
 	let mut net = GrandpaTestNet::new(TestApi::new(voters), 1, 0);
 	let peer = net.peer(0);
 	let network_service = peer.network_service().clone();
+	let sync_service = peer.sync_service().clone();
 	let link = peer.data.lock().take().unwrap();
 	let client = peer.client().as_client().clone();
 	let select_chain = MockSelectChain::default();
@@ -1615,6 +1654,7 @@ async fn grandpa_environment_checks_if_best_block_is_descendent_of_finality_targ
 		&link,
 		None,
 		network_service.clone(),
+		sync_service.clone(),
 		select_chain.clone(),
 		voting_rule.clone(),
 	);
@@ -1717,10 +1757,12 @@ async fn grandpa_environment_never_overwrites_round_voter_state() {
 	let mut net = GrandpaTestNet::new(TestApi::new(voters), 1, 0);
 	let peer = net.peer(0);
 	let network_service = peer.network_service().clone();
+	let sync_service = peer.sync_service().clone();
 	let link = peer.data.lock().take().unwrap();
 
 	let keystore = create_keystore(peers[0]);
-	let environment = test_environment(&link, Some(keystore), network_service.clone(), ());
+	let environment =
+		test_environment(&link, Some(keystore), network_service.clone(), sync_service, ());
 
 	let round_state = || finality_grandpa::round::State::genesis(Default::default());
 	let base = || Default::default();
@@ -1921,9 +1963,10 @@ async fn grandpa_environment_doesnt_send_equivocation_reports_for_itself() {
 		let mut net = GrandpaTestNet::new(TestApi::new(voters), 1, 0);
 		let peer = net.peer(0);
 		let network_service = peer.network_service().clone();
+		let sync_service = peer.sync_service().clone();
 		let link = peer.data.lock().take().unwrap();
 		let keystore = create_keystore(alice);
-		test_environment(&link, Some(keystore), network_service.clone(), ())
+		test_environment(&link, Some(keystore), network_service.clone(), sync_service, ())
 	};
 
 	let signed_prevote = {
diff --git a/substrate/client/informant/src/display.rs b/substrate/client/informant/src/display.rs
index 85be926d99b..fc68e5603c1 100644
--- a/substrate/client/informant/src/display.rs
+++ b/substrate/client/informant/src/display.rs
@@ -24,7 +24,7 @@ use sc_network_common::{
 	service::NetworkStatus,
 	sync::{
 		warp::{WarpSyncPhase, WarpSyncProgress},
-		SyncState,
+		SyncState, SyncStatus,
 	},
 };
 use sp_runtime::traits::{Block as BlockT, CheckedDiv, NumberFor, Saturating, Zero};
@@ -69,7 +69,12 @@ impl<B: BlockT> InformantDisplay<B> {
 	}
 
 	/// Displays the informant by calling `info!`.
-	pub fn display(&mut self, info: &ClientInfo<B>, net_status: NetworkStatus<B>) {
+	pub fn display(
+		&mut self,
+		info: &ClientInfo<B>,
+		net_status: NetworkStatus,
+		sync_status: SyncStatus<B>,
+	) {
 		let best_number = info.chain.best_number;
 		let best_hash = info.chain.best_hash;
 		let finalized_number = info.chain.finalized_number;
@@ -94,7 +99,7 @@ impl<B: BlockT> InformantDisplay<B> {
 		};
 
 		let (level, status, target) =
-			match (net_status.sync_state, net_status.state_sync, net_status.warp_sync) {
+			match (sync_status.state, sync_status.state_sync, sync_status.warp_sync) {
 				(
 					_,
 					_,
diff --git a/substrate/client/informant/src/lib.rs b/substrate/client/informant/src/lib.rs
index cb30d08c452..dc6aebc2ed6 100644
--- a/substrate/client/informant/src/lib.rs
+++ b/substrate/client/informant/src/lib.rs
@@ -23,7 +23,7 @@ use futures::prelude::*;
 use futures_timer::Delay;
 use log::{debug, info, trace};
 use sc_client_api::{BlockchainEvents, UsageProvider};
-use sc_network_common::service::NetworkStatusProvider;
+use sc_network_common::{service::NetworkStatusProvider, sync::SyncStatusProvider};
 use sp_blockchain::HeaderMetadata;
 use sp_runtime::traits::{Block as BlockT, Header};
 use std::{collections::VecDeque, fmt::Display, sync::Arc, time::Duration};
@@ -51,9 +51,10 @@ impl Default for OutputFormat {
 }
 
 /// Builds the informant and returns a `Future` that drives the informant.
-pub async fn build<B: BlockT, C, N>(client: Arc<C>, network: N, format: OutputFormat)
+pub async fn build<B: BlockT, C, N, S>(client: Arc<C>, network: N, syncing: S, format: OutputFormat)
 where
-	N: NetworkStatusProvider<B>,
+	N: NetworkStatusProvider,
+	S: SyncStatusProvider<B>,
 	C: UsageProvider<B> + HeaderMetadata<B> + BlockchainEvents<B>,
 	<C as HeaderMetadata<B>>::Error: Display,
 {
@@ -63,10 +64,15 @@ where
 
 	let display_notifications = interval(Duration::from_millis(5000))
 		.filter_map(|_| async {
-			let status = network.status().await;
-			status.ok()
+			let net_status = network.status().await;
+			let sync_status = syncing.status().await;
+
+			match (net_status.ok(), sync_status.ok()) {
+				(Some(net), Some(sync)) => Some((net, sync)),
+				_ => None,
+			}
 		})
-		.for_each(move |net_status| {
+		.for_each(move |(net_status, sync_status)| {
 			let info = client_1.usage_info();
 			if let Some(ref usage) = info.usage {
 				trace!(target: "usage", "Usage statistics: {}", usage);
@@ -76,7 +82,7 @@ where
 					"Usage statistics not displayed as backend does not provide it",
 				)
 			}
-			display.display(&info, net_status);
+			display.display(&info, net_status, sync_status);
 			future::ready(())
 		});
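
The informant now asks two providers and only renders a line when both answer. The `match (net_status.ok(), sync_status.ok())` above is equivalent to zipping the two `Option`s, as in this small stand-alone sketch (toy status types, not the real `NetworkStatus`/`SyncStatus<B>`):

// Toy status types for illustration only.
struct NetStatus { peers: usize }
struct SyncStatus { best_seen: u64 }

fn combine(
    net: Result<NetStatus, ()>,
    sync: Result<SyncStatus, ()>,
) -> Option<(NetStatus, SyncStatus)> {
    // Same effect as the `match (net_status.ok(), sync_status.ok())` above:
    // only produce a value when both providers answered.
    net.ok().zip(sync.ok())
}

fn main() {
    if let Some((net, sync)) = combine(Ok(NetStatus { peers: 7 }), Ok(SyncStatus { best_seen: 42 })) {
        println!("{} peers, best seen #{}", net.peers, sync.best_seen);
    }
    // If either provider failed, nothing is displayed.
    assert!(combine(Ok(NetStatus { peers: 7 }), Err(())).is_none());
}
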
 
diff --git a/substrate/client/network-gossip/src/bridge.rs b/substrate/client/network-gossip/src/bridge.rs
index 129e18d8a58..7368cc77030 100644
--- a/substrate/client/network-gossip/src/bridge.rs
+++ b/substrate/client/network-gossip/src/bridge.rs
@@ -18,10 +18,13 @@
 
 use crate::{
 	state_machine::{ConsensusGossip, TopicNotification, PERIODIC_MAINTENANCE_INTERVAL},
-	Network, Validator,
+	Network, Syncing, Validator,
 };
 
-use sc_network_common::protocol::{event::Event, ProtocolName};
+use sc_network_common::{
+	protocol::{event::Event, ProtocolName},
+	sync::SyncEvent,
+};
 use sc_peerset::ReputationChange;
 
 use futures::{
@@ -44,11 +47,14 @@ use std::{
 pub struct GossipEngine<B: BlockT> {
 	state_machine: ConsensusGossip<B>,
 	network: Box<dyn Network<B> + Send>,
+	sync: Box<dyn Syncing<B>>,
 	periodic_maintenance_interval: futures_timer::Delay,
 	protocol: ProtocolName,
 
 	/// Incoming events from the network.
 	network_event_stream: Pin<Box<dyn Stream<Item = Event> + Send>>,
+	/// Incoming events from the syncing service.
+	sync_event_stream: Pin<Box<dyn Stream<Item = SyncEvent> + Send>>,
 	/// Outgoing events to the consumer.
 	message_sinks: HashMap<B::Hash, Vec<Sender<TopicNotification>>>,
 	/// Buffered messages (see [`ForwardingState`]).
@@ -75,25 +81,31 @@ impl<B: BlockT> Unpin for GossipEngine<B> {}
 
 impl<B: BlockT> GossipEngine<B> {
 	/// Create a new instance.
-	pub fn new<N: Network<B> + Send + Clone + 'static>(
+	pub fn new<N, S>(
 		network: N,
+		sync: S,
 		protocol: impl Into<ProtocolName>,
 		validator: Arc<dyn Validator<B>>,
 		metrics_registry: Option<&Registry>,
 	) -> Self
 	where
 		B: 'static,
+		N: Network<B> + Send + Clone + 'static,
+		S: Syncing<B> + Send + Clone + 'static,
 	{
 		let protocol = protocol.into();
 		let network_event_stream = network.event_stream("network-gossip");
+		let sync_event_stream = sync.event_stream("network-gossip");
 
 		GossipEngine {
 			state_machine: ConsensusGossip::new(validator, protocol.clone(), metrics_registry),
 			network: Box::new(network),
+			sync: Box::new(sync),
 			periodic_maintenance_interval: futures_timer::Delay::new(PERIODIC_MAINTENANCE_INTERVAL),
 			protocol,
 
 			network_event_stream,
+			sync_event_stream,
 			message_sinks: HashMap::new(),
 			forwarding_state: ForwardingState::Idle,
 
@@ -162,7 +174,7 @@ impl<B: BlockT> GossipEngine<B> {
 	/// Note: this method isn't strictly related to gossiping and should eventually be moved
 	/// somewhere else.
 	pub fn announce(&self, block: B::Hash, associated_data: Option<Vec<u8>>) {
-		self.network.announce_block(block, associated_data);
+		self.sync.announce_block(block, associated_data);
 	}
 }
 
@@ -175,28 +187,24 @@ impl<B: BlockT> Future for GossipEngine<B> {
 		'outer: loop {
 			match &mut this.forwarding_state {
 				ForwardingState::Idle => {
-					match this.network_event_stream.poll_next_unpin(cx) {
+					let net_event_stream = this.network_event_stream.poll_next_unpin(cx);
+					let sync_event_stream = this.sync_event_stream.poll_next_unpin(cx);
+
+					if net_event_stream.is_pending() && sync_event_stream.is_pending() {
+						break
+					}
+
+					match net_event_stream {
 						Poll::Ready(Some(event)) => match event {
-							Event::SyncConnected { remote } => {
-								this.network.add_set_reserved(remote, this.protocol.clone());
-							},
-							Event::SyncDisconnected { remote } => {
-								this.network.remove_peers_from_reserved_set(
-									this.protocol.clone(),
-									vec![remote],
-								);
-							},
-							Event::NotificationStreamOpened { remote, protocol, role, .. } => {
-								if protocol != this.protocol {
-									continue
-								}
-								this.state_machine.new_peer(&mut *this.network, remote, role);
-							},
+							Event::NotificationStreamOpened { remote, protocol, role, .. } =>
+								if protocol == this.protocol {
+									this.state_machine.new_peer(&mut *this.network, remote, role);
+								},
 							Event::NotificationStreamClosed { remote, protocol } => {
-								if protocol != this.protocol {
-									continue
+								if protocol == this.protocol {
+									this.state_machine
+										.peer_disconnected(&mut *this.network, remote);
 								}
-								this.state_machine.peer_disconnected(&mut *this.network, remote);
 							},
 							Event::NotificationsReceived { remote, messages } => {
 								let messages = messages
@@ -225,7 +233,25 @@ impl<B: BlockT> Future for GossipEngine<B> {
 							self.is_terminated = true;
 							return Poll::Ready(())
 						},
-						Poll::Pending => break,
+						Poll::Pending => {},
+					}
+
+					match sync_event_stream {
+						Poll::Ready(Some(event)) => match event {
+							SyncEvent::PeerConnected(remote) =>
+								this.network.add_set_reserved(remote, this.protocol.clone()),
+							SyncEvent::PeerDisconnected(remote) =>
+								this.network.remove_peers_from_reserved_set(
+									this.protocol.clone(),
+									vec![remote],
+								),
+						},
+						// The sync event stream closed. Do the same for [`GossipValidator`].
+						Poll::Ready(None) => {
+							self.is_terminated = true;
+							return Poll::Ready(())
+						},
+						Poll::Pending => {},
 					}
 				},
 				ForwardingState::Busy(to_forward) => {
@@ -321,6 +347,7 @@ mod tests {
 			NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers,
 			NotificationSender, NotificationSenderError,
 		},
+		sync::SyncEventStream,
 	};
 	use sp_runtime::{
 		testing::H256,
@@ -433,6 +460,10 @@ mod tests {
 		) -> Result<Box<dyn NotificationSender>, NotificationSenderError> {
 			unimplemented!();
 		}
+
+		fn set_notification_handshake(&self, _protocol: ProtocolName, _handshake: Vec<u8>) {
+			unimplemented!();
+		}
 	}
 
 	impl NetworkBlock<<Block as BlockT>::Hash, NumberFor<Block>> for TestNetwork {
@@ -449,6 +480,42 @@ mod tests {
 		}
 	}
 
+	#[derive(Clone, Default)]
+	struct TestSync {
+		inner: Arc<Mutex<TestSyncInner>>,
+	}
+
+	#[derive(Clone, Default)]
+	struct TestSyncInner {
+		event_senders: Vec<UnboundedSender<SyncEvent>>,
+	}
+
+	impl SyncEventStream for TestSync {
+		fn event_stream(
+			&self,
+			_name: &'static str,
+		) -> Pin<Box<dyn Stream<Item = SyncEvent> + Send>> {
+			let (tx, rx) = unbounded();
+			self.inner.lock().unwrap().event_senders.push(tx);
+
+			Box::pin(rx)
+		}
+	}
+
+	impl NetworkBlock<<Block as BlockT>::Hash, NumberFor<Block>> for TestSync {
+		fn announce_block(&self, _hash: <Block as BlockT>::Hash, _data: Option<Vec<u8>>) {
+			unimplemented!();
+		}
+
+		fn new_best_block_imported(
+			&self,
+			_hash: <Block as BlockT>::Hash,
+			_number: NumberFor<Block>,
+		) {
+			unimplemented!();
+		}
+	}
+
 	struct AllowAll;
 	impl Validator<Block> for AllowAll {
 		fn validate(
@@ -468,8 +535,10 @@ mod tests {
 	#[test]
 	fn returns_when_network_event_stream_closes() {
 		let network = TestNetwork::default();
+		let sync = Arc::new(TestSync::default());
 		let mut gossip_engine = GossipEngine::<Block>::new(
 			network.clone(),
+			sync,
 			"/my_protocol",
 			Arc::new(AllowAll {}),
 			None,
@@ -495,9 +564,11 @@ mod tests {
 		let protocol = ProtocolName::from("/my_protocol");
 		let remote_peer = PeerId::random();
 		let network = TestNetwork::default();
+		let sync = Arc::new(TestSync::default());
 
 		let mut gossip_engine = GossipEngine::<Block>::new(
 			network.clone(),
+			sync.clone(),
 			protocol.clone(),
 			Arc::new(AllowAll {}),
 			None,
@@ -512,6 +583,7 @@ mod tests {
 				protocol: protocol.clone(),
 				negotiated_fallback: None,
 				role: ObservedRole::Authority,
+				received_handshake: vec![],
 			})
 			.expect("Event stream is unbounded; qed.");
 
@@ -614,6 +686,7 @@ mod tests {
 			let protocol = ProtocolName::from("/my_protocol");
 			let remote_peer = PeerId::random();
 			let network = TestNetwork::default();
+			let sync = Arc::new(TestSync::default());
 
 			let num_channels_per_topic = channels.iter().fold(
 				HashMap::new(),
@@ -640,6 +713,7 @@ mod tests {
 
 			let mut gossip_engine = GossipEngine::<Block>::new(
 				network.clone(),
+				sync.clone(),
 				protocol.clone(),
 				Arc::new(TestValidator {}),
 				None,
@@ -674,6 +748,7 @@ mod tests {
 					protocol: protocol.clone(),
 					negotiated_fallback: None,
 					role: ObservedRole::Authority,
+					received_handshake: vec![],
 				})
 				.expect("Event stream is unbounded; qed.");
 
diff --git a/substrate/client/network-gossip/src/lib.rs b/substrate/client/network-gossip/src/lib.rs
index 859ec3d528d..e3448ba01c3 100644
--- a/substrate/client/network-gossip/src/lib.rs
+++ b/substrate/client/network-gossip/src/lib.rs
@@ -71,6 +71,7 @@ use libp2p::{multiaddr, PeerId};
 use sc_network_common::{
 	protocol::ProtocolName,
 	service::{NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers},
+	sync::SyncEventStream,
 };
 use sp_runtime::traits::{Block as BlockT, NumberFor};
 use std::iter;
@@ -80,9 +81,7 @@ mod state_machine;
 mod validator;
 
 /// Abstraction over a network.
-pub trait Network<B: BlockT>:
-	NetworkPeers + NetworkEventStream + NetworkNotification + NetworkBlock<B::Hash, NumberFor<B>>
-{
+pub trait Network<B: BlockT>: NetworkPeers + NetworkEventStream + NetworkNotification {
 	fn add_set_reserved(&self, who: PeerId, protocol: ProtocolName) {
 		let addr =
 			iter::once(multiaddr::Protocol::P2p(who.into())).collect::<multiaddr::Multiaddr>();
@@ -93,10 +92,9 @@ pub trait Network<B: BlockT>:
 	}
 }
 
-impl<T, B: BlockT> Network<B> for T where
-	T: NetworkPeers
-		+ NetworkEventStream
-		+ NetworkNotification
-		+ NetworkBlock<B::Hash, NumberFor<B>>
-{
-}
+impl<T, B: BlockT> Network<B> for T where T: NetworkPeers + NetworkEventStream + NetworkNotification {}
+
+/// Abstraction over the syncing subsystem.
+pub trait Syncing<B: BlockT>: SyncEventStream + NetworkBlock<B::Hash, NumberFor<B>> {}
+
+impl<T, B: BlockT> Syncing<B> for T where T: SyncEventStream + NetworkBlock<B::Hash, NumberFor<B>> {}
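Because of the blanket implementation above, nothing implements `Syncing` by hand: any type exposing the sync event stream and the block-announcement entry points qualifies automatically. A small illustrative stand-in, in the spirit of the `TestSync` mock used in the tests above:

```rust
use std::pin::Pin;

use futures::{channel::mpsc, Stream};
use sc_network_common::{
    service::NetworkBlock,
    sync::{SyncEvent, SyncEventStream},
};
use sp_runtime::traits::{Block as BlockT, NumberFor};

/// Illustrative stand-in; real nodes use the handle exposed by the syncing engine.
#[derive(Clone)]
struct NoopSync;

impl SyncEventStream for NoopSync {
    fn event_stream(&self, _name: &'static str) -> Pin<Box<dyn Stream<Item = SyncEvent> + Send>> {
        // The sender is dropped immediately, so this stream terminates right away.
        let (_tx, rx) = mpsc::unbounded();
        Box::pin(rx)
    }
}

impl<B: BlockT> NetworkBlock<B::Hash, NumberFor<B>> for NoopSync {
    fn announce_block(&self, _hash: B::Hash, _data: Option<Vec<u8>>) {}
    fn new_best_block_imported(&self, _hash: B::Hash, _number: NumberFor<B>) {}
}

// `NoopSync` now satisfies `Syncing<B>` for every `B: BlockT` through the blanket
// impl above, so it can be handed to `GossipEngine::new` in tests.
```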
diff --git a/substrate/client/network-gossip/src/state_machine.rs b/substrate/client/network-gossip/src/state_machine.rs
index 7cd5b5613b0..3a0d5fc0de1 100644
--- a/substrate/client/network-gossip/src/state_machine.rs
+++ b/substrate/client/network-gossip/src/state_machine.rs
@@ -683,6 +683,10 @@ mod tests {
 		) -> Result<Box<dyn NotificationSender>, NotificationSenderError> {
 			unimplemented!();
 		}
+
+		fn set_notification_handshake(&self, _protocol: ProtocolName, _handshake: Vec<u8>) {
+			unimplemented!();
+		}
 	}
 
 	impl NetworkBlock<<Block as BlockT>::Hash, NumberFor<Block>> for NoOpNetwork {
diff --git a/substrate/client/network/common/Cargo.toml b/substrate/client/network/common/Cargo.toml
index 06ff00ef77d..c5f9d6d4cb0 100644
--- a/substrate/client/network/common/Cargo.toml
+++ b/substrate/client/network/common/Cargo.toml
@@ -16,6 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 prost-build = "0.11"
 
 [dependencies]
+array-bytes = "4.1"
 async-trait = "0.1.57"
 bitflags = "1.3.2"
 bytes = "1"
@@ -30,9 +31,14 @@ prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.
 smallvec = "1.8.0"
 sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" }
 sc-peerset = { version = "4.0.0-dev", path = "../../peerset" }
+sc-utils = { version = "4.0.0-dev", path = "../../utils" }
 serde = { version = "1.0.136", features = ["derive"] }
 sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" }
 sp-consensus-grandpa = { version = "4.0.0-dev", path = "../../../primitives/consensus/grandpa" }
 sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" }
 sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" }
 thiserror = "1.0"
+zeroize = "1.4.3"
+
+[dev-dependencies]
+tempfile = "3.1.0"
diff --git a/substrate/client/network/common/src/config.rs b/substrate/client/network/common/src/config.rs
index 782b5a96513..6a02129fcfe 100644
--- a/substrate/client/network/common/src/config.rs
+++ b/substrate/client/network/common/src/config.rs
@@ -18,11 +18,33 @@
 
 //! Configuration of the networking layer.
 
-use crate::protocol;
+pub use crate::{
+	protocol::{self, role::Role},
+	request_responses::{
+		IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig,
+	},
+	sync::warp::WarpSyncProvider,
+	ExHashT,
+};
+pub use libp2p::{build_multiaddr, core::PublicKey, identity};
 
 use codec::Encode;
-use libp2p::{multiaddr, Multiaddr, PeerId};
-use std::{fmt, str, str::FromStr};
+use libp2p::{
+	identity::{ed25519, Keypair},
+	multiaddr, Multiaddr, PeerId,
+};
+use zeroize::Zeroize;
+
+use std::{
+	error::Error,
+	fmt, fs,
+	io::{self, Write},
+	iter,
+	net::Ipv4Addr,
+	path::{Path, PathBuf},
+	str,
+	str::FromStr,
+};
 
 /// Protocol name prefix, transmitted on the wire for legacy protocol names.
 /// I.e., `dot` in `/dot/sync/2`. Should be unique for each chain. Always UTF-8.
@@ -331,3 +353,350 @@ impl NonReservedPeerMode {
 		}
 	}
 }
+
+/// Sync operation mode.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum SyncMode {
+	/// Full block download and verification.
+	Full,
+	/// Download blocks and the latest state.
+	Fast {
+		/// Skip state proof download and verification.
+		skip_proofs: bool,
+		/// Download indexed transactions for recent blocks.
+		storage_chain_mode: bool,
+	},
+	/// Warp sync - verify authority set transitions and the latest state.
+	Warp,
+}
+
+impl SyncMode {
+	/// Returns if `self` is [`Self::Warp`].
+	pub fn is_warp(&self) -> bool {
+		matches!(self, Self::Warp)
+	}
+
+	/// Returns if `self` is [`Self::Fast`].
+	pub fn is_fast(&self) -> bool {
+		matches!(self, Self::Fast { .. })
+	}
+}
+
+impl Default for SyncMode {
+	fn default() -> Self {
+		Self::Full
+	}
+}
+
+/// Network service configuration.
+#[derive(Clone, Debug)]
+pub struct NetworkConfiguration {
+	/// Directory path to store network-specific configuration. None means nothing will be saved.
+	pub net_config_path: Option<PathBuf>,
+	/// Multiaddresses to listen for incoming connections.
+	pub listen_addresses: Vec<Multiaddr>,
+	/// Multiaddresses to advertise. Detected automatically if empty.
+	pub public_addresses: Vec<Multiaddr>,
+	/// List of initial node addresses
+	pub boot_nodes: Vec<MultiaddrWithPeerId>,
+	/// The node key configuration, which determines the node's network identity keypair.
+	pub node_key: NodeKeyConfig,
+	/// List of request-response protocols that the node supports.
+	pub request_response_protocols: Vec<RequestResponseConfig>,
+	/// Configuration for the default set of nodes used for block syncing and transactions.
+	pub default_peers_set: SetConfig,
+	/// Number of substreams to reserve for full nodes for block syncing and transactions.
+	/// Any other slot will be dedicated to light nodes.
+	///
+	/// This value is implicitly capped to `default_set.out_peers + default_set.in_peers`.
+	pub default_peers_set_num_full: u32,
+	/// Configuration for extra sets of nodes.
+	pub extra_sets: Vec<NonDefaultSetConfig>,
+	/// Client identifier. Sent over the wire for debugging purposes.
+	pub client_version: String,
+	/// Name of the node. Sent over the wire for debugging purposes.
+	pub node_name: String,
+	/// Configuration for the transport layer.
+	pub transport: TransportConfig,
+	/// Maximum number of peers to ask the same blocks in parallel.
+	pub max_parallel_downloads: u32,
+	/// Initial syncing mode.
+	pub sync_mode: SyncMode,
+
+	/// True if Kademlia random discovery should be enabled.
+	///
+	/// If true, the node will automatically randomly walk the DHT in order to find new peers.
+	pub enable_dht_random_walk: bool,
+
+	/// Should we insert non-global addresses into the DHT?
+	pub allow_non_globals_in_dht: bool,
+
+	/// Require iterative Kademlia DHT queries to use disjoint paths for increased resiliency in
+	/// the presence of potentially adversarial nodes.
+	pub kademlia_disjoint_query_paths: bool,
+	/// Enable serving block data over IPFS bitswap.
+	pub ipfs_server: bool,
+
+	/// Size of Yamux receive window of all substreams. `None` for the default (256kiB).
+	/// Any value less than 256kiB is invalid.
+	///
+	/// # Context
+	///
+	/// By design, notifications substreams on top of Yamux connections only allow up to `N` bytes
+	/// to be transferred at a time, where `N` is the Yamux receive window size configurable here.
+	/// This means, in practice, that every `N` bytes must be acknowledged by the receiver before
+	/// the sender can send more data. The maximum bandwidth of each notifications substream is
+	/// therefore `N / round_trip_time`.
+	///
+	/// It is recommended to leave this to `None`, and use a request-response protocol instead if
+	/// a large amount of data must be transferred. The reason why the value is configurable is
+	/// that some Substrate users mis-use notification protocols to send large amounts of data.
+	/// As such, this option isn't designed to stay and will likely get removed in the future.
+	///
+	/// Note that configuring a value here isn't a modification of the Yamux protocol, but rather
+	/// a modification of the way the implementation works. Different nodes with different
+	/// configured values remain compatible with each other.
+	pub yamux_window_size: Option<u32>,
+}
+
+impl NetworkConfiguration {
+	/// Create new default configuration
+	pub fn new<SN: Into<String>, SV: Into<String>>(
+		node_name: SN,
+		client_version: SV,
+		node_key: NodeKeyConfig,
+		net_config_path: Option<PathBuf>,
+	) -> Self {
+		let default_peers_set = SetConfig::default();
+		Self {
+			net_config_path,
+			listen_addresses: Vec::new(),
+			public_addresses: Vec::new(),
+			boot_nodes: Vec::new(),
+			node_key,
+			request_response_protocols: Vec::new(),
+			default_peers_set_num_full: default_peers_set.in_peers + default_peers_set.out_peers,
+			default_peers_set,
+			extra_sets: Vec::new(),
+			client_version: client_version.into(),
+			node_name: node_name.into(),
+			transport: TransportConfig::Normal { enable_mdns: false, allow_private_ip: true },
+			max_parallel_downloads: 5,
+			sync_mode: SyncMode::Full,
+			enable_dht_random_walk: true,
+			allow_non_globals_in_dht: false,
+			kademlia_disjoint_query_paths: false,
+			yamux_window_size: None,
+			ipfs_server: false,
+		}
+	}
+
+	/// Create new default configuration for localhost-only connection with random port (useful for
+	/// testing)
+	pub fn new_local() -> NetworkConfiguration {
+		let mut config =
+			NetworkConfiguration::new("test-node", "test-client", Default::default(), None);
+
+		config.listen_addresses =
+			vec![iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1)))
+				.chain(iter::once(multiaddr::Protocol::Tcp(0)))
+				.collect()];
+
+		config.allow_non_globals_in_dht = true;
+		config
+	}
+
+	/// Create new default configuration for localhost-only connection with random port (useful for
+	/// testing)
+	pub fn new_memory() -> NetworkConfiguration {
+		let mut config =
+			NetworkConfiguration::new("test-node", "test-client", Default::default(), None);
+
+		config.listen_addresses =
+			vec![iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1)))
+				.chain(iter::once(multiaddr::Protocol::Tcp(0)))
+				.collect()];
+
+		config.allow_non_globals_in_dht = true;
+		config
+	}
+}
+
+/// The configuration of a node's secret key, describing the type of key
+/// and how it is obtained. A node's identity keypair is the result of
+/// the evaluation of the node key configuration.
+#[derive(Clone, Debug)]
+pub enum NodeKeyConfig {
+	/// A Ed25519 secret key configuration.
+	Ed25519(Secret<ed25519::SecretKey>),
+}
+
+impl Default for NodeKeyConfig {
+	fn default() -> NodeKeyConfig {
+		Self::Ed25519(Secret::New)
+	}
+}
+
+/// The options for obtaining a Ed25519 secret key.
+pub type Ed25519Secret = Secret<ed25519::SecretKey>;
+
+/// The configuration options for obtaining a secret key `K`.
+#[derive(Clone)]
+pub enum Secret<K> {
+	/// Use the given secret key `K`.
+	Input(K),
+	/// Read the secret key from a file. If the file does not exist,
+	/// it is created with a newly generated secret key `K`. The format
+	/// of the file is determined by `K`:
+	///
+	///   * `ed25519::SecretKey`: An unencoded 32 bytes Ed25519 secret key.
+	File(PathBuf),
+	/// Always generate a new secret key `K`.
+	New,
+}
+
+impl<K> fmt::Debug for Secret<K> {
+	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+		match self {
+			Self::Input(_) => f.debug_tuple("Secret::Input").finish(),
+			Self::File(path) => f.debug_tuple("Secret::File").field(path).finish(),
+			Self::New => f.debug_tuple("Secret::New").finish(),
+		}
+	}
+}
+
+impl NodeKeyConfig {
+	/// Evaluate a `NodeKeyConfig` to obtain an identity `Keypair`:
+	///
+	///  * If the secret is configured as input, the corresponding keypair is returned.
+	///
+	///  * If the secret is configured as a file, it is read from that file, if it exists. Otherwise
+	///    a new secret is generated and stored. In either case, the keypair obtained from the
+	///    secret is returned.
+	///
+	///  * If the secret is configured to be new, it is generated and the corresponding keypair is
+	///    returned.
+	pub fn into_keypair(self) -> io::Result<Keypair> {
+		use NodeKeyConfig::*;
+		match self {
+			Ed25519(Secret::New) => Ok(Keypair::generate_ed25519()),
+
+			Ed25519(Secret::Input(k)) => Ok(Keypair::Ed25519(k.into())),
+
+			Ed25519(Secret::File(f)) => get_secret(
+				f,
+				|mut b| match String::from_utf8(b.to_vec()).ok().and_then(|s| {
+					if s.len() == 64 {
+						array_bytes::hex2bytes(&s).ok()
+					} else {
+						None
+					}
+				}) {
+					Some(s) => ed25519::SecretKey::from_bytes(s),
+					_ => ed25519::SecretKey::from_bytes(&mut b),
+				},
+				ed25519::SecretKey::generate,
+				|b| b.as_ref().to_vec(),
+			)
+			.map(ed25519::Keypair::from)
+			.map(Keypair::Ed25519),
+		}
+	}
+}
+
+/// Load a secret key from a file, if it exists, or generate a
+/// new secret key and write it to that file. In either case,
+/// the secret key is returned.
+fn get_secret<P, F, G, E, W, K>(file: P, parse: F, generate: G, serialize: W) -> io::Result<K>
+where
+	P: AsRef<Path>,
+	F: for<'r> FnOnce(&'r mut [u8]) -> Result<K, E>,
+	G: FnOnce() -> K,
+	E: Error + Send + Sync + 'static,
+	W: Fn(&K) -> Vec<u8>,
+{
+	std::fs::read(&file)
+		.and_then(|mut sk_bytes| {
+			parse(&mut sk_bytes).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
+		})
+		.or_else(|e| {
+			if e.kind() == io::ErrorKind::NotFound {
+				file.as_ref().parent().map_or(Ok(()), fs::create_dir_all)?;
+				let sk = generate();
+				let mut sk_vec = serialize(&sk);
+				write_secret_file(file, &sk_vec)?;
+				sk_vec.zeroize();
+				Ok(sk)
+			} else {
+				Err(e)
+			}
+		})
+}
+
+/// Write secret bytes to a file.
+fn write_secret_file<P>(path: P, sk_bytes: &[u8]) -> io::Result<()>
+where
+	P: AsRef<Path>,
+{
+	let mut file = open_secret_file(&path)?;
+	file.write_all(sk_bytes)
+}
+
+/// Opens a file containing a secret key in write mode.
+#[cfg(unix)]
+fn open_secret_file<P>(path: P) -> io::Result<fs::File>
+where
+	P: AsRef<Path>,
+{
+	use std::os::unix::fs::OpenOptionsExt;
+	fs::OpenOptions::new().write(true).create_new(true).mode(0o600).open(path)
+}
+
+/// Opens a file containing a secret key in write mode.
+#[cfg(not(unix))]
+fn open_secret_file<P>(path: P) -> Result<fs::File, io::Error>
+where
+	P: AsRef<Path>,
+{
+	fs::OpenOptions::new().write(true).create_new(true).open(path)
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use tempfile::TempDir;
+
+	fn tempdir_with_prefix(prefix: &str) -> TempDir {
+		tempfile::Builder::new().prefix(prefix).tempdir().unwrap()
+	}
+
+	fn secret_bytes(kp: &Keypair) -> Vec<u8> {
+		let Keypair::Ed25519(p) = kp;
+		p.secret().as_ref().iter().cloned().collect()
+	}
+
+	#[test]
+	fn test_secret_file() {
+		let tmp = tempdir_with_prefix("x");
+		std::fs::remove_dir(tmp.path()).unwrap(); // should be recreated
+		let file = tmp.path().join("x").to_path_buf();
+		let kp1 = NodeKeyConfig::Ed25519(Secret::File(file.clone())).into_keypair().unwrap();
+		let kp2 = NodeKeyConfig::Ed25519(Secret::File(file.clone())).into_keypair().unwrap();
+		assert!(file.is_file() && secret_bytes(&kp1) == secret_bytes(&kp2))
+	}
+
+	#[test]
+	fn test_secret_input() {
+		let sk = ed25519::SecretKey::generate();
+		let kp1 = NodeKeyConfig::Ed25519(Secret::Input(sk.clone())).into_keypair().unwrap();
+		let kp2 = NodeKeyConfig::Ed25519(Secret::Input(sk)).into_keypair().unwrap();
+		assert!(secret_bytes(&kp1) == secret_bytes(&kp2));
+	}
+
+	#[test]
+	fn test_secret_new() {
+		let kp1 = NodeKeyConfig::Ed25519(Secret::New).into_keypair().unwrap();
+		let kp2 = NodeKeyConfig::Ed25519(Secret::New).into_keypair().unwrap();
+		assert!(secret_bytes(&kp1) != secret_bytes(&kp2));
+	}
+}
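The node-key types above are a verbatim move out of `sc-network`'s `config.rs` (deleted later in this patch); behaviour is unchanged, with `Secret::File` still creating and persisting a key on first use. A hedged usage sketch; the path is illustrative only:

```rust
use sc_network_common::config::{NodeKeyConfig, Secret};

fn node_identity() -> std::io::Result<libp2p::identity::Keypair> {
    // Reads the Ed25519 secret from the file if it exists; otherwise generates one,
    // writes it back (0o600 on unix) and returns the derived keypair.
    NodeKeyConfig::Ed25519(Secret::File("/tmp/node-key".into())).into_keypair()
}
```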
diff --git a/substrate/client/network/common/src/protocol/event.rs b/substrate/client/network/common/src/protocol/event.rs
index 7a14a280131..90c38b48c32 100644
--- a/substrate/client/network/common/src/protocol/event.rs
+++ b/substrate/client/network/common/src/protocol/event.rs
@@ -48,18 +48,6 @@ pub enum Event {
 	/// Event generated by a DHT.
 	Dht(DhtEvent),
 
-	/// Now connected to a new peer for syncing purposes.
-	SyncConnected {
-		/// Node we are now syncing from.
-		remote: PeerId,
-	},
-
-	/// Now disconnected from a peer for syncing purposes.
-	SyncDisconnected {
-		/// Node we are no longer syncing from.
-		remote: PeerId,
-	},
-
 	/// Opened a substream with the given node with the given notifications protocol.
 	///
 	/// The protocol is always one of the notification protocols that have been registered.
@@ -79,6 +67,8 @@ pub enum Event {
 		negotiated_fallback: Option<ProtocolName>,
 		/// Role of the remote.
 		role: ObservedRole,
+		/// Received handshake.
+		received_handshake: Vec<u8>,
 	},
 
 	/// Closed a substream with the given node. Always matches a corresponding previous
diff --git a/substrate/client/network/common/src/service.rs b/substrate/client/network/common/src/service.rs
index e96a00c40c1..d3c5c2f4394 100644
--- a/substrate/client/network/common/src/service.rs
+++ b/substrate/client/network/common/src/service.rs
@@ -22,14 +22,12 @@ use crate::{
 	config::MultiaddrWithPeerId,
 	protocol::{event::Event, ProtocolName},
 	request_responses::{IfDisconnected, RequestFailure},
-	sync::{warp::WarpSyncProgress, StateDownloadProgress, SyncState},
 };
 use futures::{channel::oneshot, Stream};
 pub use libp2p::{identity::error::SigningError, kad::record::Key as KademliaKey};
 use libp2p::{Multiaddr, PeerId};
 use sc_peerset::ReputationChange;
 pub use signature::Signature;
-use sp_runtime::traits::{Block as BlockT, NumberFor};
 use std::{collections::HashSet, future::Future, pin::Pin, sync::Arc};
 
 mod signature;
@@ -96,45 +94,33 @@ where
 
 /// Overview status of the network.
 #[derive(Clone)]
-pub struct NetworkStatus<B: BlockT> {
-	/// Current global sync state.
-	pub sync_state: SyncState<NumberFor<B>>,
-	/// Target sync block number.
-	pub best_seen_block: Option<NumberFor<B>>,
-	/// Number of peers participating in syncing.
-	pub num_sync_peers: u32,
-	/// Total number of connected peers
+pub struct NetworkStatus {
+	/// Total number of connected peers.
 	pub num_connected_peers: usize,
-	/// Total number of active peers.
-	pub num_active_peers: usize,
 	/// The total number of bytes received.
 	pub total_bytes_inbound: u64,
 	/// The total number of bytes sent.
 	pub total_bytes_outbound: u64,
-	/// State sync in progress.
-	pub state_sync: Option<StateDownloadProgress>,
-	/// Warp sync in progress.
-	pub warp_sync: Option<WarpSyncProgress<B>>,
 }
 
 /// Provides high-level status information about network.
 #[async_trait::async_trait]
-pub trait NetworkStatusProvider<Block: BlockT> {
+pub trait NetworkStatusProvider {
 	/// High-level network status information.
 	///
 	/// Returns an error if the `NetworkWorker` is no longer running.
-	async fn status(&self) -> Result<NetworkStatus<Block>, ()>;
+	async fn status(&self) -> Result<NetworkStatus, ()>;
 }
 
 // Manual implementation to avoid extra boxing here
-impl<T, Block: BlockT> NetworkStatusProvider<Block> for Arc<T>
+impl<T> NetworkStatusProvider for Arc<T>
 where
 	T: ?Sized,
-	T: NetworkStatusProvider<Block>,
+	T: NetworkStatusProvider,
 {
 	fn status<'life0, 'async_trait>(
 		&'life0 self,
-	) -> Pin<Box<dyn Future<Output = Result<NetworkStatus<Block>, ()>> + Send + 'async_trait>>
+	) -> Pin<Box<dyn Future<Output = Result<NetworkStatus, ()>> + Send + 'async_trait>>
 	where
 		'life0: 'async_trait,
 		Self: 'async_trait,
@@ -511,6 +497,9 @@ pub trait NetworkNotification {
 		target: PeerId,
 		protocol: ProtocolName,
 	) -> Result<Box<dyn NotificationSender>, NotificationSenderError>;
+
+	/// Set handshake for the notification protocol.
+	fn set_notification_handshake(&self, protocol: ProtocolName, handshake: Vec<u8>);
 }
 
 impl<T> NetworkNotification for Arc<T>
@@ -529,6 +518,10 @@ where
 	) -> Result<Box<dyn NotificationSender>, NotificationSenderError> {
 		T::notification_sender(self, target, protocol)
 	}
+
+	fn set_notification_handshake(&self, protocol: ProtocolName, handshake: Vec<u8>) {
+		T::set_notification_handshake(self, protocol, handshake)
+	}
 }
 
 /// Provides ability to send network requests.
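Two consequences of the trimmed service traits are worth spelling out: `NetworkStatus` now reports only transport-level counters, and `set_notification_handshake` lets the external syncing engine refresh the block-announces handshake that `Protocol` used to rebuild itself. A hedged fragment; `network_service`, `block_announces_protocol` and `encoded_handshake` are assumed to exist in the calling code and are not defined by this hunk:

```rust
// Fragment: `network_service` is assumed to implement `NetworkStatusProvider`
// and `NetworkNotification`; `encoded_handshake` is assumed to be the
// SCALE-encoded `BlockAnnouncesHandshake` built by the syncing engine.
if let Ok(net_status) = network_service.status().await {
    // Only transport-level counters remain; sync progress now comes from
    // `SyncStatusProvider::status` on the syncing service.
    log::info!(
        "peers={} in={}B out={}B",
        net_status.num_connected_peers,
        net_status.total_bytes_inbound,
        net_status.total_bytes_outbound,
    );
}

// Refresh the handshake advertised on the block-announces protocol when the
// best block changes (previously rebuilt internally by `Protocol`).
network_service.set_notification_handshake(block_announces_protocol.clone(), encoded_handshake);
```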
diff --git a/substrate/client/network/common/src/sync.rs b/substrate/client/network/common/src/sync.rs
index 6a98543d420..262da6c202a 100644
--- a/substrate/client/network/common/src/sync.rs
+++ b/substrate/client/network/common/src/sync.rs
@@ -22,7 +22,11 @@ pub mod message;
 pub mod metrics;
 pub mod warp;
 
+use crate::protocol::role::Roles;
+use futures::Stream;
+
 use libp2p::PeerId;
+
 use message::{BlockAnnounce, BlockData, BlockRequest, BlockResponse};
 use sc_consensus::{import_queue::RuntimeOrigin, IncomingBlock};
 use sp_consensus::BlockOrigin;
@@ -30,9 +34,10 @@ use sp_runtime::{
 	traits::{Block as BlockT, NumberFor},
 	Justifications,
 };
-use std::{any::Any, fmt, fmt::Formatter, task::Poll};
 use warp::WarpSyncProgress;
 
+use std::{any::Any, fmt, fmt::Formatter, pin::Pin, sync::Arc, task::Poll};
+
 /// The sync status of a peer we are trying to sync with
 #[derive(Debug)]
 pub struct PeerInfo<Block: BlockT> {
@@ -42,6 +47,17 @@ pub struct PeerInfo<Block: BlockT> {
 	pub best_number: NumberFor<Block>,
 }
 
+/// Info about a peer's known state (both full and light).
+#[derive(Clone, Debug)]
+pub struct ExtendedPeerInfo<B: BlockT> {
+	/// Roles
+	pub roles: Roles,
+	/// Peer best block hash
+	pub best_hash: B::Hash,
+	/// Peer best block number
+	pub best_number: NumberFor<B>,
+}
+
 /// Reported sync state.
 #[derive(Clone, Eq, PartialEq, Debug)]
 pub enum SyncState<BlockNumber> {
@@ -251,6 +267,49 @@ impl fmt::Debug for OpaqueBlockResponse {
 	}
 }
 
+/// Provides high-level status of syncing.
+#[async_trait::async_trait]
+pub trait SyncStatusProvider<Block: BlockT>: Send + Sync {
+	/// Get high-level view of the syncing status.
+	async fn status(&self) -> Result<SyncStatus<Block>, ()>;
+}
+
+#[async_trait::async_trait]
+impl<T, Block> SyncStatusProvider<Block> for Arc<T>
+where
+	T: ?Sized,
+	T: SyncStatusProvider<Block>,
+	Block: BlockT,
+{
+	async fn status(&self) -> Result<SyncStatus<Block>, ()> {
+		T::status(self).await
+	}
+}
+
+/// Syncing-related events that other protocols can subscribe to.
+pub enum SyncEvent {
+	/// Peer that the syncing implementation is tracking connected.
+	PeerConnected(PeerId),
+
+	/// Peer that the syncing implementation was tracking disconnected.
+	PeerDisconnected(PeerId),
+}
+
+pub trait SyncEventStream: Send + Sync {
+	/// Subscribe to syncing-related events.
+	fn event_stream(&self, name: &'static str) -> Pin<Box<dyn Stream<Item = SyncEvent> + Send>>;
+}
+
+impl<T> SyncEventStream for Arc<T>
+where
+	T: ?Sized,
+	T: SyncEventStream,
+{
+	fn event_stream(&self, name: &'static str) -> Pin<Box<dyn Stream<Item = SyncEvent> + Send>> {
+		T::event_stream(self, name)
+	}
+}
+
 /// Something that represents the syncing strategy to download past and future blocks of the chain.
 pub trait ChainSync<Block: BlockT>: Send {
 	/// Returns the state of the sync of the given peer.
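`SyncStatusProvider` and `SyncEventStream` are what the informant, gossip, GRANDPA and BEEFY consume in place of the removed `SyncConnected`/`SyncDisconnected` network events. A hedged consumer sketch, generic over any handle implementing both traits (in practice the syncing service handle); the `state` field is assumed from the pre-existing `SyncStatus` definition earlier in this file:

```rust
use futures::StreamExt;
use sc_network_common::sync::{SyncEvent, SyncEventStream, SyncStatusProvider};
use sp_runtime::traits::Block as BlockT;

async fn watch_sync<B, S>(sync: S)
where
    B: BlockT,
    S: SyncStatusProvider<B> + SyncEventStream,
{
    // One-shot snapshot of the syncing state (`Err(())` if syncing has shut down).
    if let Ok(status) = sync.status().await {
        log::debug!("sync state: {:?}", status.state);
    }

    // Continuous peer-tracking events, replacing the removed network events.
    let mut events = sync.event_stream("my-protocol");
    while let Some(event) = events.next().await {
        match event {
            SyncEvent::PeerConnected(peer) => log::debug!("sync peer connected: {peer}"),
            SyncEvent::PeerDisconnected(peer) => log::debug!("sync peer disconnected: {peer}"),
        }
    }
}
```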
diff --git a/substrate/client/network/src/behaviour.rs b/substrate/client/network/src/behaviour.rs
index 1ed8efbcdf9..55912902003 100644
--- a/substrate/client/network/src/behaviour.rs
+++ b/substrate/client/network/src/behaviour.rs
@@ -41,7 +41,6 @@ use sc_network_common::{
 	request_responses::{IfDisconnected, ProtocolConfig, RequestFailure},
 };
 use sc_peerset::{PeersetHandle, ReputationChange};
-use sp_blockchain::HeaderBackend;
 use sp_runtime::traits::Block as BlockT;
 use std::{collections::HashSet, time::Duration};
 
@@ -50,13 +49,9 @@ pub use crate::request_responses::{InboundFailure, OutboundFailure, RequestId, R
 /// General behaviour of the network. Combines all protocols together.
 #[derive(NetworkBehaviour)]
 #[behaviour(out_event = "BehaviourOut")]
-pub struct Behaviour<B, Client>
-where
-	B: BlockT,
-	Client: HeaderBackend<B> + 'static,
-{
+pub struct Behaviour<B: BlockT> {
 	/// All the substrate-specific protocols.
-	substrate: Protocol<B, Client>,
+	substrate: Protocol<B>,
 	/// Periodically pings and identifies the nodes we are connected to, and store information in a
 	/// cache.
 	peer_info: peer_info::PeerInfoBehaviour,
@@ -118,6 +113,8 @@ pub enum BehaviourOut {
 		notifications_sink: NotificationsSink,
 		/// Role of the remote.
 		role: ObservedRole,
+		/// Received handshake.
+		received_handshake: Vec<u8>,
 	},
 
 	/// The [`NotificationsSink`] object used to send notifications with the given peer must be
@@ -151,12 +148,6 @@ pub enum BehaviourOut {
 		messages: Vec<(ProtocolName, Bytes)>,
 	},
 
-	/// Now connected to a new peer for syncing purposes.
-	SyncConnected(PeerId),
-
-	/// No longer connected to a peer for syncing purposes.
-	SyncDisconnected(PeerId),
-
 	/// We have obtained identity information from a peer, including the addresses it is listening
 	/// on.
 	PeerIdentify {
@@ -177,14 +168,10 @@ pub enum BehaviourOut {
 	None,
 }
 
-impl<B, Client> Behaviour<B, Client>
-where
-	B: BlockT,
-	Client: HeaderBackend<B> + 'static,
-{
+impl<B: BlockT> Behaviour<B> {
 	/// Builds a new `Behaviour`.
 	pub fn new(
-		substrate: Protocol<B, Client>,
+		substrate: Protocol<B>,
 		user_agent: String,
 		local_public_key: PublicKey,
 		disco_config: DiscoveryConfig,
@@ -252,12 +239,12 @@ where
 	}
 
 	/// Returns a shared reference to the user protocol.
-	pub fn user_protocol(&self) -> &Protocol<B, Client> {
+	pub fn user_protocol(&self) -> &Protocol<B> {
 		&self.substrate
 	}
 
 	/// Returns a mutable reference to the user protocol.
-	pub fn user_protocol_mut(&mut self) -> &mut Protocol<B, Client> {
+	pub fn user_protocol_mut(&mut self) -> &mut Protocol<B> {
 		&mut self.substrate
 	}
 
@@ -295,20 +282,22 @@ fn reported_roles_to_observed_role(roles: Roles) -> ObservedRole {
 	}
 }
 
-impl<B: BlockT> From<CustomMessageOutcome<B>> for BehaviourOut {
-	fn from(event: CustomMessageOutcome<B>) -> Self {
+impl From<CustomMessageOutcome> for BehaviourOut {
+	fn from(event: CustomMessageOutcome) -> Self {
 		match event {
 			CustomMessageOutcome::NotificationStreamOpened {
 				remote,
 				protocol,
 				negotiated_fallback,
 				roles,
+				received_handshake,
 				notifications_sink,
 			} => BehaviourOut::NotificationStreamOpened {
 				remote,
 				protocol,
 				negotiated_fallback,
 				role: reported_roles_to_observed_role(roles),
+				received_handshake,
 				notifications_sink,
 			},
 			CustomMessageOutcome::NotificationStreamReplaced {
@@ -320,10 +309,6 @@ impl<B: BlockT> From<CustomMessageOutcome<B>> for BehaviourOut {
 				BehaviourOut::NotificationStreamClosed { remote, protocol },
 			CustomMessageOutcome::NotificationsReceived { remote, messages } =>
 				BehaviourOut::NotificationsReceived { remote, messages },
-			CustomMessageOutcome::PeerNewBest(_peer_id, _number) => BehaviourOut::None,
-			CustomMessageOutcome::SyncConnected(peer_id) => BehaviourOut::SyncConnected(peer_id),
-			CustomMessageOutcome::SyncDisconnected(peer_id) =>
-				BehaviourOut::SyncDisconnected(peer_id),
 			CustomMessageOutcome::None => BehaviourOut::None,
 		}
 	}
diff --git a/substrate/client/network/src/config.rs b/substrate/client/network/src/config.rs
index fc5304779db..90d02af848b 100644
--- a/substrate/client/network/src/config.rs
+++ b/substrate/client/network/src/config.rs
@@ -22,7 +22,7 @@
 //! See the documentation of [`Params`].
 
 pub use sc_network_common::{
-	config::ProtocolId,
+	config::{NetworkConfiguration, ProtocolId},
 	protocol::role::Role,
 	request_responses::{
 		IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig,
@@ -33,35 +33,12 @@ pub use sc_network_common::{
 
 pub use libp2p::{build_multiaddr, core::PublicKey, identity};
 
-use crate::ChainSyncInterface;
-use core::{fmt, iter};
-use libp2p::{
-	identity::{ed25519, Keypair},
-	multiaddr, Multiaddr,
-};
 use prometheus_endpoint::Registry;
-use sc_network_common::{
-	config::{MultiaddrWithPeerId, NonDefaultSetConfig, SetConfig, TransportConfig},
-	sync::ChainSync,
-};
-use sp_runtime::traits::Block as BlockT;
-use std::{
-	error::Error,
-	fs,
-	future::Future,
-	io::{self, Write},
-	net::Ipv4Addr,
-	path::{Path, PathBuf},
-	pin::Pin,
-	sync::Arc,
-};
-use zeroize::Zeroize;
+use sc_network_common::config::NonDefaultSetConfig;
+use std::{future::Future, pin::Pin, sync::Arc};
 
 /// Network initialization parameters.
-pub struct Params<B, Client>
-where
-	B: BlockT + 'static,
-{
+pub struct Params<Client> {
 	/// Assigned role for our node (full, light, ...).
 	pub role: Role,
 
@@ -81,12 +58,6 @@ where
 	/// name on the wire.
 	pub fork_id: Option<String>,
 
-	/// Instance of chain sync implementation.
-	pub chain_sync: Box<dyn ChainSync<B>>,
-
-	/// Interface that can be used to delegate syncing-related function calls to `ChainSync`
-	pub chain_sync_service: Box<dyn ChainSyncInterface<B>>,
-
 	/// Registry for recording prometheus metrics to.
 	pub metrics_registry: Option<Registry>,
 
@@ -96,350 +67,3 @@ where
 	/// Request response protocol configurations
 	pub request_response_protocol_configs: Vec<RequestResponseConfig>,
 }
-
-/// Sync operation mode.
-#[derive(Copy, Clone, Debug, Eq, PartialEq)]
-pub enum SyncMode {
-	/// Full block download and verification.
-	Full,
-	/// Download blocks and the latest state.
-	Fast {
-		/// Skip state proof download and verification.
-		skip_proofs: bool,
-		/// Download indexed transactions for recent blocks.
-		storage_chain_mode: bool,
-	},
-	/// Warp sync - verify authority set transitions and the latest state.
-	Warp,
-}
-
-impl SyncMode {
-	/// Returns if `self` is [`Self::Warp`].
-	pub fn is_warp(&self) -> bool {
-		matches!(self, Self::Warp)
-	}
-
-	/// Returns if `self` is [`Self::Fast`].
-	pub fn is_fast(&self) -> bool {
-		matches!(self, Self::Fast { .. })
-	}
-}
-
-impl Default for SyncMode {
-	fn default() -> Self {
-		Self::Full
-	}
-}
-
-/// Network service configuration.
-#[derive(Clone, Debug)]
-pub struct NetworkConfiguration {
-	/// Directory path to store network-specific configuration. None means nothing will be saved.
-	pub net_config_path: Option<PathBuf>,
-	/// Multiaddresses to listen for incoming connections.
-	pub listen_addresses: Vec<Multiaddr>,
-	/// Multiaddresses to advertise. Detected automatically if empty.
-	pub public_addresses: Vec<Multiaddr>,
-	/// List of initial node addresses
-	pub boot_nodes: Vec<MultiaddrWithPeerId>,
-	/// The node key configuration, which determines the node's network identity keypair.
-	pub node_key: NodeKeyConfig,
-	/// List of request-response protocols that the node supports.
-	pub request_response_protocols: Vec<RequestResponseConfig>,
-	/// Configuration for the default set of nodes used for block syncing and transactions.
-	pub default_peers_set: SetConfig,
-	/// Number of substreams to reserve for full nodes for block syncing and transactions.
-	/// Any other slot will be dedicated to light nodes.
-	///
-	/// This value is implicitly capped to `default_set.out_peers + default_set.in_peers`.
-	pub default_peers_set_num_full: u32,
-	/// Configuration for extra sets of nodes.
-	pub extra_sets: Vec<NonDefaultSetConfig>,
-	/// Client identifier. Sent over the wire for debugging purposes.
-	pub client_version: String,
-	/// Name of the node. Sent over the wire for debugging purposes.
-	pub node_name: String,
-	/// Configuration for the transport layer.
-	pub transport: TransportConfig,
-	/// Maximum number of peers to ask the same blocks in parallel.
-	pub max_parallel_downloads: u32,
-	/// Initial syncing mode.
-	pub sync_mode: SyncMode,
-
-	/// True if Kademlia random discovery should be enabled.
-	///
-	/// If true, the node will automatically randomly walk the DHT in order to find new peers.
-	pub enable_dht_random_walk: bool,
-
-	/// Should we insert non-global addresses into the DHT?
-	pub allow_non_globals_in_dht: bool,
-
-	/// Require iterative Kademlia DHT queries to use disjoint paths for increased resiliency in
-	/// the presence of potentially adversarial nodes.
-	pub kademlia_disjoint_query_paths: bool,
-	/// Enable serving block data over IPFS bitswap.
-	pub ipfs_server: bool,
-
-	/// Size of Yamux receive window of all substreams. `None` for the default (256kiB).
-	/// Any value less than 256kiB is invalid.
-	///
-	/// # Context
-	///
-	/// By design, notifications substreams on top of Yamux connections only allow up to `N` bytes
-	/// to be transferred at a time, where `N` is the Yamux receive window size configurable here.
-	/// This means, in practice, that every `N` bytes must be acknowledged by the receiver before
-	/// the sender can send more data. The maximum bandwidth of each notifications substream is
-	/// therefore `N / round_trip_time`.
-	///
-	/// It is recommended to leave this to `None`, and use a request-response protocol instead if
-	/// a large amount of data must be transferred. The reason why the value is configurable is
-	/// that some Substrate users mis-use notification protocols to send large amounts of data.
-	/// As such, this option isn't designed to stay and will likely get removed in the future.
-	///
-	/// Note that configuring a value here isn't a modification of the Yamux protocol, but rather
-	/// a modification of the way the implementation works. Different nodes with different
-	/// configured values remain compatible with each other.
-	pub yamux_window_size: Option<u32>,
-}
-
-impl NetworkConfiguration {
-	/// Create new default configuration
-	pub fn new<SN: Into<String>, SV: Into<String>>(
-		node_name: SN,
-		client_version: SV,
-		node_key: NodeKeyConfig,
-		net_config_path: Option<PathBuf>,
-	) -> Self {
-		let default_peers_set = SetConfig::default();
-		Self {
-			net_config_path,
-			listen_addresses: Vec::new(),
-			public_addresses: Vec::new(),
-			boot_nodes: Vec::new(),
-			node_key,
-			request_response_protocols: Vec::new(),
-			default_peers_set_num_full: default_peers_set.in_peers + default_peers_set.out_peers,
-			default_peers_set,
-			extra_sets: Vec::new(),
-			client_version: client_version.into(),
-			node_name: node_name.into(),
-			transport: TransportConfig::Normal { enable_mdns: false, allow_private_ip: true },
-			max_parallel_downloads: 5,
-			sync_mode: SyncMode::Full,
-			enable_dht_random_walk: true,
-			allow_non_globals_in_dht: false,
-			kademlia_disjoint_query_paths: false,
-			yamux_window_size: None,
-			ipfs_server: false,
-		}
-	}
-
-	/// Create new default configuration for localhost-only connection with random port (useful for
-	/// testing)
-	pub fn new_local() -> NetworkConfiguration {
-		let mut config =
-			NetworkConfiguration::new("test-node", "test-client", Default::default(), None);
-
-		config.listen_addresses =
-			vec![iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1)))
-				.chain(iter::once(multiaddr::Protocol::Tcp(0)))
-				.collect()];
-
-		config.allow_non_globals_in_dht = true;
-		config
-	}
-
-	/// Create new default configuration for localhost-only connection with random port (useful for
-	/// testing)
-	pub fn new_memory() -> NetworkConfiguration {
-		let mut config =
-			NetworkConfiguration::new("test-node", "test-client", Default::default(), None);
-
-		config.listen_addresses =
-			vec![iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1)))
-				.chain(iter::once(multiaddr::Protocol::Tcp(0)))
-				.collect()];
-
-		config.allow_non_globals_in_dht = true;
-		config
-	}
-}
-
-/// The configuration of a node's secret key, describing the type of key
-/// and how it is obtained. A node's identity keypair is the result of
-/// the evaluation of the node key configuration.
-#[derive(Clone, Debug)]
-pub enum NodeKeyConfig {
-	/// A Ed25519 secret key configuration.
-	Ed25519(Secret<ed25519::SecretKey>),
-}
-
-impl Default for NodeKeyConfig {
-	fn default() -> NodeKeyConfig {
-		Self::Ed25519(Secret::New)
-	}
-}
-
-/// The options for obtaining a Ed25519 secret key.
-pub type Ed25519Secret = Secret<ed25519::SecretKey>;
-
-/// The configuration options for obtaining a secret key `K`.
-#[derive(Clone)]
-pub enum Secret<K> {
-	/// Use the given secret key `K`.
-	Input(K),
-	/// Read the secret key from a file. If the file does not exist,
-	/// it is created with a newly generated secret key `K`. The format
-	/// of the file is determined by `K`:
-	///
-	///   * `ed25519::SecretKey`: An unencoded 32 bytes Ed25519 secret key.
-	File(PathBuf),
-	/// Always generate a new secret key `K`.
-	New,
-}
-
-impl<K> fmt::Debug for Secret<K> {
-	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-		match self {
-			Self::Input(_) => f.debug_tuple("Secret::Input").finish(),
-			Self::File(path) => f.debug_tuple("Secret::File").field(path).finish(),
-			Self::New => f.debug_tuple("Secret::New").finish(),
-		}
-	}
-}
-
-impl NodeKeyConfig {
-	/// Evaluate a `NodeKeyConfig` to obtain an identity `Keypair`:
-	///
-	///  * If the secret is configured as input, the corresponding keypair is returned.
-	///
-	///  * If the secret is configured as a file, it is read from that file, if it exists. Otherwise
-	///    a new secret is generated and stored. In either case, the keypair obtained from the
-	///    secret is returned.
-	///
-	///  * If the secret is configured to be new, it is generated and the corresponding keypair is
-	///    returned.
-	pub fn into_keypair(self) -> io::Result<Keypair> {
-		use NodeKeyConfig::*;
-		match self {
-			Ed25519(Secret::New) => Ok(Keypair::generate_ed25519()),
-
-			Ed25519(Secret::Input(k)) => Ok(Keypair::Ed25519(k.into())),
-
-			Ed25519(Secret::File(f)) => get_secret(
-				f,
-				|mut b| match String::from_utf8(b.to_vec()).ok().and_then(|s| {
-					if s.len() == 64 {
-						array_bytes::hex2bytes(&s).ok()
-					} else {
-						None
-					}
-				}) {
-					Some(s) => ed25519::SecretKey::from_bytes(s),
-					_ => ed25519::SecretKey::from_bytes(&mut b),
-				},
-				ed25519::SecretKey::generate,
-				|b| b.as_ref().to_vec(),
-			)
-			.map(ed25519::Keypair::from)
-			.map(Keypair::Ed25519),
-		}
-	}
-}
-
-/// Load a secret key from a file, if it exists, or generate a
-/// new secret key and write it to that file. In either case,
-/// the secret key is returned.
-fn get_secret<P, F, G, E, W, K>(file: P, parse: F, generate: G, serialize: W) -> io::Result<K>
-where
-	P: AsRef<Path>,
-	F: for<'r> FnOnce(&'r mut [u8]) -> Result<K, E>,
-	G: FnOnce() -> K,
-	E: Error + Send + Sync + 'static,
-	W: Fn(&K) -> Vec<u8>,
-{
-	std::fs::read(&file)
-		.and_then(|mut sk_bytes| {
-			parse(&mut sk_bytes).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
-		})
-		.or_else(|e| {
-			if e.kind() == io::ErrorKind::NotFound {
-				file.as_ref().parent().map_or(Ok(()), fs::create_dir_all)?;
-				let sk = generate();
-				let mut sk_vec = serialize(&sk);
-				write_secret_file(file, &sk_vec)?;
-				sk_vec.zeroize();
-				Ok(sk)
-			} else {
-				Err(e)
-			}
-		})
-}
-
-/// Write secret bytes to a file.
-fn write_secret_file<P>(path: P, sk_bytes: &[u8]) -> io::Result<()>
-where
-	P: AsRef<Path>,
-{
-	let mut file = open_secret_file(&path)?;
-	file.write_all(sk_bytes)
-}
-
-/// Opens a file containing a secret key in write mode.
-#[cfg(unix)]
-fn open_secret_file<P>(path: P) -> io::Result<fs::File>
-where
-	P: AsRef<Path>,
-{
-	use std::os::unix::fs::OpenOptionsExt;
-	fs::OpenOptions::new().write(true).create_new(true).mode(0o600).open(path)
-}
-
-/// Opens a file containing a secret key in write mode.
-#[cfg(not(unix))]
-fn open_secret_file<P>(path: P) -> Result<fs::File, io::Error>
-where
-	P: AsRef<Path>,
-{
-	fs::OpenOptions::new().write(true).create_new(true).open(path)
-}
-
-#[cfg(test)]
-mod tests {
-	use super::*;
-	use tempfile::TempDir;
-
-	fn tempdir_with_prefix(prefix: &str) -> TempDir {
-		tempfile::Builder::new().prefix(prefix).tempdir().unwrap()
-	}
-
-	fn secret_bytes(kp: &Keypair) -> Vec<u8> {
-		let Keypair::Ed25519(p) = kp;
-		p.secret().as_ref().iter().cloned().collect()
-	}
-
-	#[test]
-	fn test_secret_file() {
-		let tmp = tempdir_with_prefix("x");
-		std::fs::remove_dir(tmp.path()).unwrap(); // should be recreated
-		let file = tmp.path().join("x").to_path_buf();
-		let kp1 = NodeKeyConfig::Ed25519(Secret::File(file.clone())).into_keypair().unwrap();
-		let kp2 = NodeKeyConfig::Ed25519(Secret::File(file.clone())).into_keypair().unwrap();
-		assert!(file.is_file() && secret_bytes(&kp1) == secret_bytes(&kp2))
-	}
-
-	#[test]
-	fn test_secret_input() {
-		let sk = ed25519::SecretKey::generate();
-		let kp1 = NodeKeyConfig::Ed25519(Secret::Input(sk.clone())).into_keypair().unwrap();
-		let kp2 = NodeKeyConfig::Ed25519(Secret::Input(sk)).into_keypair().unwrap();
-		assert!(secret_bytes(&kp1) == secret_bytes(&kp2));
-	}
-
-	#[test]
-	fn test_secret_new() {
-		let kp1 = NodeKeyConfig::Ed25519(Secret::New).into_keypair().unwrap();
-		let kp2 = NodeKeyConfig::Ed25519(Secret::New).into_keypair().unwrap();
-		assert!(secret_bytes(&kp1) != secret_bytes(&kp2));
-	}
-}
diff --git a/substrate/client/network/src/lib.rs b/substrate/client/network/src/lib.rs
index 8714d66f133..f94a71681cd 100644
--- a/substrate/client/network/src/lib.rs
+++ b/substrate/client/network/src/lib.rs
@@ -257,8 +257,6 @@ pub mod network_state;
 
 #[doc(inline)]
 pub use libp2p::{multiaddr, Multiaddr, PeerId};
-pub use protocol::PeerInfo;
-use sc_consensus::{JustificationSyncLink, Link};
 pub use sc_network_common::{
 	protocol::{
 		event::{DhtEvent, Event},
@@ -273,14 +271,13 @@ pub use sc_network_common::{
 	},
 	sync::{
 		warp::{WarpSyncPhase, WarpSyncProgress},
-		StateDownloadProgress, SyncState,
+		ExtendedPeerInfo, StateDownloadProgress, SyncEventStream, SyncState, SyncStatusProvider,
 	},
 };
 pub use service::{
 	DecodingError, Keypair, NetworkService, NetworkWorker, NotificationSender,
 	NotificationSenderReady, OutboundFailure, PublicKey,
 };
-use sp_runtime::traits::{Block as BlockT, NumberFor};
 
 pub use sc_peerset::ReputationChange;
 
@@ -295,18 +292,3 @@ const MAX_CONNECTIONS_PER_PEER: usize = 2;
 
 /// The maximum number of concurrent established connections that were incoming.
 const MAX_CONNECTIONS_ESTABLISHED_INCOMING: u32 = 10_000;
-
-/// Abstraction over syncing-related services
-pub trait ChainSyncInterface<B: BlockT>:
-	NetworkSyncForkRequest<B::Hash, NumberFor<B>> + JustificationSyncLink<B> + Link<B> + Send + Sync
-{
-}
-
-impl<T, B: BlockT> ChainSyncInterface<B> for T where
-	T: NetworkSyncForkRequest<B::Hash, NumberFor<B>>
-		+ JustificationSyncLink<B>
-		+ Link<B>
-		+ Send
-		+ Sync
-{
-}
diff --git a/substrate/client/network/src/protocol.rs b/substrate/client/network/src/protocol.rs
index 2443fce8156..8d604124233 100644
--- a/substrate/client/network/src/protocol.rs
+++ b/substrate/client/network/src/protocol.rs
@@ -19,8 +19,7 @@
 use crate::config;
 
 use bytes::Bytes;
-use codec::{Decode, DecodeAll, Encode};
-use futures::prelude::*;
+use codec::{DecodeAll, Encode};
 use libp2p::{
 	core::connection::ConnectionId,
 	swarm::{
@@ -29,32 +28,20 @@ use libp2p::{
 	},
 	Multiaddr, PeerId,
 };
-use log::{debug, error, log, trace, warn, Level};
-use lru::LruCache;
+use log::{debug, error, warn};
 use message::{generic::Message as GenericMessage, Message};
 use notifications::{Notifications, NotificationsOut};
-use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64};
-use sc_client_api::HeaderBackend;
 use sc_network_common::{
 	config::NonReservedPeerMode,
 	error,
 	protocol::{role::Roles, ProtocolName},
-	sync::{
-		message::{BlockAnnounce, BlockAnnouncesHandshake, BlockData, BlockResponse, BlockState},
-		BadPeer, ChainSync, PollBlockAnnounceValidation, SyncStatus,
-	},
-	utils::{interval, LruHashSet},
+	sync::message::BlockAnnouncesHandshake,
 };
-use sp_arithmetic::traits::SaturatedConversion;
-use sp_runtime::traits::{Block as BlockT, CheckedSub, Header as HeaderT, NumberFor, Zero};
+use sp_runtime::traits::Block as BlockT;
 use std::{
 	collections::{HashMap, HashSet, VecDeque},
 	iter,
-	num::NonZeroUsize,
-	pin::Pin,
-	sync::Arc,
 	task::Poll,
-	time,
 };
 
 mod notifications;
@@ -63,12 +50,6 @@ pub mod message;
 
 pub use notifications::{NotificationsSink, NotifsHandlerError, Ready};
 
-/// Interval at which we perform time based maintenance
-const TICK_TIMEOUT: time::Duration = time::Duration::from_millis(1100);
-
-/// Maximum number of known block hashes to keep for a peer.
-const MAX_KNOWN_BLOCKS: usize = 1024; // ~32kb per peer + LruHashSet overhead
-
 /// Maximum size used for notifications in the block announce and transaction protocols.
 // Must be equal to `max(MAX_BLOCK_ANNOUNCE_SIZE, MAX_TRANSACTIONS_SIZE)`.
 pub(crate) const BLOCK_ANNOUNCES_TRANSACTIONS_SUBSTREAM_SIZE: u64 = 16 * 1024 * 1024;
@@ -79,88 +60,16 @@ const HARDCODED_PEERSETS_SYNC: sc_peerset::SetId = sc_peerset::SetId::from(0);
 /// superior to this value corresponds to a user-defined protocol.
 const NUM_HARDCODED_PEERSETS: usize = 1;
 
-/// When light node connects to the full node and the full node is behind light node
-/// for at least `LIGHT_MAXIMAL_BLOCKS_DIFFERENCE` blocks, we consider it not useful
-/// and disconnect to free connection slot.
-const LIGHT_MAXIMAL_BLOCKS_DIFFERENCE: u64 = 8192;
-
 mod rep {
 	use sc_peerset::ReputationChange as Rep;
-	/// Reputation change when we are a light client and a peer is behind us.
-	pub const PEER_BEHIND_US_LIGHT: Rep = Rep::new(-(1 << 8), "Useless for a light peer");
 	/// We received a message that failed to decode.
 	pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message");
-	/// Peer has different genesis.
-	pub const GENESIS_MISMATCH: Rep = Rep::new_fatal("Genesis mismatch");
-	/// Peer role does not match (e.g. light peer connecting to another light peer).
-	pub const BAD_ROLE: Rep = Rep::new_fatal("Unsupported role");
-	/// Peer send us a block announcement that failed at validation.
-	pub const BAD_BLOCK_ANNOUNCEMENT: Rep = Rep::new(-(1 << 12), "Bad block announcement");
-}
-
-struct Metrics {
-	peers: Gauge<U64>,
-	queued_blocks: Gauge<U64>,
-	fork_targets: Gauge<U64>,
-	justifications: GaugeVec<U64>,
-}
-
-impl Metrics {
-	fn register(r: &Registry) -> Result<Self, PrometheusError> {
-		Ok(Self {
-			peers: {
-				let g = Gauge::new("substrate_sync_peers", "Number of peers we sync with")?;
-				register(g, r)?
-			},
-			queued_blocks: {
-				let g =
-					Gauge::new("substrate_sync_queued_blocks", "Number of blocks in import queue")?;
-				register(g, r)?
-			},
-			fork_targets: {
-				let g = Gauge::new("substrate_sync_fork_targets", "Number of fork sync targets")?;
-				register(g, r)?
-			},
-			justifications: {
-				let g = GaugeVec::new(
-					Opts::new(
-						"substrate_sync_extra_justifications",
-						"Number of extra justifications requests",
-					),
-					&["status"],
-				)?;
-				register(g, r)?
-			},
-		})
-	}
 }
 
 // Lock must always be taken in order declared here.
-pub struct Protocol<B: BlockT, Client> {
-	/// Interval at which we call `tick`.
-	tick_timeout: Pin<Box<dyn Stream<Item = ()> + Send>>,
+pub struct Protocol<B: BlockT> {
 	/// Pending list of messages to return from `poll` as a priority.
-	pending_messages: VecDeque<CustomMessageOutcome<B>>,
-	/// Assigned roles.
-	roles: Roles,
-	genesis_hash: B::Hash,
-	/// State machine that handles the list of in-progress requests. Only full node peers are
-	/// registered.
-	chain_sync: Box<dyn ChainSync<B>>,
-	// All connected peers. Contains both full and light node peers.
-	peers: HashMap<PeerId, Peer<B>>,
-	chain: Arc<Client>,
-	/// List of nodes for which we perform additional logging because they are important for the
-	/// user.
-	important_peers: HashSet<PeerId>,
-	/// List of nodes that should never occupy peer slots.
-	default_peers_set_no_slot_peers: HashSet<PeerId>,
-	/// Actual list of connected no-slot nodes.
-	default_peers_set_no_slot_connected_peers: HashSet<PeerId>,
-	/// Value that was passed as part of the configuration. Used to cap the number of full nodes.
-	default_peers_set_num_full: usize,
-	/// Number of slots to allocate to light nodes.
-	default_peers_set_num_light: usize,
+	pending_messages: VecDeque<CustomMessageOutcome>,
 	/// Used to report reputation changes.
 	peerset_handle: sc_peerset::PeersetHandle,
 	/// Handles opening the unique substream and sending and receiving raw messages.
@@ -174,85 +83,18 @@ pub struct Protocol<B: BlockT, Client> {
 	/// solve this, an entry is added to this map whenever an invalid handshake is received.
 	/// Entries are removed when the corresponding "substream closed" is later received.
 	bad_handshake_substreams: HashSet<(PeerId, sc_peerset::SetId)>,
-	/// Prometheus metrics.
-	metrics: Option<Metrics>,
-	/// The `PeerId`'s of all boot nodes.
-	boot_node_ids: HashSet<PeerId>,
-	/// A cache for the data that was associated to a block announcement.
-	block_announce_data_cache: LruCache<B::Hash, Vec<u8>>,
+	/// Connected peers.
+	peers: HashMap<PeerId, Roles>,
+	_marker: std::marker::PhantomData<B>,
 }
 
-/// Peer information
-#[derive(Debug)]
-struct Peer<B: BlockT> {
-	info: PeerInfo<B>,
-	/// Holds a set of blocks known to this peer.
-	known_blocks: LruHashSet<B::Hash>,
-}
-
-/// Info about a peer's known state.
-#[derive(Clone, Debug)]
-pub struct PeerInfo<B: BlockT> {
-	/// Roles
-	pub roles: Roles,
-	/// Peer best block hash
-	pub best_hash: B::Hash,
-	/// Peer best block number
-	pub best_number: <B::Header as HeaderT>::Number,
-}
-
-impl<B, Client> Protocol<B, Client>
-where
-	B: BlockT,
-	Client: HeaderBackend<B> + 'static,
-{
+impl<B: BlockT> Protocol<B> {
 	/// Create a new instance.
 	pub fn new(
 		roles: Roles,
-		chain: Arc<Client>,
 		network_config: &config::NetworkConfiguration,
-		metrics_registry: Option<&Registry>,
-		chain_sync: Box<dyn ChainSync<B>>,
 		block_announces_protocol: sc_network_common::config::NonDefaultSetConfig,
 	) -> error::Result<(Self, sc_peerset::PeersetHandle, Vec<(PeerId, Multiaddr)>)> {
-		let info = chain.info();
-
-		let boot_node_ids = {
-			let mut list = HashSet::new();
-			for node in &network_config.boot_nodes {
-				list.insert(node.peer_id);
-			}
-			list.shrink_to_fit();
-			list
-		};
-
-		let important_peers = {
-			let mut imp_p = HashSet::new();
-			for reserved in &network_config.default_peers_set.reserved_nodes {
-				imp_p.insert(reserved.peer_id);
-			}
-			for reserved in network_config
-				.extra_sets
-				.iter()
-				.flat_map(|s| s.set_config.reserved_nodes.iter())
-			{
-				imp_p.insert(reserved.peer_id);
-			}
-			imp_p.shrink_to_fit();
-			imp_p
-		};
-
-		let default_peers_set_no_slot_peers = {
-			let mut no_slot_p: HashSet<PeerId> = network_config
-				.default_peers_set
-				.reserved_nodes
-				.iter()
-				.map(|reserved| reserved.peer_id)
-				.collect();
-			no_slot_p.shrink_to_fit();
-			no_slot_p
-		};
-
 		let mut known_addresses = Vec::new();
 
 		let (peerset, peerset_handle) = {
@@ -326,44 +168,17 @@ where
 			)
 		};
 
-		let cache_capacity = NonZeroUsize::new(
-			(network_config.default_peers_set.in_peers as usize +
-				network_config.default_peers_set.out_peers as usize)
-				.max(1),
-		)
-		.expect("cache capacity is not zero");
-		let block_announce_data_cache = LruCache::new(cache_capacity);
-
 		let protocol = Self {
-			tick_timeout: Box::pin(interval(TICK_TIMEOUT)),
 			pending_messages: VecDeque::new(),
-			roles,
-			peers: HashMap::new(),
-			chain,
-			genesis_hash: info.genesis_hash,
-			chain_sync,
-			important_peers,
-			default_peers_set_no_slot_peers,
-			default_peers_set_no_slot_connected_peers: HashSet::new(),
-			default_peers_set_num_full: network_config.default_peers_set_num_full as usize,
-			default_peers_set_num_light: {
-				let total = network_config.default_peers_set.out_peers +
-					network_config.default_peers_set.in_peers;
-				total.saturating_sub(network_config.default_peers_set_num_full) as usize
-			},
 			peerset_handle: peerset_handle.clone(),
 			behaviour,
 			notification_protocols: iter::once(block_announces_protocol.notifications_protocol)
 				.chain(network_config.extra_sets.iter().map(|s| s.notifications_protocol.clone()))
 				.collect(),
 			bad_handshake_substreams: Default::default(),
-			metrics: if let Some(r) = metrics_registry {
-				Some(Metrics::register(r)?)
-			} else {
-				None
-			},
-			boot_node_ids,
-			block_announce_data_cache,
+			peers: HashMap::new(),
+			// TODO: remove when `BlockAnnouncesHandshake` is moved away from `Protocol`
+			_marker: Default::default(),
 		};
 
 		Ok((protocol, peerset_handle, known_addresses))
@@ -384,6 +199,7 @@ where
 		if let Some(position) = self.notification_protocols.iter().position(|p| *p == protocol_name)
 		{
 			self.behaviour.disconnect_peer(peer_id, sc_peerset::SetId::from(position));
+			self.peers.remove(peer_id);
 		} else {
 			warn!(target: "sub-libp2p", "disconnect_peer() with invalid protocol name")
 		}
@@ -399,391 +215,23 @@ where
 		self.peers.len()
 	}
 
-	/// Returns the number of peers we're connected to and that are being queried.
-	pub fn num_active_peers(&self) -> usize {
-		self.chain_sync.num_active_peers()
-	}
-
-	/// Current global sync state.
-	pub fn sync_state(&self) -> SyncStatus<B> {
-		self.chain_sync.status()
-	}
-
-	/// Target sync block number.
-	pub fn best_seen_block(&self) -> Option<NumberFor<B>> {
-		self.chain_sync.status().best_seen_block
-	}
-
-	/// Number of peers participating in syncing.
-	pub fn num_sync_peers(&self) -> u32 {
-		self.chain_sync.status().num_peers
-	}
-
-	/// Number of blocks in the import queue.
-	pub fn num_queued_blocks(&self) -> u32 {
-		self.chain_sync.status().queued_blocks
-	}
-
-	/// Number of downloaded blocks.
-	pub fn num_downloaded_blocks(&self) -> usize {
-		self.chain_sync.num_downloaded_blocks()
-	}
-
-	/// Number of active sync requests.
-	pub fn num_sync_requests(&self) -> usize {
-		self.chain_sync.num_sync_requests()
-	}
-
-	/// Inform sync about new best imported block.
-	pub fn new_best_block_imported(&mut self, hash: B::Hash, number: NumberFor<B>) {
-		debug!(target: "sync", "New best block imported {:?}/#{}", hash, number);
-
-		self.chain_sync.update_chain_info(&hash, number);
-
-		self.behaviour.set_notif_protocol_handshake(
-			HARDCODED_PEERSETS_SYNC,
-			BlockAnnouncesHandshake::<B>::build(self.roles, number, hash, self.genesis_hash)
-				.encode(),
-		);
-	}
-
-	fn update_peer_info(&mut self, who: &PeerId) {
-		if let Some(info) = self.chain_sync.peer_info(who) {
-			if let Some(ref mut peer) = self.peers.get_mut(who) {
-				peer.info.best_hash = info.best_hash;
-				peer.info.best_number = info.best_number;
-			}
-		}
-	}
-
-	/// Returns information about all the peers we are connected to after the handshake message.
-	pub fn peers_info(&self) -> impl Iterator<Item = (&PeerId, &PeerInfo<B>)> {
-		self.peers.iter().map(|(id, peer)| (id, &peer.info))
-	}
-
-	/// Called by peer when it is disconnecting.
-	///
-	/// Returns a result if the handshake of this peer was indeed accepted.
-	pub fn on_sync_peer_disconnected(&mut self, peer: PeerId) -> Result<(), ()> {
-		if self.important_peers.contains(&peer) {
-			warn!(target: "sync", "Reserved peer {} disconnected", peer);
-		} else {
-			debug!(target: "sync", "{} disconnected", peer);
-		}
-
-		if let Some(_peer_data) = self.peers.remove(&peer) {
-			self.chain_sync.peer_disconnected(&peer);
-			self.default_peers_set_no_slot_connected_peers.remove(&peer);
-			Ok(())
-		} else {
-			Err(())
-		}
-	}
-
 	/// Adjusts the reputation of a node.
 	pub fn report_peer(&self, who: PeerId, reputation: sc_peerset::ReputationChange) {
 		self.peerset_handle.report_peer(who, reputation)
 	}
 
-	/// Perform time based maintenance.
-	///
-	/// > **Note**: This method normally doesn't have to be called except for testing purposes.
-	pub fn tick(&mut self) {
-		self.report_metrics()
-	}
-
-	/// Called on the first connection between two peers on the default set, after their exchange
-	/// of handshake.
-	///
-	/// Returns `Ok` if the handshake is accepted and the peer added to the list of peers we sync
-	/// from.
-	fn on_sync_peer_connected(
-		&mut self,
-		who: PeerId,
-		status: BlockAnnouncesHandshake<B>,
-	) -> Result<(), ()> {
-		trace!(target: "sync", "New peer {} {:?}", who, status);
-
-		if self.peers.contains_key(&who) {
-			error!(target: "sync", "Called on_sync_peer_connected with already connected peer {}", who);
-			debug_assert!(false);
-			return Err(())
-		}
-
-		if status.genesis_hash != self.genesis_hash {
-			log!(
-				target: "sync",
-				if self.important_peers.contains(&who) { Level::Warn } else { Level::Debug },
-				"Peer is on different chain (our genesis: {} theirs: {})",
-				self.genesis_hash, status.genesis_hash
-			);
-			self.peerset_handle.report_peer(who, rep::GENESIS_MISMATCH);
-			self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC);
-
-			if self.boot_node_ids.contains(&who) {
-				error!(
-					target: "sync",
-					"Bootnode with peer id `{}` is on a different chain (our genesis: {} theirs: {})",
-					who,
-					self.genesis_hash,
-					status.genesis_hash,
-				);
-			}
-
-			return Err(())
-		}
-
-		if self.roles.is_light() {
-			// we're not interested in light peers
-			if status.roles.is_light() {
-				debug!(target: "sync", "Peer {} is unable to serve light requests", who);
-				self.peerset_handle.report_peer(who, rep::BAD_ROLE);
-				self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC);
-				return Err(())
-			}
-
-			// we don't interested in peers that are far behind us
-			let self_best_block = self.chain.info().best_number;
-			let blocks_difference = self_best_block
-				.checked_sub(&status.best_number)
-				.unwrap_or_else(Zero::zero)
-				.saturated_into::<u64>();
-			if blocks_difference > LIGHT_MAXIMAL_BLOCKS_DIFFERENCE {
-				debug!(target: "sync", "Peer {} is far behind us and will unable to serve light requests", who);
-				self.peerset_handle.report_peer(who, rep::PEER_BEHIND_US_LIGHT);
-				self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC);
-				return Err(())
-			}
-		}
-
-		let no_slot_peer = self.default_peers_set_no_slot_peers.contains(&who);
-		let this_peer_reserved_slot: usize = if no_slot_peer { 1 } else { 0 };
-
-		if status.roles.is_full() &&
-			self.chain_sync.num_peers() >=
-				self.default_peers_set_num_full +
-					self.default_peers_set_no_slot_connected_peers.len() +
-					this_peer_reserved_slot
-		{
-			debug!(target: "sync", "Too many full nodes, rejecting {}", who);
-			self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC);
-			return Err(())
-		}
-
-		if status.roles.is_light() &&
-			(self.peers.len() - self.chain_sync.num_peers()) >= self.default_peers_set_num_light
-		{
-			// Make sure that not all slots are occupied by light clients.
-			debug!(target: "sync", "Too many light nodes, rejecting {}", who);
-			self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC);
-			return Err(())
-		}
-
-		let peer = Peer {
-			info: PeerInfo {
-				roles: status.roles,
-				best_hash: status.best_hash,
-				best_number: status.best_number,
-			},
-			known_blocks: LruHashSet::new(
-				NonZeroUsize::new(MAX_KNOWN_BLOCKS).expect("Constant is nonzero"),
-			),
-		};
-
-		let req = if peer.info.roles.is_full() {
-			match self.chain_sync.new_peer(who, peer.info.best_hash, peer.info.best_number) {
-				Ok(req) => req,
-				Err(BadPeer(id, repu)) => {
-					self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC);
-					self.peerset_handle.report_peer(id, repu);
-					return Err(())
-				},
-			}
+	/// Set handshake for the notification protocol.
+	pub fn set_notification_handshake(&mut self, protocol: ProtocolName, handshake: Vec<u8>) {
+		if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) {
+			self.behaviour
+				.set_notif_protocol_handshake(sc_peerset::SetId::from(index), handshake);
 		} else {
-			None
-		};
-
-		debug!(target: "sync", "Connected {}", who);
-
-		self.peers.insert(who, peer);
-		if no_slot_peer {
-			self.default_peers_set_no_slot_connected_peers.insert(who);
-		}
-		self.pending_messages
-			.push_back(CustomMessageOutcome::PeerNewBest(who, status.best_number));
-
-		if let Some(req) = req {
-			self.chain_sync.send_block_request(who, req);
-		}
-
-		Ok(())
-	}
-
-	/// Make sure an important block is propagated to peers.
-	///
-	/// In chain-based consensus, we often need to make sure non-best forks are
-	/// at least temporarily synced.
-	pub fn announce_block(&mut self, hash: B::Hash, data: Option<Vec<u8>>) {
-		let header = match self.chain.header(hash) {
-			Ok(Some(header)) => header,
-			Ok(None) => {
-				warn!("Trying to announce unknown block: {}", hash);
-				return
-			},
-			Err(e) => {
-				warn!("Error reading block header {}: {}", hash, e);
-				return
-			},
-		};
-
-		// don't announce genesis block since it will be ignored
-		if header.number().is_zero() {
-			return
-		}
-
-		let is_best = self.chain.info().best_hash == hash;
-		debug!(target: "sync", "Reannouncing block {:?} is_best: {}", hash, is_best);
-
-		let data = data
-			.or_else(|| self.block_announce_data_cache.get(&hash).cloned())
-			.unwrap_or_default();
-
-		for (who, ref mut peer) in self.peers.iter_mut() {
-			let inserted = peer.known_blocks.insert(hash);
-			if inserted {
-				trace!(target: "sync", "Announcing block {:?} to {}", hash, who);
-				let message = BlockAnnounce {
-					header: header.clone(),
-					state: if is_best { Some(BlockState::Best) } else { Some(BlockState::Normal) },
-					data: Some(data.clone()),
-				};
-
-				self.behaviour
-					.write_notification(who, HARDCODED_PEERSETS_SYNC, message.encode());
-			}
-		}
-	}
-
-	/// Push a block announce validation.
-	///
-	/// It is required that [`ChainSync::poll_block_announce_validation`] is
-	/// called later to check for finished validations. The result of the validation
-	/// needs to be passed to [`Protocol::process_block_announce_validation_result`]
-	/// to finish the processing.
-	///
-	/// # Note
-	///
-	/// This will internally create a future, but this future will not be registered
-	/// in the task before being polled once. So, it is required to call
-	/// [`ChainSync::poll_block_announce_validation`] to ensure that the future is
-	/// registered properly and will wake up the task when being ready.
-	fn push_block_announce_validation(&mut self, who: PeerId, announce: BlockAnnounce<B::Header>) {
-		let hash = announce.header.hash();
-
-		let peer = match self.peers.get_mut(&who) {
-			Some(p) => p,
-			None => {
-				log::error!(target: "sync", "Received block announce from disconnected peer {}", who);
-				debug_assert!(false);
-				return
-			},
-		};
-
-		peer.known_blocks.insert(hash);
-
-		let is_best = match announce.state.unwrap_or(BlockState::Best) {
-			BlockState::Best => true,
-			BlockState::Normal => false,
-		};
-
-		if peer.info.roles.is_full() {
-			self.chain_sync.push_block_announce_validation(who, hash, announce, is_best);
-		}
-	}
-
-	/// Process the result of the block announce validation.
-	fn process_block_announce_validation_result(
-		&mut self,
-		validation_result: PollBlockAnnounceValidation<B::Header>,
-	) -> CustomMessageOutcome<B> {
-		let (header, is_best, who) = match validation_result {
-			PollBlockAnnounceValidation::Skip => return CustomMessageOutcome::None,
-			PollBlockAnnounceValidation::Nothing { is_best, who, announce } => {
-				self.update_peer_info(&who);
-
-				if let Some(data) = announce.data {
-					if !data.is_empty() {
-						self.block_announce_data_cache.put(announce.header.hash(), data);
-					}
-				}
-
-				// `on_block_announce` returns `OnBlockAnnounce::ImportHeader`
-				// when we have all data required to import the block
-				// in the BlockAnnounce message. This is only when:
-				// 1) we're on light client;
-				// AND
-				// 2) parent block is already imported and not pruned.
-				if is_best {
-					return CustomMessageOutcome::PeerNewBest(who, *announce.header.number())
-				} else {
-					return CustomMessageOutcome::None
-				}
-			},
-			PollBlockAnnounceValidation::ImportHeader { announce, is_best, who } => {
-				self.update_peer_info(&who);
-
-				if let Some(data) = announce.data {
-					if !data.is_empty() {
-						self.block_announce_data_cache.put(announce.header.hash(), data);
-					}
-				}
-
-				(announce.header, is_best, who)
-			},
-			PollBlockAnnounceValidation::Failure { who, disconnect } => {
-				if disconnect {
-					self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC);
-				}
-
-				self.report_peer(who, rep::BAD_BLOCK_ANNOUNCEMENT);
-				return CustomMessageOutcome::None
-			},
-		};
-
-		let number = *header.number();
-
-		// to import header from announced block let's construct response to request that normally
-		// would have been sent over network (but it is not in our case)
-		let blocks_to_import = self.chain_sync.on_block_data(
-			&who,
-			None,
-			BlockResponse::<B> {
-				id: 0,
-				blocks: vec![BlockData::<B> {
-					hash: header.hash(),
-					header: Some(header),
-					body: None,
-					indexed_body: None,
-					receipt: None,
-					message_queue: None,
-					justification: None,
-					justifications: None,
-				}],
-			},
-		);
-		self.chain_sync.process_block_response_data(blocks_to_import);
-
-		if is_best {
-			self.pending_messages.push_back(CustomMessageOutcome::PeerNewBest(who, number));
+			error!(
+				target: "sub-libp2p",
+				"set_notification_handshake with unknown protocol: {}",
+				protocol
+			);
 		}
-
-		CustomMessageOutcome::None
-	}
-
-	/// Call this when a block has been finalized. The sync layer may have some additional
-	/// requesting to perform.
-	pub fn on_block_finalized(&mut self, hash: B::Hash, header: &B::Header) {
-		self.chain_sync.on_block_finalized(&hash, *header.number())
 	}
 
 	/// Set whether the syncing peers set is in reserved-only mode.
@@ -884,41 +332,12 @@ where
 			);
 		}
 	}
-
-	fn report_metrics(&self) {
-		if let Some(metrics) = &self.metrics {
-			let n = u64::try_from(self.peers.len()).unwrap_or(std::u64::MAX);
-			metrics.peers.set(n);
-
-			let m = self.chain_sync.metrics();
-
-			metrics.fork_targets.set(m.fork_targets.into());
-			metrics.queued_blocks.set(m.queued_blocks.into());
-
-			metrics
-				.justifications
-				.with_label_values(&["pending"])
-				.set(m.justifications.pending_requests.into());
-			metrics
-				.justifications
-				.with_label_values(&["active"])
-				.set(m.justifications.active_requests.into());
-			metrics
-				.justifications
-				.with_label_values(&["failed"])
-				.set(m.justifications.failed_requests.into());
-			metrics
-				.justifications
-				.with_label_values(&["importing"])
-				.set(m.justifications.importing_requests.into());
-		}
-	}
 }
 
 /// Outcome of an incoming custom message.
 #[derive(Debug)]
 #[must_use]
-pub enum CustomMessageOutcome<B: BlockT> {
+pub enum CustomMessageOutcome {
 	/// Notification protocols have been opened with a remote.
 	NotificationStreamOpened {
 		remote: PeerId,
@@ -926,6 +345,7 @@ pub enum CustomMessageOutcome<B: BlockT> {
 		/// See [`crate::Event::NotificationStreamOpened::negotiated_fallback`].
 		negotiated_fallback: Option<ProtocolName>,
 		roles: Roles,
+		received_handshake: Vec<u8>,
 		notifications_sink: NotificationsSink,
 	},
 	/// The [`NotificationsSink`] of some notification protocols need an update.
@@ -935,31 +355,16 @@ pub enum CustomMessageOutcome<B: BlockT> {
 		notifications_sink: NotificationsSink,
 	},
 	/// Notification protocols have been closed with a remote.
-	NotificationStreamClosed {
-		remote: PeerId,
-		protocol: ProtocolName,
-	},
+	NotificationStreamClosed { remote: PeerId, protocol: ProtocolName },
 	/// Messages have been received on one or more notifications protocols.
-	NotificationsReceived {
-		remote: PeerId,
-		messages: Vec<(ProtocolName, Bytes)>,
-	},
-	/// Peer has a reported a new head of chain.
-	PeerNewBest(PeerId, NumberFor<B>),
+	NotificationsReceived { remote: PeerId, messages: Vec<(ProtocolName, Bytes)> },
 	/// Now connected to a new peer for syncing purposes.
-	SyncConnected(PeerId),
-	/// No longer connected to a peer for syncing purposes.
-	SyncDisconnected(PeerId),
 	None,
 }
 
-impl<B, Client> NetworkBehaviour for Protocol<B, Client>
-where
-	B: BlockT,
-	Client: HeaderBackend<B> + 'static,
-{
+impl<B: BlockT> NetworkBehaviour for Protocol<B> {
 	type ConnectionHandler = <Notifications as NetworkBehaviour>::ConnectionHandler;
-	type OutEvent = CustomMessageOutcome<B>;
+	type OutEvent = CustomMessageOutcome;
 
 	fn new_handler(&mut self) -> Self::ConnectionHandler {
 		self.behaviour.new_handler()
@@ -994,25 +399,6 @@ where
 			return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message))
 		}
 
-		// Advance the state of `ChainSync`
-		//
-		// Process any received requests received from `NetworkService` and
-		// check if there is any block announcement validation finished.
-		while let Poll::Ready(result) = self.chain_sync.poll(cx) {
-			match self.process_block_announce_validation_result(result) {
-				CustomMessageOutcome::None => {},
-				outcome => self.pending_messages.push_back(outcome),
-			}
-		}
-
-		while let Poll::Ready(Some(())) = self.tick_timeout.poll_next_unpin(cx) {
-			self.tick();
-		}
-
-		if let Some(message) = self.pending_messages.pop_front() {
-			return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message))
-		}
-
 		let event = match self.behaviour.poll(cx, params) {
 			Poll::Pending => return Poll::Pending,
 			Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) => ev,
@@ -1045,17 +431,22 @@ where
 					// announces substream.
 					match <Message<B> as DecodeAll>::decode_all(&mut &received_handshake[..]) {
 						Ok(GenericMessage::Status(handshake)) => {
-							let handshake = BlockAnnouncesHandshake {
+							let roles = handshake.roles;
+							let handshake = BlockAnnouncesHandshake::<B> {
 								roles: handshake.roles,
 								best_number: handshake.best_number,
 								best_hash: handshake.best_hash,
 								genesis_hash: handshake.genesis_hash,
 							};
+							self.peers.insert(peer_id, roles);
 
-							if self.on_sync_peer_connected(peer_id, handshake).is_ok() {
-								CustomMessageOutcome::SyncConnected(peer_id)
-							} else {
-								CustomMessageOutcome::None
+							CustomMessageOutcome::NotificationStreamOpened {
+								remote: peer_id,
+								protocol: self.notification_protocols[usize::from(set_id)].clone(),
+								negotiated_fallback,
+								received_handshake: handshake.encode(),
+								roles,
+								notifications_sink,
 							}
 						},
 						Ok(msg) => {
@@ -1073,14 +464,21 @@ where
 								&mut &received_handshake[..],
 							) {
 								Ok(handshake) => {
-									if self.on_sync_peer_connected(peer_id, handshake).is_ok() {
-										CustomMessageOutcome::SyncConnected(peer_id)
-									} else {
-										CustomMessageOutcome::None
+									let roles = handshake.roles;
+									self.peers.insert(peer_id, roles);
+
+									CustomMessageOutcome::NotificationStreamOpened {
+										remote: peer_id,
+										protocol: self.notification_protocols[usize::from(set_id)]
+											.clone(),
+										negotiated_fallback,
+										received_handshake,
+										roles,
+										notifications_sink,
 									}
 								},
 								Err(err2) => {
-									debug!(
+									log::debug!(
 										target: "sync",
 										"Couldn't decode handshake sent by {}: {:?}: {} & {}",
 										peer_id,
@@ -1104,9 +502,10 @@ where
 							protocol: self.notification_protocols[usize::from(set_id)].clone(),
 							negotiated_fallback,
 							roles,
+							received_handshake,
 							notifications_sink,
 						},
-						(Err(_), Some(peer)) if received_handshake.is_empty() => {
+						(Err(_), Some(roles)) if received_handshake.is_empty() => {
 							// As a convenience, we allow opening substreams for "external"
 							// notification protocols with an empty handshake. This fetches the
 							// roles from the locally-known roles.
@@ -1115,7 +514,8 @@ where
 								remote: peer_id,
 								protocol: self.notification_protocols[usize::from(set_id)].clone(),
 								negotiated_fallback,
-								roles: peer.info.roles,
+								roles: *roles,
+								received_handshake,
 								notifications_sink,
 							}
 						},
@@ -1124,15 +524,14 @@ where
 							self.bad_handshake_substreams.insert((peer_id, set_id));
 							self.behaviour.disconnect_peer(&peer_id, set_id);
 							self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE);
+							self.peers.remove(&peer_id);
 							CustomMessageOutcome::None
 						},
 					}
 				}
 			},
 			NotificationsOut::CustomProtocolReplaced { peer_id, notifications_sink, set_id } =>
-				if set_id == HARDCODED_PEERSETS_SYNC ||
-					self.bad_handshake_substreams.contains(&(peer_id, set_id))
-				{
+				if self.bad_handshake_substreams.contains(&(peer_id, set_id)) {
 					CustomMessageOutcome::None
 				} else {
 					CustomMessageOutcome::NotificationStreamReplaced {
@@ -1142,19 +541,7 @@ where
 					}
 				},
 			NotificationsOut::CustomProtocolClosed { peer_id, set_id } => {
-				// Set number 0 is hardcoded the default set of peers we sync from.
-				if set_id == HARDCODED_PEERSETS_SYNC {
-					if self.on_sync_peer_disconnected(peer_id).is_ok() {
-						CustomMessageOutcome::SyncDisconnected(peer_id)
-					} else {
-						log::trace!(
-							target: "sync",
-							"Disconnected peer which had earlier been refused by on_sync_peer_connected {}",
-							peer_id
-						);
-						CustomMessageOutcome::None
-					}
-				} else if self.bad_handshake_substreams.remove(&(peer_id, set_id)) {
+				if self.bad_handshake_substreams.remove(&(peer_id, set_id)) {
 					// The substream that has just been closed had been opened with a bad
 					// handshake. The outer layers have never received an opening event about this
 					// substream, and consequently shouldn't receive a closing event either.
@@ -1166,45 +553,20 @@ where
 					}
 				}
 			},
-			NotificationsOut::Notification { peer_id, set_id, message } => match set_id {
-				HARDCODED_PEERSETS_SYNC if self.peers.contains_key(&peer_id) => {
-					if let Ok(announce) = BlockAnnounce::decode(&mut message.as_ref()) {
-						self.push_block_announce_validation(peer_id, announce);
-
-						// Make sure that the newly added block announce validation future was
-						// polled once to be registered in the task.
-						if let Poll::Ready(res) = self.chain_sync.poll_block_announce_validation(cx)
-						{
-							self.process_block_announce_validation_result(res)
-						} else {
-							CustomMessageOutcome::None
-						}
-					} else {
-						warn!(target: "sub-libp2p", "Failed to decode block announce");
-						CustomMessageOutcome::None
-					}
-				},
-				HARDCODED_PEERSETS_SYNC => {
-					trace!(
-						target: "sync",
-						"Received sync for peer earlier refused by sync layer: {}",
-						peer_id
-					);
+			NotificationsOut::Notification { peer_id, set_id, message } => {
+				if self.bad_handshake_substreams.contains(&(peer_id, set_id)) {
 					CustomMessageOutcome::None
-				},
-				_ if self.bad_handshake_substreams.contains(&(peer_id, set_id)) =>
-					CustomMessageOutcome::None,
-				_ => {
+				} else {
 					let protocol_name = self.notification_protocols[usize::from(set_id)].clone();
 					CustomMessageOutcome::NotificationsReceived {
 						remote: peer_id,
 						messages: vec![(protocol_name, message.freeze())],
 					}
-				},
+				}
 			},
 		};
 
-		if !matches!(outcome, CustomMessageOutcome::<B>::None) {
+		if !matches!(outcome, CustomMessageOutcome::None) {
 			return Poll::Ready(NetworkBehaviourAction::GenerateEvent(outcome))
 		}
 
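Note on the hunk above: `Protocol` no longer interprets block-announce handshakes itself; it only records the peer's `Roles` and surfaces the raw bytes through `CustomMessageOutcome::NotificationStreamOpened { received_handshake, .. }`. The consumer of that event is expected to do the decoding. A minimal consumer-side sketch, assuming the same SCALE machinery used in the hunk (`DecodeAll`, `BlockAnnouncesHandshake`); `handle_stream_opened` is a hypothetical helper, not part of this patch:

	// Sketch only: decode the raw handshake that `Protocol` now forwards upward
	// instead of interpreting itself. Import paths are assumed.
	use codec::DecodeAll;
	use sc_network_common::sync::message::BlockAnnouncesHandshake; // path assumed
	use sp_runtime::traits::Block as BlockT;

	fn handle_stream_opened<B: BlockT>(
		received_handshake: &[u8],
	) -> Option<BlockAnnouncesHandshake<B>> {
		match BlockAnnouncesHandshake::<B>::decode_all(&mut &received_handshake[..]) {
			Ok(handshake) => Some(handshake),
			Err(err) => {
				log::debug!(target: "sync", "couldn't decode handshake: {:?}", err);
				None
			},
		}
	}
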
diff --git a/substrate/client/network/src/protocol/notifications/behaviour.rs b/substrate/client/network/src/protocol/notifications/behaviour.rs
index 027ea0ab531..093d5846db7 100644
--- a/substrate/client/network/src/protocol/notifications/behaviour.rs
+++ b/substrate/client/network/src/protocol/notifications/behaviour.rs
@@ -538,48 +538,6 @@ impl Notifications {
 		self.peerset.reserved_peers(set_id)
 	}
 
-	/// Sends a notification to a peer.
-	///
-	/// Has no effect if the custom protocol is not open with the given peer.
-	///
-	/// Also note that even if we have a valid open substream, it may in fact be already closed
-	/// without us knowing, in which case the packet will not be received.
-	///
-	/// The `fallback` parameter is used for backwards-compatibility reason if the remote doesn't
-	/// support our protocol. One needs to pass the equivalent of what would have been passed
-	/// with `send_packet`.
-	pub fn write_notification(
-		&mut self,
-		target: &PeerId,
-		set_id: sc_peerset::SetId,
-		message: impl Into<Vec<u8>>,
-	) {
-		let notifs_sink = match self.peers.get(&(*target, set_id)).and_then(|p| p.get_open()) {
-			None => {
-				trace!(
-					target: "sub-libp2p",
-					"Tried to sent notification to {:?} without an open channel.",
-					target,
-				);
-				return
-			},
-			Some(sink) => sink,
-		};
-
-		let message = message.into();
-
-		trace!(
-			target: "sub-libp2p",
-			"External API => Notification({:?}, {:?}, {} bytes)",
-			target,
-			set_id,
-			message.len(),
-		);
-		trace!(target: "sub-libp2p", "Handler({:?}) <= Sync notification", target);
-
-		notifs_sink.send_sync_notification(message);
-	}
-
 	/// Returns the state of the peerset manager, for debugging purposes.
 	pub fn peerset_debug_info(&mut self) -> serde_json::Value {
 		self.peerset.debug_info()
@@ -3058,7 +3016,13 @@ mod tests {
 			panic!("invalid state");
 		}
 
-		notif.write_notification(&peer, set_id, vec![1, 3, 3, 7]);
+		notif
+			.peers
+			.get(&(peer, set_id))
+			.unwrap()
+			.get_open()
+			.unwrap()
+			.send_sync_notification(vec![1, 3, 3, 7]);
 		assert_eq!(conn_yielder.get_next_event(peer, set_id.into()).await, Some(vec![1, 3, 3, 7]));
 	}
 
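With `write_notification` removed from `Notifications`, callers (and the rewritten test above) go through the open substream's sink directly. Roughly the same pattern, sketched outside the test; the `notif`, `peer`, and `set_id` bindings are assumed to exist as in the test:

	// Sketch only: send a sync notification via the open sink for (peer, set),
	// which is what the removed `write_notification` helper used to do internally.
	if let Some(sink) = notif.peers.get(&(peer, set_id)).and_then(|state| state.get_open()) {
		sink.send_sync_notification(vec![1, 3, 3, 7]);
	}
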
diff --git a/substrate/client/network/src/service.rs b/substrate/client/network/src/service.rs
index fd0b965b773..4a05393618c 100644
--- a/substrate/client/network/src/service.rs
+++ b/substrate/client/network/src/service.rs
@@ -34,8 +34,8 @@ use crate::{
 	network_state::{
 		NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer,
 	},
-	protocol::{self, NotificationsSink, NotifsHandlerError, PeerInfo, Protocol, Ready},
-	transport, ChainSyncInterface, ReputationChange,
+	protocol::{self, NotificationsSink, NotifsHandlerError, Protocol, Ready},
+	transport, ReputationChange,
 };
 
 use futures::{channel::oneshot, prelude::*};
@@ -65,17 +65,16 @@ use sc_network_common::{
 	request_responses::{IfDisconnected, RequestFailure},
 	service::{
 		NetworkDHTProvider, NetworkEventStream, NetworkNotification, NetworkPeers, NetworkSigner,
-		NetworkStateInfo, NetworkStatus, NetworkStatusProvider, NetworkSyncForkRequest,
+		NetworkStateInfo, NetworkStatus, NetworkStatusProvider,
 		NotificationSender as NotificationSenderT, NotificationSenderError,
 		NotificationSenderReady as NotificationSenderReadyT, Signature, SigningError,
 	},
-	sync::SyncStatus,
 	ExHashT,
 };
 use sc_peerset::PeersetHandle;
 use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender};
 use sp_blockchain::HeaderBackend;
-use sp_runtime::traits::{Block as BlockT, NumberFor, Zero};
+use sp_runtime::traits::{Block as BlockT, Zero};
 use std::{
 	cmp,
 	collections::{HashMap, HashSet},
@@ -85,7 +84,7 @@ use std::{
 	pin::Pin,
 	str,
 	sync::{
-		atomic::{AtomicBool, AtomicUsize, Ordering},
+		atomic::{AtomicUsize, Ordering},
 		Arc,
 	},
 };
@@ -98,7 +97,7 @@ mod out_events;
 mod tests;
 
 pub use libp2p::identity::{error::DecodingError, Keypair, PublicKey};
-use sc_network_common::service::{NetworkBlock, NetworkRequest};
+use sc_network_common::service::NetworkRequest;
 
 /// Custom error that can be produced by the [`ConnectionHandler`] of the [`NetworkBehaviour`].
 /// Used as a template parameter of [`SwarmEvent`] below.
@@ -114,8 +113,6 @@ pub struct NetworkService<B: BlockT + 'static, H: ExHashT> {
 	external_addresses: Arc<Mutex<Vec<Multiaddr>>>,
 	/// Listen addresses. Do **NOT** include a trailing `/p2p/` with our `PeerId`.
 	listen_addresses: Arc<Mutex<Vec<Multiaddr>>>,
-	/// Are we actively catching up with the chain?
-	is_major_syncing: Arc<AtomicBool>,
 	/// Local copy of the `PeerId` of the local node.
 	local_peer_id: PeerId,
 	/// The `KeyPair` that defines the `PeerId` of the local node.
@@ -126,9 +123,7 @@ pub struct NetworkService<B: BlockT + 'static, H: ExHashT> {
 	/// nodes it should be connected to or not.
 	peerset: PeersetHandle,
 	/// Channel that sends messages to the actual worker.
-	to_worker: TracingUnboundedSender<ServiceToWorkerMsg<B>>,
-	/// Interface that can be used to delegate calls to `ChainSync`
-	chain_sync_service: Box<dyn ChainSyncInterface<B>>,
+	to_worker: TracingUnboundedSender<ServiceToWorkerMsg>,
 	/// For each peer and protocol combination, an object that allows sending notifications to
 	/// that peer. Updated by the [`NetworkWorker`].
 	peers_notifications_sinks: Arc<Mutex<HashMap<(PeerId, ProtocolName), NotificationsSink>>>,
@@ -138,20 +133,23 @@ pub struct NetworkService<B: BlockT + 'static, H: ExHashT> {
 	/// Marker to pin the `H` generic. Serves no purpose except to not break backwards
 	/// compatibility.
 	_marker: PhantomData<H>,
+	/// Marker for the block type.
+	_block: PhantomData<B>,
 }
 
-impl<B, H, Client> NetworkWorker<B, H, Client>
+impl<B, H> NetworkWorker<B, H>
 where
 	B: BlockT + 'static,
 	H: ExHashT,
-	Client: HeaderBackend<B> + 'static,
 {
 	/// Creates the network service.
 	///
 	/// Returns a `NetworkWorker` that implements `Future` and must be regularly polled in order
 	/// for the network processing to advance. From it, you can extract a `NetworkService` using
 	/// `worker.service()`. The `NetworkService` can be shared through the codebase.
-	pub fn new(mut params: Params<B, Client>) -> Result<Self, Error> {
+	pub fn new<Client: HeaderBackend<B> + 'static>(
+		mut params: Params<Client>,
+	) -> Result<Self, Error> {
 		// Private and public keys configuration.
 		let local_identity = params.network_config.node_key.clone().into_keypair()?;
 		let local_public = local_identity.public();
@@ -230,10 +228,7 @@ where
 
 		let (protocol, peerset_handle, mut known_addresses) = Protocol::new(
 			From::from(&params.role),
-			params.chain.clone(),
 			&params.network_config,
-			params.metrics_registry.as_ref(),
-			params.chain_sync,
 			params.block_announce_config,
 		)?;
 
@@ -268,10 +263,9 @@ where
 		})?;
 
 		let num_connected = Arc::new(AtomicUsize::new(0));
-		let is_major_syncing = Arc::new(AtomicBool::new(false));
 
 		// Build the swarm.
-		let (mut swarm, bandwidth): (Swarm<Behaviour<B, Client>>, _) = {
+		let (mut swarm, bandwidth): (Swarm<Behaviour<B>>, _) = {
 			let user_agent = format!(
 				"{} ({})",
 				params.network_config.client_version, params.network_config.node_name
@@ -418,7 +412,6 @@ where
 				registry,
 				MetricSources {
 					bandwidth: bandwidth.clone(),
-					major_syncing: is_major_syncing.clone(),
 					connected_peers: num_connected.clone(),
 				},
 			)?),
@@ -427,14 +420,14 @@ where
 
 		// Listen on multiaddresses.
 		for addr in &params.network_config.listen_addresses {
-			if let Err(err) = Swarm::<Behaviour<B, Client>>::listen_on(&mut swarm, addr.clone()) {
+			if let Err(err) = Swarm::<Behaviour<B>>::listen_on(&mut swarm, addr.clone()) {
 				warn!(target: "sub-libp2p", "Can't listen on {} because: {:?}", addr, err)
 			}
 		}
 
 		// Add external addresses.
 		for addr in &params.network_config.public_addresses {
-			Swarm::<Behaviour<B, Client>>::add_external_address(
+			Swarm::<Behaviour<B>>::add_external_address(
 				&mut swarm,
 				addr.clone(),
 				AddressScore::Infinite,
@@ -450,24 +443,22 @@ where
 			external_addresses: external_addresses.clone(),
 			listen_addresses: listen_addresses.clone(),
 			num_connected: num_connected.clone(),
-			is_major_syncing: is_major_syncing.clone(),
 			peerset: peerset_handle,
 			local_peer_id,
 			local_identity,
 			to_worker,
-			chain_sync_service: params.chain_sync_service,
 			peers_notifications_sinks: peers_notifications_sinks.clone(),
 			notifications_sizes_metric: metrics
 				.as_ref()
 				.map(|metrics| metrics.notifications_sizes.clone()),
 			_marker: PhantomData,
+			_block: Default::default(),
 		});
 
 		Ok(NetworkWorker {
 			external_addresses,
 			listen_addresses,
 			num_connected,
-			is_major_syncing,
 			network_service: swarm,
 			service,
 			from_service,
@@ -476,22 +467,16 @@ where
 			metrics,
 			boot_node_ids,
 			_marker: Default::default(),
+			_block: Default::default(),
 		})
 	}
 
 	/// High-level network status information.
-	pub fn status(&self) -> NetworkStatus<B> {
-		let status = self.sync_state();
+	pub fn status(&self) -> NetworkStatus {
 		NetworkStatus {
-			sync_state: status.state,
-			best_seen_block: self.best_seen_block(),
-			num_sync_peers: self.num_sync_peers(),
 			num_connected_peers: self.num_connected_peers(),
-			num_active_peers: self.num_active_peers(),
 			total_bytes_inbound: self.total_bytes_inbound(),
 			total_bytes_outbound: self.total_bytes_outbound(),
-			state_sync: status.state_sync,
-			warp_sync: status.warp_sync,
 		}
 	}
 
@@ -510,42 +495,7 @@ where
 		self.network_service.behaviour().user_protocol().num_connected_peers()
 	}
 
-	/// Returns the number of peers we're connected to and that are being queried.
-	pub fn num_active_peers(&self) -> usize {
-		self.network_service.behaviour().user_protocol().num_active_peers()
-	}
-
-	/// Current global sync state.
-	pub fn sync_state(&self) -> SyncStatus<B> {
-		self.network_service.behaviour().user_protocol().sync_state()
-	}
-
-	/// Target sync block number.
-	pub fn best_seen_block(&self) -> Option<NumberFor<B>> {
-		self.network_service.behaviour().user_protocol().best_seen_block()
-	}
-
-	/// Number of peers participating in syncing.
-	pub fn num_sync_peers(&self) -> u32 {
-		self.network_service.behaviour().user_protocol().num_sync_peers()
-	}
-
-	/// Number of blocks in the import queue.
-	pub fn num_queued_blocks(&self) -> u32 {
-		self.network_service.behaviour().user_protocol().num_queued_blocks()
-	}
-
-	/// Returns the number of downloaded blocks.
-	pub fn num_downloaded_blocks(&self) -> usize {
-		self.network_service.behaviour().user_protocol().num_downloaded_blocks()
-	}
-
-	/// Number of active sync requests.
-	pub fn num_sync_requests(&self) -> usize {
-		self.network_service.behaviour().user_protocol().num_sync_requests()
-	}
-
-	/// Adds an address known to a node.
+	/// Adds an address for a node.
 	pub fn add_known_address(&mut self, peer_id: PeerId, addr: Multiaddr) {
 		self.network_service.behaviour_mut().add_known_address(peer_id, addr);
 	}
@@ -556,32 +506,16 @@ where
 		&self.service
 	}
 
-	/// You must call this when a new block is finalized by the client.
-	pub fn on_block_finalized(&mut self, hash: B::Hash, header: B::Header) {
-		self.network_service
-			.behaviour_mut()
-			.user_protocol_mut()
-			.on_block_finalized(hash, &header);
-	}
-
-	/// Inform the network service about new best imported block.
-	pub fn new_best_block_imported(&mut self, hash: B::Hash, number: NumberFor<B>) {
-		self.network_service
-			.behaviour_mut()
-			.user_protocol_mut()
-			.new_best_block_imported(hash, number);
-	}
-
 	/// Returns the local `PeerId`.
 	pub fn local_peer_id(&self) -> &PeerId {
-		Swarm::<Behaviour<B, Client>>::local_peer_id(&self.network_service)
+		Swarm::<Behaviour<B>>::local_peer_id(&self.network_service)
 	}
 
 	/// Returns the list of addresses we are listening on.
 	///
 	/// Does **NOT** include a trailing `/p2p/` with our `PeerId`.
 	pub fn listen_addresses(&self) -> impl Iterator<Item = &Multiaddr> {
-		Swarm::<Behaviour<B, Client>>::listeners(&self.network_service)
+		Swarm::<Behaviour<B>>::listeners(&self.network_service)
 	}
 
 	/// Get network state.
@@ -661,7 +595,7 @@ where
 				.collect()
 		};
 
-		let peer_id = Swarm::<Behaviour<B, Client>>::local_peer_id(swarm).to_base58();
+		let peer_id = Swarm::<Behaviour<B>>::local_peer_id(swarm).to_base58();
 		let listened_addresses = swarm.listeners().cloned().collect();
 		let external_addresses = swarm.external_addresses().map(|r| &r.addr).cloned().collect();
 
@@ -675,16 +609,6 @@ where
 		}
 	}
 
-	/// Get currently connected peers.
-	pub fn peers_debug_info(&mut self) -> Vec<(PeerId, PeerInfo<B>)> {
-		self.network_service
-			.behaviour_mut()
-			.user_protocol_mut()
-			.peers_info()
-			.map(|(id, info)| (*id, info.clone()))
-			.collect()
-	}
-
 	/// Removes a `PeerId` from the list of reserved peers.
 	pub fn remove_reserved_peer(&self, peer: PeerId) {
 		self.service.remove_reserved_peer(peer);
@@ -722,20 +646,6 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkService<B, H> {
 		}
 	}
 
-	/// Get connected peers debug information.
-	///
-	/// Returns an error if the `NetworkWorker` is no longer running.
-	pub async fn peers_debug_info(&self) -> Result<Vec<(PeerId, PeerInfo<B>)>, ()> {
-		let (tx, rx) = oneshot::channel();
-
-		let _ = self
-			.to_worker
-			.unbounded_send(ServiceToWorkerMsg::PeersDebugInfo { pending_response: tx });
-
-		// The channel can only be closed if the network worker no longer exists.
-		rx.await.map_err(|_| ())
-	}
-
 	/// Get the list of reserved peers.
 	///
 	/// Returns an error if the `NetworkWorker` is no longer running.
@@ -779,30 +689,6 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkService<B, H> {
 	}
 }
 
-impl<B: BlockT + 'static, H: ExHashT> sp_consensus::SyncOracle for NetworkService<B, H> {
-	fn is_major_syncing(&self) -> bool {
-		self.is_major_syncing.load(Ordering::Relaxed)
-	}
-
-	fn is_offline(&self) -> bool {
-		self.num_connected.load(Ordering::Relaxed) == 0
-	}
-}
-
-impl<B: BlockT, H: ExHashT> sc_consensus::JustificationSyncLink<B> for NetworkService<B, H> {
-	/// Request a justification for the given block from the network.
-	///
-	/// On success, the justification will be passed to the import queue that was part at
-	/// initialization as part of the configuration.
-	fn request_justification(&self, hash: &B::Hash, number: NumberFor<B>) {
-		let _ = self.chain_sync_service.request_justification(hash, number);
-	}
-
-	fn clear_justification_requests(&self) {
-		let _ = self.chain_sync_service.clear_justification_requests();
-	}
-}
-
 impl<B, H> NetworkStateInfo for NetworkService<B, H>
 where
 	B: sp_runtime::traits::Block,
@@ -856,29 +742,13 @@ where
 	}
 }
 
-impl<B, H> NetworkSyncForkRequest<B::Hash, NumberFor<B>> for NetworkService<B, H>
-where
-	B: BlockT + 'static,
-	H: ExHashT,
-{
-	/// Configure an explicit fork sync request.
-	/// Note that this function should not be used for recent blocks.
-	/// Sync should be able to download all the recent forks normally.
-	/// `set_sync_fork_request` should only be used if external code detects that there's
-	/// a stale fork missing.
-	/// Passing empty `peers` set effectively removes the sync request.
-	fn set_sync_fork_request(&self, peers: Vec<PeerId>, hash: B::Hash, number: NumberFor<B>) {
-		self.chain_sync_service.set_sync_fork_request(peers, hash, number);
-	}
-}
-
 #[async_trait::async_trait]
-impl<B, H> NetworkStatusProvider<B> for NetworkService<B, H>
+impl<B, H> NetworkStatusProvider for NetworkService<B, H>
 where
 	B: BlockT + 'static,
 	H: ExHashT,
 {
-	async fn status(&self) -> Result<NetworkStatus<B>, ()> {
+	async fn status(&self) -> Result<NetworkStatus, ()> {
 		let (tx, rx) = oneshot::channel();
 
 		let _ = self
@@ -1125,6 +995,12 @@ where
 
 		Ok(Box::new(NotificationSender { sink, protocol_name: protocol, notification_size_metric }))
 	}
+
+	fn set_notification_handshake(&self, protocol: ProtocolName, handshake: Vec<u8>) {
+		let _ = self
+			.to_worker
+			.unbounded_send(ServiceToWorkerMsg::SetNotificationHandshake(protocol, handshake));
+	}
 }
 
 #[async_trait::async_trait]
@@ -1171,22 +1047,6 @@ where
 	}
 }
 
-impl<B, H> NetworkBlock<B::Hash, NumberFor<B>> for NetworkService<B, H>
-where
-	B: BlockT + 'static,
-	H: ExHashT,
-{
-	fn announce_block(&self, hash: B::Hash, data: Option<Vec<u8>>) {
-		let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AnnounceBlock(hash, data));
-	}
-
-	fn new_best_block_imported(&self, hash: B::Hash, number: NumberFor<B>) {
-		let _ = self
-			.to_worker
-			.unbounded_send(ServiceToWorkerMsg::NewBestBlockImported(hash, number));
-	}
-}
-
 /// A `NotificationSender` allows for sending notifications to a peer with a chosen protocol.
 #[must_use]
 pub struct NotificationSender {
@@ -1257,8 +1117,7 @@ impl<'a> NotificationSenderReadyT for NotificationSenderReady<'a> {
 /// Messages sent from the `NetworkService` to the `NetworkWorker`.
 ///
 /// Each entry corresponds to a method of `NetworkService`.
-enum ServiceToWorkerMsg<B: BlockT> {
-	AnnounceBlock(B::Hash, Option<Vec<u8>>),
+enum ServiceToWorkerMsg {
 	GetValue(KademliaKey),
 	PutValue(KademliaKey, Vec<u8>),
 	AddKnownAddress(PeerId, Multiaddr),
@@ -1280,16 +1139,13 @@ enum ServiceToWorkerMsg<B: BlockT> {
 		connect: IfDisconnected,
 	},
 	NetworkStatus {
-		pending_response: oneshot::Sender<Result<NetworkStatus<B>, RequestFailure>>,
+		pending_response: oneshot::Sender<Result<NetworkStatus, RequestFailure>>,
 	},
 	NetworkState {
 		pending_response: oneshot::Sender<Result<NetworkState, RequestFailure>>,
 	},
 	DisconnectPeer(PeerId, ProtocolName),
-	NewBestBlockImported(B::Hash, NumberFor<B>),
-	PeersDebugInfo {
-		pending_response: oneshot::Sender<Vec<(PeerId, PeerInfo<B>)>>,
-	},
+	SetNotificationHandshake(ProtocolName, Vec<u8>),
 	ReservedPeers {
 		pending_response: oneshot::Sender<Vec<PeerId>>,
 	},
@@ -1299,11 +1155,10 @@ enum ServiceToWorkerMsg<B: BlockT> {
 ///
 /// You are encouraged to poll this in a separate background thread or task.
 #[must_use = "The NetworkWorker must be polled in order for the network to advance"]
-pub struct NetworkWorker<B, H, Client>
+pub struct NetworkWorker<B, H>
 where
 	B: BlockT + 'static,
 	H: ExHashT,
-	Client: HeaderBackend<B> + 'static,
 {
 	/// Updated by the `NetworkWorker` and loaded by the `NetworkService`.
 	external_addresses: Arc<Mutex<Vec<Multiaddr>>>,
@@ -1311,14 +1166,12 @@ where
 	listen_addresses: Arc<Mutex<Vec<Multiaddr>>>,
 	/// Updated by the `NetworkWorker` and loaded by the `NetworkService`.
 	num_connected: Arc<AtomicUsize>,
-	/// Updated by the `NetworkWorker` and loaded by the `NetworkService`.
-	is_major_syncing: Arc<AtomicBool>,
 	/// The network service that can be extracted and shared through the codebase.
 	service: Arc<NetworkService<B, H>>,
 	/// The *actual* network.
-	network_service: Swarm<Behaviour<B, Client>>,
+	network_service: Swarm<Behaviour<B>>,
 	/// Messages from the [`NetworkService`] that must be processed.
-	from_service: TracingUnboundedReceiver<ServiceToWorkerMsg<B>>,
+	from_service: TracingUnboundedReceiver<ServiceToWorkerMsg>,
 	/// Senders for events that happen on the network.
 	event_streams: out_events::OutChannels,
 	/// Prometheus network metrics.
@@ -1331,13 +1184,14 @@ where
 	/// Marker to pin the `H` generic. Serves no purpose except to not break backwards
 	/// compatibility.
 	_marker: PhantomData<H>,
+	/// Marker for the block type.
+	_block: PhantomData<B>,
 }
 
-impl<B, H, Client> NetworkWorker<B, H, Client>
+impl<B, H> NetworkWorker<B, H>
 where
 	B: BlockT + 'static,
 	H: ExHashT,
-	Client: HeaderBackend<B> + 'static,
 {
 	/// Run the network.
 	pub async fn run(mut self) {
@@ -1364,10 +1218,9 @@ where
 			},
 		};
 
+		// Update the variables shared with the `NetworkService`.
 		let num_connected_peers =
 			self.network_service.behaviour_mut().user_protocol_mut().num_connected_peers();
-
-		// Update the variables shared with the `NetworkService`.
 		self.num_connected.store(num_connected_peers, Ordering::Relaxed);
 		{
 			let external_addresses =
@@ -1379,16 +1232,6 @@ where
 			*self.listen_addresses.lock() = listen_addresses;
 		}
 
-		let is_major_syncing = self
-			.network_service
-			.behaviour_mut()
-			.user_protocol_mut()
-			.sync_state()
-			.state
-			.is_major_syncing();
-
-		self.is_major_syncing.store(is_major_syncing, Ordering::Relaxed);
-
 		if let Some(metrics) = self.metrics.as_ref() {
 			if let Some(buckets) = self.network_service.behaviour_mut().num_entries_per_kbucket() {
 				for (lower_ilog2_bucket_bound, num_entries) in buckets {
@@ -1420,13 +1263,8 @@ where
 	}
 
 	/// Process the next message coming from the `NetworkService`.
-	fn handle_worker_message(&mut self, msg: ServiceToWorkerMsg<B>) {
+	fn handle_worker_message(&mut self, msg: ServiceToWorkerMsg) {
 		match msg {
-			ServiceToWorkerMsg::AnnounceBlock(hash, data) => self
-				.network_service
-				.behaviour_mut()
-				.user_protocol_mut()
-				.announce_block(hash, data),
 			ServiceToWorkerMsg::GetValue(key) =>
 				self.network_service.behaviour_mut().get_value(key),
 			ServiceToWorkerMsg::PutValue(key, value) =>
@@ -1505,14 +1343,11 @@ where
 				.behaviour_mut()
 				.user_protocol_mut()
 				.disconnect_peer(&who, protocol_name),
-			ServiceToWorkerMsg::NewBestBlockImported(hash, number) => self
+			ServiceToWorkerMsg::SetNotificationHandshake(protocol, handshake) => self
 				.network_service
 				.behaviour_mut()
 				.user_protocol_mut()
-				.new_best_block_imported(hash, number),
-			ServiceToWorkerMsg::PeersDebugInfo { pending_response } => {
-				let _ = pending_response.send(self.peers_debug_info());
-			},
+				.set_notification_handshake(protocol, handshake),
 			ServiceToWorkerMsg::ReservedPeers { pending_response } => {
 				let _ =
 					pending_response.send(self.reserved_peers().map(ToOwned::to_owned).collect());
@@ -1523,7 +1358,7 @@ where
 	/// Process the next event coming from `Swarm`.
 	fn handle_swarm_event(
 		&mut self,
-		event: SwarmEvent<BehaviourOut, ConnectionHandlerErr<Behaviour<B, Client>>>,
+		event: SwarmEvent<BehaviourOut, ConnectionHandlerErr<Behaviour<B>>>,
 	) {
 		match event {
 			SwarmEvent::Behaviour(BehaviourOut::InboundRequest { protocol, result, .. }) => {
@@ -1642,6 +1477,7 @@ where
 				negotiated_fallback,
 				notifications_sink,
 				role,
+				received_handshake,
 			}) => {
 				if let Some(metrics) = self.metrics.as_ref() {
 					metrics
@@ -1660,6 +1496,7 @@ where
 					protocol,
 					negotiated_fallback,
 					role,
+					received_handshake,
 				});
 			},
 			SwarmEvent::Behaviour(BehaviourOut::NotificationStreamReplaced {
@@ -1725,12 +1562,6 @@ where
 				}
 				self.event_streams.send(Event::NotificationsReceived { remote, messages });
 			},
-			SwarmEvent::Behaviour(BehaviourOut::SyncConnected(remote)) => {
-				self.event_streams.send(Event::SyncConnected { remote });
-			},
-			SwarmEvent::Behaviour(BehaviourOut::SyncDisconnected(remote)) => {
-				self.event_streams.send(Event::SyncDisconnected { remote });
-			},
 			SwarmEvent::Behaviour(BehaviourOut::Dht(event, duration)) => {
 				if let Some(metrics) = self.metrics.as_ref() {
 					let query_type = match event {
@@ -1925,11 +1756,10 @@ where
 	}
 }
 
-impl<B, H, Client> Unpin for NetworkWorker<B, H, Client>
+impl<B, H> Unpin for NetworkWorker<B, H>
 where
 	B: BlockT + 'static,
 	H: ExHashT,
-	Client: HeaderBackend<B> + 'static,
 {
 }
 
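The new handshake-update path introduced in this file is a straightforward relay: `NetworkService::set_notification_handshake` enqueues `ServiceToWorkerMsg::SetNotificationHandshake`, the worker hands it to `Protocol::set_notification_handshake`, and `Protocol` resolves the protocol name to its peerset `SetId`. A hedged caller-side sketch; the protocol name and handshake payload below are placeholders, not taken from this patch:

	// Sketch only: updating a notification protocol's handshake via the new API.
	let protocol = ProtocolName::from("/my-chain/block-announces/1"); // placeholder name
	let handshake: Vec<u8> = vec![0u8; 4];                            // placeholder payload
	network_service.set_notification_handshake(protocol, handshake);
	// Worker side: `ServiceToWorkerMsg::SetNotificationHandshake(protocol, handshake)`
	// -> `Protocol::set_notification_handshake`, which maps the protocol name to a
	//    `SetId` and calls `set_notif_protocol_handshake` on the notifications behaviour.
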
diff --git a/substrate/client/network/src/service/metrics.rs b/substrate/client/network/src/service/metrics.rs
index ccb440b2c3e..13bc4b4e7af 100644
--- a/substrate/client/network/src/service/metrics.rs
+++ b/substrate/client/network/src/service/metrics.rs
@@ -24,7 +24,7 @@ use prometheus_endpoint::{
 use std::{
 	str,
 	sync::{
-		atomic::{AtomicBool, AtomicUsize, Ordering},
+		atomic::{AtomicUsize, Ordering},
 		Arc,
 	},
 };
@@ -34,7 +34,6 @@ pub use prometheus_endpoint::{Histogram, HistogramVec};
 /// Registers all networking metrics with the given registry.
 pub fn register(registry: &Registry, sources: MetricSources) -> Result<Metrics, PrometheusError> {
 	BandwidthCounters::register(registry, sources.bandwidth)?;
-	MajorSyncingGauge::register(registry, sources.major_syncing)?;
 	NumConnectedGauge::register(registry, sources.connected_peers)?;
 	Metrics::register(registry)
 }
@@ -42,7 +41,6 @@ pub fn register(registry: &Registry, sources: MetricSources) -> Result<Metrics,
 /// Predefined metric sources that are fed directly into prometheus.
 pub struct MetricSources {
 	pub bandwidth: Arc<BandwidthSinks>,
-	pub major_syncing: Arc<AtomicBool>,
 	pub connected_peers: Arc<AtomicUsize>,
 }
 
@@ -266,37 +264,6 @@ impl MetricSource for BandwidthCounters {
 	}
 }
 
-/// The "major syncing" metric.
-#[derive(Clone)]
-pub struct MajorSyncingGauge(Arc<AtomicBool>);
-
-impl MajorSyncingGauge {
-	/// Registers the `MajorSyncGauge` metric whose value is
-	/// obtained from the given `AtomicBool`.
-	fn register(registry: &Registry, value: Arc<AtomicBool>) -> Result<(), PrometheusError> {
-		prometheus::register(
-			SourcedGauge::new(
-				&Opts::new(
-					"substrate_sub_libp2p_is_major_syncing",
-					"Whether the node is performing a major sync or not.",
-				),
-				MajorSyncingGauge(value),
-			)?,
-			registry,
-		)?;
-
-		Ok(())
-	}
-}
-
-impl MetricSource for MajorSyncingGauge {
-	type N = u64;
-
-	fn collect(&self, mut set: impl FnMut(&[&str], Self::N)) {
-		set(&[], self.0.load(Ordering::Relaxed) as u64);
-	}
-}
-
 /// The connected peers metric.
 #[derive(Clone)]
 pub struct NumConnectedGauge(Arc<AtomicUsize>);
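
With `MajorSyncingGauge` gone, `MetricSources` carries only the bandwidth and connected-peers sources, matching the construction shown in the service.rs hunk above. A minimal sketch of the trimmed wiring; the `registry`, `bandwidth`, and `num_connected` bindings are placeholders:

	// Sketch only: registering networking metrics after this patch; `MetricSources`
	// no longer has a `major_syncing` field.
	let metrics = register(
		&registry,
		MetricSources {
			bandwidth: bandwidth.clone(),
			connected_peers: num_connected.clone(),
		},
	)?;
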
diff --git a/substrate/client/network/src/service/out_events.rs b/substrate/client/network/src/service/out_events.rs
index 99ac022c2d8..3771ea1643a 100644
--- a/substrate/client/network/src/service/out_events.rs
+++ b/substrate/client/network/src/service/out_events.rs
@@ -268,12 +268,6 @@ impl Metrics {
 			Event::Dht(_) => {
 				self.events_total.with_label_values(&["dht", "sent", name]).inc();
 			},
-			Event::SyncConnected { .. } => {
-				self.events_total.with_label_values(&["sync-connected", "sent", name]).inc();
-			},
-			Event::SyncDisconnected { .. } => {
-				self.events_total.with_label_values(&["sync-disconnected", "sent", name]).inc();
-			},
 			Event::NotificationStreamOpened { protocol, .. } => {
 				format_label("notif-open-", protocol, |protocol_label| {
 					self.events_total.with_label_values(&[protocol_label, "sent", name]).inc();
@@ -301,14 +295,6 @@ impl Metrics {
 			Event::Dht(_) => {
 				self.events_total.with_label_values(&["dht", "received", name]).inc();
 			},
-			Event::SyncConnected { .. } => {
-				self.events_total.with_label_values(&["sync-connected", "received", name]).inc();
-			},
-			Event::SyncDisconnected { .. } => {
-				self.events_total
-					.with_label_values(&["sync-disconnected", "received", name])
-					.inc();
-			},
 			Event::NotificationStreamOpened { protocol, .. } => {
 				format_label("notif-open-", protocol, |protocol_label| {
 					self.events_total.with_label_values(&[protocol_label, "received", name]).inc();
diff --git a/substrate/client/network/src/service/tests/chain_sync.rs b/substrate/client/network/src/service/tests/chain_sync.rs
deleted file mode 100644
index a369b717489..00000000000
--- a/substrate/client/network/src/service/tests/chain_sync.rs
+++ /dev/null
@@ -1,420 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-use crate::{
-	config,
-	service::tests::{TestNetworkBuilder, BLOCK_ANNOUNCE_PROTO_NAME},
-};
-
-use futures::prelude::*;
-use libp2p::PeerId;
-use sc_block_builder::BlockBuilderProvider;
-use sc_client_api::HeaderBackend;
-use sc_consensus::JustificationSyncLink;
-use sc_network_common::{
-	config::{MultiaddrWithPeerId, ProtocolId, SetConfig},
-	protocol::{event::Event, role::Roles, ProtocolName},
-	service::NetworkSyncForkRequest,
-	sync::{SyncState, SyncStatus},
-};
-use sc_network_sync::{mock::MockChainSync, service::mock::MockChainSyncInterface, ChainSync};
-use sp_core::H256;
-use sp_runtime::traits::{Block as BlockT, Header as _};
-use std::{
-	sync::{Arc, RwLock},
-	task::Poll,
-	time::Duration,
-};
-use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _};
-
-fn set_default_expecations_no_peers(
-	chain_sync: &mut MockChainSync<substrate_test_runtime_client::runtime::Block>,
-) {
-	chain_sync.expect_poll().returning(|_| Poll::Pending);
-	chain_sync.expect_status().returning(|| SyncStatus {
-		state: SyncState::Idle,
-		best_seen_block: None,
-		num_peers: 0u32,
-		queued_blocks: 0u32,
-		state_sync: None,
-		warp_sync: None,
-	});
-}
-
-#[tokio::test]
-async fn normal_network_poll_no_peers() {
-	// build `ChainSync` and set default expectations for it
-	let mut chain_sync =
-		Box::new(MockChainSync::<substrate_test_runtime_client::runtime::Block>::new());
-	set_default_expecations_no_peers(&mut chain_sync);
-
-	// build `ChainSyncInterface` provider and set no expecations for it (i.e., it cannot be
-	// called)
-	let chain_sync_service =
-		Box::new(MockChainSyncInterface::<substrate_test_runtime_client::runtime::Block>::new());
-
-	let mut network = TestNetworkBuilder::new()
-		.with_chain_sync((chain_sync, chain_sync_service))
-		.build();
-
-	// perform one action on network
-	let _ = network.network().next_action().await;
-}
-
-#[tokio::test]
-async fn request_justification() {
-	let hash = H256::random();
-	let number = 1337u64;
-
-	// build `ChainSyncInterface` provider and and expect
-	// `JustificationSyncLink::request_justification() to be called once
-	let mut chain_sync_service =
-		Box::new(MockChainSyncInterface::<substrate_test_runtime_client::runtime::Block>::new());
-
-	chain_sync_service
-		.expect_justification_sync_link_request_justification()
-		.withf(move |in_hash, in_number| &hash == in_hash && &number == in_number)
-		.once()
-		.returning(|_, _| ());
-
-	// build `ChainSync` and set default expecations for it
-	let mut chain_sync = MockChainSync::<substrate_test_runtime_client::runtime::Block>::new();
-
-	set_default_expecations_no_peers(&mut chain_sync);
-	let mut network = TestNetworkBuilder::new()
-		.with_chain_sync((Box::new(chain_sync), chain_sync_service))
-		.build();
-
-	// send "request justifiction" message and poll the network
-	network.service().request_justification(&hash, number);
-
-	// perform one action on network
-	let _ = network.network().next_action().await;
-}
-
-#[tokio::test]
-async fn clear_justification_requests() {
-	// build `ChainSyncInterface` provider and expect
-	// `JustificationSyncLink::clear_justification_requests()` to be called
-	let mut chain_sync_service =
-		Box::new(MockChainSyncInterface::<substrate_test_runtime_client::runtime::Block>::new());
-
-	chain_sync_service
-		.expect_justification_sync_link_clear_justification_requests()
-		.once()
-		.returning(|| ());
-
-	// build `ChainSync` and set default expecations for it
-	let mut chain_sync =
-		Box::new(MockChainSync::<substrate_test_runtime_client::runtime::Block>::new());
-
-	set_default_expecations_no_peers(&mut chain_sync);
-	let mut network = TestNetworkBuilder::new()
-		.with_chain_sync((chain_sync, chain_sync_service))
-		.build();
-
-	// send "request justifiction" message and poll the network
-	network.service().clear_justification_requests();
-
-	// perform one action on network
-	let _ = network.network().next_action().await;
-}
-
-#[tokio::test]
-async fn set_sync_fork_request() {
-	// build `ChainSync` and set default expectations for it
-	let mut chain_sync =
-		Box::new(MockChainSync::<substrate_test_runtime_client::runtime::Block>::new());
-	set_default_expecations_no_peers(&mut chain_sync);
-
-	// build `ChainSyncInterface` provider and verify that the `set_sync_fork_request()`
-	// call is delegated to `ChainSyncInterface` (which eventually forwards it to `ChainSync`)
-	let mut chain_sync_service =
-		MockChainSyncInterface::<substrate_test_runtime_client::runtime::Block>::new();
-
-	let hash = H256::random();
-	let number = 1337u64;
-	let peers = (0..3).map(|_| PeerId::random()).collect::<Vec<_>>();
-	let copy_peers = peers.clone();
-
-	chain_sync_service
-		.expect_set_sync_fork_request()
-		.withf(move |in_peers, in_hash, in_number| {
-			&peers == in_peers && &hash == in_hash && &number == in_number
-		})
-		.once()
-		.returning(|_, _, _| ());
-
-	let mut network = TestNetworkBuilder::new()
-		.with_chain_sync((chain_sync, Box::new(chain_sync_service)))
-		.build();
-
-	// send "set sync fork request" message and poll the network
-	network.service().set_sync_fork_request(copy_peers, hash, number);
-
-	// perform one action on network
-	let _ = network.network().next_action().await;
-}
-
-#[tokio::test]
-async fn on_block_finalized() {
-	let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0);
-	// build `ChainSyncInterface` provider and set no expecations for it (i.e., it cannot be
-	// called)
-	let chain_sync_service =
-		Box::new(MockChainSyncInterface::<substrate_test_runtime_client::runtime::Block>::new());
-
-	// build `ChainSync` and verify that call to `on_block_finalized()` is made
-	let mut chain_sync =
-		Box::new(MockChainSync::<substrate_test_runtime_client::runtime::Block>::new());
-
-	let at = client.header(client.info().best_hash).unwrap().unwrap().hash();
-	let block = client
-		.new_block_at(at, Default::default(), false)
-		.unwrap()
-		.build()
-		.unwrap()
-		.block;
-	let header = block.header.clone();
-	let block_number = *header.number();
-	let hash = block.hash();
-
-	chain_sync
-		.expect_on_block_finalized()
-		.withf(move |in_hash, in_number| &hash == in_hash && &block_number == in_number)
-		.once()
-		.returning(|_, _| ());
-
-	set_default_expecations_no_peers(&mut chain_sync);
-	let mut network = TestNetworkBuilder::new()
-		.with_client(client)
-		.with_chain_sync((chain_sync, chain_sync_service))
-		.build();
-
-	// send "set sync fork request" message and poll the network
-	network.network().on_block_finalized(hash, header);
-
-	// perform one action on network
-	let _ = network.network().next_action().await;
-}
-
-// report from mock import queue that importing a justification was not successful
-// and verify that connection to the peer is closed
-#[tokio::test]
-async fn invalid_justification_imported() {
-	struct DummyImportQueueHandle;
-
-	impl
-		sc_consensus::import_queue::ImportQueueService<
-			substrate_test_runtime_client::runtime::Block,
-		> for DummyImportQueueHandle
-	{
-		fn import_blocks(
-			&mut self,
-			_origin: sp_consensus::BlockOrigin,
-			_blocks: Vec<
-				sc_consensus::IncomingBlock<substrate_test_runtime_client::runtime::Block>,
-			>,
-		) {
-		}
-
-		fn import_justifications(
-			&mut self,
-			_who: sc_consensus::import_queue::RuntimeOrigin,
-			_hash: substrate_test_runtime_client::runtime::Hash,
-			_number: sp_runtime::traits::NumberFor<substrate_test_runtime_client::runtime::Block>,
-			_justifications: sp_runtime::Justifications,
-		) {
-		}
-	}
-
-	struct DummyImportQueue(
-		Arc<
-			RwLock<
-				Option<(
-					PeerId,
-					substrate_test_runtime_client::runtime::Hash,
-					sp_runtime::traits::NumberFor<substrate_test_runtime_client::runtime::Block>,
-				)>,
-			>,
-		>,
-		DummyImportQueueHandle,
-	);
-
-	#[async_trait::async_trait]
-	impl sc_consensus::ImportQueue<substrate_test_runtime_client::runtime::Block> for DummyImportQueue {
-		fn poll_actions(
-			&mut self,
-			_cx: &mut futures::task::Context,
-			link: &mut dyn sc_consensus::Link<substrate_test_runtime_client::runtime::Block>,
-		) {
-			if let Some((peer, hash, number)) = *self.0.read().unwrap() {
-				link.justification_imported(peer, &hash, number, false);
-			}
-		}
-
-		fn service(
-			&self,
-		) -> Box<
-			dyn sc_consensus::import_queue::ImportQueueService<
-				substrate_test_runtime_client::runtime::Block,
-			>,
-		> {
-			Box::new(DummyImportQueueHandle {})
-		}
-
-		fn service_ref(
-			&mut self,
-		) -> &mut dyn sc_consensus::import_queue::ImportQueueService<
-			substrate_test_runtime_client::runtime::Block,
-		> {
-			&mut self.1
-		}
-
-		async fn run(
-			self,
-			_link: Box<dyn sc_consensus::Link<substrate_test_runtime_client::runtime::Block>>,
-		) {
-		}
-	}
-
-	let justification_info = Arc::new(RwLock::new(None));
-	let listen_addr = config::build_multiaddr![Memory(rand::random::<u64>())];
-
-	let (service1, mut event_stream1) = TestNetworkBuilder::new()
-		.with_import_queue(Box::new(DummyImportQueue(
-			justification_info.clone(),
-			DummyImportQueueHandle {},
-		)))
-		.with_listen_addresses(vec![listen_addr.clone()])
-		.build()
-		.start_network();
-
-	let (service2, mut event_stream2) = TestNetworkBuilder::new()
-		.with_set_config(SetConfig {
-			reserved_nodes: vec![MultiaddrWithPeerId {
-				multiaddr: listen_addr,
-				peer_id: service1.local_peer_id,
-			}],
-			..Default::default()
-		})
-		.build()
-		.start_network();
-
-	async fn wait_for_events(stream: &mut (impl Stream<Item = Event> + std::marker::Unpin)) {
-		let mut notif_received = false;
-		let mut sync_received = false;
-		while !notif_received || !sync_received {
-			match stream.next().await.unwrap() {
-				Event::NotificationStreamOpened { .. } => notif_received = true,
-				Event::SyncConnected { .. } => sync_received = true,
-				_ => {},
-			};
-		}
-	}
-
-	wait_for_events(&mut event_stream1).await;
-	wait_for_events(&mut event_stream2).await;
-
-	{
-		let mut info = justification_info.write().unwrap();
-		*info = Some((service2.local_peer_id, H256::random(), 1337u64));
-	}
-
-	let wait_disconnection = async {
-		while !std::matches!(event_stream1.next().await, Some(Event::SyncDisconnected { .. })) {}
-	};
-
-	if tokio::time::timeout(Duration::from_secs(5), wait_disconnection).await.is_err() {
-		panic!("did not receive disconnection event in time");
-	}
-}
-
-#[tokio::test]
-async fn disconnect_peer_using_chain_sync_handle() {
-	let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0);
-	let listen_addr = config::build_multiaddr![Memory(rand::random::<u64>())];
-
-	let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new());
-	let (chain_sync_network_provider, chain_sync_network_handle) =
-		sc_network_sync::service::network::NetworkServiceProvider::new();
-	let handle_clone = chain_sync_network_handle.clone();
-
-	let (chain_sync, chain_sync_service, _) = ChainSync::new(
-		sc_network_common::sync::SyncMode::Full,
-		client.clone(),
-		ProtocolId::from("test-protocol-name"),
-		&Some(String::from("test-fork-id")),
-		Roles::from(&config::Role::Full),
-		Box::new(sp_consensus::block_validation::DefaultBlockAnnounceValidator),
-		1u32,
-		None,
-		None,
-		chain_sync_network_handle.clone(),
-		import_queue,
-		ProtocolName::from("block-request"),
-		ProtocolName::from("state-request"),
-		None,
-	)
-	.unwrap();
-
-	let (node1, mut event_stream1) = TestNetworkBuilder::new()
-		.with_listen_addresses(vec![listen_addr.clone()])
-		.with_chain_sync((Box::new(chain_sync), Box::new(chain_sync_service)))
-		.with_chain_sync_network((chain_sync_network_provider, chain_sync_network_handle))
-		.with_client(client.clone())
-		.build()
-		.start_network();
-
-	let (node2, mut event_stream2) = TestNetworkBuilder::new()
-		.with_set_config(SetConfig {
-			reserved_nodes: vec![MultiaddrWithPeerId {
-				multiaddr: listen_addr,
-				peer_id: node1.local_peer_id,
-			}],
-			..Default::default()
-		})
-		.with_client(client.clone())
-		.build()
-		.start_network();
-
-	async fn wait_for_events(stream: &mut (impl Stream<Item = Event> + std::marker::Unpin)) {
-		let mut notif_received = false;
-		let mut sync_received = false;
-		while !notif_received || !sync_received {
-			match stream.next().await.unwrap() {
-				Event::NotificationStreamOpened { .. } => notif_received = true,
-				Event::SyncConnected { .. } => sync_received = true,
-				_ => {},
-			};
-		}
-	}
-
-	wait_for_events(&mut event_stream1).await;
-	wait_for_events(&mut event_stream2).await;
-
-	handle_clone.disconnect_peer(node2.local_peer_id, BLOCK_ANNOUNCE_PROTO_NAME.into());
-
-	let wait_disconnection = async {
-		while !std::matches!(event_stream1.next().await, Some(Event::SyncDisconnected { .. })) {}
-	};
-
-	if tokio::time::timeout(Duration::from_secs(5), wait_disconnection).await.is_err() {
-		panic!("did not receive disconnection event in time");
-	}
-}
diff --git a/substrate/client/network/src/service/tests/mod.rs b/substrate/client/network/src/service/tests/mod.rs
index 3233b15840a..3ac7829003f 100644
--- a/substrate/client/network/src/service/tests/mod.rs
+++ b/substrate/client/network/src/service/tests/mod.rs
@@ -16,44 +16,36 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-use crate::{config, ChainSyncInterface, NetworkService, NetworkWorker};
+use crate::{config, NetworkService, NetworkWorker};
 
 use futures::prelude::*;
 use libp2p::Multiaddr;
-use sc_client_api::{BlockBackend, HeaderBackend};
 use sc_consensus::{ImportQueue, Link};
 use sc_network_common::{
-	config::{
-		NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, ProtocolId, SetConfig,
-		TransportConfig,
-	},
+	config::{NonDefaultSetConfig, ProtocolId, SetConfig, TransportConfig},
 	protocol::{event::Event, role::Roles},
 	service::NetworkEventStream,
-	sync::{message::BlockAnnouncesHandshake, ChainSync as ChainSyncT},
 };
 use sc_network_light::light_client_requests::handler::LightClientRequestHandler;
 use sc_network_sync::{
 	block_request_handler::BlockRequestHandler,
+	engine::SyncingEngine,
 	service::network::{NetworkServiceHandle, NetworkServiceProvider},
 	state_request_handler::StateRequestHandler,
-	ChainSync,
 };
-use sp_runtime::traits::{Block as BlockT, Header as _, Zero};
+use sp_runtime::traits::{Block as BlockT, Header as _};
 use std::sync::Arc;
 use substrate_test_runtime_client::{
 	runtime::{Block as TestBlock, Hash as TestHash},
-	TestClient, TestClientBuilder, TestClientBuilderExt as _,
+	TestClientBuilder, TestClientBuilderExt as _,
 };
 
-#[cfg(test)]
-mod chain_sync;
 #[cfg(test)]
 mod service;
 
-type TestNetworkWorker = NetworkWorker<TestBlock, TestHash, TestClient>;
+type TestNetworkWorker = NetworkWorker<TestBlock, TestHash>;
 type TestNetworkService = NetworkService<TestBlock, TestHash>;
 
-const BLOCK_ANNOUNCE_PROTO_NAME: &str = "/block-announces";
 const PROTOCOL_NAME: &str = "/foo";
 
 struct TestNetwork {
@@ -65,14 +57,6 @@ impl TestNetwork {
 		Self { network }
 	}
 
-	pub fn service(&self) -> &Arc<TestNetworkService> {
-		&self.network.service()
-	}
-
-	pub fn network(&mut self) -> &mut TestNetworkWorker {
-		&mut self.network
-	}
-
 	pub fn start_network(
 		self,
 	) -> (Arc<TestNetworkService>, (impl Stream<Item = Event> + std::marker::Unpin)) {
@@ -92,7 +76,6 @@ struct TestNetworkBuilder {
 	client: Option<Arc<substrate_test_runtime_client::TestClient>>,
 	listen_addresses: Vec<Multiaddr>,
 	set_config: Option<SetConfig>,
-	chain_sync: Option<(Box<dyn ChainSyncT<TestBlock>>, Box<dyn ChainSyncInterface<TestBlock>>)>,
 	chain_sync_network: Option<(NetworkServiceProvider, NetworkServiceHandle)>,
 	config: Option<config::NetworkConfiguration>,
 }
@@ -105,17 +88,11 @@ impl TestNetworkBuilder {
 			client: None,
 			listen_addresses: Vec::new(),
 			set_config: None,
-			chain_sync: None,
 			chain_sync_network: None,
 			config: None,
 		}
 	}
 
-	pub fn with_client(mut self, client: Arc<substrate_test_runtime_client::TestClient>) -> Self {
-		self.client = Some(client);
-		self
-	}
-
 	pub fn with_config(mut self, config: config::NetworkConfiguration) -> Self {
 		self.config = Some(config);
 		self
@@ -131,27 +108,6 @@ impl TestNetworkBuilder {
 		self
 	}
 
-	pub fn with_chain_sync(
-		mut self,
-		chain_sync: (Box<dyn ChainSyncT<TestBlock>>, Box<dyn ChainSyncInterface<TestBlock>>),
-	) -> Self {
-		self.chain_sync = Some(chain_sync);
-		self
-	}
-
-	pub fn with_chain_sync_network(
-		mut self,
-		chain_sync_network: (NetworkServiceProvider, NetworkServiceHandle),
-	) -> Self {
-		self.chain_sync_network = Some(chain_sync_network);
-		self
-	}
-
-	pub fn with_import_queue(mut self, import_queue: Box<dyn ImportQueue<TestBlock>>) -> Self {
-		self.import_queue = Some(import_queue);
-		self
-	}
-
 	pub fn build(mut self) -> TestNetwork {
 		let client = self.client.as_mut().map_or(
 			Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0),
@@ -240,73 +196,29 @@ impl TestNetworkBuilder {
 			protocol_config
 		};
 
-		let block_announce_config = NonDefaultSetConfig {
-			notifications_protocol: BLOCK_ANNOUNCE_PROTO_NAME.into(),
-			fallback_names: vec![],
-			max_notification_size: 1024 * 1024,
-			handshake: Some(NotificationHandshake::new(BlockAnnouncesHandshake::<
-				substrate_test_runtime_client::runtime::Block,
-			>::build(
-				Roles::from(&config::Role::Full),
-				client.info().best_number,
-				client.info().best_hash,
-				client
-					.block_hash(Zero::zero())
-					.ok()
-					.flatten()
-					.expect("Genesis block exists; qed"),
-			))),
-			set_config: SetConfig {
-				in_peers: 0,
-				out_peers: 0,
-				reserved_nodes: Vec::new(),
-				non_reserved_mode: NonReservedPeerMode::Deny,
-			},
-		};
-
 		let (chain_sync_network_provider, chain_sync_network_handle) =
 			self.chain_sync_network.unwrap_or(NetworkServiceProvider::new());
 
-		let (chain_sync, chain_sync_service) = self.chain_sync.unwrap_or({
-			let (chain_sync, chain_sync_service, _) = ChainSync::new(
-				match network_config.sync_mode {
-					config::SyncMode::Full => sc_network_common::sync::SyncMode::Full,
-					config::SyncMode::Fast { skip_proofs, storage_chain_mode } =>
-						sc_network_common::sync::SyncMode::LightState {
-							skip_proofs,
-							storage_chain_mode,
-						},
-					config::SyncMode::Warp => sc_network_common::sync::SyncMode::Warp,
-				},
-				client.clone(),
-				protocol_id.clone(),
-				&fork_id,
-				Roles::from(&config::Role::Full),
-				Box::new(sp_consensus::block_validation::DefaultBlockAnnounceValidator),
-				network_config.max_parallel_downloads,
-				None,
-				None,
-				chain_sync_network_handle,
-				import_queue.service(),
-				block_request_protocol_config.name.clone(),
-				state_request_protocol_config.name.clone(),
-				None,
-			)
-			.unwrap();
-
-			if let None = self.link {
-				self.link = Some(Box::new(chain_sync_service.clone()));
-			}
-			(Box::new(chain_sync), Box::new(chain_sync_service))
-		});
-		let mut link = self
-			.link
-			.unwrap_or(Box::new(sc_network_sync::service::mock::MockChainSyncInterface::new()));
-
+		let (engine, chain_sync_service, block_announce_config) = SyncingEngine::new(
+			Roles::from(&config::Role::Full),
+			client.clone(),
+			None,
+			&network_config,
+			protocol_id.clone(),
+			&None,
+			Box::new(sp_consensus::block_validation::DefaultBlockAnnounceValidator),
+			None,
+			chain_sync_network_handle,
+			import_queue.service(),
+			block_request_protocol_config.name.clone(),
+			state_request_protocol_config.name.clone(),
+			None,
+		)
+		.unwrap();
+		let mut link = self.link.unwrap_or(Box::new(chain_sync_service.clone()));
 		let worker = NetworkWorker::<
 			substrate_test_runtime_client::runtime::Block,
 			substrate_test_runtime_client::runtime::Hash,
-			substrate_test_runtime_client::TestClient,
 		>::new(config::Params {
 			block_announce_config,
 			role: config::Role::Full,
@@ -317,8 +229,6 @@ impl TestNetworkBuilder {
 			chain: client.clone(),
 			protocol_id,
 			fork_id,
-			chain_sync,
-			chain_sync_service,
 			metrics_registry: None,
 			request_response_protocol_configs: [
 				block_request_protocol_config,
@@ -343,6 +253,8 @@ impl TestNetworkBuilder {
 				tokio::time::sleep(std::time::Duration::from_millis(250)).await;
 			}
 		});
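+		// Spawn `SyncingEngine` as a separate task, fed by the network's "syncing" event stream.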
+		let stream = worker.service().event_stream("syncing");
+		tokio::spawn(engine.run(stream));
 
 		TestNetwork::new(worker)
 	}
diff --git a/substrate/client/network/src/service/tests/service.rs b/substrate/client/network/src/service/tests/service.rs
index 1c7b32ff0dd..9c4c0ad6e0c 100644
--- a/substrate/client/network/src/service/tests/service.rs
+++ b/substrate/client/network/src/service/tests/service.rs
@@ -32,7 +32,6 @@ type TestNetworkService = NetworkService<
 	substrate_test_runtime_client::runtime::Hash,
 >;
 
-const BLOCK_ANNOUNCE_PROTO_NAME: &str = "/block-announces";
 const PROTOCOL_NAME: &str = "/foo";
 
 /// Builds two nodes and their associated events stream.
@@ -196,10 +195,6 @@ async fn notifications_state_consistent() {
 			},
 
 			// Add new events here.
-			future::Either::Left(Event::SyncConnected { .. }) => {},
-			future::Either::Right(Event::SyncConnected { .. }) => {},
-			future::Either::Left(Event::SyncDisconnected { .. }) => {},
-			future::Either::Right(Event::SyncDisconnected { .. }) => {},
 			future::Either::Left(Event::Dht(_)) => {},
 			future::Either::Right(Event::Dht(_)) => {},
 		};
@@ -208,6 +203,7 @@ async fn notifications_state_consistent() {
 
 #[tokio::test]
 async fn lots_of_incoming_peers_works() {
+	sp_tracing::try_init_simple();
 	let listen_addr = config::build_multiaddr![Memory(rand::random::<u64>())];
 
 	let (main_node, _) = TestNetworkBuilder::new()
@@ -241,6 +237,7 @@ async fn lots_of_incoming_peers_works() {
 			let mut timer = futures_timer::Delay::new(Duration::from_secs(3600 * 24 * 7)).fuse();
 
 			let mut event_stream = event_stream.fuse();
+			let mut sync_protocol_name = None;
 			loop {
 				futures::select! {
 					_ = timer => {
@@ -249,15 +246,21 @@ async fn lots_of_incoming_peers_works() {
 					}
 					ev = event_stream.next() => {
 						match ev.unwrap() {
-							Event::NotificationStreamOpened { remote, .. } => {
+							Event::NotificationStreamOpened { protocol, remote, .. } => {
+								if sync_protocol_name.is_none() {
+									sync_protocol_name = Some(protocol.clone());
+								}
+
 								assert_eq!(remote, main_node_peer_id);
 								// Test succeeds after 5 seconds. This timer is here in order to
 								// detect a potential problem after opening.
 								timer = futures_timer::Delay::new(Duration::from_secs(5)).fuse();
 							}
-							Event::NotificationStreamClosed { .. } => {
-								// Test failed.
-								panic!();
+							Event::NotificationStreamClosed { protocol, .. } => {
+								if Some(protocol) != sync_protocol_name {
+									// Test failed.
+									panic!();
+								}
 							}
 							_ => {}
 						}
@@ -282,10 +285,19 @@ async fn notifications_back_pressure() {
 
 	let receiver = tokio::spawn(async move {
 		let mut received_notifications = 0;
+		let mut sync_protocol_name = None;
 
 		while received_notifications < TOTAL_NOTIFS {
 			match events_stream2.next().await.unwrap() {
-				Event::NotificationStreamClosed { .. } => panic!(),
+				Event::NotificationStreamOpened { protocol, .. } =>
+					if sync_protocol_name.is_none() {
+						sync_protocol_name = Some(protocol);
+					},
+				Event::NotificationStreamClosed { protocol, .. } => {
+					if Some(&protocol) != sync_protocol_name.as_ref() {
+						panic!()
+					}
+				},
 				Event::NotificationsReceived { messages, .. } =>
 					for message in messages {
 						assert_eq!(message.0, PROTOCOL_NAME.into());
@@ -387,42 +399,6 @@ async fn fallback_name_working() {
 	receiver.await.unwrap();
 }
 
-// Disconnect peer by calling `Protocol::disconnect_peer()` with the supplied block announcement
-// protocol name and verify that `SyncDisconnected` event is emitted
-#[tokio::test]
-async fn disconnect_sync_peer_using_block_announcement_protocol_name() {
-	let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto();
-
-	async fn wait_for_events(stream: &mut (impl Stream<Item = Event> + std::marker::Unpin)) {
-		let mut notif_received = false;
-		let mut sync_received = false;
-
-		while !notif_received || !sync_received {
-			match stream.next().await.unwrap() {
-				Event::NotificationStreamOpened { .. } => notif_received = true,
-				Event::SyncConnected { .. } => sync_received = true,
-				_ => {},
-			};
-		}
-	}
-
-	wait_for_events(&mut events_stream1).await;
-	wait_for_events(&mut events_stream2).await;
-
-	// disconnect peer using `PROTOCOL_NAME`, verify `NotificationStreamClosed` event is emitted
-	node2.disconnect_peer(node1.local_peer_id(), PROTOCOL_NAME.into());
-	assert!(std::matches!(
-		events_stream2.next().await,
-		Some(Event::NotificationStreamClosed { .. })
-	));
-	let _ = events_stream2.next().await; // ignore the reopen event
-
-	// now disconnect using `BLOCK_ANNOUNCE_PROTO_NAME`, verify that `SyncDisconnected` is
-	// emitted
-	node2.disconnect_peer(node1.local_peer_id(), BLOCK_ANNOUNCE_PROTO_NAME.into());
-	assert!(std::matches!(events_stream2.next().await, Some(Event::SyncDisconnected { .. })));
-}
-
 #[tokio::test]
 #[should_panic(expected = "don't match the transport")]
 async fn ensure_listen_addresses_consistent_with_transport_memory() {
diff --git a/substrate/client/network/sync/Cargo.toml b/substrate/client/network/sync/Cargo.toml
index bd51776d6f8..52ab0d15e16 100644
--- a/substrate/client/network/sync/Cargo.toml
+++ b/substrate/client/network/sync/Cargo.toml
@@ -20,6 +20,7 @@ array-bytes = "4.1"
 async-trait = "0.1.58"
 codec = { package = "parity-scale-codec", version = "3.2.2", features = ["derive"] }
 futures = "0.3.21"
+futures-timer = "3.0.2"
 libp2p = "0.50.0"
 log = "0.4.17"
 lru = "0.8.1"
diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs
new file mode 100644
index 00000000000..25cd3968c29
--- /dev/null
+++ b/substrate/client/network/sync/src/engine.rs
@@ -0,0 +1,924 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2017-2023 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! `SyncingEngine` is the actor responsible for syncing the Substrate chain
+//! to tip and for keeping it up to date with network updates.
+
+use crate::{
+	service::{self, chain_sync::ToServiceCommand},
+	ChainSync, ClientError, SyncingService,
+};
+
+use futures::{FutureExt, Stream, StreamExt};
+use libp2p::PeerId;
+use lru::LruCache;
+use prometheus_endpoint::{
+	register, Gauge, GaugeVec, MetricSource, Opts, PrometheusError, Registry, SourcedGauge, U64,
+};
+
+use codec::{Decode, DecodeAll, Encode};
+use futures_timer::Delay;
+use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider};
+use sc_consensus::import_queue::ImportQueueService;
+use sc_network_common::{
+	config::{
+		NetworkConfiguration, NonDefaultSetConfig, ProtocolId, SyncMode as SyncOperationMode,
+	},
+	protocol::{event::Event, role::Roles, ProtocolName},
+	sync::{
+		message::{
+			generic::{BlockData, BlockResponse},
+			BlockAnnounce, BlockAnnouncesHandshake, BlockState,
+		},
+		warp::WarpSyncParams,
+		BadPeer, ChainSync as ChainSyncT, ExtendedPeerInfo, PollBlockAnnounceValidation, SyncEvent,
+		SyncMode,
+	},
+	utils::LruHashSet,
+};
+use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender};
+use sp_blockchain::HeaderMetadata;
+use sp_consensus::block_validation::BlockAnnounceValidator;
+use sp_runtime::{
+	traits::{Block as BlockT, CheckedSub, Header, NumberFor, Zero},
+	SaturatedConversion,
+};
+
+use std::{
+	collections::{HashMap, HashSet},
+	num::NonZeroUsize,
+	pin::Pin,
+	sync::{
+		atomic::{AtomicBool, AtomicUsize, Ordering},
+		Arc,
+	},
+	task::Poll,
+};
+
+/// Interval at which we perform time-based maintenance.
+const TICK_TIMEOUT: std::time::Duration = std::time::Duration::from_millis(1100);
+
+/// When a light node connects to a full node that is behind the light node by at least
+/// `LIGHT_MAXIMAL_BLOCKS_DIFFERENCE` blocks, we consider the full node not useful
+/// and disconnect it to free a connection slot.
+const LIGHT_MAXIMAL_BLOCKS_DIFFERENCE: u64 = 8192;
+
+/// Maximum number of known block hashes to keep for a peer.
+const MAX_KNOWN_BLOCKS: usize = 1024; // ~32kb per peer + LruHashSet overhead
+
+mod rep {
+	use sc_peerset::ReputationChange as Rep;
+	/// Reputation change when we are a light client and a peer is behind us.
+	pub const PEER_BEHIND_US_LIGHT: Rep = Rep::new(-(1 << 8), "Useless for a light peer");
+	/// We received a message that failed to decode.
+	pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message");
+	/// Peer has different genesis.
+	pub const GENESIS_MISMATCH: Rep = Rep::new_fatal("Genesis mismatch");
+	/// Peer role does not match (e.g. light peer connecting to another light peer).
+	pub const BAD_ROLE: Rep = Rep::new_fatal("Unsupported role");
+	/// Peer sent us a block announcement that failed validation.
+	pub const BAD_BLOCK_ANNOUNCEMENT: Rep = Rep::new(-(1 << 12), "Bad block announcement");
+}
+
+struct Metrics {
+	peers: Gauge<U64>,
+	queued_blocks: Gauge<U64>,
+	fork_targets: Gauge<U64>,
+	justifications: GaugeVec<U64>,
+}
+
+impl Metrics {
+	fn register(r: &Registry, major_syncing: Arc<AtomicBool>) -> Result<Self, PrometheusError> {
+		let _ = MajorSyncingGauge::register(r, major_syncing)?;
+		Ok(Self {
+			peers: {
+				let g = Gauge::new("substrate_sync_peers", "Number of peers we sync with")?;
+				register(g, r)?
+			},
+			queued_blocks: {
+				let g =
+					Gauge::new("substrate_sync_queued_blocks", "Number of blocks in import queue")?;
+				register(g, r)?
+			},
+			fork_targets: {
+				let g = Gauge::new("substrate_sync_fork_targets", "Number of fork sync targets")?;
+				register(g, r)?
+			},
+			justifications: {
+				let g = GaugeVec::new(
+					Opts::new(
+						"substrate_sync_extra_justifications",
+						"Number of extra justifications requests",
+					),
+					&["status"],
+				)?;
+				register(g, r)?
+			},
+		})
+	}
+}
+
+/// The "major syncing" metric.
+#[derive(Clone)]
+pub struct MajorSyncingGauge(Arc<AtomicBool>);
+
+impl MajorSyncingGauge {
+	/// Registers the [`MajorSyncingGauge`] metric whose value is
+	/// obtained from the given `AtomicBool`.
+	fn register(registry: &Registry, value: Arc<AtomicBool>) -> Result<(), PrometheusError> {
+		prometheus_endpoint::register(
+			SourcedGauge::new(
+				&Opts::new(
+					"substrate_sub_libp2p_is_major_syncing",
+					"Whether the node is performing a major sync or not.",
+				),
+				MajorSyncingGauge(value),
+			)?,
+			registry,
+		)?;
+
+		Ok(())
+	}
+}
+
+impl MetricSource for MajorSyncingGauge {
+	type N = u64;
+
+	fn collect(&self, mut set: impl FnMut(&[&str], Self::N)) {
+		set(&[], self.0.load(Ordering::Relaxed) as u64);
+	}
+}
+
+/// Peer information
+#[derive(Debug)]
+pub struct Peer<B: BlockT> {
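+	/// General information about the peer: roles, best hash and best number.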
+	pub info: ExtendedPeerInfo<B>,
+	/// Holds a set of blocks known to this peer.
+	pub known_blocks: LruHashSet<B::Hash>,
+}
+
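+/// The syncing engine, an asynchronous runner that owns [`ChainSync`] and drives the syncing
+/// protocol independently of `sc-network`.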
+pub struct SyncingEngine<B: BlockT, Client> {
+	/// State machine that handles the list of in-progress requests. Only full node peers are
+	/// registered.
+	chain_sync: ChainSync<B, Client>,
+
+	/// Blockchain client.
+	client: Arc<Client>,
+
+	/// Number of peers we're connected to.
+	num_connected: Arc<AtomicUsize>,
+
+	/// Are we actively catching up with the chain?
+	is_major_syncing: Arc<AtomicBool>,
+
+	/// Network service.
+	network_service: service::network::NetworkServiceHandle,
+
+	/// Channel for receiving service commands
+	service_rx: TracingUnboundedReceiver<ToServiceCommand<B>>,
+
+	/// Assigned roles.
+	roles: Roles,
+
+	/// Genesis hash.
+	genesis_hash: B::Hash,
+
+	/// Set of channels for other protocols that have subscribed to syncing events.
+	event_streams: Vec<TracingUnboundedSender<SyncEvent>>,
+
+	/// Interval at which we call `tick`.
+	tick_timeout: Delay,
+
+	/// All connected peers. Contains both full and light node peers.
+	peers: HashMap<PeerId, Peer<B>>,
+
+	/// List of nodes for which we perform additional logging because they are important for the
+	/// user.
+	important_peers: HashSet<PeerId>,
+
+	/// Actual list of connected no-slot nodes.
+	default_peers_set_no_slot_connected_peers: HashSet<PeerId>,
+
+	/// List of nodes that should never occupy peer slots.
+	default_peers_set_no_slot_peers: HashSet<PeerId>,
+
+	/// Value that was passed as part of the configuration. Used to cap the number of full
+	/// nodes.
+	default_peers_set_num_full: usize,
+
+	/// Number of slots to allocate to light nodes.
+	default_peers_set_num_light: usize,
+
+	/// A cache for the data that was associated to a block announcement.
+	block_announce_data_cache: LruCache<B::Hash, Vec<u8>>,
+
+	/// The `PeerId`'s of all boot nodes.
+	boot_node_ids: HashSet<PeerId>,
+
+	/// Protocol name used for block announcements
+	block_announce_protocol_name: ProtocolName,
+
+	/// Prometheus metrics.
+	metrics: Option<Metrics>,
+}
+
+impl<B: BlockT, Client> SyncingEngine<B, Client>
+where
+	Client: HeaderBackend<B>
+		+ BlockBackend<B>
+		+ HeaderMetadata<B, Error = sp_blockchain::Error>
+		+ ProofProvider<B>
+		+ Send
+		+ Sync
+		+ 'static,
+{
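+	/// Create a new syncing engine, returning it together with a [`SyncingService`] handle and
+	/// the block announcement protocol configuration.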
+	pub fn new(
+		roles: Roles,
+		client: Arc<Client>,
+		metrics_registry: Option<&Registry>,
+		network_config: &NetworkConfiguration,
+		protocol_id: ProtocolId,
+		fork_id: &Option<String>,
+		block_announce_validator: Box<dyn BlockAnnounceValidator<B> + Send>,
+		warp_sync_params: Option<WarpSyncParams<B>>,
+		network_service: service::network::NetworkServiceHandle,
+		import_queue: Box<dyn ImportQueueService<B>>,
+		block_request_protocol_name: ProtocolName,
+		state_request_protocol_name: ProtocolName,
+		warp_sync_protocol_name: Option<ProtocolName>,
+	) -> Result<(Self, SyncingService<B>, NonDefaultSetConfig), ClientError> {
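+		// Map the configured sync mode onto `ChainSync`'s own `SyncMode`.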
+		let mode = match network_config.sync_mode {
+			SyncOperationMode::Full => SyncMode::Full,
+			SyncOperationMode::Fast { skip_proofs, storage_chain_mode } =>
+				SyncMode::LightState { skip_proofs, storage_chain_mode },
+			SyncOperationMode::Warp => SyncMode::Warp,
+		};
+		let max_parallel_downloads = network_config.max_parallel_downloads;
+		let cache_capacity = NonZeroUsize::new(
+			(network_config.default_peers_set.in_peers as usize +
+				network_config.default_peers_set.out_peers as usize)
+				.max(1),
+		)
+		.expect("cache capacity is not zero");
+		let important_peers = {
+			let mut imp_p = HashSet::new();
+			for reserved in &network_config.default_peers_set.reserved_nodes {
+				imp_p.insert(reserved.peer_id);
+			}
+			for reserved in network_config
+				.extra_sets
+				.iter()
+				.flat_map(|s| s.set_config.reserved_nodes.iter())
+			{
+				imp_p.insert(reserved.peer_id);
+			}
+			imp_p.shrink_to_fit();
+			imp_p
+		};
+		let boot_node_ids = {
+			let mut list = HashSet::new();
+			for node in &network_config.boot_nodes {
+				list.insert(node.peer_id);
+			}
+			list.shrink_to_fit();
+			list
+		};
+		let default_peers_set_no_slot_peers = {
+			let mut no_slot_p: HashSet<PeerId> = network_config
+				.default_peers_set
+				.reserved_nodes
+				.iter()
+				.map(|reserved| reserved.peer_id)
+				.collect();
+			no_slot_p.shrink_to_fit();
+			no_slot_p
+		};
+		let default_peers_set_num_full = network_config.default_peers_set_num_full as usize;
+		let default_peers_set_num_light = {
+			let total = network_config.default_peers_set.out_peers +
+				network_config.default_peers_set.in_peers;
+			total.saturating_sub(network_config.default_peers_set_num_full) as usize
+		};
+
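+		// `ChainSync` is constructed here and driven exclusively from `SyncingEngine::poll`.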
+		let (chain_sync, block_announce_config) = ChainSync::new(
+			mode,
+			client.clone(),
+			protocol_id,
+			fork_id,
+			roles,
+			block_announce_validator,
+			max_parallel_downloads,
+			warp_sync_params,
+			metrics_registry,
+			network_service.clone(),
+			import_queue,
+			block_request_protocol_name,
+			state_request_protocol_name,
+			warp_sync_protocol_name,
+		)?;
+
+		let block_announce_protocol_name = block_announce_config.notifications_protocol.clone();
+		let (tx, service_rx) = tracing_unbounded("mpsc_chain_sync", 100_000);
+		let num_connected = Arc::new(AtomicUsize::new(0));
+		let is_major_syncing = Arc::new(AtomicBool::new(false));
+		let genesis_hash = client
+			.block_hash(0u32.into())
+			.ok()
+			.flatten()
+			.expect("Genesis block exists; qed");
+
+		Ok((
+			Self {
+				roles,
+				client,
+				chain_sync,
+				network_service,
+				peers: HashMap::new(),
+				block_announce_data_cache: LruCache::new(cache_capacity),
+				block_announce_protocol_name,
+				num_connected: num_connected.clone(),
+				is_major_syncing: is_major_syncing.clone(),
+				service_rx,
+				genesis_hash,
+				important_peers,
+				default_peers_set_no_slot_connected_peers: HashSet::new(),
+				boot_node_ids,
+				default_peers_set_no_slot_peers,
+				default_peers_set_num_full,
+				default_peers_set_num_light,
+				event_streams: Vec::new(),
+				tick_timeout: Delay::new(TICK_TIMEOUT),
+				metrics: if let Some(r) = metrics_registry {
+					match Metrics::register(r, is_major_syncing.clone()) {
+						Ok(metrics) => Some(metrics),
+						Err(err) => {
+							log::error!(target: "sync", "Failed to register metrics {err:?}");
+							None
+						},
+					}
+				} else {
+					None
+				},
+			},
+			SyncingService::new(tx, num_connected, is_major_syncing),
+			block_announce_config,
+		))
+	}
+
+	/// Report Prometheus metrics.
+	pub fn report_metrics(&self) {
+		if let Some(metrics) = &self.metrics {
+			let n = u64::try_from(self.peers.len()).unwrap_or(std::u64::MAX);
+			metrics.peers.set(n);
+
+			let m = self.chain_sync.metrics();
+
+			metrics.fork_targets.set(m.fork_targets.into());
+			metrics.queued_blocks.set(m.queued_blocks.into());
+
+			metrics
+				.justifications
+				.with_label_values(&["pending"])
+				.set(m.justifications.pending_requests.into());
+			metrics
+				.justifications
+				.with_label_values(&["active"])
+				.set(m.justifications.active_requests.into());
+			metrics
+				.justifications
+				.with_label_values(&["failed"])
+				.set(m.justifications.failed_requests.into());
+			metrics
+				.justifications
+				.with_label_values(&["importing"])
+				.set(m.justifications.importing_requests.into());
+		}
+	}
+
+	fn update_peer_info(&mut self, who: &PeerId) {
+		if let Some(info) = self.chain_sync.peer_info(who) {
+			if let Some(ref mut peer) = self.peers.get_mut(who) {
+				peer.info.best_hash = info.best_hash;
+				peer.info.best_number = info.best_number;
+			}
+		}
+	}
+
+	/// Process the result of the block announce validation.
+	pub fn process_block_announce_validation_result(
+		&mut self,
+		validation_result: PollBlockAnnounceValidation<B::Header>,
+	) {
+		let (header, _is_best, who) = match validation_result {
+			PollBlockAnnounceValidation::Skip => return,
+			PollBlockAnnounceValidation::Nothing { is_best: _, who, announce } => {
+				self.update_peer_info(&who);
+
+				if let Some(data) = announce.data {
+					if !data.is_empty() {
+						self.block_announce_data_cache.put(announce.header.hash(), data);
+					}
+				}
+
+				return
+			},
+			PollBlockAnnounceValidation::ImportHeader { announce, is_best, who } => {
+				self.update_peer_info(&who);
+
+				if let Some(data) = announce.data {
+					if !data.is_empty() {
+						self.block_announce_data_cache.put(announce.header.hash(), data);
+					}
+				}
+
+				(announce.header, is_best, who)
+			},
+			PollBlockAnnounceValidation::Failure { who, disconnect } => {
+				if disconnect {
+					self.network_service
+						.disconnect_peer(who, self.block_announce_protocol_name.clone());
+				}
+
+				self.network_service.report_peer(who, rep::BAD_BLOCK_ANNOUNCEMENT);
+				return
+			},
+		};
+
+		// To import the header of the announced block, construct the response to the block request
+		// that would normally have been sent over the network (but was not in this case).
+		let blocks_to_import = self.chain_sync.on_block_data(
+			&who,
+			None,
+			BlockResponse {
+				id: 0,
+				blocks: vec![BlockData {
+					hash: header.hash(),
+					header: Some(header),
+					body: None,
+					indexed_body: None,
+					receipt: None,
+					message_queue: None,
+					justification: None,
+					justifications: None,
+				}],
+			},
+		);
+
+		self.chain_sync.process_block_response_data(blocks_to_import);
+	}
+
+	/// Push a block announce validation.
+	///
+	/// It is required that [`ChainSync::poll_block_announce_validation`] is
+	/// called later to check for finished validations. The result of the validation
+	/// needs to be passed to [`SyncingEngine::process_block_announce_validation_result`]
+	/// to finish the processing.
+	///
+	/// # Note
+	///
+	/// This will internally create a future, but this future will not be registered
+	/// in the task before being polled once. So, it is required to call
+	/// [`ChainSync::poll_block_announce_validation`] to ensure that the future is
+	/// registered properly and will wake up the task when being ready.
+	pub fn push_block_announce_validation(
+		&mut self,
+		who: PeerId,
+		announce: BlockAnnounce<B::Header>,
+	) {
+		let hash = announce.header.hash();
+
+		let peer = match self.peers.get_mut(&who) {
+			Some(p) => p,
+			None => {
+				log::error!(target: "sync", "Received block announce from disconnected peer {}", who);
+				debug_assert!(false);
+				return
+			},
+		};
+		peer.known_blocks.insert(hash);
+
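+		// Only full peers take part in block announce validation; for light peers we merely track
+		// the blocks they know about.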
+		if peer.info.roles.is_full() {
+			let is_best = match announce.state.unwrap_or(BlockState::Best) {
+				BlockState::Best => true,
+				BlockState::Normal => false,
+			};
+
+			self.chain_sync.push_block_announce_validation(who, hash, announce, is_best);
+		}
+	}
+
+	/// Make sure an important block is propagated to peers.
+	///
+	/// In chain-based consensus, we often need to make sure non-best forks are
+	/// at least temporarily synced.
+	pub fn announce_block(&mut self, hash: B::Hash, data: Option<Vec<u8>>) {
+		let header = match self.client.header(hash) {
+			Ok(Some(header)) => header,
+			Ok(None) => {
+				log::warn!(target: "sync", "Trying to announce unknown block: {}", hash);
+				return
+			},
+			Err(e) => {
+				log::warn!(target: "sync", "Error reading block header {}: {}", hash, e);
+				return
+			},
+		};
+
+		// don't announce genesis block since it will be ignored
+		if header.number().is_zero() {
+			return
+		}
+
+		let is_best = self.client.info().best_hash == hash;
+		log::debug!(target: "sync", "Reannouncing block {:?} is_best: {}", hash, is_best);
+
+		let data = data
+			.or_else(|| self.block_announce_data_cache.get(&hash).cloned())
+			.unwrap_or_default();
+
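+		// Announce the block only to peers that do not already know about it.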
+		for (who, ref mut peer) in self.peers.iter_mut() {
+			let inserted = peer.known_blocks.insert(hash);
+			if inserted {
+				log::trace!(target: "sync", "Announcing block {:?} to {}", hash, who);
+				let message = BlockAnnounce {
+					header: header.clone(),
+					state: if is_best { Some(BlockState::Best) } else { Some(BlockState::Normal) },
+					data: Some(data.clone()),
+				};
+
+				self.network_service.write_notification(
+					*who,
+					self.block_announce_protocol_name.clone(),
+					message.encode(),
+				);
+			}
+		}
+	}
+
+	/// Inform sync about new best imported block.
+	pub fn new_best_block_imported(&mut self, hash: B::Hash, number: NumberFor<B>) {
+		log::debug!(target: "sync", "New best block imported {:?}/#{}", hash, number);
+
+		self.chain_sync.update_chain_info(&hash, number);
+		self.network_service.set_notification_handshake(
+			self.block_announce_protocol_name.clone(),
+			BlockAnnouncesHandshake::<B>::build(self.roles, number, hash, self.genesis_hash)
+				.encode(),
+		)
+	}
+
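+	/// Run the syncing engine by repeatedly polling it against the provided network event stream.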
+	pub async fn run(mut self, mut stream: Pin<Box<dyn Stream<Item = Event> + Send>>) {
+		loop {
+			futures::future::poll_fn(|cx| self.poll(cx, &mut stream)).await;
+		}
+	}
+
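+	/// Advance the syncing engine: process network events, service commands and `ChainSync`
+	/// itself. Always returns `Poll::Pending`.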
+	pub fn poll(
+		&mut self,
+		cx: &mut std::task::Context,
+		event_stream: &mut Pin<Box<dyn Stream<Item = Event> + Send>>,
+	) -> Poll<()> {
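+		// Mirror the connected peer count and major-syncing flag into the atomics shared with
+		// `SyncingService`.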
+		self.num_connected.store(self.peers.len(), Ordering::Relaxed);
+		self.is_major_syncing
+			.store(self.chain_sync.status().state.is_major_syncing(), Ordering::Relaxed);
+
+		while let Poll::Ready(()) = self.tick_timeout.poll_unpin(cx) {
+			self.report_metrics();
+			self.tick_timeout.reset(TICK_TIMEOUT);
+		}
+
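+		// Process notification and substream events received from the network event stream.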
+		while let Poll::Ready(Some(event)) = event_stream.poll_next_unpin(cx) {
+			match event {
+				Event::NotificationStreamOpened {
+					remote, protocol, received_handshake, ..
+				} => {
+					if protocol != self.block_announce_protocol_name {
+						continue
+					}
+
+					match <BlockAnnouncesHandshake<B> as DecodeAll>::decode_all(
+						&mut &received_handshake[..],
+					) {
+						Ok(handshake) => {
+							if self.on_sync_peer_connected(remote, handshake).is_err() {
+								log::debug!(
+									target: "sync",
+									"Failed to register peer {remote:?}: {received_handshake:?}",
+								);
+							}
+						},
+						Err(err) => {
+							log::debug!(
+								target: "sync",
+								"Couldn't decode handshake sent by {}: {:?}: {}",
+								remote,
+								received_handshake,
+								err,
+							);
+							self.network_service.report_peer(remote, rep::BAD_MESSAGE);
+						},
+					}
+				},
+				Event::NotificationStreamClosed { remote, protocol } => {
+					if protocol != self.block_announce_protocol_name {
+						continue
+					}
+
+					if self.on_sync_peer_disconnected(remote).is_err() {
+						log::trace!(
+							target: "sync",
+							"Disconnected peer which had earlier been refused by on_sync_peer_connected {}",
+							remote
+						);
+					}
+				},
+				Event::NotificationsReceived { remote, messages } => {
+					for (protocol, message) in messages {
+						if protocol != self.block_announce_protocol_name {
+							continue
+						}
+
+						if self.peers.contains_key(&remote) {
+							if let Ok(announce) = BlockAnnounce::decode(&mut message.as_ref()) {
+								self.push_block_announce_validation(remote, announce);
+
+								// Make sure that the newly added block announce validation future
+								// was polled once to be registered in the task.
+								if let Poll::Ready(res) =
+									self.chain_sync.poll_block_announce_validation(cx)
+								{
+									self.process_block_announce_validation_result(res)
+								}
+							} else {
+								log::warn!(target: "sub-libp2p", "Failed to decode block announce");
+							}
+						} else {
+							log::trace!(
+								target: "sync",
+								"Received sync for peer earlier refused by sync layer: {}",
+								remote
+							);
+						}
+					}
+				},
+				_ => {},
+			}
+		}
+
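+		// Process commands sent by other subsystems through `SyncingService`.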
+		while let Poll::Ready(Some(event)) = self.service_rx.poll_next_unpin(cx) {
+			match event {
+				ToServiceCommand::SetSyncForkRequest(peers, hash, number) => {
+					self.chain_sync.set_sync_fork_request(peers, &hash, number);
+				},
+				ToServiceCommand::EventStream(tx) => self.event_streams.push(tx),
+				ToServiceCommand::RequestJustification(hash, number) =>
+					self.chain_sync.request_justification(&hash, number),
+				ToServiceCommand::ClearJustificationRequests =>
+					self.chain_sync.clear_justification_requests(),
+				ToServiceCommand::BlocksProcessed(imported, count, results) => {
+					for result in self.chain_sync.on_blocks_processed(imported, count, results) {
+						match result {
+							Ok((id, req)) => self.chain_sync.send_block_request(id, req),
+							Err(BadPeer(id, repu)) => {
+								self.network_service
+									.disconnect_peer(id, self.block_announce_protocol_name.clone());
+								self.network_service.report_peer(id, repu)
+							},
+						}
+					}
+				},
+				ToServiceCommand::JustificationImported(peer, hash, number, success) => {
+					self.chain_sync.on_justification_import(hash, number, success);
+					if !success {
+						log::info!(target: "sync", "💔 Invalid justification provided by {} for #{}", peer, hash);
+						self.network_service
+							.disconnect_peer(peer, self.block_announce_protocol_name.clone());
+						self.network_service.report_peer(
+							peer,
+							sc_peerset::ReputationChange::new_fatal("Invalid justification"),
+						);
+					}
+				},
+				ToServiceCommand::AnnounceBlock(hash, data) => self.announce_block(hash, data),
+				ToServiceCommand::NewBestBlockImported(hash, number) =>
+					self.new_best_block_imported(hash, number),
+				ToServiceCommand::Status(tx) => {
+					let _ = tx.send(self.chain_sync.status());
+				},
+				ToServiceCommand::NumActivePeers(tx) => {
+					let _ = tx.send(self.chain_sync.num_active_peers());
+				},
+				ToServiceCommand::SyncState(tx) => {
+					let _ = tx.send(self.chain_sync.status());
+				},
+				ToServiceCommand::BestSeenBlock(tx) => {
+					let _ = tx.send(self.chain_sync.status().best_seen_block);
+				},
+				ToServiceCommand::NumSyncPeers(tx) => {
+					let _ = tx.send(self.chain_sync.status().num_peers);
+				},
+				ToServiceCommand::NumQueuedBlocks(tx) => {
+					let _ = tx.send(self.chain_sync.status().queued_blocks);
+				},
+				ToServiceCommand::NumDownloadedBlocks(tx) => {
+					let _ = tx.send(self.chain_sync.num_downloaded_blocks());
+				},
+				ToServiceCommand::NumSyncRequests(tx) => {
+					let _ = tx.send(self.chain_sync.num_sync_requests());
+				},
+				ToServiceCommand::PeersInfo(tx) => {
+					let peers_info =
+						self.peers.iter().map(|(id, peer)| (*id, peer.info.clone())).collect();
+					let _ = tx.send(peers_info);
+				},
+				ToServiceCommand::OnBlockFinalized(hash, header) =>
+					self.chain_sync.on_block_finalized(&hash, *header.number()),
+			}
+		}
+
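+		// Finally, drive `ChainSync` itself and process any completed block announce validations.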
+		while let Poll::Ready(result) = self.chain_sync.poll(cx) {
+			self.process_block_announce_validation_result(result);
+		}
+
+		Poll::Pending
+	}
+
+	/// Called when a peer is disconnecting.
+	///
+	/// Returns `Ok(())` if the peer had previously been accepted by `on_sync_peer_connected`,
+	/// `Err(())` otherwise.
+	pub fn on_sync_peer_disconnected(&mut self, peer: PeerId) -> Result<(), ()> {
+		if self.important_peers.contains(&peer) {
+			log::warn!(target: "sync", "Reserved peer {} disconnected", peer);
+		} else {
+			log::debug!(target: "sync", "{} disconnected", peer);
+		}
+
+		if self.peers.remove(&peer).is_some() {
+			self.chain_sync.peer_disconnected(&peer);
+			self.default_peers_set_no_slot_connected_peers.remove(&peer);
+			self.event_streams
+				.retain(|stream| stream.unbounded_send(SyncEvent::PeerDisconnected(peer)).is_ok());
+			Ok(())
+		} else {
+			Err(())
+		}
+	}
+
+	/// Called on the first connection between two peers on the default set, after their handshake
+	/// exchange.
+	///
+	/// Returns `Ok` if the handshake is accepted and the peer is added to the list of peers we
+	/// sync from.
+	pub fn on_sync_peer_connected(
+		&mut self,
+		who: PeerId,
+		status: BlockAnnouncesHandshake<B>,
+	) -> Result<(), ()> {
+		log::trace!(target: "sync", "New peer {} {:?}", who, status);
+
+		if self.peers.contains_key(&who) {
+			log::error!(target: "sync", "Called on_sync_peer_connected with already connected peer {}", who);
+			debug_assert!(false);
+			return Err(())
+		}
+
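+		// A mismatching genesis hash is fatal: report the peer and disconnect it.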
+		if status.genesis_hash != self.genesis_hash {
+			self.network_service.report_peer(who, rep::GENESIS_MISMATCH);
+			self.network_service
+				.disconnect_peer(who, self.block_announce_protocol_name.clone());
+
+			if self.important_peers.contains(&who) {
+				log::error!(
+					target: "sync",
+					"Reserved peer id `{}` is on a different chain (our genesis: {} theirs: {})",
+					who,
+					self.genesis_hash,
+					status.genesis_hash,
+				);
+			} else if self.boot_node_ids.contains(&who) {
+				log::error!(
+					target: "sync",
+					"Bootnode with peer id `{}` is on a different chain (our genesis: {} theirs: {})",
+					who,
+					self.genesis_hash,
+					status.genesis_hash,
+				);
+			} else {
+				log::debug!(
+					target: "sync",
+					"Peer is on different chain (our genesis: {} theirs: {})",
+					self.genesis_hash, status.genesis_hash
+				);
+			}
+
+			return Err(())
+		}
+
+		if self.roles.is_light() {
+			// we're not interested in light peers
+			if status.roles.is_light() {
+				log::debug!(target: "sync", "Peer {} is unable to serve light requests", who);
+				self.network_service.report_peer(who, rep::BAD_ROLE);
+				self.network_service
+					.disconnect_peer(who, self.block_announce_protocol_name.clone());
+				return Err(())
+			}
+
+			// we're not interested in peers that are far behind us
+			let self_best_block = self.client.info().best_number;
+			let blocks_difference = self_best_block
+				.checked_sub(&status.best_number)
+				.unwrap_or_else(Zero::zero)
+				.saturated_into::<u64>();
+			if blocks_difference > LIGHT_MAXIMAL_BLOCKS_DIFFERENCE {
+				log::debug!(target: "sync", "Peer {} is far behind us and will be unable to serve light requests", who);
+				self.network_service.report_peer(who, rep::PEER_BEHIND_US_LIGHT);
+				self.network_service
+					.disconnect_peer(who, self.block_announce_protocol_name.clone());
+				return Err(())
+			}
+		}
+
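+		// Reserved ("no-slot") peers of the default set do not count towards the regular
+		// full-node slot limit.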
+		let no_slot_peer = self.default_peers_set_no_slot_peers.contains(&who);
+		let this_peer_reserved_slot: usize = if no_slot_peer { 1 } else { 0 };
+
+		if status.roles.is_full() &&
+			self.chain_sync.num_peers() >=
+				self.default_peers_set_num_full +
+					self.default_peers_set_no_slot_connected_peers.len() +
+					this_peer_reserved_slot
+		{
+			log::debug!(target: "sync", "Too many full nodes, rejecting {}", who);
+			self.network_service
+				.disconnect_peer(who, self.block_announce_protocol_name.clone());
+			return Err(())
+		}
+
+		if status.roles.is_light() &&
+			(self.peers.len() - self.chain_sync.num_peers()) >= self.default_peers_set_num_light
+		{
+			// Make sure that not all slots are occupied by light clients.
+			log::debug!(target: "sync", "Too many light nodes, rejecting {}", who);
+			self.network_service
+				.disconnect_peer(who, self.block_announce_protocol_name.clone());
+			return Err(())
+		}
+
+		let peer = Peer {
+			info: ExtendedPeerInfo {
+				roles: status.roles,
+				best_hash: status.best_hash,
+				best_number: status.best_number,
+			},
+			known_blocks: LruHashSet::new(
+				NonZeroUsize::new(MAX_KNOWN_BLOCKS).expect("Constant is nonzero"),
+			),
+		};
+
+		let req = if peer.info.roles.is_full() {
+			match self.chain_sync.new_peer(who, peer.info.best_hash, peer.info.best_number) {
+				Ok(req) => req,
+				Err(BadPeer(id, repu)) => {
+					self.network_service
+						.disconnect_peer(id, self.block_announce_protocol_name.clone());
+					self.network_service.report_peer(id, repu);
+					return Err(())
+				},
+			}
+		} else {
+			None
+		};
+
+		log::debug!(target: "sync", "Connected {}", who);
+
+		self.peers.insert(who, peer);
+		if no_slot_peer {
+			self.default_peers_set_no_slot_connected_peers.insert(who);
+		}
+
+		if let Some(req) = req {
+			self.chain_sync.send_block_request(who, req);
+		}
+
+		self.event_streams
+			.retain(|stream| stream.unbounded_send(SyncEvent::PeerConnected(who)).is_ok());
+
+		Ok(())
+	}
+}
diff --git a/substrate/client/network/sync/src/lib.rs b/substrate/client/network/sync/src/lib.rs
index f710215e741..23269b02bd1 100644
--- a/substrate/client/network/sync/src/lib.rs
+++ b/substrate/client/network/sync/src/lib.rs
@@ -30,20 +30,18 @@
 
 pub mod block_request_handler;
 pub mod blocks;
+pub mod engine;
 pub mod mock;
 mod schema;
 pub mod service;
 pub mod state;
 pub mod state_request_handler;
-#[cfg(test)]
-mod tests;
 pub mod warp;
 pub mod warp_request_handler;
 
 use crate::{
 	blocks::BlockCollection,
 	schema::v1::{StateRequest, StateResponse},
-	service::chain_sync::{ChainSyncInterfaceHandle, ToServiceCommand},
 	state::StateSync,
 	warp::{WarpProofImportResult, WarpSync},
 };
@@ -78,7 +76,7 @@ use sc_network_common::{
 		SyncState, SyncStatus,
 	},
 };
-use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver};
+pub use service::chain_sync::SyncingService;
 use sp_arithmetic::traits::Saturating;
 use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata};
 use sp_consensus::{
@@ -327,8 +325,6 @@ pub struct ChainSync<B: BlockT, Client> {
 	import_existing: bool,
 	/// Gap download process.
 	gap_sync: Option<GapSync<B>>,
-	/// Channel for receiving service commands
-	service_rx: TracingUnboundedReceiver<ToServiceCommand<B>>,
 	/// Handle for communicating with `NetworkService`
 	network_service: service::network::NetworkServiceHandle,
 	/// Protocol name used for block announcements
@@ -1326,47 +1322,6 @@ where
 		&mut self,
 		cx: &mut std::task::Context,
 	) -> Poll<PollBlockAnnounceValidation<B::Header>> {
-		while let Poll::Ready(Some(event)) = self.service_rx.poll_next_unpin(cx) {
-			match event {
-				ToServiceCommand::SetSyncForkRequest(peers, hash, number) => {
-					self.set_sync_fork_request(peers, &hash, number);
-				},
-				ToServiceCommand::RequestJustification(hash, number) =>
-					self.request_justification(&hash, number),
-				ToServiceCommand::ClearJustificationRequests => self.clear_justification_requests(),
-				ToServiceCommand::BlocksProcessed(imported, count, results) => {
-					for result in self.on_blocks_processed(imported, count, results) {
-						match result {
-							Ok((id, req)) => self.send_block_request(id, req),
-							Err(BadPeer(id, repu)) => {
-								self.network_service
-									.disconnect_peer(id, self.block_announce_protocol_name.clone());
-								self.network_service.report_peer(id, repu)
-							},
-						}
-					}
-				},
-				ToServiceCommand::JustificationImported(peer, hash, number, success) => {
-					self.on_justification_import(hash, number, success);
-					if !success {
-						info!(target: "sync", "💔 Invalid justification provided by {} for #{}", peer, hash);
-						self.network_service
-							.disconnect_peer(peer, self.block_announce_protocol_name.clone());
-						self.network_service.report_peer(
-							peer,
-							sc_peerset::ReputationChange::new_fatal("Invalid justification"),
-						);
-					}
-				},
-				ToServiceCommand::BlockFinalized(hash, number) => {
-					self.on_block_finalized(&hash, number);
-				},
-				ToServiceCommand::Status { pending_response } => {
-					let _ = pending_response.send(self.status());
-				},
-			}
-		}
-
 		// Should be called before `process_outbound_requests` to ensure
 		// that a potential target block is directly leading to requests.
 		if let Some(warp_sync) = &mut self.warp_sync {
@@ -1448,8 +1403,7 @@ where
 		block_request_protocol_name: ProtocolName,
 		state_request_protocol_name: ProtocolName,
 		warp_sync_protocol_name: Option<ProtocolName>,
-	) -> Result<(Self, ChainSyncInterfaceHandle<B>, NonDefaultSetConfig), ClientError> {
-		let (tx, service_rx) = tracing_unbounded("mpsc_chain_sync", 100_000);
+	) -> Result<(Self, NonDefaultSetConfig), ClientError> {
 		let block_announce_config = Self::get_block_announce_proto_config(
 			protocol_id,
 			fork_id,
@@ -1483,7 +1437,6 @@ where
 			warp_sync: None,
 			import_existing: false,
 			gap_sync: None,
-			service_rx,
 			network_service,
 			block_request_protocol_name,
 			state_request_protocol_name,
@@ -1509,7 +1462,7 @@ where
 		};
 
 		sync.reset_sync_start_point()?;
-		Ok((sync, ChainSyncInterfaceHandle::new(tx), block_announce_config))
+		Ok((sync, block_announce_config))
 	}
 
 	/// Returns the median seen block number.
@@ -3231,7 +3184,7 @@ mod test {
 		let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new());
 		let (_chain_sync_network_provider, chain_sync_network_handle) =
 			NetworkServiceProvider::new();
-		let (mut sync, _, _) = ChainSync::new(
+		let (mut sync, _) = ChainSync::new(
 			SyncMode::Full,
 			client.clone(),
 			ProtocolId::from("test-protocol-name"),
@@ -3297,7 +3250,7 @@ mod test {
 		let (_chain_sync_network_provider, chain_sync_network_handle) =
 			NetworkServiceProvider::new();
 
-		let (mut sync, _, _) = ChainSync::new(
+		let (mut sync, _) = ChainSync::new(
 			SyncMode::Full,
 			client.clone(),
 			ProtocolId::from("test-protocol-name"),
@@ -3478,7 +3431,7 @@ mod test {
 		let (_chain_sync_network_provider, chain_sync_network_handle) =
 			NetworkServiceProvider::new();
 
-		let (mut sync, _, _) = ChainSync::new(
+		let (mut sync, _) = ChainSync::new(
 			SyncMode::Full,
 			client.clone(),
 			ProtocolId::from("test-protocol-name"),
@@ -3604,7 +3557,7 @@ mod test {
 			NetworkServiceProvider::new();
 		let info = client.info();
 
-		let (mut sync, _, _) = ChainSync::new(
+		let (mut sync, _) = ChainSync::new(
 			SyncMode::Full,
 			client.clone(),
 			ProtocolId::from("test-protocol-name"),
@@ -3760,7 +3713,7 @@ mod test {
 
 		let info = client.info();
 
-		let (mut sync, _, _) = ChainSync::new(
+		let (mut sync, _) = ChainSync::new(
 			SyncMode::Full,
 			client.clone(),
 			ProtocolId::from("test-protocol-name"),
@@ -3901,7 +3854,7 @@ mod test {
 
 		let info = client.info();
 
-		let (mut sync, _, _) = ChainSync::new(
+		let (mut sync, _) = ChainSync::new(
 			SyncMode::Full,
 			client.clone(),
 			ProtocolId::from("test-protocol-name"),
@@ -4042,7 +3995,7 @@ mod test {
 		let mut client = Arc::new(TestClientBuilder::new().build());
 		let blocks = (0..3).map(|_| build_block(&mut client, None, false)).collect::<Vec<_>>();
 
-		let (mut sync, _, _) = ChainSync::new(
+		let (mut sync, _) = ChainSync::new(
 			SyncMode::Full,
 			client.clone(),
 			ProtocolId::from("test-protocol-name"),
@@ -4087,7 +4040,7 @@ mod test {
 
 		let empty_client = Arc::new(TestClientBuilder::new().build());
 
-		let (mut sync, _, _) = ChainSync::new(
+		let (mut sync, _) = ChainSync::new(
 			SyncMode::Full,
 			empty_client.clone(),
 			ProtocolId::from("test-protocol-name"),
diff --git a/substrate/client/network/sync/src/service/chain_sync.rs b/substrate/client/network/sync/src/service/chain_sync.rs
index 4d47899a317..99b4197740e 100644
--- a/substrate/client/network/sync/src/service/chain_sync.rs
+++ b/substrate/client/network/sync/src/service/chain_sync.rs
@@ -16,15 +16,26 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-use futures::channel::oneshot;
+use futures::{channel::oneshot, Stream};
 use libp2p::PeerId;
+
 use sc_consensus::{BlockImportError, BlockImportStatus, JustificationSyncLink, Link};
-use sc_network_common::{service::NetworkSyncForkRequest, sync::SyncStatus};
-use sc_utils::mpsc::TracingUnboundedSender;
+use sc_network_common::{
+	service::{NetworkBlock, NetworkSyncForkRequest},
+	sync::{ExtendedPeerInfo, SyncEvent, SyncEventStream, SyncStatus, SyncStatusProvider},
+};
+use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender};
 use sp_runtime::traits::{Block as BlockT, NumberFor};
 
+use std::{
+	pin::Pin,
+	sync::{
+		atomic::{AtomicBool, AtomicUsize, Ordering},
+		Arc,
+	},
+};
+
 /// Commands send to `ChainSync`
-#[derive(Debug)]
 pub enum ToServiceCommand<B: BlockT> {
 	SetSyncForkRequest(Vec<PeerId>, B::Hash, NumberFor<B>),
 	RequestJustification(B::Hash, NumberFor<B>),
@@ -35,27 +46,105 @@ pub enum ToServiceCommand<B: BlockT> {
 		Vec<(Result<BlockImportStatus<NumberFor<B>>, BlockImportError>, B::Hash)>,
 	),
 	JustificationImported(PeerId, B::Hash, NumberFor<B>, bool),
-	BlockFinalized(B::Hash, NumberFor<B>),
-	Status {
-		pending_response: oneshot::Sender<SyncStatus<B>>,
-	},
+	AnnounceBlock(B::Hash, Option<Vec<u8>>),
+	NewBestBlockImported(B::Hash, NumberFor<B>),
+	EventStream(TracingUnboundedSender<SyncEvent>),
+	Status(oneshot::Sender<SyncStatus<B>>),
+	NumActivePeers(oneshot::Sender<usize>),
+	SyncState(oneshot::Sender<SyncStatus<B>>),
+	BestSeenBlock(oneshot::Sender<Option<NumberFor<B>>>),
+	NumSyncPeers(oneshot::Sender<u32>),
+	NumQueuedBlocks(oneshot::Sender<u32>),
+	NumDownloadedBlocks(oneshot::Sender<usize>),
+	NumSyncRequests(oneshot::Sender<usize>),
+	PeersInfo(oneshot::Sender<Vec<(PeerId, ExtendedPeerInfo<B>)>>),
+	OnBlockFinalized(B::Hash, B::Header),
 }
 
 /// Handle for communicating with `ChainSync` asynchronously
 #[derive(Clone)]
-pub struct ChainSyncInterfaceHandle<B: BlockT> {
+pub struct SyncingService<B: BlockT> {
 	tx: TracingUnboundedSender<ToServiceCommand<B>>,
+	/// Number of peers we're connected to.
+	num_connected: Arc<AtomicUsize>,
+	/// Are we actively catching up with the chain?
+	is_major_syncing: Arc<AtomicBool>,
 }
 
-impl<B: BlockT> ChainSyncInterfaceHandle<B> {
+impl<B: BlockT> SyncingService<B> {
 	/// Create new handle
-	pub fn new(tx: TracingUnboundedSender<ToServiceCommand<B>>) -> Self {
-		Self { tx }
+	pub fn new(
+		tx: TracingUnboundedSender<ToServiceCommand<B>>,
+		num_connected: Arc<AtomicUsize>,
+		is_major_syncing: Arc<AtomicBool>,
+	) -> Self {
+		Self { tx, num_connected, is_major_syncing }
 	}
 
-	/// Notify ChainSync about finalized block
-	pub fn on_block_finalized(&self, hash: B::Hash, number: NumberFor<B>) {
-		let _ = self.tx.unbounded_send(ToServiceCommand::BlockFinalized(hash, number));
+	/// Get the number of active peers.
+	pub async fn num_active_peers(&self) -> Result<usize, oneshot::Canceled> {
+		let (tx, rx) = oneshot::channel();
+		let _ = self.tx.unbounded_send(ToServiceCommand::NumActivePeers(tx));
+
+		rx.await
+	}
+
+	/// Get best seen block.
+	pub async fn best_seen_block(&self) -> Result<Option<NumberFor<B>>, oneshot::Canceled> {
+		let (tx, rx) = oneshot::channel();
+		let _ = self.tx.unbounded_send(ToServiceCommand::BestSeenBlock(tx));
+
+		rx.await
+	}
+
+	/// Get the number of sync peers.
+	pub async fn num_sync_peers(&self) -> Result<u32, oneshot::Canceled> {
+		let (tx, rx) = oneshot::channel();
+		let _ = self.tx.unbounded_send(ToServiceCommand::NumSyncPeers(tx));
+
+		rx.await
+	}
+
+	/// Get the number of queued blocks.
+	pub async fn num_queued_blocks(&self) -> Result<u32, oneshot::Canceled> {
+		let (tx, rx) = oneshot::channel();
+		let _ = self.tx.unbounded_send(ToServiceCommand::NumQueuedBlocks(tx));
+
+		rx.await
+	}
+
+	/// Get the number of downloaded blocks.
+	pub async fn num_downloaded_blocks(&self) -> Result<usize, oneshot::Canceled> {
+		let (tx, rx) = oneshot::channel();
+		let _ = self.tx.unbounded_send(ToServiceCommand::NumDownloadedBlocks(tx));
+
+		rx.await
+	}
+
+	/// Get the number of sync requests.
+	pub async fn num_sync_requests(&self) -> Result<usize, oneshot::Canceled> {
+		let (tx, rx) = oneshot::channel();
+		let _ = self.tx.unbounded_send(ToServiceCommand::NumSyncRequests(tx));
+
+		rx.await
+	}
+
+	/// Get peer information.
+	pub async fn peers_info(
+		&self,
+	) -> Result<Vec<(PeerId, ExtendedPeerInfo<B>)>, oneshot::Canceled> {
+		let (tx, rx) = oneshot::channel();
+		let _ = self.tx.unbounded_send(ToServiceCommand::PeersInfo(tx));
+
+		rx.await
+	}
+
+	/// Notify the `SyncingEngine` that a block has been finalized.
+	pub fn on_block_finalized(&self, hash: B::Hash, header: B::Header) {
+		let _ = self.tx.unbounded_send(ToServiceCommand::OnBlockFinalized(hash, header));
 	}
 
 	/// Get sync status
@@ -63,15 +152,13 @@ impl<B: BlockT> ChainSyncInterfaceHandle<B> {
 	/// Returns an error if `ChainSync` has terminated.
 	pub async fn status(&self) -> Result<SyncStatus<B>, ()> {
 		let (tx, rx) = oneshot::channel();
-		let _ = self.tx.unbounded_send(ToServiceCommand::Status { pending_response: tx });
+		let _ = self.tx.unbounded_send(ToServiceCommand::Status(tx));
 
 		rx.await.map_err(|_| ())
 	}
 }
 
-impl<B: BlockT + 'static> NetworkSyncForkRequest<B::Hash, NumberFor<B>>
-	for ChainSyncInterfaceHandle<B>
-{
+impl<B: BlockT + 'static> NetworkSyncForkRequest<B::Hash, NumberFor<B>> for SyncingService<B> {
 	/// Configure an explicit fork sync request.
 	///
 	/// Note that this function should not be used for recent blocks.
@@ -87,7 +174,7 @@ impl<B: BlockT + 'static> NetworkSyncForkRequest<B::Hash, NumberFor<B>>
 	}
 }
 
-impl<B: BlockT> JustificationSyncLink<B> for ChainSyncInterfaceHandle<B> {
+impl<B: BlockT> JustificationSyncLink<B> for SyncingService<B> {
 	/// Request a justification for the given block from the network.
 	///
 	/// On success, the justification will be passed to the import queue that was part at
@@ -101,7 +188,18 @@ impl<B: BlockT> JustificationSyncLink<B> for ChainSyncInterfaceHandle<B> {
 	}
 }
 
-impl<B: BlockT> Link<B> for ChainSyncInterfaceHandle<B> {
+#[async_trait::async_trait]
+impl<B: BlockT> SyncStatusProvider<B> for SyncingService<B> {
+	/// Get high-level view of the syncing status.
+	async fn status(&self) -> Result<SyncStatus<B>, ()> {
+		let (rtx, rrx) = oneshot::channel();
+
+		let _ = self.tx.unbounded_send(ToServiceCommand::Status(rtx));
+		rrx.await.map_err(|_| ())
+	}
+}
+
+impl<B: BlockT> Link<B> for SyncingService<B> {
 	fn blocks_processed(
 		&mut self,
 		imported: usize,
@@ -129,3 +227,32 @@ impl<B: BlockT> Link<B> for ChainSyncInterfaceHandle<B> {
 		let _ = self.tx.unbounded_send(ToServiceCommand::RequestJustification(*hash, number));
 	}
 }
+
+impl<B: BlockT> SyncEventStream for SyncingService<B> {
+	/// Get syncing event stream.
+	fn event_stream(&self, name: &'static str) -> Pin<Box<dyn Stream<Item = SyncEvent> + Send>> {
+		let (tx, rx) = tracing_unbounded(name, 100_000);
+		let _ = self.tx.unbounded_send(ToServiceCommand::EventStream(tx));
+		Box::pin(rx)
+	}
+}
+
+impl<B: BlockT> NetworkBlock<B::Hash, NumberFor<B>> for SyncingService<B> {
+	fn announce_block(&self, hash: B::Hash, data: Option<Vec<u8>>) {
+		let _ = self.tx.unbounded_send(ToServiceCommand::AnnounceBlock(hash, data));
+	}
+
+	fn new_best_block_imported(&self, hash: B::Hash, number: NumberFor<B>) {
+		let _ = self.tx.unbounded_send(ToServiceCommand::NewBestBlockImported(hash, number));
+	}
+}
+
+impl<B: BlockT> sp_consensus::SyncOracle for SyncingService<B> {
+	fn is_major_syncing(&self) -> bool {
+		self.is_major_syncing.load(Ordering::Relaxed)
+	}
+
+	fn is_offline(&self) -> bool {
+		self.num_connected.load(Ordering::Relaxed) == 0
+	}
+}
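For orientation, a minimal sketch (not part of this patch) of how a consumer might drive the `SyncingService` handle defined above; the helper name and log lines are illustrative, while `is_major_syncing`, `num_active_peers` and `status` are the APIs introduced in this file, and the handle is assumed to be the one handed out by the service builder later in the patch:

    use sc_network_sync::SyncingService;
    use sp_consensus::SyncOracle;
    use sp_runtime::traits::Block as BlockT;
    use std::sync::Arc;

    // Hypothetical helper, not part of the patch.
    async fn report_sync<B: BlockT>(sync_service: Arc<SyncingService<B>>) {
        // Lock-free check backed by the shared `is_major_syncing` atomic.
        if sync_service.is_major_syncing() {
            log::info!("node is major syncing");
        }
        // Oneshot-backed queries; an `Err` means the `SyncingEngine` task has shut down.
        if let Ok(peers) = sync_service.num_active_peers().await {
            log::info!("active sync peers: {peers}");
        }
        if let Ok(status) = sync_service.status().await {
            log::info!("best seen block: {:?}", status.best_seen_block);
        }
    }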
diff --git a/substrate/client/network/sync/src/service/mock.rs b/substrate/client/network/sync/src/service/mock.rs
index e66a9e5611f..2853616adfb 100644
--- a/substrate/client/network/sync/src/service/mock.rs
+++ b/substrate/client/network/sync/src/service/mock.rs
@@ -23,7 +23,10 @@ use sc_network_common::{
 	config::MultiaddrWithPeerId,
 	protocol::ProtocolName,
 	request_responses::{IfDisconnected, RequestFailure},
-	service::{NetworkPeers, NetworkRequest, NetworkSyncForkRequest},
+	service::{
+		NetworkNotification, NetworkPeers, NetworkRequest, NetworkSyncForkRequest,
+		NotificationSender, NotificationSenderError,
+	},
 };
 use sc_peerset::ReputationChange;
 use sp_runtime::traits::{Block as BlockT, NumberFor};
@@ -125,4 +128,14 @@ mockall::mock! {
 			connect: IfDisconnected,
 		);
 	}
+
+	impl NetworkNotification for Network {
+		fn write_notification(&self, target: PeerId, protocol: ProtocolName, message: Vec<u8>);
+		fn notification_sender(
+			&self,
+			target: PeerId,
+			protocol: ProtocolName,
+		) -> Result<Box<dyn NotificationSender>, NotificationSenderError>;
+		fn set_notification_handshake(&self, protocol: ProtocolName, handshake: Vec<u8>);
+	}
 }
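An illustrative unit test (not in the patch) showing the consequence of the expanded mock: code that mocks `Network` must now also stub the notification methods. It assumes the `MockNetwork` generated in this file is visible to the test and uses plain mockall expectation syntax:

    use sc_network_common::{protocol::ProtocolName, service::NetworkNotification};

    #[test]
    fn mock_network_accepts_notifications() {
        let mut network = MockNetwork::new();
        network
            .expect_write_notification()
            .times(1)
            .returning(|_peer, _protocol, _message| ());

        // The mock now satisfies `NetworkNotification`, so syncing code under test
        // can be handed the mock wherever a `Network` implementation is expected.
        network.write_notification(
            libp2p::PeerId::random(),
            ProtocolName::from("/test/notifications/1"),
            vec![1, 2, 3],
        );
    }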
diff --git a/substrate/client/network/sync/src/service/network.rs b/substrate/client/network/sync/src/service/network.rs
index de7e255f5a8..a1f4f27bbbd 100644
--- a/substrate/client/network/sync/src/service/network.rs
+++ b/substrate/client/network/sync/src/service/network.rs
@@ -21,16 +21,16 @@ use libp2p::PeerId;
 use sc_network_common::{
 	protocol::ProtocolName,
 	request_responses::{IfDisconnected, RequestFailure},
-	service::{NetworkPeers, NetworkRequest},
+	service::{NetworkNotification, NetworkPeers, NetworkRequest},
 };
 use sc_peerset::ReputationChange;
 use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender};
 use std::sync::Arc;
 
 /// Network-related services required by `sc-network-sync`
-pub trait Network: NetworkPeers + NetworkRequest {}
+pub trait Network: NetworkPeers + NetworkRequest + NetworkNotification {}
 
-impl<T> Network for T where T: NetworkPeers + NetworkRequest {}
+impl<T> Network for T where T: NetworkPeers + NetworkRequest + NetworkNotification {}
 
 /// Network service provider for `ChainSync`
 ///
@@ -56,6 +56,12 @@ pub enum ToServiceCommand {
 		oneshot::Sender<Result<Vec<u8>, RequestFailure>>,
 		IfDisconnected,
 	),
+
+	/// Call `NetworkNotification::write_notification()`
+	WriteNotification(PeerId, ProtocolName, Vec<u8>),
+
+	/// Call `NetworkNotification::set_notification_handshake()`
+	SetNotificationHandshake(ProtocolName, Vec<u8>),
 }
 
 /// Handle that is (temporarily) passed to `ChainSync` so it can
@@ -94,6 +100,20 @@ impl NetworkServiceHandle {
 			.tx
 			.unbounded_send(ToServiceCommand::StartRequest(who, protocol, request, tx, connect));
 	}
+
+	/// Send notification to peer
+	pub fn write_notification(&self, who: PeerId, protocol: ProtocolName, message: Vec<u8>) {
+		let _ = self
+			.tx
+			.unbounded_send(ToServiceCommand::WriteNotification(who, protocol, message));
+	}
+
+	/// Set handshake for the notification protocol.
+	pub fn set_notification_handshake(&self, protocol: ProtocolName, handshake: Vec<u8>) {
+		let _ = self
+			.tx
+			.unbounded_send(ToServiceCommand::SetNotificationHandshake(protocol, handshake));
+	}
 }
 
 impl NetworkServiceProvider {
@@ -114,6 +134,10 @@ impl NetworkServiceProvider {
 					service.report_peer(peer, reputation_change),
 				ToServiceCommand::StartRequest(peer, protocol, request, tx, connect) =>
 					service.start_request(peer, protocol, request, tx, connect),
+				ToServiceCommand::WriteNotification(peer, protocol, message) =>
+					service.write_notification(peer, protocol, message),
+				ToServiceCommand::SetNotificationHandshake(protocol, handshake) =>
+					service.set_notification_handshake(protocol, handshake),
 			}
 		}
 	}
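A hedged sketch (not in the patch) of how syncing code holding a `NetworkServiceHandle` can use the new wrappers; the function, peer, protocol name and payload are made up for illustration:

    use libp2p::PeerId;
    use sc_network_common::protocol::ProtocolName;
    use sc_network_sync::service::network::NetworkServiceHandle;

    fn notify_peer(handle: &NetworkServiceHandle, peer: PeerId, payload: Vec<u8>) {
        // Enqueued on the unbounded command channel; `NetworkServiceProvider::run`
        // forwards it to `NetworkNotification::write_notification` on the real service.
        handle.write_notification(peer, ProtocolName::from("/sync/2"), payload);
    }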
diff --git a/substrate/client/network/sync/src/tests.rs b/substrate/client/network/sync/src/tests.rs
deleted file mode 100644
index d56e84093ad..00000000000
--- a/substrate/client/network/sync/src/tests.rs
+++ /dev/null
@@ -1,78 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-use crate::{service::network::NetworkServiceProvider, ChainSync, ForkTarget};
-
-use libp2p::PeerId;
-use sc_network_common::{
-	config::ProtocolId,
-	protocol::{
-		role::{Role, Roles},
-		ProtocolName,
-	},
-	service::NetworkSyncForkRequest,
-	sync::ChainSync as ChainSyncT,
-};
-use sp_consensus::block_validation::DefaultBlockAnnounceValidator;
-use sp_core::H256;
-use std::{sync::Arc, task::Poll};
-use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _};
-
-// verify that the fork target map is empty, then submit a new sync fork request,
-// poll `ChainSync` and verify that a new sync fork request has been registered
-#[tokio::test]
-async fn delegate_to_chainsync() {
-	let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new());
-	let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new();
-	let (mut chain_sync, chain_sync_service, _) = ChainSync::new(
-		sc_network_common::sync::SyncMode::Full,
-		Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0),
-		ProtocolId::from("test-protocol-name"),
-		&Some(String::from("test-fork-id")),
-		Roles::from(&Role::Full),
-		Box::new(DefaultBlockAnnounceValidator),
-		1u32,
-		None,
-		None,
-		chain_sync_network_handle,
-		import_queue,
-		ProtocolName::from("block-request"),
-		ProtocolName::from("state-request"),
-		None,
-	)
-	.unwrap();
-
-	let hash = H256::random();
-	let in_number = 1337u64;
-	let peers = (0..3).map(|_| PeerId::random()).collect::<Vec<_>>();
-
-	assert!(chain_sync.fork_targets.is_empty());
-	chain_sync_service.set_sync_fork_request(peers, hash, in_number);
-
-	futures::future::poll_fn(|cx| {
-		let _ = chain_sync.poll(cx);
-		Poll::Ready(())
-	})
-	.await;
-
-	if let Some(ForkTarget { number, .. }) = chain_sync.fork_targets.get(&hash) {
-		assert_eq!(number, &in_number);
-	} else {
-		panic!("expected to contain `ForkTarget`");
-	}
-}
diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs
index 544f4c5f62d..95381dd7b4c 100644
--- a/substrate/client/network/test/src/lib.rs
+++ b/substrate/client/network/test/src/lib.rs
@@ -46,24 +46,24 @@ use sc_consensus::{
 	ForkChoiceStrategy, ImportQueue, ImportResult, JustificationImport, JustificationSyncLink,
 	LongestChain, Verifier,
 };
-use sc_network::{
-	config::{NetworkConfiguration, RequestResponseConfig, Role, SyncMode},
-	Multiaddr, NetworkService, NetworkWorker,
-};
+use sc_network::{Multiaddr, NetworkService, NetworkWorker};
 use sc_network_common::{
 	config::{
-		MultiaddrWithPeerId, NonDefaultSetConfig, NonReservedPeerMode, ProtocolId, TransportConfig,
+		MultiaddrWithPeerId, NetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode,
+		ProtocolId, RequestResponseConfig, Role, SyncMode, TransportConfig,
 	},
 	protocol::{role::Roles, ProtocolName},
-	service::{NetworkBlock, NetworkStateInfo, NetworkSyncForkRequest},
+	service::{NetworkBlock, NetworkEventStream, NetworkStateInfo, NetworkSyncForkRequest},
 	sync::warp::{
 		AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncParams, WarpSyncProvider,
 	},
 };
 use sc_network_light::light_client_requests::handler::LightClientRequestHandler;
 use sc_network_sync::{
-	block_request_handler::BlockRequestHandler, service::network::NetworkServiceProvider,
-	state_request_handler::StateRequestHandler, warp_request_handler, ChainSync,
+	block_request_handler::BlockRequestHandler,
+	service::{chain_sync::SyncingService, network::NetworkServiceProvider},
+	state_request_handler::StateRequestHandler,
+	warp_request_handler,
 };
 use sc_service::client::Client;
 use sp_blockchain::{
@@ -241,7 +241,8 @@ pub struct Peer<D, BlockImport> {
 	block_import: BlockImportAdapter<BlockImport>,
 	select_chain: Option<LongestChain<substrate_test_runtime_client::Backend, Block>>,
 	backend: Option<Arc<substrate_test_runtime_client::Backend>>,
-	network: NetworkWorker<Block, <Block as BlockT>::Hash, PeersFullClient>,
+	network: NetworkWorker<Block, <Block as BlockT>::Hash>,
+	sync_service: Arc<SyncingService<Block>>,
 	imported_blocks_stream: Pin<Box<dyn Stream<Item = BlockImportNotification<Block>> + Send>>,
 	finality_notification_stream: Pin<Box<dyn Stream<Item = FinalityNotification<Block>> + Send>>,
 	listen_addr: Multiaddr,
@@ -259,7 +260,7 @@ where
 
 	/// Returns true if we're major syncing.
 	pub fn is_major_syncing(&self) -> bool {
-		self.network.service().is_major_syncing()
+		self.sync_service.is_major_syncing()
 	}
 
 	// Returns a clone of the local SelectChain, only available on full nodes
@@ -275,23 +276,23 @@ where
 	}
 
 	/// Returns the number of downloaded blocks.
-	pub fn num_downloaded_blocks(&self) -> usize {
-		self.network.num_downloaded_blocks()
+	pub async fn num_downloaded_blocks(&self) -> usize {
+		self.sync_service.num_downloaded_blocks().await.unwrap()
 	}
 
 	/// Returns true if we have no peer.
 	pub fn is_offline(&self) -> bool {
-		self.num_peers() == 0
+		self.sync_service.is_offline()
 	}
 
 	/// Request a justification for the given block.
 	pub fn request_justification(&self, hash: &<Block as BlockT>::Hash, number: NumberFor<Block>) {
-		self.network.service().request_justification(hash, number);
+		self.sync_service.request_justification(hash, number);
 	}
 
 	/// Announces an important block on the network.
 	pub fn announce_block(&self, hash: <Block as BlockT>::Hash, data: Option<Vec<u8>>) {
-		self.network.service().announce_block(hash, data);
+		self.sync_service.announce_block(hash, data);
 	}
 
 	/// Request explicit fork sync.
@@ -301,7 +302,7 @@ where
 		hash: <Block as BlockT>::Hash,
 		number: NumberFor<Block>,
 	) {
-		self.network.service().set_sync_fork_request(peers, hash, number);
+		self.sync_service.set_sync_fork_request(peers, hash, number);
 	}
 
 	/// Add blocks to the peer -- edit the block before adding
@@ -402,14 +403,14 @@ where
 			futures::executor::block_on(self.block_import.import_block(import_block, cache))
 				.expect("block_import failed");
 			if announce_block {
-				self.network.service().announce_block(hash, None);
+				self.sync_service.announce_block(hash, None);
 			}
 			hashes.push(hash);
 			at = hash;
 		}
 
 		if inform_sync_about_new_best_block {
-			self.network.new_best_block_imported(
+			self.sync_service.new_best_block_imported(
 				at,
 				*full_client.header(at).ok().flatten().unwrap().number(),
 			);
@@ -525,8 +526,12 @@ where
 		self.network.service()
 	}
 
+	pub fn sync_service(&self) -> &Arc<SyncingService<Block>> {
+		&self.sync_service
+	}
+
 	/// Get a reference to the network worker.
-	pub fn network(&self) -> &NetworkWorker<Block, <Block as BlockT>::Hash, PeersFullClient> {
+	pub fn network(&self) -> &NetworkWorker<Block, <Block as BlockT>::Hash> {
 		&self.network
 	}
 
@@ -728,13 +733,13 @@ pub struct FullPeerConfig {
 }
 
 #[async_trait::async_trait]
-pub trait TestNetFactory: Default + Sized
+pub trait TestNetFactory: Default + Sized + Send
 where
 	<Self::BlockImport as BlockImport<Block>>::Transaction: Send,
 {
 	type Verifier: 'static + Verifier<Block>;
 	type BlockImport: BlockImport<Block, Error = ConsensusError> + Clone + Send + Sync + 'static;
-	type PeerData: Default;
+	type PeerData: Default + Send;
 
 	/// This one needs to be implemented!
 	fn make_verifier(&self, client: PeersClient, peer_data: &Self::PeerData) -> Self::Verifier;
@@ -742,6 +747,7 @@ where
 	/// Get reference to peer.
 	fn peer(&mut self, i: usize) -> &mut Peer<Self::PeerData, Self::BlockImport>;
 	fn peers(&self) -> &Vec<Peer<Self::PeerData, Self::BlockImport>>;
+	fn peers_mut(&mut self) -> &mut Vec<Peer<Self::PeerData, Self::BlockImport>>;
 	fn mut_peers<F: FnOnce(&mut Vec<Peer<Self::PeerData, Self::BlockImport>>)>(
 		&mut self,
 		closure: F,
@@ -900,31 +906,25 @@ where
 		let (chain_sync_network_provider, chain_sync_network_handle) =
 			NetworkServiceProvider::new();
 
-		let (chain_sync, chain_sync_service, block_announce_config) = ChainSync::new(
-			match network_config.sync_mode {
-				SyncMode::Full => sc_network_common::sync::SyncMode::Full,
-				SyncMode::Fast { skip_proofs, storage_chain_mode } =>
-					sc_network_common::sync::SyncMode::LightState {
-						skip_proofs,
-						storage_chain_mode,
-					},
-				SyncMode::Warp => sc_network_common::sync::SyncMode::Warp,
-			},
-			client.clone(),
-			protocol_id.clone(),
-			&fork_id,
-			Roles::from(if config.is_authority { &Role::Authority } else { &Role::Full }),
-			block_announce_validator,
-			network_config.max_parallel_downloads,
-			Some(warp_sync_params),
-			None,
-			chain_sync_network_handle,
-			import_queue.service(),
-			block_request_protocol_config.name.clone(),
-			state_request_protocol_config.name.clone(),
-			Some(warp_protocol_config.name.clone()),
-		)
-		.unwrap();
+		let (engine, sync_service, block_announce_config) =
+			sc_network_sync::engine::SyncingEngine::new(
+				Roles::from(if config.is_authority { &Role::Authority } else { &Role::Full }),
+				client.clone(),
+				None,
+				&network_config,
+				protocol_id.clone(),
+				&fork_id,
+				block_announce_validator,
+				Some(warp_sync_params),
+				chain_sync_network_handle,
+				import_queue.service(),
+				block_request_protocol_config.name.clone(),
+				state_request_protocol_config.name.clone(),
+				Some(warp_protocol_config.name.clone()),
+			)
+			.unwrap();
+		let sync_service_import_queue = Box::new(sync_service.clone());
+		let sync_service = Arc::new(sync_service.clone());
 
 		let network = NetworkWorker::new(sc_network::config::Params {
 			role: if config.is_authority { Role::Authority } else { Role::Full },
@@ -935,8 +935,6 @@ where
 			chain: client.clone(),
 			protocol_id,
 			fork_id,
-			chain_sync: Box::new(chain_sync),
-			chain_sync_service: Box::new(chain_sync_service.clone()),
 			metrics_registry: None,
 			block_announce_config,
 			request_response_protocol_configs: [
@@ -955,8 +953,14 @@ where
 		tokio::spawn(async move {
 			chain_sync_network_provider.run(service).await;
 		});
+
+		tokio::spawn(async move {
+			import_queue.run(sync_service_import_queue).await;
+		});
+
+		let service = network.service().clone();
 		tokio::spawn(async move {
-			import_queue.run(Box::new(chain_sync_service)).await;
+			engine.run(service.event_stream("syncing")).await;
 		});
 
 		self.mut_peers(move |peers| {
@@ -979,6 +983,7 @@ where
 				block_import,
 				verifier,
 				network,
+				sync_service,
 				listen_addr,
 			});
 		});
@@ -989,71 +994,75 @@ where
 		tokio::spawn(f);
 	}
 
-	/// Polls the testnet until all nodes are in sync.
+	/// Polls the testnet until all peers are connected to each other.
 	///
 	/// Must be executed in a task context.
-	fn poll_until_sync(&mut self, cx: &mut FutureContext) -> Poll<()> {
+	fn poll_until_connected(&mut self, cx: &mut FutureContext) -> Poll<()> {
 		self.poll(cx);
 
-		// Return `NotReady` if there's a mismatch in the highest block number.
+		let num_peers = self.peers().len();
+		if self.peers().iter().all(|p| p.num_peers() == num_peers - 1) {
+			return Poll::Ready(())
+		}
+
+		Poll::Pending
+	}
+
+	async fn is_in_sync(&mut self) -> bool {
 		let mut highest = None;
-		for peer in self.peers().iter() {
-			if peer.is_major_syncing() || peer.network.num_queued_blocks() != 0 {
-				return Poll::Pending
+		let peers = self.peers_mut();
+
+		for peer in peers {
+			if peer.sync_service.is_major_syncing() ||
+				peer.sync_service.num_queued_blocks().await.unwrap() != 0
+			{
+				return false
 			}
-			if peer.network.num_sync_requests() != 0 {
-				return Poll::Pending
+			if peer.sync_service.num_sync_requests().await.unwrap() != 0 {
+				return false
 			}
 			match (highest, peer.client.info().best_hash) {
 				(None, b) => highest = Some(b),
 				(Some(ref a), ref b) if a == b => {},
-				(Some(_), _) => return Poll::Pending,
+				(Some(_), _) => return false,
 			}
 		}
-		Poll::Ready(())
-	}
 
-	/// Polls the testnet until theres' no activiy of any kind.
-	///
-	/// Must be executed in a task context.
-	fn poll_until_idle(&mut self, cx: &mut FutureContext) -> Poll<()> {
-		self.poll(cx);
+		true
+	}
 
-		for peer in self.peers().iter() {
-			if peer.is_major_syncing() || peer.network.num_queued_blocks() != 0 {
-				return Poll::Pending
+	async fn is_idle(&mut self) -> bool {
+		let peers = self.peers_mut();
+		for peer in peers {
+			if peer.sync_service.num_queued_blocks().await.unwrap() != 0 {
+				return false
 			}
-			if peer.network.num_sync_requests() != 0 {
-				return Poll::Pending
+			if peer.sync_service.num_sync_requests().await.unwrap() != 0 {
+				return false
 			}
 		}
 
-		Poll::Ready(())
-	}
-
-	/// Polls the testnet until all peers are connected to each other.
-	///
-	/// Must be executed in a task context.
-	fn poll_until_connected(&mut self, cx: &mut FutureContext) -> Poll<()> {
-		self.poll(cx);
-
-		let num_peers = self.peers().len();
-		if self.peers().iter().all(|p| p.num_peers() == num_peers - 1) {
-			return Poll::Ready(())
-		}
-
-		Poll::Pending
+		true
 	}
 
-	/// Run the network until we are sync'ed.
+	/// Run the network until all peers are synced.
 	///
-	/// Calls `poll_until_sync` repeatedly.
 	/// (If we've not synced within 10 mins then panic rather than hang.)
 	async fn run_until_sync(&mut self) {
-		timeout(
-			Duration::from_secs(10 * 60),
-			futures::future::poll_fn::<(), _>(|cx| self.poll_until_sync(cx)),
-		)
+		timeout(Duration::from_secs(10 * 60), async {
+			loop {
+				futures::future::poll_fn::<(), _>(|cx| {
+					self.poll(cx);
+					Poll::Ready(())
+				})
+				.await;
+
+				if self.is_in_sync().await {
+					break
+				}
+			}
+		})
 		.await
 		.expect("sync didn't happen within 10 mins");
 	}
@@ -1062,7 +1071,17 @@ where
 	///
 	/// Calls `poll_until_idle` repeatedly with the runtime passed as parameter.
 	async fn run_until_idle(&mut self) {
-		futures::future::poll_fn::<(), _>(|cx| self.poll_until_idle(cx)).await;
+		loop {
+			futures::future::poll_fn::<(), _>(|cx| {
+				self.poll(cx);
+				Poll::Ready(())
+			})
+			.await;
+
+			if self.is_idle().await {
+				break
+			}
+		}
 	}
 
 	/// Run the network until all peers are connected to each other.
@@ -1095,14 +1114,14 @@ where
 				while let Poll::Ready(Some(notification)) =
 					peer.imported_blocks_stream.as_mut().poll_next(cx)
 				{
-					peer.network.service().announce_block(notification.hash, None);
+					peer.sync_service.announce_block(notification.hash, None);
 				}
 
 				// We poll `finality_notification_stream`.
 				while let Poll::Ready(Some(notification)) =
 					peer.finality_notification_stream.as_mut().poll_next(cx)
 				{
-					peer.network.on_block_finalized(notification.hash, notification.header);
+					peer.sync_service.on_block_finalized(notification.hash, notification.header);
 				}
 			}
 		});
@@ -1142,6 +1161,10 @@ impl TestNetFactory for TestNet {
 		&self.peers
 	}
 
+	fn peers_mut(&mut self) -> &mut Vec<Peer<(), Self::BlockImport>> {
+		&mut self.peers
+	}
+
 	fn mut_peers<F: FnOnce(&mut Vec<Peer<(), Self::BlockImport>>)>(&mut self, closure: F) {
 		closure(&mut self.peers);
 	}
@@ -1189,6 +1212,10 @@ impl TestNetFactory for JustificationTestNet {
 		self.0.peers()
 	}
 
+	fn peers_mut(&mut self) -> &mut Vec<Peer<Self::PeerData, Self::BlockImport>> {
+		self.0.peers_mut()
+	}
+
 	fn mut_peers<F: FnOnce(&mut Vec<Peer<Self::PeerData, Self::BlockImport>>)>(
 		&mut self,
 		closure: F,
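To show the shape of a test written against the reworked harness, a small hedged sketch follows (the test name, block count and assertions are illustrative; `sync_service()`, `num_queued_blocks()` and `run_until_sync()` are the APIs touched above, and the test is assumed to live next to the existing ones so the harness types are in scope):

    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn peers_settle_after_push() {
        let mut net = TestNet::new(2);
        net.peer(0).push_blocks(10, false);

        // Drives `poll` and the async `is_in_sync` check until everything has settled.
        net.run_until_sync().await;

        assert!(!net.peer(1).is_major_syncing());
        assert_eq!(net.peer(1).sync_service().num_queued_blocks().await.unwrap(), 0);
    }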
diff --git a/substrate/client/network/test/src/sync.rs b/substrate/client/network/test/src/sync.rs
index 490ec134c7a..d87b03fb3a7 100644
--- a/substrate/client/network/test/src/sync.rs
+++ b/substrate/client/network/test/src/sync.rs
@@ -652,12 +652,12 @@ async fn imports_stale_once() {
 	// check that NEW block is imported from announce message
 	let new_hash = net.peer(0).push_blocks(1, false).pop().unwrap();
 	import_with_announce(&mut net, new_hash).await;
-	assert_eq!(net.peer(1).num_downloaded_blocks(), 1);
+	assert_eq!(net.peer(1).num_downloaded_blocks().await, 1);
 
 	// check that KNOWN STALE block is imported from announce message
 	let known_stale_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 1, true).pop().unwrap();
 	import_with_announce(&mut net, known_stale_hash).await;
-	assert_eq!(net.peer(1).num_downloaded_blocks(), 2);
+	assert_eq!(net.peer(1).num_downloaded_blocks().await, 2);
 }
 
 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@@ -820,7 +820,7 @@ async fn sync_to_tip_requires_that_sync_protocol_is_informed_about_best_block()
 	assert!(!net.peer(1).has_block(block_hash));
 
 	// Make sync protocol aware of the best block
-	net.peer(0).network_service().new_best_block_imported(block_hash, 3);
+	net.peer(0).sync_service().new_best_block_imported(block_hash, 3);
 	net.run_until_idle().await;
 
 	// Connect another node that should now sync to the tip
@@ -865,8 +865,8 @@ async fn sync_to_tip_when_we_sync_together_with_multiple_peers() {
 
 	assert!(!net.peer(2).has_block(block_hash));
 
-	net.peer(0).network_service().new_best_block_imported(block_hash, 10_000);
-	net.peer(0).network_service().announce_block(block_hash, None);
+	net.peer(0).sync_service().new_best_block_imported(block_hash, 10_000);
+	net.peer(0).sync_service().announce_block(block_hash, None);
 
 	while !net.peer(2).has_block(block_hash) && !net.peer(1).has_block(block_hash) {
 		net.run_until_idle().await;
@@ -1045,14 +1045,17 @@ async fn syncs_all_forks_from_single_peer() {
 	let branch1 = net.peer(0).push_blocks_at(BlockId::Number(10), 2, true).pop().unwrap();
 
 	// Wait till peer 1 starts downloading
-	futures::future::poll_fn::<(), _>(|cx| {
-		net.poll(cx);
-		if net.peer(1).network().best_seen_block() != Some(12) {
-			return Poll::Pending
+	loop {
+		futures::future::poll_fn::<(), _>(|cx| {
+			net.poll(cx);
+			Poll::Ready(())
+		})
+		.await;
+
+		if net.peer(1).sync_service().best_seen_block().await.unwrap() == Some(12) {
+			break
 		}
-		Poll::Ready(())
-	})
-	.await;
+	}
 
 	// Peer 0 produces and announces another fork
 	let branch2 = net.peer(0).push_blocks_at(BlockId::Number(10), 2, false).pop().unwrap();
diff --git a/substrate/client/network/transactions/src/lib.rs b/substrate/client/network/transactions/src/lib.rs
index d4d08d2ab75..48bb3949466 100644
--- a/substrate/client/network/transactions/src/lib.rs
+++ b/substrate/client/network/transactions/src/lib.rs
@@ -37,6 +37,7 @@ use sc_network_common::{
 	error,
 	protocol::{event::Event, role::ObservedRole, ProtocolName},
 	service::{NetworkEventStream, NetworkNotification, NetworkPeers},
+	sync::{SyncEvent, SyncEventStream},
 	utils::{interval, LruHashSet},
 	ExHashT,
 };
@@ -161,14 +162,17 @@ impl TransactionsHandlerPrototype {
 	pub fn build<
 		B: BlockT + 'static,
 		H: ExHashT,
-		S: NetworkPeers + NetworkEventStream + NetworkNotification + sp_consensus::SyncOracle,
+		N: NetworkPeers + NetworkEventStream + NetworkNotification,
+		S: SyncEventStream + sp_consensus::SyncOracle,
 	>(
 		self,
-		service: S,
+		network: N,
+		sync: S,
 		transaction_pool: Arc<dyn TransactionPool<H, B>>,
 		metrics_registry: Option<&Registry>,
-	) -> error::Result<(TransactionsHandler<B, H, S>, TransactionsHandlerController<H>)> {
-		let event_stream = service.event_stream("transactions-handler");
+	) -> error::Result<(TransactionsHandler<B, H, N, S>, TransactionsHandlerController<H>)> {
+		let net_event_stream = network.event_stream("transactions-handler-net");
+		let sync_event_stream = sync.event_stream("transactions-handler-sync");
 		let (to_handler, from_controller) = tracing_unbounded("mpsc_transactions_handler", 100_000);
 
 		let handler = TransactionsHandler {
@@ -178,8 +182,10 @@ impl TransactionsHandlerPrototype {
 				.fuse(),
 			pending_transactions: FuturesUnordered::new(),
 			pending_transactions_peers: HashMap::new(),
-			service,
-			event_stream: event_stream.fuse(),
+			network,
+			sync,
+			net_event_stream: net_event_stream.fuse(),
+			sync_event_stream: sync_event_stream.fuse(),
 			peers: HashMap::new(),
 			transaction_pool,
 			from_controller,
@@ -228,7 +234,8 @@ enum ToHandler<H: ExHashT> {
 pub struct TransactionsHandler<
 	B: BlockT + 'static,
 	H: ExHashT,
-	S: NetworkPeers + NetworkEventStream + NetworkNotification + sp_consensus::SyncOracle,
+	N: NetworkPeers + NetworkEventStream + NetworkNotification,
+	S: SyncEventStream + sp_consensus::SyncOracle,
 > {
 	protocol_name: ProtocolName,
 	/// Interval at which we call `propagate_transactions`.
@@ -241,9 +248,13 @@ pub struct TransactionsHandler<
 	/// multiple times concurrently.
 	pending_transactions_peers: HashMap<H, Vec<PeerId>>,
 	/// Network service to use to send messages and manage peers.
-	service: S,
+	network: N,
+	/// Syncing service.
+	sync: S,
 	/// Stream of networking events.
-	event_stream: stream::Fuse<Pin<Box<dyn Stream<Item = Event> + Send>>>,
+	net_event_stream: stream::Fuse<Pin<Box<dyn Stream<Item = Event> + Send>>>,
+	/// Receiver for syncing-related events.
+	sync_event_stream: stream::Fuse<Pin<Box<dyn Stream<Item = SyncEvent> + Send>>>,
 	// All connected peers
 	peers: HashMap<PeerId, Peer<H>>,
 	transaction_pool: Arc<dyn TransactionPool<H, B>>,
@@ -260,11 +271,12 @@ struct Peer<H: ExHashT> {
 	role: ObservedRole,
 }
 
-impl<B, H, S> TransactionsHandler<B, H, S>
+impl<B, H, N, S> TransactionsHandler<B, H, N, S>
 where
 	B: BlockT + 'static,
 	H: ExHashT,
-	S: NetworkPeers + NetworkEventStream + NetworkNotification + sp_consensus::SyncOracle,
+	N: NetworkPeers + NetworkEventStream + NetworkNotification,
+	S: SyncEventStream + sp_consensus::SyncOracle,
 {
 	/// Turns the [`TransactionsHandler`] into a future that should run forever and not be
 	/// interrupted.
@@ -281,7 +293,7 @@ where
 						warn!(target: "sub-libp2p", "Inconsistent state, no peers for pending transaction!");
 					}
 				},
-				network_event = self.event_stream.next() => {
+				network_event = self.net_event_stream.next() => {
 					if let Some(network_event) = network_event {
 						self.handle_network_event(network_event).await;
 					} else {
@@ -289,6 +301,14 @@ where
 						return;
 					}
 				},
+				sync_event = self.sync_event_stream.next() => {
+					if let Some(sync_event) = sync_event {
+						self.handle_sync_event(sync_event);
+					} else {
+						// Syncing has seemingly closed. Closing as well.
+						return;
+					}
+				}
 				message = self.from_controller.select_next_some() => {
 					match message {
 						ToHandler::PropagateTransaction(hash) => self.propagate_transaction(&hash),
@@ -299,13 +319,12 @@ where
 		}
 	}
 
-	async fn handle_network_event(&mut self, event: Event) {
+	fn handle_sync_event(&mut self, event: SyncEvent) {
 		match event {
-			Event::Dht(_) => {},
-			Event::SyncConnected { remote } => {
+			SyncEvent::PeerConnected(remote) => {
 				let addr = iter::once(multiaddr::Protocol::P2p(remote.into()))
 					.collect::<multiaddr::Multiaddr>();
-				let result = self.service.add_peers_to_reserved_set(
+				let result = self.network.add_peers_to_reserved_set(
 					self.protocol_name.clone(),
 					iter::once(addr).collect(),
 				);
@@ -313,13 +332,18 @@ where
 					log::error!(target: "sync", "Add reserved peer failed: {}", err);
 				}
 			},
-			Event::SyncDisconnected { remote } => {
-				self.service.remove_peers_from_reserved_set(
+			SyncEvent::PeerDisconnected(remote) => {
+				self.network.remove_peers_from_reserved_set(
 					self.protocol_name.clone(),
 					iter::once(remote).collect(),
 				);
 			},
+		}
+	}
 
+	async fn handle_network_event(&mut self, event: Event) {
+		match event {
+			Event::Dht(_) => {},
 			Event::NotificationStreamOpened { remote, protocol, role, .. }
 				if protocol == self.protocol_name =>
 			{
@@ -365,7 +389,7 @@ where
 	/// Called when peer sends us new transactions
 	fn on_transactions(&mut self, who: PeerId, transactions: Transactions<B::Extrinsic>) {
 		// Accept transactions only when node is not major syncing
-		if self.service.is_major_syncing() {
+		if self.sync.is_major_syncing() {
 			trace!(target: "sync", "{} Ignoring transactions while major syncing", who);
 			return
 		}
@@ -385,7 +409,7 @@ where
 				let hash = self.transaction_pool.hash_of(&t);
 				peer.known_transactions.insert(hash.clone());
 
-				self.service.report_peer(who, rep::ANY_TRANSACTION);
+				self.network.report_peer(who, rep::ANY_TRANSACTION);
 
 				match self.pending_transactions_peers.entry(hash.clone()) {
 					Entry::Vacant(entry) => {
@@ -406,9 +430,9 @@ where
 	fn on_handle_transaction_import(&mut self, who: PeerId, import: TransactionImport) {
 		match import {
 			TransactionImport::KnownGood =>
-				self.service.report_peer(who, rep::ANY_TRANSACTION_REFUND),
-			TransactionImport::NewGood => self.service.report_peer(who, rep::GOOD_TRANSACTION),
-			TransactionImport::Bad => self.service.report_peer(who, rep::BAD_TRANSACTION),
+				self.network.report_peer(who, rep::ANY_TRANSACTION_REFUND),
+			TransactionImport::NewGood => self.network.report_peer(who, rep::GOOD_TRANSACTION),
+			TransactionImport::Bad => self.network.report_peer(who, rep::BAD_TRANSACTION),
 			TransactionImport::None => {},
 		}
 	}
@@ -416,7 +440,7 @@ where
 	/// Propagate one transaction.
 	pub fn propagate_transaction(&mut self, hash: &H) {
 		// Accept transactions only when node is not major syncing
-		if self.service.is_major_syncing() {
+		if self.sync.is_major_syncing() {
 			return
 		}
 
@@ -453,7 +477,7 @@ where
 					propagated_to.entry(hash).or_default().push(who.to_base58());
 				}
 				trace!(target: "sync", "Sending {} transactions to {}", to_send.len(), who);
-				self.service
+				self.network
 					.write_notification(*who, self.protocol_name.clone(), to_send.encode());
 			}
 		}
@@ -468,7 +492,7 @@ where
 	/// Call when we must propagate ready transactions to peers.
 	fn propagate_transactions(&mut self) {
 		// Accept transactions only when node is not major syncing
-		if self.service.is_major_syncing() {
+		if self.sync.is_major_syncing() {
 			return
 		}
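Other notification protocols migrate along the same lines; a minimal sketch (the function name and tracing label are made up) of subscribing to sync peer events through the new `SyncEventStream` trait instead of the network event stream:

    use futures::StreamExt;
    use sc_network_common::sync::{SyncEvent, SyncEventStream};

    async fn track_sync_peers<S: SyncEventStream>(sync: &S) {
        let mut events = sync.event_stream("example-protocol-sync");
        while let Some(event) = events.next().await {
            match event {
                SyncEvent::PeerConnected(peer) => log::debug!("sync peer connected: {peer}"),
                SyncEvent::PeerDisconnected(peer) => log::debug!("sync peer disconnected: {peer}"),
            }
        }
    }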
 
diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs
index 2c690dcd2f8..ea4b6300031 100644
--- a/substrate/client/service/src/builder.rs
+++ b/substrate/client/service/src/builder.rs
@@ -38,18 +38,19 @@ use sc_client_db::{Backend, DatabaseSettings};
 use sc_consensus::import_queue::ImportQueue;
 use sc_executor::RuntimeVersionOf;
 use sc_keystore::LocalKeystore;
-use sc_network::{config::SyncMode, NetworkService};
+use sc_network::NetworkService;
 use sc_network_bitswap::BitswapRequestHandler;
 use sc_network_common::{
+	config::SyncMode,
 	protocol::role::Roles,
-	service::{NetworkStateInfo, NetworkStatusProvider},
+	service::{NetworkEventStream, NetworkStateInfo, NetworkStatusProvider},
 	sync::warp::WarpSyncParams,
 };
 use sc_network_light::light_client_requests::handler::LightClientRequestHandler;
 use sc_network_sync::{
-	block_request_handler::BlockRequestHandler, service::network::NetworkServiceProvider,
-	state_request_handler::StateRequestHandler,
-	warp_request_handler::RequestHandler as WarpSyncRequestHandler, ChainSync,
+	block_request_handler::BlockRequestHandler, engine::SyncingEngine,
+	service::network::NetworkServiceProvider, state_request_handler::StateRequestHandler,
+	warp_request_handler::RequestHandler as WarpSyncRequestHandler, SyncingService,
 };
 use sc_rpc::{
 	author::AuthorApiServer,
@@ -349,12 +350,7 @@ where
 
 /// Shared network instance implementing a set of mandatory traits.
 pub trait SpawnTaskNetwork<Block: BlockT>:
-	sc_offchain::NetworkProvider
-	+ NetworkStateInfo
-	+ NetworkStatusProvider<Block>
-	+ Send
-	+ Sync
-	+ 'static
+	sc_offchain::NetworkProvider + NetworkStateInfo + NetworkStatusProvider + Send + Sync + 'static
 {
 }
 
@@ -363,7 +359,7 @@ where
 	Block: BlockT,
 	T: sc_offchain::NetworkProvider
 		+ NetworkStateInfo
-		+ NetworkStatusProvider<Block>
+		+ NetworkStatusProvider
 		+ Send
 		+ Sync
 		+ 'static,
@@ -394,6 +390,8 @@ pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> {
 	/// Controller for transactions handlers
 	pub tx_handler_controller:
 		sc_network_transactions::TransactionsHandlerController<<TBl as BlockT>::Hash>,
+	/// Syncing service.
+	pub sync_service: Arc<SyncingService<TBl>>,
 	/// Telemetry instance for this node.
 	pub telemetry: Option<&'a mut Telemetry>,
 }
@@ -471,6 +469,7 @@ where
 		network,
 		system_rpc_tx,
 		tx_handler_controller,
+		sync_service,
 		telemetry,
 	} = params;
 
@@ -533,7 +532,12 @@ where
 	spawn_handle.spawn(
 		"telemetry-periodic-send",
 		None,
-		metrics_service.run(client.clone(), transaction_pool.clone(), network.clone()),
+		metrics_service.run(
+			client.clone(),
+			transaction_pool.clone(),
+			network.clone(),
+			sync_service.clone(),
+		),
 	);
 
 	let rpc_id_provider = config.rpc_id_provider.take();
@@ -560,7 +564,12 @@ where
 	spawn_handle.spawn(
 		"informant",
 		None,
-		sc_informant::build(client.clone(), network, config.informant_output_format),
+		sc_informant::build(
+			client.clone(),
+			network,
+			sync_service.clone(),
+			config.informant_output_format,
+		),
 	);
 
 	task_manager.keep_alive((config.base_path, rpc));
@@ -771,6 +780,7 @@ pub fn build_network<TBl, TExPool, TImpQu, TCl>(
 		TracingUnboundedSender<sc_rpc::system::Request<TBl>>,
 		sc_network_transactions::TransactionsHandlerController<<TBl as BlockT>::Hash>,
 		NetworkStarter,
+		Arc<SyncingService<TBl>>,
 	),
 	Error,
 >
@@ -876,27 +886,23 @@ where
 	};
 
 	let (chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new();
-	let (chain_sync, chain_sync_service, block_announce_config) = ChainSync::new(
-		match config.network.sync_mode {
-			SyncMode::Full => sc_network_common::sync::SyncMode::Full,
-			SyncMode::Fast { skip_proofs, storage_chain_mode } =>
-				sc_network_common::sync::SyncMode::LightState { skip_proofs, storage_chain_mode },
-			SyncMode::Warp => sc_network_common::sync::SyncMode::Warp,
-		},
+	let (engine, sync_service, block_announce_config) = SyncingEngine::new(
+		Roles::from(&config.role),
 		client.clone(),
+		config.prometheus_config.as_ref().map(|config| config.registry.clone()).as_ref(),
+		&config.network,
 		protocol_id.clone(),
 		&config.chain_spec.fork_id().map(ToOwned::to_owned),
-		Roles::from(&config.role),
 		block_announce_validator,
-		config.network.max_parallel_downloads,
 		warp_sync_params,
-		config.prometheus_config.as_ref().map(|config| config.registry.clone()).as_ref(),
 		chain_sync_network_handle,
 		import_queue.service(),
 		block_request_protocol_config.name.clone(),
 		state_request_protocol_config.name.clone(),
 		warp_sync_protocol_config.as_ref().map(|config| config.name.clone()),
 	)?;
+	let sync_service_import_queue = sync_service.clone();
+	let sync_service = Arc::new(sync_service);
 
 	request_response_protocol_configs.push(config.network.ipfs_server.then(|| {
 		let (handler, protocol_config) = BitswapRequestHandler::new(client.clone());
@@ -916,8 +922,6 @@ where
 		chain: client.clone(),
 		protocol_id: protocol_id.clone(),
 		fork_id: config.chain_spec.fork_id().map(ToOwned::to_owned),
-		chain_sync: Box::new(chain_sync),
-		chain_sync_service: Box::new(chain_sync_service.clone()),
 		metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()),
 		block_announce_config,
 		request_response_protocol_configs: request_response_protocol_configs
@@ -953,6 +957,7 @@ where
 
 	let (tx_handler, tx_handler_controller) = transactions_handler_proto.build(
 		network.clone(),
+		sync_service.clone(),
 		Arc::new(TransactionPoolAdapter { pool: transaction_pool, client: client.clone() }),
 		config.prometheus_config.as_ref().map(|config| &config.registry),
 	)?;
@@ -963,11 +968,10 @@ where
 		Some("networking"),
 		chain_sync_network_provider.run(network.clone()),
 	);
-	spawn_handle.spawn(
-		"import-queue",
-		None,
-		import_queue.run(Box::new(chain_sync_service.clone())),
-	);
+	spawn_handle.spawn("import-queue", None, import_queue.run(Box::new(sync_service_import_queue)));
+
+	let event_stream = network.event_stream("syncing");
+	spawn_handle.spawn("syncing", None, engine.run(event_stream));
 
 	let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc", 10_000);
 	spawn_handle.spawn(
@@ -976,7 +980,7 @@ where
 		build_system_rpc_future(
 			config.role.clone(),
 			network_mut.service().clone(),
-			chain_sync_service.clone(),
+			sync_service.clone(),
 			client.clone(),
 			system_rpc_rx,
 			has_bootnodes,
@@ -984,7 +988,7 @@ where
 	);
 
 	let future =
-		build_network_future(network_mut, client, chain_sync_service, config.announce_block);
+		build_network_future(network_mut, client, sync_service.clone(), config.announce_block);
 
 	// TODO: Normally, one is supposed to pass a list of notifications protocols supported by the
 	// node through the `NetworkConfiguration` struct. But because this function doesn't know in
@@ -1022,7 +1026,13 @@ where
 		future.await
 	});
 
-	Ok((network, system_rpc_tx, tx_handler_controller, NetworkStarter(network_start_tx)))
+	Ok((
+		network,
+		system_rpc_tx,
+		tx_handler_controller,
+		NetworkStarter(network_start_tx),
+		sync_service.clone(),
+	))
 }
 
 /// Object used to start the network.
diff --git a/substrate/client/service/src/config.rs b/substrate/client/service/src/config.rs
index 7f3a2e3c0bb..8e843b58f28 100644
--- a/substrate/client/service/src/config.rs
+++ b/substrate/client/service/src/config.rs
@@ -22,11 +22,14 @@ pub use sc_client_api::execution_extensions::{ExecutionStrategies, ExecutionStra
 pub use sc_client_db::{BlocksPruning, Database, DatabaseSource, PruningMode};
 pub use sc_executor::{WasmExecutionMethod, WasmtimeInstantiationStrategy};
 pub use sc_network::{
-	config::{NetworkConfiguration, NodeKeyConfig, Role},
+	config::{NetworkConfiguration, Role},
 	Multiaddr,
 };
 pub use sc_network_common::{
-	config::{MultiaddrWithPeerId, NonDefaultSetConfig, ProtocolId, SetConfig, TransportConfig},
+	config::{
+		MultiaddrWithPeerId, NodeKeyConfig, NonDefaultSetConfig, ProtocolId, SetConfig,
+		TransportConfig,
+	},
 	request_responses::{
 		IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig,
 	},
@@ -34,7 +37,7 @@ pub use sc_network_common::{
 
 use prometheus_endpoint::Registry;
 use sc_chain_spec::ChainSpec;
-use sc_network::config::SyncMode;
+use sc_network_common::config::SyncMode;
 pub use sc_telemetry::TelemetryEndpoints;
 pub use sc_transaction_pool::Options as TransactionPoolOptions;
 use sp_core::crypto::SecretString;
diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs
index 6bafa9936c0..8e674ca44a1 100644
--- a/substrate/client/service/src/lib.rs
+++ b/substrate/client/service/src/lib.rs
@@ -46,7 +46,7 @@ use sc_network_common::{
 	config::MultiaddrWithPeerId,
 	service::{NetworkBlock, NetworkPeers},
 };
-use sc_network_sync::service::chain_sync::ChainSyncInterfaceHandle;
+use sc_network_sync::SyncingService;
 use sc_utils::mpsc::TracingUnboundedReceiver;
 use sp_blockchain::HeaderMetadata;
 use sp_consensus::SyncOracle;
@@ -158,9 +158,9 @@ async fn build_network_future<
 		+ 'static,
 	H: sc_network_common::ExHashT,
 >(
-	network: sc_network::NetworkWorker<B, H, C>,
+	network: sc_network::NetworkWorker<B, H>,
 	client: Arc<C>,
-	chain_sync_service: ChainSyncInterfaceHandle<B>,
+	sync_service: Arc<SyncingService<B>>,
 	announce_imported_blocks: bool,
 ) {
 	let mut imported_blocks_stream = client.import_notification_stream().fuse();
@@ -168,8 +168,6 @@ async fn build_network_future<
 	// Stream of finalized blocks reported by the client.
 	let mut finality_notification_stream = client.finality_notification_stream().fuse();
 
-	let network_service = network.service().clone();
-
 	let network_run = network.run().fuse();
 	pin_mut!(network_run);
 
@@ -188,11 +186,11 @@ async fn build_network_future<
 				};
 
 				if announce_imported_blocks {
-					network_service.announce_block(notification.hash, None);
+					sync_service.announce_block(notification.hash, None);
 				}
 
 				if notification.is_new_best {
-					network_service.new_best_block_imported(
+					sync_service.new_best_block_imported(
 						notification.hash,
 						*notification.header.number(),
 					);
@@ -201,7 +199,7 @@ async fn build_network_future<
 
 			// List of blocks that the client has finalized.
 			notification = finality_notification_stream.select_next_some() => {
-				chain_sync_service.on_block_finalized(notification.hash, *notification.header.number());
+				sync_service.on_block_finalized(notification.hash, notification.header);
 			}
 
 			// Drive the network. Shut down the network future if `NetworkWorker` has terminated.
@@ -228,7 +226,7 @@ async fn build_system_rpc_future<
 >(
 	role: Role,
 	network_service: Arc<sc_network::NetworkService<B, H>>,
-	chain_sync_service: ChainSyncInterfaceHandle<B>,
+	sync_service: Arc<SyncingService<B>>,
 	client: Arc<C>,
 	mut rpc_rx: TracingUnboundedReceiver<sc_rpc::system::Request<B>>,
 	should_have_peers: bool,
@@ -244,23 +242,21 @@ async fn build_system_rpc_future<
 		};
 
 		match req {
-			sc_rpc::system::Request::Health(sender) => {
-				let peers = network_service.peers_debug_info().await;
-				if let Ok(peers) = peers {
+			sc_rpc::system::Request::Health(sender) => match sync_service.peers_info().await {
+				Ok(info) => {
 					let _ = sender.send(sc_rpc::system::Health {
-						peers: peers.len(),
-						is_syncing: network_service.is_major_syncing(),
+						peers: info.len(),
+						is_syncing: sync_service.is_major_syncing(),
 						should_have_peers,
 					});
-				} else {
-					break
-				}
+				},
+				Err(_) => log::error!("`SyncingEngine` shut down"),
 			},
 			sc_rpc::system::Request::LocalPeerId(sender) => {
 				let _ = sender.send(network_service.local_peer_id().to_base58());
 			},
 			sc_rpc::system::Request::LocalListenAddresses(sender) => {
 				let peer_id = network_service.local_peer_id().into();
 				let p2p_proto_suffix = sc_network::multiaddr::Protocol::P2p(peer_id);
 				let addresses = network_service
 					.listen_addresses()
@@ -269,12 +265,10 @@ async fn build_system_rpc_future<
 					.collect();
 				let _ = sender.send(addresses);
 			},
-			sc_rpc::system::Request::Peers(sender) => {
-				let peers = network_service.peers_debug_info().await;
-				if let Ok(peers) = peers {
+			sc_rpc::system::Request::Peers(sender) => match sync_service.peers_info().await {
+				Ok(info) => {
 					let _ = sender.send(
-						peers
-							.into_iter()
+						info.into_iter()
 							.map(|(peer_id, p)| sc_rpc::system::PeerInfo {
 								peer_id: peer_id.to_base58(),
 								roles: format!("{:?}", p.roles),
@@ -283,9 +277,8 @@ async fn build_system_rpc_future<
 							})
 							.collect(),
 					);
-				} else {
-					break
-				}
+				},
+				Err(_) => log::error!("`SyncingEngine` shut down"),
 			},
 			sc_rpc::system::Request::NetworkState(sender) => {
 				let network_state = network_service.network_state().await;
@@ -339,21 +332,21 @@ async fn build_system_rpc_future<
 			sc_rpc::system::Request::SyncState(sender) => {
 				use sc_rpc::system::SyncState;
 
-				let best_number = client.info().best_number;
-
-				let Ok(status) = chain_sync_service.status().await else {
-					debug!("`ChainSync` has terminated, shutting down the system RPC future.");
-					return
-				};
-
-				let _ = sender.send(SyncState {
-					starting_block,
-					current_block: best_number,
-					highest_block: status.best_seen_block.unwrap_or(best_number),
-				});
+				match sync_service.best_seen_block().await {
+					Ok(best_seen_block) => {
+						let best_number = client.info().best_number;
+						let _ = sender.send(SyncState {
+							starting_block,
+							current_block: best_number,
+							highest_block: best_seen_block.unwrap_or(best_number),
+						});
+					},
+					Err(_) => log::error!("`SyncingEngine` shut down"),
+				}
 			},
 		}
 	}
+
 	debug!("`NetworkWorker` has terminated, shutting down the system RPC future.");
 }
 
diff --git a/substrate/client/service/src/metrics.rs b/substrate/client/service/src/metrics.rs
index 19c64e95366..967e3133dbe 100644
--- a/substrate/client/service/src/metrics.rs
+++ b/substrate/client/service/src/metrics.rs
@@ -23,7 +23,10 @@ use futures_timer::Delay;
 use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64};
 use sc_client_api::{ClientInfo, UsageProvider};
 use sc_network::config::Role;
-use sc_network_common::service::{NetworkStatus, NetworkStatusProvider};
+use sc_network_common::{
+	service::{NetworkStatus, NetworkStatusProvider},
+	sync::{SyncStatus, SyncStatusProvider},
+};
 use sc_telemetry::{telemetry, TelemetryHandle, SUBSTRATE_INFO};
 use sc_transaction_pool_api::{MaintainedTransactionPool, PoolStatus};
 use sc_utils::metrics::register_globals;
@@ -175,16 +178,18 @@ impl MetricsService {
 	/// Returns a never-ending `Future` that performs the
 	/// metric and telemetry updates with information from
 	/// the given sources.
-	pub async fn run<TBl, TExPool, TCl, TNet>(
+	pub async fn run<TBl, TExPool, TCl, TNet, TSync>(
 		mut self,
 		client: Arc<TCl>,
 		transactions: Arc<TExPool>,
 		network: TNet,
+		syncing: TSync,
 	) where
 		TBl: Block,
 		TCl: ProvideRuntimeApi<TBl> + UsageProvider<TBl>,
 		TExPool: MaintainedTransactionPool<Block = TBl, Hash = <TBl as Block>::Hash>,
-		TNet: NetworkStatusProvider<TBl>,
+		TNet: NetworkStatusProvider,
+		TSync: SyncStatusProvider<TBl>,
 	{
 		let mut timer = Delay::new(Duration::from_secs(0));
 		let timer_interval = Duration::from_secs(5);
@@ -196,8 +201,11 @@ impl MetricsService {
 			// Try to get the latest network information.
 			let net_status = network.status().await.ok();
 
+			// Try to get the latest syncing information.
+			let sync_status = syncing.status().await.ok();
+
 			// Update / Send the metrics.
-			self.update(&client.usage_info(), &transactions.status(), net_status);
+			self.update(&client.usage_info(), &transactions.status(), net_status, sync_status);
 
 			// Schedule next tick.
 			timer.reset(timer_interval);
@@ -208,7 +216,8 @@ impl MetricsService {
 		&mut self,
 		info: &ClientInfo<T>,
 		txpool_status: &PoolStatus,
-		net_status: Option<NetworkStatus<T>>,
+		net_status: Option<NetworkStatus>,
+		sync_status: Option<SyncStatus<T>>,
 	) {
 		let now = Instant::now();
 		let elapsed = (now - self.last_update).as_secs();
@@ -273,10 +282,12 @@ impl MetricsService {
 				"bandwidth_download" => avg_bytes_per_sec_inbound,
 				"bandwidth_upload" => avg_bytes_per_sec_outbound,
 			);
+		}
 
+		if let Some(sync_status) = sync_status {
 			if let Some(metrics) = self.metrics.as_ref() {
 				let best_seen_block: Option<u64> =
-					net_status.best_seen_block.map(|num: NumberFor<T>| {
+					sync_status.best_seen_block.map(|num: NumberFor<T>| {
 						UniqueSaturatedInto::<u64>::unique_saturated_into(num)
 					});
 
diff --git a/substrate/client/service/test/Cargo.toml b/substrate/client/service/test/Cargo.toml
index 488fe76c333..f58b8c295ed 100644
--- a/substrate/client/service/test/Cargo.toml
+++ b/substrate/client/service/test/Cargo.toml
@@ -27,6 +27,7 @@ sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/commo
 sc-executor = { version = "0.10.0-dev", path = "../../executor" }
 sc-network = { version = "0.10.0-dev", path = "../../network" }
 sc-network-common = { version = "0.10.0-dev", path = "../../network/common" }
+sc-network-sync = { version = "0.10.0-dev", path = "../../network/sync" }
 sc-service = { version = "0.10.0-dev", features = ["test-helpers"], path = "../../service" }
 sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" }
 sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" }
diff --git a/substrate/client/service/test/src/lib.rs b/substrate/client/service/test/src/lib.rs
index 655aff24230..b1a09a0620f 100644
--- a/substrate/client/service/test/src/lib.rs
+++ b/substrate/client/service/test/src/lib.rs
@@ -27,6 +27,7 @@ use sc_network_common::{
 	config::{MultiaddrWithPeerId, TransportConfig},
 	service::{NetworkBlock, NetworkPeers, NetworkStateInfo},
 };
+use sc_network_sync::SyncingService;
 use sc_service::{
 	client::Client,
 	config::{BasePath, DatabaseSource, KeystoreConfig},
@@ -79,6 +80,7 @@ pub trait TestNetNode:
 	fn network(
 		&self,
 	) -> Arc<sc_network::NetworkService<Self::Block, <Self::Block as BlockT>::Hash>>;
+	fn sync(&self) -> &Arc<SyncingService<Self::Block>>;
 	fn spawn_handle(&self) -> SpawnTaskHandle;
 }
 
@@ -87,6 +89,7 @@ pub struct TestNetComponents<TBl: BlockT, TBackend, TExec, TRtApi, TExPool> {
 	client: Arc<Client<TBackend, TExec, TBl, TRtApi>>,
 	transaction_pool: Arc<TExPool>,
 	network: Arc<sc_network::NetworkService<TBl, <TBl as BlockT>::Hash>>,
+	sync: Arc<SyncingService<TBl>>,
 }
 
 impl<TBl: BlockT, TBackend, TExec, TRtApi, TExPool>
@@ -96,9 +99,16 @@ impl<TBl: BlockT, TBackend, TExec, TRtApi, TExPool>
 		task_manager: TaskManager,
 		client: Arc<Client<TBackend, TExec, TBl, TRtApi>>,
 		network: Arc<sc_network::NetworkService<TBl, <TBl as BlockT>::Hash>>,
+		sync: Arc<SyncingService<TBl>>,
 		transaction_pool: Arc<TExPool>,
 	) -> Self {
-		Self { client, transaction_pool, network, task_manager: Arc::new(Mutex::new(task_manager)) }
+		Self {
+			client,
+			sync,
+			transaction_pool,
+			network,
+			task_manager: Arc::new(Mutex::new(task_manager)),
+		}
 	}
 }
 
@@ -111,6 +121,7 @@ impl<TBl: BlockT, TBackend, TExec, TRtApi, TExPool> Clone
 			client: self.client.clone(),
 			transaction_pool: self.transaction_pool.clone(),
 			network: self.network.clone(),
+			sync: self.sync.clone(),
 		}
 	}
 }
@@ -151,6 +162,9 @@ where
 	) -> Arc<sc_network::NetworkService<Self::Block, <Self::Block as BlockT>::Hash>> {
 		self.network.clone()
 	}
+	fn sync(&self) -> &Arc<SyncingService<Self::Block>> {
+		&self.sync
+	}
 	fn spawn_handle(&self) -> SpawnTaskHandle {
 		self.task_manager.lock().spawn_handle()
 	}
@@ -477,7 +491,7 @@ pub fn sync<G, E, Fb, F, B, ExF, U>(
 		let info = network.full_nodes[0].1.client().info();
 		network.full_nodes[0]
 			.1
-			.network()
+			.sync()
 			.new_best_block_imported(info.best_hash, info.best_number);
 		network.full_nodes[0].3.clone()
 	};
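For the test harness, every call site that builds `TestNetComponents` now threads the syncing handle through alongside the network handle, and tests reach it via the new `TestNetNode::sync()` accessor (as the `sync` test above does for `new_best_block_imported`). A sketch of the constructor side could look like the following; the node setup and the origin of `sync_service` are elided, and the helper name and variable names are hypothetical. The argument order follows the constructor shown in the hunk above.

// Illustrative sketch only: wiring the syncing service into the test
// components next to the network service.
fn into_test_components<TBl, TBackend, TExec, TRtApi, TExPool>(
    task_manager: TaskManager,
    client: Arc<Client<TBackend, TExec, TBl, TRtApi>>,
    network: Arc<sc_network::NetworkService<TBl, <TBl as BlockT>::Hash>>,
    sync_service: Arc<SyncingService<TBl>>,
    transaction_pool: Arc<TExPool>,
) -> TestNetComponents<TBl, TBackend, TExec, TRtApi, TExPool>
where
    TBl: BlockT,
{
    TestNetComponents::new(task_manager, client, network, sync_service, transaction_pool)
}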
-- 
GitLab