diff --git a/substrate/Cargo.lock b/substrate/Cargo.lock
index 6896d65f44c0a267345996a9d64d7a71d07e5507..64e8cfa82d25100e668ba92392f6a0d849564821 100644
--- a/substrate/Cargo.lock
+++ b/substrate/Cargo.lock
@@ -162,16 +162,6 @@ version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9"
 
-[[package]]
-name = "async-attributes"
-version = "1.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5"
-dependencies = [
- "quote",
- "syn",
-]
-
 [[package]]
 name = "async-channel"
 version = "1.6.1"
@@ -250,67 +240,6 @@ dependencies = [
  "event-listener",
 ]
 
-[[package]]
-name = "async-process"
-version = "1.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf2c06e30a24e8c78a3987d07f0930edf76ef35e027e7bdb063fccafdad1f60c"
-dependencies = [
- "async-io",
- "blocking",
- "cfg-if",
- "event-listener",
- "futures-lite",
- "libc",
- "once_cell",
- "signal-hook",
- "winapi",
-]
-
-[[package]]
-name = "async-std"
-version = "1.11.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "52580991739c5cdb36cde8b2a516371c0a3b70dda36d916cc08b82372916808c"
-dependencies = [
- "async-attributes",
- "async-channel",
- "async-global-executor",
- "async-io",
- "async-lock",
- "async-process",
- "crossbeam-utils",
- "futures-channel",
- "futures-core",
- "futures-io",
- "futures-lite",
- "gloo-timers",
- "kv-log-macro",
- "log",
- "memchr",
- "num_cpus",
- "once_cell",
- "pin-project-lite 0.2.6",
- "pin-utils",
- "slab",
- "wasm-bindgen-futures",
-]
-
-[[package]]
-name = "async-std-resolver"
-version = "0.22.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ba50e24d9ee0a8950d3d03fc6d0dd10aa14b5de3b101949b4e160f7fee7c723"
-dependencies = [
- "async-std",
- "async-trait",
- "futures-io",
- "futures-util",
- "pin-utils",
- "socket2",
- "trust-dns-resolver",
-]
-
 [[package]]
 name = "async-stream"
 version = "0.3.2"
@@ -381,9 +310,9 @@ dependencies = [
 
 [[package]]
 name = "autocfg"
-version = "1.0.1"
+version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
 
 [[package]]
 name = "backtrace"
@@ -2755,19 +2684,6 @@ dependencies = [
  "regex",
 ]
 
-[[package]]
-name = "gloo-timers"
-version = "0.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "47204a46aaff920a1ea58b11d03dec6f704287d27561724a4631e450654a891f"
-dependencies = [
- "futures-channel",
- "futures-core",
- "js-sys",
- "wasm-bindgen",
- "web-sys",
-]
-
 [[package]]
 name = "group"
 version = "0.12.0"
@@ -3459,15 +3375,6 @@ dependencies = [
  "substrate-wasm-builder",
 ]
 
-[[package]]
-name = "kv-log-macro"
-version = "1.0.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f"
-dependencies = [
- "log",
-]
-
 [[package]]
 name = "kvdb"
 version = "0.12.0"
@@ -3630,7 +3537,6 @@ version = "0.37.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2322c9fb40d99101def6a01612ee30500c89abbbecb6297b3cd252903a4c1720"
 dependencies = [
- "async-std-resolver",
  "futures",
  "libp2p-core",
  "log",
@@ -3694,7 +3600,6 @@ version = "0.41.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "761704e727f7d68d58d7bc2231eafae5fc1b9814de24290f126df09d4bd37a15"
 dependencies = [
- "async-io",
  "data-encoding",
  "dns-parser",
  "futures",
@@ -3705,6 +3610,7 @@ dependencies = [
  "rand 0.8.5",
  "smallvec",
  "socket2",
+ "tokio",
  "void",
 ]
 
@@ -3833,7 +3739,6 @@ version = "0.37.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9839d96761491c6d3e238e70554b856956fca0ab60feb9de2cd08eed4473fa92"
 dependencies = [
- "async-io",
  "futures",
  "futures-timer",
  "if-watch",
@@ -3841,6 +3746,7 @@ dependencies = [
  "libp2p-core",
  "log",
  "socket2",
+ "tokio",
 ]
 
 [[package]]
@@ -4039,7 +3945,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
 dependencies = [
  "cfg-if",
- "value-bag",
 ]
 
 [[package]]
@@ -4234,7 +4139,6 @@ dependencies = [
 name = "mmr-gadget"
 version = "4.0.0-dev"
 dependencies = [
- "async-std",
  "beefy-primitives",
  "futures",
  "log",
@@ -4553,7 +4457,6 @@ version = "3.0.0-dev"
 dependencies = [
  "array-bytes",
  "assert_cmd",
- "async-std",
  "clap 4.0.11",
  "clap_complete",
  "criterion",
@@ -4628,6 +4531,7 @@ dependencies = [
  "substrate-rpc-client",
  "tempfile",
  "tokio",
+ "tokio-util",
  "try-runtime-cli",
  "wait-timeout",
 ]
@@ -7817,6 +7721,7 @@ dependencies = [
  "substrate-test-runtime-client",
  "tempfile",
  "thiserror",
+ "tokio",
 ]
 
 [[package]]
@@ -7865,6 +7770,7 @@ dependencies = [
  "substrate-prometheus-endpoint",
  "substrate-test-runtime-client",
  "thiserror",
+ "tokio",
 ]
 
 [[package]]
@@ -8205,7 +8111,6 @@ version = "0.10.0-dev"
 dependencies = [
  "array-bytes",
  "assert_matches",
- "async-std",
  "async-trait",
  "asynchronous-codec",
  "bitflags",
@@ -8250,6 +8155,8 @@ dependencies = [
  "substrate-test-runtime-client",
  "tempfile",
  "thiserror",
+ "tokio",
+ "tokio-util",
  "unsigned-varint",
  "zeroize",
 ]
@@ -8310,7 +8217,6 @@ name = "sc-network-gossip"
 version = "0.10.0-dev"
 dependencies = [
  "ahash",
- "async-std",
  "futures",
  "futures-timer",
  "libp2p",
@@ -8322,6 +8228,7 @@ dependencies = [
  "sp-runtime",
  "substrate-prometheus-endpoint",
  "substrate-test-runtime-client",
+ "tokio",
  "tracing",
 ]
 
@@ -8350,7 +8257,6 @@ name = "sc-network-sync"
 version = "0.10.0-dev"
 dependencies = [
  "array-bytes",
- "async-std",
  "async-trait",
  "fork-tree",
  "futures",
@@ -8379,13 +8285,13 @@ dependencies = [
  "sp-tracing",
  "substrate-test-runtime-client",
  "thiserror",
+ "tokio",
 ]
 
 [[package]]
 name = "sc-network-test"
 version = "0.8.0"
 dependencies = [
- "async-std",
  "async-trait",
  "futures",
  "futures-timer",
@@ -8409,6 +8315,7 @@ dependencies = [
  "sp-tracing",
  "substrate-test-runtime",
  "substrate-test-runtime-client",
+ "tokio",
 ]
 
 [[package]]
@@ -8598,7 +8505,6 @@ dependencies = [
 name = "sc-service"
 version = "0.10.0-dev"
 dependencies = [
- "async-std",
  "async-trait",
  "directories",
  "exit-future",
@@ -9196,16 +9102,6 @@ version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "42a568c8f2cd051a4d283bd6eb0343ac214c1b0f1ac19f93e1175b2dee38c73d"
 
-[[package]]
-name = "signal-hook"
-version = "0.3.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ef33d6d0cd06e0840fba9985aab098c147e67e05cee14d412d3345ed14ff30ac"
-dependencies = [
- "libc",
- "signal-hook-registry",
-]
-
 [[package]]
 name = "signal-hook-registry"
 version = "1.3.0"
@@ -10762,16 +10658,16 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c"
 
 [[package]]
 name = "tokio"
-version = "1.17.0"
+version = "1.22.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2af73ac49756f3f7c01172e34a23e5d0216f6c32333757c2c61feb2bbff5a5ee"
+checksum = "d76ce4a75fb488c605c54bf610f221cea8b0dafb53333c1a67e8ee199dcd2ae3"
 dependencies = [
+ "autocfg",
  "bytes",
  "libc",
  "memchr",
  "mio",
  "num_cpus",
- "once_cell",
  "parking_lot 0.12.1",
  "pin-project-lite 0.2.6",
  "signal-hook-registry",
@@ -10828,9 +10724,9 @@ dependencies = [
 
 [[package]]
 name = "tokio-util"
-version = "0.7.1"
+version = "0.7.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0edfdeb067411dba2044da6d1cb2df793dd35add7888d73c16e3381ded401764"
+checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740"
 dependencies = [
  "bytes",
  "futures-core",
@@ -11018,6 +10914,7 @@ dependencies = [
  "smallvec",
  "thiserror",
  "tinyvec",
+ "tokio",
  "tracing",
  "url",
 ]
@@ -11037,6 +10934,7 @@ dependencies = [
  "resolv-conf",
  "smallvec",
  "thiserror",
+ "tokio",
  "tracing",
  "trust-dns-proto",
 ]
@@ -11223,16 +11121,6 @@ version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d"
 
-[[package]]
-name = "value-bag"
-version = "1.0.0-alpha.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2209b78d1249f7e6f3293657c9779fe31ced465df091bbd433a1cf88e916ec55"
-dependencies = [
- "ctor",
- "version_check",
-]
-
 [[package]]
 name = "vcpkg"
 version = "0.2.11"
diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml
index d56764f9e2040f4b71aa3238b083a8f8905e3814..114d324aa1591ac1f80ad267dd6553d68843043f 100644
--- a/substrate/bin/node/cli/Cargo.toml
+++ b/substrate/bin/node/cli/Cargo.toml
@@ -122,10 +122,10 @@ nix = "0.23"
 serde_json = "1.0"
 regex = "1.6.0"
 platforms = "2.0"
-async-std = { version = "1.11.0", features = ["attributes"] }
 soketto = "0.7.1"
 criterion = { version = "0.3.5", features = ["async_tokio"] }
-tokio = { version = "1.17.0", features = ["macros", "time", "parking_lot"] }
+tokio = { version = "1.22.0", features = ["macros", "time", "parking_lot"] }
+tokio-util = { version = "0.7.4", features = ["compat"] }
 wait-timeout = "0.2"
 substrate-rpc-client = { path = "../../../utils/frame/rpc/client" }
 pallet-timestamp = { version = "4.0.0-dev", path = "../../../frame/timestamp" }
diff --git a/substrate/bin/node/cli/tests/telemetry.rs b/substrate/bin/node/cli/tests/telemetry.rs
index bef4e4ea03048dc301d72511c21b285d376648ee..98cf0b3af32b2557095554976cc616582b59711e 100644
--- a/substrate/bin/node/cli/tests/telemetry.rs
+++ b/substrate/bin/node/cli/tests/telemetry.rs
@@ -26,7 +26,7 @@ use std::process;
 pub mod common;
 pub mod websocket_server;
 
-#[async_std::test]
+#[tokio::test]
 async fn telemetry_works() {
 	let config = websocket_server::Config {
 		capacity: 1,
@@ -38,7 +38,7 @@ async fn telemetry_works() {
 
 	let addr = server.local_addr().unwrap();
 
-	let server_task = async_std::task::spawn(async move {
+	let server_task = tokio::spawn(async move {
 		loop {
 			use websocket_server::Event;
 			match server.next_event().await {
@@ -78,7 +78,7 @@ async fn telemetry_works() {
 		.spawn()
 		.unwrap();
 
-	server_task.await;
+	server_task.await.expect("server task panicked");
 
 	assert!(substrate.try_wait().unwrap().is_none(), "the process should still be running");
 
diff --git a/substrate/bin/node/cli/tests/websocket_server.rs b/substrate/bin/node/cli/tests/websocket_server.rs
index 513497c6cddb5006785ac87a518b05b0e4e3e628..1e7450995230c734c9e8b587cd636506f8f8e48c 100644
--- a/substrate/bin/node/cli/tests/websocket_server.rs
+++ b/substrate/bin/node/cli/tests/websocket_server.rs
@@ -16,11 +16,12 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-use async_std::net::{TcpListener, TcpStream};
 use core::pin::Pin;
 use futures::prelude::*;
 use soketto::handshake::{server::Response, Server};
 use std::{io, net::SocketAddr};
+use tokio::net::{TcpListener, TcpStream};
+use tokio_util::compat::{Compat, TokioAsyncReadCompatExt};
 
 /// Configuration for a [`WsServer`].
 pub struct Config {
@@ -71,8 +72,12 @@ pub struct WsServer {
 	negotiating: stream::FuturesUnordered<
 		Pin<
 			Box<
-				dyn Future<Output = Result<Server<'static, TcpStream>, Box<dyn std::error::Error>>>
-					+ Send,
+				dyn Future<
+						Output = Result<
+							Server<'static, Compat<TcpStream>>,
+							Box<dyn std::error::Error>,
+						>,
+					> + Send,
 			>,
 		>,
 	>,
@@ -120,7 +125,7 @@ impl WsServer {
 		let pending_incoming = self.pending_incoming.take().expect("no pending socket");
 
 		self.negotiating.push(Box::pin(async move {
-			let mut server = Server::new(pending_incoming);
+			let mut server = Server::new(pending_incoming.compat());
 
 			let websocket_key = match server.receive_request().await {
 				Ok(req) => req.key(),
diff --git a/substrate/client/beefy/Cargo.toml b/substrate/client/beefy/Cargo.toml
index 999c5a298fe57d6fbe9465cdfaa20cb42f4ae35f..b6a77f00e7199337e306a6b702aaacaacd4ad89b 100644
--- a/substrate/client/beefy/Cargo.toml
+++ b/substrate/client/beefy/Cargo.toml
@@ -44,7 +44,7 @@ sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" }
 serde = "1.0.136"
 strum = { version = "0.24.1", features = ["derive"] }
 tempfile = "3.1.0"
-tokio = "1.17.0"
+tokio = "1.22.0"
 sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" }
 sc-network-test = { version = "0.8.0", path = "../network/test" }
 sp-finality-grandpa = { version = "4.0.0-dev", path = "../../primitives/finality-grandpa" }
diff --git a/substrate/client/beefy/rpc/Cargo.toml b/substrate/client/beefy/rpc/Cargo.toml
index 71220388505b82fb7dc255821bbae83db003ba82..11ad15af1983d1306864a891cdcc03f004c2320a 100644
--- a/substrate/client/beefy/rpc/Cargo.toml
+++ b/substrate/client/beefy/rpc/Cargo.toml
@@ -29,4 +29,4 @@ sc-rpc = { version = "4.0.0-dev", features = [
 	"test-helpers",
 ], path = "../../rpc" }
 substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" }
-tokio = { version = "1.17.0", features = ["macros"] }
+tokio = { version = "1.22.0", features = ["macros"] }
diff --git a/substrate/client/beefy/src/aux_schema.rs b/substrate/client/beefy/src/aux_schema.rs
index e9a2e9b9e61266bad57bb6cd3935daa19c050278..9d6a4292f32d41b20dde1cc29464a273fad1f334 100644
--- a/substrate/client/beefy/src/aux_schema.rs
+++ b/substrate/client/beefy/src/aux_schema.rs
@@ -77,6 +77,7 @@ pub(crate) mod tests {
 	use super::*;
 	use crate::tests::BeefyTestNet;
 	use sc_network_test::TestNetFactory;
+	use tokio::runtime::Runtime;
 
 	// also used in tests.rs
 	pub fn verify_persisted_version<B: BlockT, BE: Backend<B>>(backend: &BE) -> bool {
@@ -86,7 +87,8 @@ pub(crate) mod tests {
 
 	#[test]
 	fn should_load_persistent_sanity_checks() {
-		let mut net = BeefyTestNet::new(1);
+		let runtime = Runtime::new().unwrap();
+		let mut net = BeefyTestNet::new(runtime.handle().clone(), 1);
 		let backend = net.peer(0).client().as_backend();
 
 		// version not available in db -> None
diff --git a/substrate/client/beefy/src/tests.rs b/substrate/client/beefy/src/tests.rs
index 6b9cf824d906d36947e8aec4ec28dfb3f1949f61..f6ab0dd1020f1d80a7dabdd8f5d08735ba898684 100644
--- a/substrate/client/beefy/src/tests.rs
+++ b/substrate/client/beefy/src/tests.rs
@@ -47,7 +47,7 @@ use sc_consensus::{
 use sc_network::{config::RequestResponseConfig, ProtocolName};
 use sc_network_test::{
 	Block, BlockImportAdapter, FullPeerConfig, PassThroughVerifier, Peer, PeersClient,
-	PeersFullClient, TestNetFactory,
+	PeersFullClient, TestNetFactory, WithRuntime,
 };
 use sc_utils::notification::NotificationReceiver;
 use serde::{Deserialize, Serialize};
@@ -64,7 +64,10 @@ use sp_runtime::{
 };
 use std::{collections::HashMap, marker::PhantomData, sync::Arc, task::Poll};
 use substrate_test_runtime_client::{runtime::Header, ClientExt};
-use tokio::{runtime::Runtime, time::Duration};
+use tokio::{
+	runtime::{Handle, Runtime},
+	time::Duration,
+};
 
 const GENESIS_HASH: H256 = H256::zero();
 fn beefy_gossip_proto_name() -> ProtocolName {
@@ -103,14 +106,23 @@ pub(crate) struct PeerData {
 		Mutex<Option<BeefyJustifsRequestHandler<Block, PeersFullClient>>>,
 }
 
-#[derive(Default)]
 pub(crate) struct BeefyTestNet {
+	rt_handle: Handle,
 	peers: Vec<BeefyPeer>,
 }
 
+impl WithRuntime for BeefyTestNet {
+	fn with_runtime(rt_handle: Handle) -> Self {
+		BeefyTestNet { rt_handle, peers: Vec::new() }
+	}
+	fn rt_handle(&self) -> &Handle {
+		&self.rt_handle
+	}
+}
+
 impl BeefyTestNet {
-	pub(crate) fn new(n_authority: usize) -> Self {
-		let mut net = BeefyTestNet { peers: Vec::with_capacity(n_authority) };
+	pub(crate) fn new(rt_handle: Handle, n_authority: usize) -> Self {
+		let mut net = BeefyTestNet::with_runtime(rt_handle);
 
 		for i in 0..n_authority {
 			let (rx, cfg) = on_demand_justifications_protocol_config(GENESIS_HASH, None);
@@ -145,6 +157,7 @@ impl BeefyTestNet {
 		session_length: u64,
 		validator_set: &BeefyValidatorSet,
 		include_mmr_digest: bool,
+		runtime: &mut Runtime,
 	) {
 		self.peer(0).generate_blocks(count, BlockOrigin::File, |builder| {
 			let mut block = builder.build().unwrap().block;
@@ -162,7 +175,7 @@ impl BeefyTestNet {
 
 			block
 		});
-		self.block_until_sync();
+		runtime.block_on(self.wait_until_sync());
 	}
 }
 
@@ -534,14 +547,14 @@ fn beefy_finalizing_blocks() {
 	let session_len = 10;
 	let min_block_delta = 4;
 
-	let mut net = BeefyTestNet::new(2);
+	let mut net = BeefyTestNet::new(runtime.handle().clone(), 2);
 
 	let api = Arc::new(two_validators::TestApi {});
 	let beefy_peers = peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect();
 	runtime.spawn(initialize_beefy(&mut net, beefy_peers, min_block_delta));
 
 	// push 42 blocks including `AuthorityChange` digests every 10 blocks.
-	net.generate_blocks_and_sync(42, session_len, &validator_set, true);
+	net.generate_blocks_and_sync(42, session_len, &validator_set, true, &mut runtime);
 
 	let net = Arc::new(Mutex::new(net));
 
@@ -574,13 +587,13 @@ fn lagging_validators() {
 	let session_len = 30;
 	let min_block_delta = 1;
 
-	let mut net = BeefyTestNet::new(2);
+	let mut net = BeefyTestNet::new(runtime.handle().clone(), 2);
 	let api = Arc::new(two_validators::TestApi {});
 	let beefy_peers = peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect();
 	runtime.spawn(initialize_beefy(&mut net, beefy_peers, min_block_delta));
 
 	// push 62 blocks including `AuthorityChange` digests every 30 blocks.
-	net.generate_blocks_and_sync(62, session_len, &validator_set, true);
+	net.generate_blocks_and_sync(62, session_len, &validator_set, true, &mut runtime);
 
 	let net = Arc::new(Mutex::new(net));
 
@@ -657,7 +670,7 @@ fn correct_beefy_payload() {
 	let session_len = 20;
 	let min_block_delta = 2;
 
-	let mut net = BeefyTestNet::new(4);
+	let mut net = BeefyTestNet::new(runtime.handle().clone(), 4);
 
 	// Alice, Bob, Charlie will vote on good payloads
 	let good_api = Arc::new(four_validators::TestApi {});
@@ -674,7 +687,7 @@ fn correct_beefy_payload() {
 	runtime.spawn(initialize_beefy(&mut net, bad_peers, min_block_delta));
 
 	// push 12 blocks
-	net.generate_blocks_and_sync(12, session_len, &validator_set, false);
+	net.generate_blocks_and_sync(12, session_len, &validator_set, false, &mut runtime);
 
 	let net = Arc::new(Mutex::new(net));
 	let peers = peers.into_iter().enumerate();
@@ -713,13 +726,15 @@ fn correct_beefy_payload() {
 
 #[test]
 fn beefy_importing_blocks() {
-	use futures::{executor::block_on, future::poll_fn, task::Poll};
+	use futures::{future::poll_fn, task::Poll};
 	use sc_block_builder::BlockBuilderProvider;
 	use sc_client_api::BlockBackend;
 
 	sp_tracing::try_init_simple();
 
-	let mut net = BeefyTestNet::new(2);
+	let runtime = Runtime::new().unwrap();
+
+	let mut net = BeefyTestNet::new(runtime.handle().clone(), 2);
 
 	let client = net.peer(0).client().clone();
 	let (mut block_import, _, peer_data) = net.make_block_import(client.clone());
@@ -744,11 +759,15 @@ fn beefy_importing_blocks() {
 	// Import without justifications.
 	let mut justif_recv = justif_stream.subscribe();
 	assert_eq!(
-		block_on(block_import.import_block(params(block.clone(), None), HashMap::new())).unwrap(),
+		runtime
+			.block_on(block_import.import_block(params(block.clone(), None), HashMap::new()))
+			.unwrap(),
 		ImportResult::Imported(ImportedAux { is_new_best: true, ..Default::default() }),
 	);
 	assert_eq!(
-		block_on(block_import.import_block(params(block, None), HashMap::new())).unwrap(),
+		runtime
+			.block_on(block_import.import_block(params(block, None), HashMap::new()))
+			.unwrap(),
 		ImportResult::AlreadyInChain
 	);
 	// Verify no BEEFY justifications present:
@@ -762,7 +781,7 @@ fn beefy_importing_blocks() {
 			None
 		);
 		// and none sent to BEEFY worker.
-		block_on(poll_fn(move |cx| {
+		runtime.block_on(poll_fn(move |cx| {
 			assert_eq!(justif_recv.poll_next_unpin(cx), Poll::Pending);
 			Poll::Ready(())
 		}));
@@ -783,7 +802,9 @@ fn beefy_importing_blocks() {
 	let hashof2 = block.header.hash();
 	let mut justif_recv = justif_stream.subscribe();
 	assert_eq!(
-		block_on(block_import.import_block(params(block, justif), HashMap::new())).unwrap(),
+		runtime
+			.block_on(block_import.import_block(params(block, justif), HashMap::new()))
+			.unwrap(),
 		ImportResult::Imported(ImportedAux {
 			bad_justification: false,
 			is_new_best: true,
@@ -802,7 +823,7 @@ fn beefy_importing_blocks() {
 		);
 		// but sent to BEEFY worker
 		// (worker will append it to backend when all previous mandatory justifs are there as well).
-		block_on(poll_fn(move |cx| {
+		runtime.block_on(poll_fn(move |cx| {
 			match justif_recv.poll_next_unpin(cx) {
 				Poll::Ready(Some(_justification)) => (),
 				v => panic!("unexpected value: {:?}", v),
@@ -826,7 +847,9 @@ fn beefy_importing_blocks() {
 	let hashof3 = block.header.hash();
 	let mut justif_recv = justif_stream.subscribe();
 	assert_eq!(
-		block_on(block_import.import_block(params(block, justif), HashMap::new())).unwrap(),
+		runtime
+			.block_on(block_import.import_block(params(block, justif), HashMap::new()))
+			.unwrap(),
 		ImportResult::Imported(ImportedAux {
 			// Still `false` because we don't want to fail import on bad BEEFY justifications.
 			bad_justification: false,
@@ -845,7 +868,7 @@ fn beefy_importing_blocks() {
 			None
 		);
 		// and none sent to BEEFY worker.
-		block_on(poll_fn(move |cx| {
+		runtime.block_on(poll_fn(move |cx| {
 			assert_eq!(justif_recv.poll_next_unpin(cx), Poll::Pending);
 			Poll::Ready(())
 		}));
@@ -865,13 +888,13 @@ fn voter_initialization() {
 	// Should vote on all mandatory blocks no matter the `min_block_delta`.
 	let min_block_delta = 10;
 
-	let mut net = BeefyTestNet::new(2);
+	let mut net = BeefyTestNet::new(runtime.handle().clone(), 2);
 	let api = Arc::new(two_validators::TestApi {});
 	let beefy_peers = peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect();
 	runtime.spawn(initialize_beefy(&mut net, beefy_peers, min_block_delta));
 
 	// push 26 blocks
-	net.generate_blocks_and_sync(26, session_len, &validator_set, false);
+	net.generate_blocks_and_sync(26, session_len, &validator_set, false, &mut runtime);
 	let net = Arc::new(Mutex::new(net));
 
 	// Finalize multiple blocks at once to get a burst of finality notifications right from start.
@@ -897,7 +920,7 @@ fn on_demand_beefy_justification_sync() {
 	let session_len = 5;
 	let min_block_delta = 5;
 
-	let mut net = BeefyTestNet::new(4);
+	let mut net = BeefyTestNet::new(runtime.handle().clone(), 4);
 
 	// Alice, Bob, Charlie start first and make progress through voting.
 	let api = Arc::new(four_validators::TestApi {});
@@ -914,7 +937,7 @@ fn on_demand_beefy_justification_sync() {
 	let dave_index = 3;
 
 	// push 30 blocks
-	net.generate_blocks_and_sync(30, session_len, &validator_set, false);
+	net.generate_blocks_and_sync(30, session_len, &validator_set, false, &mut runtime);
 
 	let fast_peers = fast_peers.into_iter().enumerate();
 	let net = Arc::new(Mutex::new(net));
@@ -968,11 +991,12 @@ fn on_demand_beefy_justification_sync() {
 fn should_initialize_voter_at_genesis() {
 	let keys = &[BeefyKeyring::Alice];
 	let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap();
-	let mut net = BeefyTestNet::new(1);
+	let mut runtime = Runtime::new().unwrap();
+	let mut net = BeefyTestNet::new(runtime.handle().clone(), 1);
 	let backend = net.peer(0).client().as_backend();
 
 	// push 15 blocks with `AuthorityChange` digests every 10 blocks
-	net.generate_blocks_and_sync(15, 10, &validator_set, false);
+	net.generate_blocks_and_sync(15, 10, &validator_set, false, &mut runtime);
 
 	let mut finality = net.peer(0).client().as_client().finality_notification_stream().fuse();
 
@@ -1013,11 +1037,12 @@ fn should_initialize_voter_at_genesis() {
 fn should_initialize_voter_when_last_final_is_session_boundary() {
 	let keys = &[BeefyKeyring::Alice];
 	let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap();
-	let mut net = BeefyTestNet::new(1);
+	let mut runtime = Runtime::new().unwrap();
+	let mut net = BeefyTestNet::new(runtime.handle().clone(), 1);
 	let backend = net.peer(0).client().as_backend();
 
 	// push 15 blocks with `AuthorityChange` digests every 10 blocks
-	net.generate_blocks_and_sync(15, 10, &validator_set, false);
+	net.generate_blocks_and_sync(15, 10, &validator_set, false, &mut runtime);
 
 	let mut finality = net.peer(0).client().as_client().finality_notification_stream().fuse();
 
@@ -1073,11 +1098,12 @@ fn should_initialize_voter_when_last_final_is_session_boundary() {
 fn should_initialize_voter_at_latest_finalized() {
 	let keys = &[BeefyKeyring::Alice];
 	let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap();
-	let mut net = BeefyTestNet::new(1);
+	let mut runtime = Runtime::new().unwrap();
+	let mut net = BeefyTestNet::new(runtime.handle().clone(), 1);
 	let backend = net.peer(0).client().as_backend();
 
 	// push 15 blocks with `AuthorityChange` digests every 10 blocks
-	net.generate_blocks_and_sync(15, 10, &validator_set, false);
+	net.generate_blocks_and_sync(15, 10, &validator_set, false, &mut runtime);
 
 	let mut finality = net.peer(0).client().as_client().finality_notification_stream().fuse();
 
diff --git a/substrate/client/beefy/src/worker.rs b/substrate/client/beefy/src/worker.rs
index 9669939e594c1e0b392d63c856754750668407b2..c82ac65d18296e892b63c8b187f8224be6ae7616 100644
--- a/substrate/client/beefy/src/worker.rs
+++ b/substrate/client/beefy/src/worker.rs
@@ -947,7 +947,7 @@ pub(crate) mod tests {
 		BeefyRPCLinks, KnownPeers,
 	};
 	use beefy_primitives::{known_payloads, mmr::MmrRootProvider};
-	use futures::{executor::block_on, future::poll_fn, task::Poll};
+	use futures::{future::poll_fn, task::Poll};
 	use parking_lot::Mutex;
 	use sc_client_api::{Backend as BackendT, HeaderBackend};
 	use sc_network::NetworkService;
@@ -959,6 +959,7 @@ pub(crate) mod tests {
 		runtime::{Block, Digest, DigestItem, Header, H256},
 		Backend,
 	};
+	use tokio::runtime::Runtime;
 
 	impl<B: super::Block> PersistedState<B> {
 		pub fn voting_oracle(&self) -> &VoterOracle<B> {
@@ -1274,7 +1275,8 @@ pub(crate) mod tests {
 	fn keystore_vs_validator_set() {
 		let keys = &[Keyring::Alice];
 		let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap();
-		let mut net = BeefyTestNet::new(1);
+		let runtime = Runtime::new().unwrap();
+		let mut net = BeefyTestNet::new(runtime.handle().clone(), 1);
 		let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1, validator_set.clone());
 
 		// keystore doesn't contain other keys than validators'
@@ -1297,7 +1299,8 @@ pub(crate) mod tests {
 	fn should_finalize_correctly() {
 		let keys = [Keyring::Alice];
 		let validator_set = ValidatorSet::new(make_beefy_ids(&keys), 0).unwrap();
-		let mut net = BeefyTestNet::new(1);
+		let runtime = Runtime::new().unwrap();
+		let mut net = BeefyTestNet::new(runtime.handle().clone(), 1);
 		let backend = net.peer(0).client().as_backend();
 		let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1, validator_set.clone());
 		// remove default session, will manually add custom one.
@@ -1320,7 +1323,7 @@ pub(crate) mod tests {
 
 		// no 'best beefy block' or finality proofs
 		assert_eq!(worker.best_beefy_block(), 0);
-		block_on(poll_fn(move |cx| {
+		runtime.block_on(poll_fn(move |cx| {
 			assert_eq!(best_block_stream.poll_next_unpin(cx), Poll::Pending);
 			assert_eq!(finality_proof.poll_next_unpin(cx), Poll::Pending);
 			Poll::Ready(())
@@ -1341,7 +1344,7 @@ pub(crate) mod tests {
 		worker.finalize(justif.clone()).unwrap();
 		// verify block finalized
 		assert_eq!(worker.best_beefy_block(), 1);
-		block_on(poll_fn(move |cx| {
+		runtime.block_on(poll_fn(move |cx| {
 			// unknown hash -> nothing streamed
 			assert_eq!(best_block_stream.poll_next_unpin(cx), Poll::Pending);
 			// commitment streamed
@@ -1373,7 +1376,7 @@ pub(crate) mod tests {
 		assert_eq!(worker.active_rounds().unwrap().session_start(), 2);
 		// verify block finalized
 		assert_eq!(worker.best_beefy_block(), 2);
-		block_on(poll_fn(move |cx| {
+		runtime.block_on(poll_fn(move |cx| {
 			match best_block_stream.poll_next_unpin(cx) {
 				// expect Some(hash-of-block-2)
 				Poll::Ready(Some(hash)) => {
@@ -1394,7 +1397,8 @@ pub(crate) mod tests {
 	fn should_init_session() {
 		let keys = &[Keyring::Alice, Keyring::Bob];
 		let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap();
-		let mut net = BeefyTestNet::new(1);
+		let runtime = Runtime::new().unwrap();
+		let mut net = BeefyTestNet::new(runtime.handle().clone(), 1);
 		let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1, validator_set.clone());
 
 		let worker_rounds = worker.active_rounds().unwrap();
@@ -1425,7 +1429,8 @@ pub(crate) mod tests {
 	fn should_triage_votes_and_process_later() {
 		let keys = &[Keyring::Alice, Keyring::Bob];
 		let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap();
-		let mut net = BeefyTestNet::new(1);
+		let runtime = Runtime::new().unwrap();
+		let mut net = BeefyTestNet::new(runtime.handle().clone(), 1);
 		let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1, validator_set.clone());
 		// remove default session, will manually add custom one.
 		worker.persisted_state.voting_oracle.sessions.clear();
diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml
index dfb6d5c34c37ca96ac801a14212c48b75a0bdb9e..2f079a0c7c56f2eb8bb94c850844cc75980c3f20 100644
--- a/substrate/client/cli/Cargo.toml
+++ b/substrate/client/cli/Cargo.toml
@@ -29,7 +29,7 @@ serde = "1.0.136"
 serde_json = "1.0.85"
 thiserror = "1.0.30"
 tiny-bip39 = "0.8.2"
-tokio = { version = "1.17.0", features = ["signal", "rt-multi-thread", "parking_lot"] }
+tokio = { version = "1.22.0", features = ["signal", "rt-multi-thread", "parking_lot"] }
 sc-client-api = { version = "4.0.0-dev", path = "../api" }
 sc-client-db = { version = "0.10.0-dev", default-features = false, path = "../db" }
 sc-keystore = { version = "4.0.0-dev", path = "../keystore" }
diff --git a/substrate/client/consensus/aura/Cargo.toml b/substrate/client/consensus/aura/Cargo.toml
index 27faa40909713cd3458bf4398a00232151e5572f..47aee0ec084ebc3b87ac8c54f1bac0533c7a5cca 100644
--- a/substrate/client/consensus/aura/Cargo.toml
+++ b/substrate/client/consensus/aura/Cargo.toml
@@ -46,3 +46,4 @@ sp-keyring = { version = "7.0.0", path = "../../../primitives/keyring" }
 sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" }
 sp-tracing = { version = "6.0.0", path = "../../../primitives/tracing" }
 substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" }
+tokio = { version = "1.22.0" }
diff --git a/substrate/client/consensus/aura/src/lib.rs b/substrate/client/consensus/aura/src/lib.rs
index 839965a556e041902e844a9c0a7d3a3065732abf..46b9124f9077ffb40c73ea1817541c4fd3c6cb33 100644
--- a/substrate/client/consensus/aura/src/lib.rs
+++ b/substrate/client/consensus/aura/src/lib.rs
@@ -633,7 +633,6 @@ where
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use futures::executor;
 	use parking_lot::Mutex;
 	use sc_block_builder::BlockBuilderProvider;
 	use sc_client_api::BlockchainEvents;
@@ -659,6 +658,7 @@ mod tests {
 		runtime::{Header, H256},
 		TestClient,
 	};
+	use tokio::runtime::{Handle, Runtime};
 
 	const SLOT_DURATION_MS: u64 = 1000;
 
@@ -716,11 +716,20 @@ mod tests {
 	>;
 	type AuraPeer = Peer<(), PeersClient>;
 
-	#[derive(Default)]
 	pub struct AuraTestNet {
+		rt_handle: Handle,
 		peers: Vec<AuraPeer>,
 	}
 
+	impl WithRuntime for AuraTestNet {
+		fn with_runtime(rt_handle: Handle) -> Self {
+			AuraTestNet { rt_handle, peers: Vec::new() }
+		}
+		fn rt_handle(&self) -> &Handle {
+			&self.rt_handle
+		}
+	}
+
 	impl TestNetFactory for AuraTestNet {
 		type Verifier = AuraVerifier;
 		type PeerData = ();
@@ -772,7 +781,8 @@ mod tests {
 	#[test]
 	fn authoring_blocks() {
 		sp_tracing::try_init_simple();
-		let net = AuraTestNet::new(3);
+		let runtime = Runtime::new().unwrap();
+		let net = AuraTestNet::new(runtime.handle().clone(), 3);
 
 		let peers = &[(0, Keyring::Alice), (1, Keyring::Bob), (2, Keyring::Charlie)];
 
@@ -838,7 +848,7 @@ mod tests {
 			);
 		}
 
-		executor::block_on(future::select(
+		runtime.block_on(future::select(
 			future::poll_fn(move |cx| {
 				net.lock().poll(cx);
 				Poll::<()>::Pending
@@ -865,7 +875,8 @@ mod tests {
 
 	#[test]
 	fn current_node_authority_should_claim_slot() {
-		let net = AuraTestNet::new(4);
+		let runtime = Runtime::new().unwrap();
+		let net = AuraTestNet::new(runtime.handle().clone(), 4);
 
 		let mut authorities = vec![
 			Keyring::Alice.public().into(),
@@ -909,19 +920,20 @@ mod tests {
 			Default::default(),
 			Default::default(),
 		);
-		assert!(executor::block_on(worker.claim_slot(&head, 0.into(), &authorities)).is_none());
-		assert!(executor::block_on(worker.claim_slot(&head, 1.into(), &authorities)).is_none());
-		assert!(executor::block_on(worker.claim_slot(&head, 2.into(), &authorities)).is_none());
-		assert!(executor::block_on(worker.claim_slot(&head, 3.into(), &authorities)).is_some());
-		assert!(executor::block_on(worker.claim_slot(&head, 4.into(), &authorities)).is_none());
-		assert!(executor::block_on(worker.claim_slot(&head, 5.into(), &authorities)).is_none());
-		assert!(executor::block_on(worker.claim_slot(&head, 6.into(), &authorities)).is_none());
-		assert!(executor::block_on(worker.claim_slot(&head, 7.into(), &authorities)).is_some());
+		assert!(runtime.block_on(worker.claim_slot(&head, 0.into(), &authorities)).is_none());
+		assert!(runtime.block_on(worker.claim_slot(&head, 1.into(), &authorities)).is_none());
+		assert!(runtime.block_on(worker.claim_slot(&head, 2.into(), &authorities)).is_none());
+		assert!(runtime.block_on(worker.claim_slot(&head, 3.into(), &authorities)).is_some());
+		assert!(runtime.block_on(worker.claim_slot(&head, 4.into(), &authorities)).is_none());
+		assert!(runtime.block_on(worker.claim_slot(&head, 5.into(), &authorities)).is_none());
+		assert!(runtime.block_on(worker.claim_slot(&head, 6.into(), &authorities)).is_none());
+		assert!(runtime.block_on(worker.claim_slot(&head, 7.into(), &authorities)).is_some());
 	}
 
 	#[test]
 	fn on_slot_returns_correct_block() {
-		let net = AuraTestNet::new(4);
+		let runtime = Runtime::new().unwrap();
+		let net = AuraTestNet::new(runtime.handle().clone(), 4);
 
 		let keystore_path = tempfile::tempdir().expect("Creates keystore path");
 		let keystore = LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore.");
@@ -957,15 +969,16 @@ mod tests {
 
 		let head = client.header(&BlockId::Number(0)).unwrap().unwrap();
 
-		let res = executor::block_on(worker.on_slot(SlotInfo {
-			slot: 0.into(),
-			ends_at: Instant::now() + Duration::from_secs(100),
-			create_inherent_data: Box::new(()),
-			duration: Duration::from_millis(1000),
-			chain_head: head,
-			block_size_limit: None,
-		}))
-		.unwrap();
+		let res = runtime
+			.block_on(worker.on_slot(SlotInfo {
+				slot: 0.into(),
+				ends_at: Instant::now() + Duration::from_secs(100),
+				create_inherent_data: Box::new(()),
+				duration: Duration::from_millis(1000),
+				chain_head: head,
+				block_size_limit: None,
+			}))
+			.unwrap();
 
 		// The returned block should be imported and we should be able to get its header by now.
 		assert!(client.header(&BlockId::Hash(res.block.hash())).unwrap().is_some());
diff --git a/substrate/client/consensus/babe/Cargo.toml b/substrate/client/consensus/babe/Cargo.toml
index 01d7d897b4ba4e2d64432413dd227464f28aa790..c39802ba237ae752cfdaffab527b693734985718 100644
--- a/substrate/client/consensus/babe/Cargo.toml
+++ b/substrate/client/consensus/babe/Cargo.toml
@@ -58,3 +58,4 @@ sc-network-test = { version = "0.8.0", path = "../../network/test" }
 sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" }
 sp-tracing = { version = "6.0.0", path = "../../../primitives/tracing" }
 substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" }
+tokio = "1.22.0"
diff --git a/substrate/client/consensus/babe/rpc/Cargo.toml b/substrate/client/consensus/babe/rpc/Cargo.toml
index 8e76b1400506350256fc8c9d17259b9579761219..d0a65a3fc31936c62d2fe473fb4ba9b31cf19fed 100644
--- a/substrate/client/consensus/babe/rpc/Cargo.toml
+++ b/substrate/client/consensus/babe/rpc/Cargo.toml
@@ -32,7 +32,7 @@ sp-runtime = { version = "7.0.0", path = "../../../../primitives/runtime" }
 [dev-dependencies]
 serde_json = "1.0.85"
 tempfile = "3.1.0"
-tokio = "1.17.0"
+tokio = "1.22.0"
 sc-consensus = { version = "0.10.0-dev", path = "../../../consensus/common" }
 sc-keystore = { version = "4.0.0-dev", path = "../../../keystore" }
 sp-keyring = { version = "7.0.0", path = "../../../../primitives/keyring" }
diff --git a/substrate/client/consensus/babe/src/tests.rs b/substrate/client/consensus/babe/src/tests.rs
index 8bef1b38b929d6ebd4c474e4ca8236de24b66cff..7f51eb2c51977146e8d556803d6a51936934dc45 100644
--- a/substrate/client/consensus/babe/src/tests.rs
+++ b/substrate/client/consensus/babe/src/tests.rs
@@ -20,7 +20,6 @@
 
 use super::*;
 use authorship::claim_slot;
-use futures::executor::block_on;
 use log::debug;
 use rand_chacha::{
 	rand_core::{RngCore, SeedableRng},
@@ -50,6 +49,7 @@ use sp_runtime::{
 };
 use sp_timestamp::Timestamp;
 use std::{cell::RefCell, task::Poll, time::Duration};
+use tokio::runtime::{Handle, Runtime};
 
 type Item = DigestItem;
 
@@ -227,11 +227,20 @@ where
 
 type BabePeer = Peer<Option<PeerData>, BabeBlockImport>;
 
-#[derive(Default)]
 pub struct BabeTestNet {
+	rt_handle: Handle,
 	peers: Vec<BabePeer>,
 }
 
+impl WithRuntime for BabeTestNet {
+	fn with_runtime(rt_handle: Handle) -> Self {
+		BabeTestNet { rt_handle, peers: Vec::new() }
+	}
+	fn rt_handle(&self) -> &Handle {
+		&self.rt_handle
+	}
+}
+
 type TestHeader = <TestBlock as BlockT>::Header;
 
 type TestSelectChain =
@@ -361,7 +370,8 @@ impl TestNetFactory for BabeTestNet {
 #[should_panic]
 fn rejects_empty_block() {
 	sp_tracing::try_init_simple();
-	let mut net = BabeTestNet::new(3);
+	let runtime = Runtime::new().unwrap();
+	let mut net = BabeTestNet::new(runtime.handle().clone(), 3);
 	let block_builder = |builder: BlockBuilder<_, _, _>| builder.build().unwrap().block;
 	net.mut_peers(|peer| {
 		peer[0].generate_blocks(1, BlockOrigin::NetworkInitialSync, block_builder);
@@ -380,7 +390,9 @@ fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static
 	let mutator = Arc::new(mutator) as Mutator;
 
 	MUTATOR.with(|m| *m.borrow_mut() = mutator.clone());
-	let net = BabeTestNet::new(3);
+
+	let runtime = Runtime::new().unwrap();
+	let net = BabeTestNet::new(runtime.handle().clone(), 3);
 
 	let peers = [Sr25519Keyring::Alice, Sr25519Keyring::Bob, Sr25519Keyring::Charlie];
 
@@ -457,7 +469,7 @@ fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static
 			.expect("Starts babe"),
 		);
 	}
-	block_on(future::select(
+	runtime.block_on(future::select(
 		futures::future::poll_fn(move |cx| {
 			let mut net = net.lock();
 			net.poll(cx);
@@ -594,8 +606,9 @@ fn propose_and_import_block<Transaction: Send + 'static>(
 	slot: Option<Slot>,
 	proposer_factory: &mut DummyFactory,
 	block_import: &mut BoxBlockImport<TestBlock, Transaction>,
+	runtime: &Runtime,
 ) -> Hash {
-	let mut proposer = block_on(proposer_factory.init(parent)).unwrap();
+	let mut proposer = runtime.block_on(proposer_factory.init(parent)).unwrap();
 
 	let slot = slot.unwrap_or_else(|| {
 		let parent_pre_digest = find_pre_digest::<TestBlock>(parent).unwrap();
@@ -611,7 +624,7 @@ fn propose_and_import_block<Transaction: Send + 'static>(
 
 	let parent_hash = parent.hash();
 
-	let mut block = block_on(proposer.propose_with(pre_digest)).unwrap().block;
+	let mut block = runtime.block_on(proposer.propose_with(pre_digest)).unwrap().block;
 
 	let epoch_descriptor = proposer_factory
 		.epoch_changes
@@ -647,7 +660,8 @@ fn propose_and_import_block<Transaction: Send + 'static>(
 	import
 		.insert_intermediate(INTERMEDIATE_KEY, BabeIntermediate::<TestBlock> { epoch_descriptor });
 	import.fork_choice = Some(ForkChoiceStrategy::LongestChain);
-	let import_result = block_on(block_import.import_block(import, Default::default())).unwrap();
+	let import_result =
+		runtime.block_on(block_import.import_block(import, Default::default())).unwrap();
 
 	match import_result {
 		ImportResult::Imported(_) => {},
@@ -666,13 +680,14 @@ fn propose_and_import_blocks<Transaction: Send + 'static>(
 	block_import: &mut BoxBlockImport<TestBlock, Transaction>,
 	parent_id: BlockId<TestBlock>,
 	n: usize,
+	runtime: &Runtime,
 ) -> Vec<Hash> {
 	let mut hashes = Vec::with_capacity(n);
 	let mut parent_header = client.header(&parent_id).unwrap().unwrap();
 
 	for _ in 0..n {
 		let block_hash =
-			propose_and_import_block(&parent_header, None, proposer_factory, block_import);
+			propose_and_import_block(&parent_header, None, proposer_factory, block_import, runtime);
 		hashes.push(block_hash);
 		parent_header = client.header(&BlockId::Hash(block_hash)).unwrap().unwrap();
 	}
@@ -682,7 +697,8 @@ fn propose_and_import_blocks<Transaction: Send + 'static>(
 
 #[test]
 fn importing_block_one_sets_genesis_epoch() {
-	let mut net = BabeTestNet::new(1);
+	let runtime = Runtime::new().unwrap();
+	let mut net = BabeTestNet::new(runtime.handle().clone(), 1);
 
 	let peer = net.peer(0);
 	let data = peer.data.as_ref().expect("babe link set up during initialization");
@@ -704,6 +720,7 @@ fn importing_block_one_sets_genesis_epoch() {
 		Some(999.into()),
 		&mut proposer_factory,
 		&mut block_import,
+		&runtime,
 	);
 
 	let genesis_epoch = Epoch::genesis(&data.link.config, 999.into());
@@ -721,7 +738,8 @@ fn importing_block_one_sets_genesis_epoch() {
 
 #[test]
 fn revert_prunes_epoch_changes_and_removes_weights() {
-	let mut net = BabeTestNet::new(1);
+	let runtime = Runtime::new().unwrap();
+	let mut net = BabeTestNet::new(runtime.handle().clone(), 1);
 
 	let peer = net.peer(0);
 	let data = peer.data.as_ref().expect("babe link set up during initialization");
@@ -739,7 +757,14 @@ fn revert_prunes_epoch_changes_and_removes_weights() {
 	};
 
 	let mut propose_and_import_blocks_wrap = |parent_id, n| {
-		propose_and_import_blocks(&client, &mut proposer_factory, &mut block_import, parent_id, n)
+		propose_and_import_blocks(
+			&client,
+			&mut proposer_factory,
+			&mut block_import,
+			parent_id,
+			n,
+			&runtime,
+		)
 	};
 
 	// Test scenario.
@@ -801,7 +826,8 @@ fn revert_prunes_epoch_changes_and_removes_weights() {
 
 #[test]
 fn revert_not_allowed_for_finalized() {
-	let mut net = BabeTestNet::new(1);
+	let runtime = Runtime::new().unwrap();
+	let mut net = BabeTestNet::new(runtime.handle().clone(), 1);
 
 	let peer = net.peer(0);
 	let data = peer.data.as_ref().expect("babe link set up during initialization");
@@ -818,7 +844,14 @@ fn revert_not_allowed_for_finalized() {
 	};
 
 	let mut propose_and_import_blocks_wrap = |parent_id, n| {
-		propose_and_import_blocks(&client, &mut proposer_factory, &mut block_import, parent_id, n)
+		propose_and_import_blocks(
+			&client,
+			&mut proposer_factory,
+			&mut block_import,
+			parent_id,
+			n,
+			&runtime,
+		)
 	};
 
 	let canon = propose_and_import_blocks_wrap(BlockId::Number(0), 3);
@@ -839,7 +872,8 @@ fn revert_not_allowed_for_finalized() {
 
 #[test]
 fn importing_epoch_change_block_prunes_tree() {
-	let mut net = BabeTestNet::new(1);
+	let runtime = Runtime::new().unwrap();
+	let mut net = BabeTestNet::new(runtime.handle().clone(), 1);
 
 	let peer = net.peer(0);
 	let data = peer.data.as_ref().expect("babe link set up during initialization");
@@ -856,7 +890,14 @@ fn importing_epoch_change_block_prunes_tree() {
 	};
 
 	let mut propose_and_import_blocks_wrap = |parent_id, n| {
-		propose_and_import_blocks(&client, &mut proposer_factory, &mut block_import, parent_id, n)
+		propose_and_import_blocks(
+			&client,
+			&mut proposer_factory,
+			&mut block_import,
+			parent_id,
+			n,
+			&runtime,
+		)
 	};
 
 	// This is the block tree that we're going to use in this test. Each node
@@ -916,7 +957,8 @@ fn importing_epoch_change_block_prunes_tree() {
 #[test]
 #[should_panic]
 fn verify_slots_are_strictly_increasing() {
-	let mut net = BabeTestNet::new(1);
+	let runtime = Runtime::new().unwrap();
+	let mut net = BabeTestNet::new(runtime.handle().clone(), 1);
 
 	let peer = net.peer(0);
 	let data = peer.data.as_ref().expect("babe link set up during initialization");
@@ -939,13 +981,20 @@ fn verify_slots_are_strictly_increasing() {
 		Some(999.into()),
 		&mut proposer_factory,
 		&mut block_import,
+		&runtime,
 	);
 
 	let b1 = client.header(&BlockId::Hash(b1)).unwrap().unwrap();
 
 	// we should fail to import this block since the slot number didn't increase.
 	// we will panic due to the `PanickingBlockImport` defined above.
-	propose_and_import_block(&b1, Some(999.into()), &mut proposer_factory, &mut block_import);
+	propose_and_import_block(
+		&b1,
+		Some(999.into()),
+		&mut proposer_factory,
+		&mut block_import,
+		&runtime,
+	);
 }
 
 #[test]
@@ -980,7 +1029,8 @@ fn babe_transcript_generation_match() {
 
 #[test]
 fn obsolete_blocks_aux_data_cleanup() {
-	let mut net = BabeTestNet::new(1);
+	let runtime = Runtime::new().unwrap();
+	let mut net = BabeTestNet::new(runtime.handle().clone(), 1);
 
 	let peer = net.peer(0);
 	let data = peer.data.as_ref().expect("babe link set up during initialization");
@@ -1003,7 +1053,14 @@ fn obsolete_blocks_aux_data_cleanup() {
 	let mut block_import = data.block_import.lock().take().expect("import set up during init");
 
 	let mut propose_and_import_blocks_wrap = |parent_id, n| {
-		propose_and_import_blocks(&client, &mut proposer_factory, &mut block_import, parent_id, n)
+		propose_and_import_blocks(
+			&client,
+			&mut proposer_factory,
+			&mut block_import,
+			parent_id,
+			n,
+			&runtime,
+		)
 	};
 
 	let aux_data_check = |hashes: &[Hash], expected: bool| {
diff --git a/substrate/client/consensus/manual-seal/Cargo.toml b/substrate/client/consensus/manual-seal/Cargo.toml
index a066de75f7defd5e0fbc9f8fa6b7d04d2491a5c4..cf151424c2ee5419a1e861ac65373ce62c81c14f 100644
--- a/substrate/client/consensus/manual-seal/Cargo.toml
+++ b/substrate/client/consensus/manual-seal/Cargo.toml
@@ -42,7 +42,7 @@ sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" }
 sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" }
 
 [dev-dependencies]
-tokio = { version = "1.17.0", features = ["rt-multi-thread", "macros"] }
+tokio = { version = "1.22.0", features = ["rt-multi-thread", "macros"] }
 sc-basic-authorship = { version = "0.10.0-dev", path = "../../basic-authorship" }
 substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" }
 substrate-test-runtime-transaction-pool = { version = "2.0.0", path = "../../../test-utils/runtime/transaction-pool" }
diff --git a/substrate/client/finality-grandpa/Cargo.toml b/substrate/client/finality-grandpa/Cargo.toml
index b14d40659783b50a517a3128528b8cf789180285..0d5b8eaca5becdb4a961d7065c25eacf6a07eac7 100644
--- a/substrate/client/finality-grandpa/Cargo.toml
+++ b/substrate/client/finality-grandpa/Cargo.toml
@@ -53,7 +53,7 @@ sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" }
 assert_matches = "1.3.0"
 finality-grandpa = { version = "0.16.0", features = ["derive-codec", "test-helpers"] }
 serde = "1.0.136"
-tokio = "1.17.0"
+tokio = "1.22.0"
 sc-network = { version = "0.10.0-dev", path = "../network" }
 sc-network-test = { version = "0.8.0", path = "../network/test" }
 sp-keyring = { version = "7.0.0", path = "../../primitives/keyring" }
diff --git a/substrate/client/finality-grandpa/rpc/Cargo.toml b/substrate/client/finality-grandpa/rpc/Cargo.toml
index 2d8a527ccef8530c65f8174125ea95e1077536b1..7be77c122bab2cd9a0079276b5eb4e8f8ff61ab8 100644
--- a/substrate/client/finality-grandpa/rpc/Cargo.toml
+++ b/substrate/client/finality-grandpa/rpc/Cargo.toml
@@ -34,4 +34,4 @@ sp-core = { version = "7.0.0", path = "../../../primitives/core" }
 sp-finality-grandpa = { version = "4.0.0-dev", path = "../../../primitives/finality-grandpa" }
 sp-keyring = { version = "7.0.0", path = "../../../primitives/keyring" }
 substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" }
-tokio = { version = "1.17.0", features = ["macros"] }
+tokio = { version = "1.22.0", features = ["macros"] }
diff --git a/substrate/client/finality-grandpa/src/tests.rs b/substrate/client/finality-grandpa/src/tests.rs
index 93d20110ff5af610858ff18b889128fcf50abb35..6b577fd7129302e346a461484710271ef07717bc 100644
--- a/substrate/client/finality-grandpa/src/tests.rs
+++ b/substrate/client/finality-grandpa/src/tests.rs
@@ -21,7 +21,6 @@
 use super::*;
 use assert_matches::assert_matches;
 use environment::HasVoted;
-use futures::executor::block_on;
 use futures_timer::Delay;
 use parking_lot::{Mutex, RwLock};
 use sc_consensus::{
@@ -31,7 +30,7 @@ use sc_consensus::{
 use sc_network::config::Role;
 use sc_network_test::{
 	Block, BlockImportAdapter, FullPeerConfig, Hash, PassThroughVerifier, Peer, PeersClient,
-	PeersFullClient, TestClient, TestNetFactory,
+	PeersFullClient, TestClient, TestNetFactory, WithRuntime,
 };
 use sp_api::{ApiRef, ProvideRuntimeApi};
 use sp_blockchain::Result;
@@ -72,16 +71,26 @@ type GrandpaBlockImport = crate::GrandpaBlockImport<
 	LongestChain<substrate_test_runtime_client::Backend, Block>,
 >;
 
-#[derive(Default)]
 struct GrandpaTestNet {
 	peers: Vec<GrandpaPeer>,
 	test_config: TestApi,
+	rt_handle: Handle,
+}
+
+impl WithRuntime for GrandpaTestNet {
+	fn with_runtime(rt_handle: Handle) -> Self {
+		GrandpaTestNet { peers: Vec::new(), test_config: TestApi::default(), rt_handle }
+	}
+	fn rt_handle(&self) -> &Handle {
+		&self.rt_handle
+	}
 }
 
 impl GrandpaTestNet {
-	fn new(test_config: TestApi, n_authority: usize, n_full: usize) -> Self {
-		let mut net =
-			GrandpaTestNet { peers: Vec::with_capacity(n_authority + n_full), test_config };
+	fn new(test_config: TestApi, n_authority: usize, n_full: usize, rt_handle: Handle) -> Self {
+		let mut net = GrandpaTestNet::with_runtime(rt_handle);
+		net.peers = Vec::with_capacity(n_authority + n_full);
+		net.test_config = test_config;
 
 		for _ in 0..n_authority {
 			net.add_authority_peer();
@@ -359,10 +368,10 @@ fn finalize_3_voters_no_observers() {
 	let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie];
 	let voters = make_ids(peers);
 
-	let mut net = GrandpaTestNet::new(TestApi::new(voters), 3, 0);
+	let mut net = GrandpaTestNet::new(TestApi::new(voters), 3, 0, runtime.handle().clone());
 	runtime.spawn(initialize_grandpa(&mut net, peers));
 	net.peer(0).push_blocks(20, false);
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 	let hashof20 = net.peer(0).client().info().best_hash;
 
 	for i in 0..3 {
@@ -387,7 +396,7 @@ fn finalize_3_voters_1_full_observer() {
 	let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie];
 	let voters = make_ids(peers);
 
-	let mut net = GrandpaTestNet::new(TestApi::new(voters), 3, 1);
+	let mut net = GrandpaTestNet::new(TestApi::new(voters), 3, 1, runtime.handle().clone());
 	runtime.spawn(initialize_grandpa(&mut net, peers));
 
 	runtime.spawn({
@@ -469,9 +478,8 @@ fn transition_3_voters_twice_1_full_observer() {
 	let genesis_voters = make_ids(peers_a);
 
 	let api = TestApi::new(genesis_voters);
-	let net = Arc::new(Mutex::new(GrandpaTestNet::new(api, 8, 1)));
-
 	let mut runtime = Runtime::new().unwrap();
+	let net = Arc::new(Mutex::new(GrandpaTestNet::new(api, 8, 1, runtime.handle().clone())));
 
 	let mut voters = Vec::new();
 	for (peer_id, local_key) in all_peers.clone().into_iter().enumerate() {
@@ -508,7 +516,7 @@ fn transition_3_voters_twice_1_full_observer() {
 	}
 
 	net.lock().peer(0).push_blocks(1, false);
-	net.lock().block_until_sync();
+	runtime.block_on(net.lock().wait_until_sync());
 
 	for (i, peer) in net.lock().peers().iter().enumerate() {
 		let full_client = peer.client().as_client();
@@ -608,10 +616,10 @@ fn justification_is_generated_periodically() {
 	let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie];
 	let voters = make_ids(peers);
 
-	let mut net = GrandpaTestNet::new(TestApi::new(voters), 3, 0);
+	let mut net = GrandpaTestNet::new(TestApi::new(voters), 3, 0, runtime.handle().clone());
 	runtime.spawn(initialize_grandpa(&mut net, peers));
 	net.peer(0).push_blocks(32, false);
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 
 	let hashof32 = net.peer(0).client().info().best_hash;
 
@@ -634,7 +642,7 @@ fn sync_justifications_on_change_blocks() {
 
 	// 4 peers, 3 of them are authorities and participate in grandpa
 	let api = TestApi::new(voters);
-	let mut net = GrandpaTestNet::new(api, 3, 1);
+	let mut net = GrandpaTestNet::new(api, 3, 1, runtime.handle().clone());
 	let voters = initialize_grandpa(&mut net, peers_a);
 
 	// add 20 blocks
@@ -652,7 +660,7 @@ fn sync_justifications_on_change_blocks() {
 
 	// add more blocks on top of it (until we have 25)
 	net.peer(0).push_blocks(4, false);
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 
 	for i in 0..4 {
 		assert_eq!(net.peer(i).client().info().best_number, 25, "Peer #{} failed to sync", i);
@@ -702,7 +710,7 @@ fn finalizes_multiple_pending_changes_in_order() {
 	// but all of them will be part of the voter set eventually so they should be
 	// all added to the network as authorities
 	let api = TestApi::new(genesis_voters);
-	let mut net = GrandpaTestNet::new(api, 6, 0);
+	let mut net = GrandpaTestNet::new(api, 6, 0, runtime.handle().clone());
 	runtime.spawn(initialize_grandpa(&mut net, all_peers));
 
 	// add 20 blocks
@@ -734,7 +742,7 @@ fn finalizes_multiple_pending_changes_in_order() {
 	// add more blocks on top of it (until we have 30)
 	net.peer(0).push_blocks(4, false);
 
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 
 	// all peers imported both change blocks
 	for i in 0..6 {
@@ -761,7 +769,7 @@ fn force_change_to_new_set() {
 	let api = TestApi::new(make_ids(genesis_authorities));
 
 	let voters = make_ids(peers_a);
-	let mut net = GrandpaTestNet::new(api, 3, 0);
+	let mut net = GrandpaTestNet::new(api, 3, 0, runtime.handle().clone());
 	let voters_future = initialize_grandpa(&mut net, peers_a);
 	let net = Arc::new(Mutex::new(net));
 
@@ -785,7 +793,7 @@ fn force_change_to_new_set() {
 	});
 
 	net.lock().peer(0).push_blocks(25, false);
-	net.lock().block_until_sync();
+	runtime.block_on(net.lock().wait_until_sync());
 
 	for (i, peer) in net.lock().peers().iter().enumerate() {
 		assert_eq!(peer.client().info().best_number, 26, "Peer #{} failed to sync", i);
@@ -811,7 +819,8 @@ fn allows_reimporting_change_blocks() {
 	let peers_b = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob];
 	let voters = make_ids(peers_a);
 	let api = TestApi::new(voters);
-	let mut net = GrandpaTestNet::new(api.clone(), 3, 0);
+	let runtime = Runtime::new().unwrap();
+	let mut net = GrandpaTestNet::new(api.clone(), 3, 0, runtime.handle().clone());
 
 	let client = net.peer(0).client().clone();
 	let (mut block_import, ..) = net.make_block_import(client.clone());
@@ -836,7 +845,7 @@ fn allows_reimporting_change_blocks() {
 	};
 
 	assert_eq!(
-		block_on(block_import.import_block(block(), HashMap::new())).unwrap(),
+		runtime.block_on(block_import.import_block(block(), HashMap::new())).unwrap(),
 		ImportResult::Imported(ImportedAux {
 			needs_justification: true,
 			clear_justification_requests: false,
@@ -847,7 +856,7 @@ fn allows_reimporting_change_blocks() {
 	);
 
 	assert_eq!(
-		block_on(block_import.import_block(block(), HashMap::new())).unwrap(),
+		runtime.block_on(block_import.import_block(block(), HashMap::new())).unwrap(),
 		ImportResult::AlreadyInChain
 	);
 }
@@ -858,7 +867,8 @@ fn test_bad_justification() {
 	let peers_b = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob];
 	let voters = make_ids(peers_a);
 	let api = TestApi::new(voters);
-	let mut net = GrandpaTestNet::new(api.clone(), 3, 0);
+	let runtime = Runtime::new().unwrap();
+	let mut net = GrandpaTestNet::new(api.clone(), 3, 0, runtime.handle().clone());
 
 	let client = net.peer(0).client().clone();
 	let (mut block_import, ..) = net.make_block_import(client.clone());
@@ -885,7 +895,7 @@ fn test_bad_justification() {
 	};
 
 	assert_eq!(
-		block_on(block_import.import_block(block(), HashMap::new())).unwrap(),
+		runtime.block_on(block_import.import_block(block(), HashMap::new())).unwrap(),
 		ImportResult::Imported(ImportedAux {
 			needs_justification: true,
 			clear_justification_requests: false,
@@ -896,7 +906,7 @@ fn test_bad_justification() {
 	);
 
 	assert_eq!(
-		block_on(block_import.import_block(block(), HashMap::new())).unwrap(),
+		runtime.block_on(block_import.import_block(block(), HashMap::new())).unwrap(),
 		ImportResult::AlreadyInChain
 	);
 }
@@ -915,7 +925,7 @@ fn voter_persists_its_votes() {
 	let voters = make_ids(peers);
 
 	// alice has a chain with 20 blocks
-	let mut net = GrandpaTestNet::new(TestApi::new(voters.clone()), 2, 0);
+	let mut net = GrandpaTestNet::new(TestApi::new(voters.clone()), 2, 0, runtime.handle().clone());
 
 	// create the communication layer for bob, but don't start any
 	// voter. instead we'll listen for the prevote that alice casts
@@ -1035,7 +1045,7 @@ fn voter_persists_its_votes() {
 	runtime.spawn(alice_voter1);
 
 	net.peer(0).push_blocks(20, false);
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 
 	assert_eq!(net.peer(0).client().info().best_number, 20, "Peer #{} failed to sync", 0);
 
@@ -1164,7 +1174,7 @@ fn finalize_3_voters_1_light_observer() {
 	let authorities = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie];
 	let voters = make_ids(authorities);
 
-	let mut net = GrandpaTestNet::new(TestApi::new(voters), 3, 1);
+	let mut net = GrandpaTestNet::new(TestApi::new(voters), 3, 1, runtime.handle().clone());
 	let voters = initialize_grandpa(&mut net, authorities);
 	let observer = observer::run_grandpa_observer(
 		Config {
@@ -1182,7 +1192,7 @@ fn finalize_3_voters_1_light_observer() {
 	)
 	.unwrap();
 	net.peer(0).push_blocks(20, false);
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 
 	for i in 0..4 {
 		assert_eq!(net.peer(i).client().info().best_number, 20, "Peer #{} failed to sync", i);
@@ -1203,7 +1213,7 @@ fn voter_catches_up_to_latest_round_when_behind() {
 	let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob];
 	let voters = make_ids(peers);
 
-	let net = GrandpaTestNet::new(TestApi::new(voters), 2, 0);
+	let net = GrandpaTestNet::new(TestApi::new(voters), 2, 0, runtime.handle().clone());
 
 	let net = Arc::new(Mutex::new(net));
 	let mut finality_notifications = Vec::new();
@@ -1259,7 +1269,7 @@ fn voter_catches_up_to_latest_round_when_behind() {
 	}
 
 	net.lock().peer(0).push_blocks(50, false);
-	net.lock().block_until_sync();
+	runtime.block_on(net.lock().wait_until_sync());
 
 	// wait for them to finalize block 50. since they'll vote on 3/4 of the
 	// unfinalized chain it will take at least 4 rounds to do it.
@@ -1367,7 +1377,8 @@ fn grandpa_environment_respects_voting_rules() {
 	let peers = &[Ed25519Keyring::Alice];
 	let voters = make_ids(peers);
 
-	let mut net = GrandpaTestNet::new(TestApi::new(voters), 1, 0);
+	let runtime = Runtime::new().unwrap();
+	let mut net = GrandpaTestNet::new(TestApi::new(voters), 1, 0, runtime.handle().clone());
 	let peer = net.peer(0);
 	let network_service = peer.network_service().clone();
 	let link = peer.data.lock().take().unwrap();
@@ -1397,7 +1408,8 @@ fn grandpa_environment_respects_voting_rules() {
 
 	// the unrestricted environment should just return the best block
 	assert_eq!(
-		block_on(unrestricted_env.best_chain_containing(peer.client().info().finalized_hash))
+		runtime
+			.block_on(unrestricted_env.best_chain_containing(peer.client().info().finalized_hash))
 			.unwrap()
 			.unwrap()
 			.1,
@@ -1407,7 +1419,8 @@ fn grandpa_environment_respects_voting_rules() {
 	// both the other environments should return block 16, which is 3/4 of the
 	// way in the unfinalized chain
 	assert_eq!(
-		block_on(three_quarters_env.best_chain_containing(peer.client().info().finalized_hash))
+		runtime
+			.block_on(three_quarters_env.best_chain_containing(peer.client().info().finalized_hash))
 			.unwrap()
 			.unwrap()
 			.1,
@@ -1415,7 +1428,8 @@ fn grandpa_environment_respects_voting_rules() {
 	);
 
 	assert_eq!(
-		block_on(default_env.best_chain_containing(peer.client().info().finalized_hash))
+		runtime
+			.block_on(default_env.best_chain_containing(peer.client().info().finalized_hash))
 			.unwrap()
 			.unwrap()
 			.1,
@@ -1432,7 +1446,8 @@ fn grandpa_environment_respects_voting_rules() {
 
 	// the 3/4 environment should propose block 21 for voting
 	assert_eq!(
-		block_on(three_quarters_env.best_chain_containing(peer.client().info().finalized_hash))
+		runtime
+			.block_on(three_quarters_env.best_chain_containing(peer.client().info().finalized_hash))
 			.unwrap()
 			.unwrap()
 			.1,
@@ -1442,7 +1457,8 @@ fn grandpa_environment_respects_voting_rules() {
 	// while the default environment will always still make sure we don't vote
 	// on the best block (2 behind)
 	assert_eq!(
-		block_on(default_env.best_chain_containing(peer.client().info().finalized_hash))
+		runtime
+			.block_on(default_env.best_chain_containing(peer.client().info().finalized_hash))
 			.unwrap()
 			.unwrap()
 			.1,
@@ -1461,7 +1477,8 @@ fn grandpa_environment_respects_voting_rules() {
 	// best block, there's a hard rule that we can't cast any votes lower than
 	// the given base (#21).
 	assert_eq!(
-		block_on(default_env.best_chain_containing(peer.client().info().finalized_hash))
+		runtime
+			.block_on(default_env.best_chain_containing(peer.client().info().finalized_hash))
 			.unwrap()
 			.unwrap()
 			.1,
@@ -1476,7 +1493,8 @@ fn grandpa_environment_never_overwrites_round_voter_state() {
 	let peers = &[Ed25519Keyring::Alice];
 	let voters = make_ids(peers);
 
-	let mut net = GrandpaTestNet::new(TestApi::new(voters), 1, 0);
+	let runtime = Runtime::new().unwrap();
+	let mut net = GrandpaTestNet::new(TestApi::new(voters), 1, 0, runtime.handle().clone());
 	let peer = net.peer(0);
 	let network_service = peer.network_service().clone();
 	let link = peer.data.lock().take().unwrap();
@@ -1539,7 +1557,8 @@ fn justification_with_equivocation() {
 	let pairs = (0..100).map(|n| AuthorityPair::from_seed(&[n; 32])).collect::<Vec<_>>();
 	let voters = pairs.iter().map(AuthorityPair::public).map(|id| (id, 1)).collect::<Vec<_>>();
 	let api = TestApi::new(voters.clone());
-	let mut net = GrandpaTestNet::new(api.clone(), 1, 0);
+	let runtime = Runtime::new().unwrap();
+	let mut net = GrandpaTestNet::new(api.clone(), 1, 0, runtime.handle().clone());
 
 	// we create a basic chain with 3 blocks (no forks)
 	net.peer(0).push_blocks(3, false);
@@ -1606,7 +1625,8 @@ fn imports_justification_for_regular_blocks_on_import() {
 	let peers = &[Ed25519Keyring::Alice];
 	let voters = make_ids(peers);
 	let api = TestApi::new(voters);
-	let mut net = GrandpaTestNet::new(api.clone(), 1, 0);
+	let runtime = Runtime::new().unwrap();
+	let mut net = GrandpaTestNet::new(api.clone(), 1, 0, runtime.handle().clone());
 
 	let client = net.peer(0).client().clone();
 	let (mut block_import, ..) = net.make_block_import(client.clone());
@@ -1655,7 +1675,7 @@ fn imports_justification_for_regular_blocks_on_import() {
 	import.fork_choice = Some(ForkChoiceStrategy::LongestChain);
 
 	assert_eq!(
-		block_on(block_import.import_block(import, HashMap::new())).unwrap(),
+		runtime.block_on(block_import.import_block(import, HashMap::new())).unwrap(),
 		ImportResult::Imported(ImportedAux {
 			needs_justification: false,
 			clear_justification_requests: false,
@@ -1676,8 +1696,10 @@ fn grandpa_environment_doesnt_send_equivocation_reports_for_itself() {
 	let alice = Ed25519Keyring::Alice;
 	let voters = make_ids(&[alice]);
 
+	let runtime = Runtime::new().unwrap();
+
 	let environment = {
-		let mut net = GrandpaTestNet::new(TestApi::new(voters), 1, 0);
+		let mut net = GrandpaTestNet::new(TestApi::new(voters), 1, 0, runtime.handle().clone());
 		let peer = net.peer(0);
 		let network_service = peer.network_service().clone();
 		let link = peer.data.lock().take().unwrap();
@@ -1734,7 +1756,8 @@ fn revert_prunes_authority_changes() {
 	};
 
 	let api = TestApi::new(make_ids(peers));
-	let mut net = GrandpaTestNet::new(api, 3, 0);
+
+	let mut net = GrandpaTestNet::new(api, 3, 0, runtime.handle().clone());
 	runtime.spawn(initialize_grandpa(&mut net, peers));
 
 	let peer = net.peer(0);
diff --git a/substrate/client/merkle-mountain-range/Cargo.toml b/substrate/client/merkle-mountain-range/Cargo.toml
index 3630c424149645b6c4873eef70d42118c77771e8..6e8cb8194e48c070ad21062035e1890f158ebdc8 100644
--- a/substrate/client/merkle-mountain-range/Cargo.toml
+++ b/substrate/client/merkle-mountain-range/Cargo.toml
@@ -29,4 +29,3 @@ sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" }
 tokio = "1.17.0"
 sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" }
 substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" }
-async-std = { version = "1.11.0", default-features = false }
diff --git a/substrate/client/merkle-mountain-range/src/lib.rs b/substrate/client/merkle-mountain-range/src/lib.rs
index cb13977ffa5bd3e749c4e90203ccc78fc36907d4..59f26b4265708b7935e5fafd261300aabc13842c 100644
--- a/substrate/client/merkle-mountain-range/src/lib.rs
+++ b/substrate/client/merkle-mountain-range/src/lib.rs
@@ -201,7 +201,7 @@ mod tests {
 			let a2 = client.import_block(&BlockId::Hash(a1.hash()), b"a2", Some(1)).await;
 
 			client.finalize_block(a1.hash(), Some(1));
-			async_std::task::sleep(Duration::from_millis(200)).await;
+			tokio::time::sleep(Duration::from_millis(200)).await;
 			// expected finalized heads: a1
 			client.assert_canonicalized(&[&a1]);
 			client.assert_not_pruned(&[&a2]);
@@ -221,7 +221,7 @@ mod tests {
 			let a6 = client.import_block(&BlockId::Hash(a5.hash()), b"a6", Some(2)).await;
 
 			client.finalize_block(a5.hash(), Some(2));
-			async_std::task::sleep(Duration::from_millis(200)).await;
+			tokio::time::sleep(Duration::from_millis(200)).await;
 			// expected finalized heads: a4, a5
 			client.assert_canonicalized(&[&a4, &a5]);
 			client.assert_not_pruned(&[&a6]);
@@ -240,7 +240,7 @@ mod tests {
 			// Simulate the case where the runtime says that there are 2 mmr_blocks when in fact
 			// there is only 1.
 			client.finalize_block(a1.hash(), Some(2));
-			async_std::task::sleep(Duration::from_millis(200)).await;
+			tokio::time::sleep(Duration::from_millis(200)).await;
 			// expected finalized heads: -
 			client.assert_not_canonicalized(&[&a1]);
 		});
diff --git a/substrate/client/merkle-mountain-range/src/offchain_mmr.rs b/substrate/client/merkle-mountain-range/src/offchain_mmr.rs
index f42dfc0cae3db9ca2e95011f9af307d51fd5caa3..1cdd3810b4c52c61b13d912bd0cfd15584e5d8af 100644
--- a/substrate/client/merkle-mountain-range/src/offchain_mmr.rs
+++ b/substrate/client/merkle-mountain-range/src/offchain_mmr.rs
@@ -228,7 +228,7 @@ mod tests {
 			let d5 = client.import_block(&BlockId::Hash(d4.hash()), b"d5", Some(4)).await;
 
 			client.finalize_block(a3.hash(), Some(3));
-			async_std::task::sleep(Duration::from_millis(200)).await;
+			tokio::time::sleep(Duration::from_millis(200)).await;
 			// expected finalized heads: a1, a2, a3
 			client.assert_canonicalized(&[&a1, &a2, &a3]);
 			// expected stale heads: c1
@@ -236,7 +236,7 @@ mod tests {
 			client.assert_pruned(&[&c1, &b1]);
 
 			client.finalize_block(d5.hash(), None);
-			async_std::task::sleep(Duration::from_millis(200)).await;
+			tokio::time::sleep(Duration::from_millis(200)).await;
 			// expected finalized heads: d4, d5,
 			client.assert_canonicalized(&[&d4, &d5]);
 			// expected stale heads: b1, b2, b3, a4
diff --git a/substrate/client/merkle-mountain-range/src/test_utils.rs b/substrate/client/merkle-mountain-range/src/test_utils.rs
index 0ba297c2808d21fbf51193d3d495d57a890ea502..b854686b2dc86f045d8bacfa8b1457b370cff8f2 100644
--- a/substrate/client/merkle-mountain-range/src/test_utils.rs
+++ b/substrate/client/merkle-mountain-range/src/test_utils.rs
@@ -16,7 +16,6 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-use futures::{executor::LocalPool, task::LocalSpawn, FutureExt};
 use std::{
 	future::Future,
 	sync::{Arc, Mutex},
@@ -316,28 +315,17 @@ where
 	F: FnOnce(Arc<MockClient>) -> Fut + 'static,
 	Fut: Future<Output = ()>,
 {
-	let mut pool = LocalPool::new();
+	let runtime = tokio::runtime::Runtime::new().unwrap();
 	let client = Arc::new(MockClient::new());
 
 	let client_clone = client.clone();
-	pool.spawner()
-		.spawn_local_obj(
-			async move {
-				let backend = client_clone.backend.clone();
-				MmrGadget::start(
-					client_clone.clone(),
-					backend,
-					MockRuntimeApi::INDEXING_PREFIX.to_vec(),
-				)
-				.await
-			}
-			.boxed_local()
-			.into(),
-		)
-		.unwrap();
+	runtime.spawn(async move {
+		let backend = client_clone.backend.clone();
+		MmrGadget::start(client_clone, backend, MockRuntimeApi::INDEXING_PREFIX.to_vec()).await
+	});
 
-	pool.run_until(async move {
-		async_std::task::sleep(Duration::from_millis(200)).await;
+	runtime.block_on(async move {
+		tokio::time::sleep(Duration::from_millis(200)).await;
 
 		f(client).await
 	});
diff --git a/substrate/client/network-gossip/Cargo.toml b/substrate/client/network-gossip/Cargo.toml
index 31930515ff11828fe3fb9c493d0b06fa97643f9d..c40a830f9219b490c82ef5c0f6e45e9e5b5c0da3 100644
--- a/substrate/client/network-gossip/Cargo.toml
+++ b/substrate/client/network-gossip/Cargo.toml
@@ -27,6 +27,6 @@ sc-peerset = { version = "4.0.0-dev", path = "../peerset" }
 sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" }
 
 [dev-dependencies]
-async-std = "1.11.0"
+tokio = "1.22.0"
 quickcheck = { version = "1.0.3", default-features = false }
 substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" }
diff --git a/substrate/client/network-gossip/src/bridge.rs b/substrate/client/network-gossip/src/bridge.rs
index 5563b3be35e8d455561198894b27e85b72222348..462677f53c5fd70cd00bc2a2b25de6b302c10bb4 100644
--- a/substrate/client/network-gossip/src/bridge.rs
+++ b/substrate/client/network-gossip/src/bridge.rs
@@ -308,7 +308,6 @@ impl<B: BlockT> futures::future::FusedFuture for GossipEngine<B> {
 mod tests {
 	use super::*;
 	use crate::{multiaddr::Multiaddr, ValidationResult, ValidatorContext};
-	use async_std::task::spawn;
 	use futures::{
 		channel::mpsc::{unbounded, UnboundedSender},
 		executor::{block_on, block_on_stream},
@@ -490,8 +489,8 @@ mod tests {
 		}))
 	}
 
-	#[test]
-	fn keeps_multiple_subscribers_per_topic_updated_with_both_old_and_new_messages() {
+	#[tokio::test(flavor = "multi_thread")]
+	async fn keeps_multiple_subscribers_per_topic_updated_with_both_old_and_new_messages() {
 		let topic = H256::default();
 		let protocol = ProtocolName::from("/my_protocol");
 		let remote_peer = PeerId::random();
@@ -541,8 +540,10 @@ mod tests {
 			.start_send(events[1].clone())
 			.expect("Event stream is unbounded; qed.");
 
-		spawn(gossip_engine);
+		tokio::spawn(gossip_engine);
 
+		// Note: `block_on_stream()`-derived iterator will block the current thread,
+		//       so we need a `multi_thread` `tokio::test` runtime flavor.
 		let mut subscribers =
 			subscribers.into_iter().map(|s| block_on_stream(s)).collect::<Vec<_>>();
 
diff --git a/substrate/client/network/Cargo.toml b/substrate/client/network/Cargo.toml
index afd98801480818aa8c9257b633d838c1aa2d29b3..1959e24bd680f1efdb0758581d2e72b821b6bb13 100644
--- a/substrate/client/network/Cargo.toml
+++ b/substrate/client/network/Cargo.toml
@@ -26,7 +26,7 @@ fnv = "1.0.6"
 futures = "0.3.21"
 futures-timer = "3.0.2"
 ip_network = "0.4.1"
-libp2p = { version = "0.49.0", features = ["async-std", "dns", "identify", "kad", "mdns-async-io", "mplex", "noise", "ping", "tcp", "yamux", "websocket"] }
+libp2p = { version = "0.49.0", features = ["dns", "identify", "kad", "mdns", "mplex", "noise", "ping", "tcp", "tokio", "yamux", "websocket"] }
 linked_hash_set = "0.1.3"
 linked-hash-map = "0.5.4"
 log = "0.4.17"
@@ -57,9 +57,10 @@ sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" }
 
 [dev-dependencies]
 assert_matches = "1.3"
-async-std = { version = "1.11.0", features = ["attributes"] }
 rand = "0.7.2"
 tempfile = "3.1.0"
+tokio = { version = "1.22.0", features = ["macros"] }
+tokio-util = { version = "0.7.4", features = ["compat"] }
 sc-network-light = { version = "0.10.0-dev", path = "./light" }
 sc-network-sync = { version = "0.10.0-dev", path = "./sync" }
 sp-test-primitives = { version = "2.0.0", path = "../../primitives/test-primitives" }
diff --git a/substrate/client/network/bitswap/Cargo.toml b/substrate/client/network/bitswap/Cargo.toml
index 9793eeae51b26069a27260bb4c989b9c049048db..02e12e8f91653c4e5c647e4fc4b354d5f0d5f6e7 100644
--- a/substrate/client/network/bitswap/Cargo.toml
+++ b/substrate/client/network/bitswap/Cargo.toml
@@ -31,7 +31,7 @@ sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain"
 sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" }
 
 [dev-dependencies]
-tokio = { version = "1", features = ["full"] }
+tokio = { version = "1.22.0", features = ["full"] }
 sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" }
 sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" }
 sp-core = { version = "7.0.0", path = "../../../primitives/core" }
diff --git a/substrate/client/network/src/config.rs b/substrate/client/network/src/config.rs
index 50d8e2baba60fe783115e6380df18591a7c019ce..b10612dd17094bec88d6ee48aef5625111bcd861 100644
--- a/substrate/client/network/src/config.rs
+++ b/substrate/client/network/src/config.rs
@@ -66,9 +66,8 @@ where
 	/// Assigned role for our node (full, light, ...).
 	pub role: Role,
 
-	/// How to spawn background tasks. If you pass `None`, then a threads pool will be used by
-	/// default.
-	pub executor: Option<Box<dyn Fn(Pin<Box<dyn Future<Output = ()> + Send>>) + Send>>,
+	/// How to spawn background tasks.
+	pub executor: Box<dyn Fn(Pin<Box<dyn Future<Output = ()> + Send>>) + Send>,
 
 	/// Network layer configuration.
 	pub network_config: NetworkConfiguration,
diff --git a/substrate/client/network/src/discovery.rs b/substrate/client/network/src/discovery.rs
index 00fc78061293dd0750cb41b614c845356dd80072..13b153be11d590653c505797fb107f9b55b31a9e 100644
--- a/substrate/client/network/src/discovery.rs
+++ b/substrate/client/network/src/discovery.rs
@@ -64,7 +64,7 @@ use libp2p::{
 		GetClosestPeersError, Kademlia, KademliaBucketInserts, KademliaConfig, KademliaEvent,
 		QueryId, QueryResult, Quorum, Record,
 	},
-	mdns::{Mdns, MdnsConfig, MdnsEvent},
+	mdns::{MdnsConfig, MdnsEvent, TokioMdns},
 	multiaddr::Protocol,
 	swarm::{
 		behaviour::toggle::{Toggle, ToggleIntoConnectionHandler},
@@ -235,7 +235,7 @@ impl DiscoveryConfig {
 			allow_private_ipv4,
 			discovery_only_if_under_num,
 			mdns: if enable_mdns {
-				match Mdns::new(MdnsConfig::default()) {
+				match TokioMdns::new(MdnsConfig::default()) {
 					Ok(mdns) => Some(mdns),
 					Err(err) => {
 						warn!(target: "sub-libp2p", "Failed to initialize mDNS: {:?}", err);
@@ -266,7 +266,7 @@ pub struct DiscoveryBehaviour {
 	/// it's always enabled in `NetworkWorker::new()`.
 	kademlia: Toggle<Kademlia<MemoryStore>>,
 	/// Discovers nodes on the local network.
-	mdns: Option<Mdns>,
+	mdns: Option<TokioMdns>,
 	/// Stream that fires when we need to perform the next random Kademlia query. `None` if
 	/// random walking is disabled.
 	next_kad_random_query: Option<Delay>,
diff --git a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs
index 56cfefd75d53d7ba56bf8ea9713a1aea76a15f1d..5d61e10727b66c7903b60525897c3ab534bd0cac 100644
--- a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs
+++ b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs
@@ -481,20 +481,25 @@ pub enum NotificationsOutError {
 #[cfg(test)]
 mod tests {
 	use super::{NotificationsIn, NotificationsInOpen, NotificationsOut, NotificationsOutOpen};
-
-	use async_std::net::{TcpListener, TcpStream};
 	use futures::{channel::oneshot, prelude::*};
 	use libp2p::core::upgrade;
+	use tokio::{
+		net::{TcpListener, TcpStream},
+		runtime::Runtime,
+	};
+	use tokio_util::compat::TokioAsyncReadCompatExt;
 
 	#[test]
 	fn basic_works() {
 		const PROTO_NAME: &str = "/test/proto/1";
 		let (listener_addr_tx, listener_addr_rx) = oneshot::channel();
 
-		let client = async_std::task::spawn(async move {
+		let runtime = Runtime::new().unwrap();
+
+		let client = runtime.spawn(async move {
 			let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap();
 			let NotificationsOutOpen { handshake, mut substream, .. } = upgrade::apply_outbound(
-				socket,
+				socket.compat(),
 				NotificationsOut::new(PROTO_NAME, Vec::new(), &b"initial message"[..], 1024 * 1024),
 				upgrade::Version::V1,
 			)
@@ -505,13 +510,13 @@ mod tests {
 			substream.send(b"test message".to_vec()).await.unwrap();
 		});
 
-		async_std::task::block_on(async move {
+		runtime.block_on(async move {
 			let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
 			listener_addr_tx.send(listener.local_addr().unwrap()).unwrap();
 
 			let (socket, _) = listener.accept().await.unwrap();
 			let NotificationsInOpen { handshake, mut substream, .. } = upgrade::apply_inbound(
-				socket,
+				socket.compat(),
 				NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024),
 			)
 			.await
@@ -524,7 +529,7 @@ mod tests {
 			assert_eq!(msg.as_ref(), b"test message");
 		});
 
-		async_std::task::block_on(client);
+		runtime.block_on(client).unwrap();
 	}
 
 	#[test]
@@ -534,10 +539,12 @@ mod tests {
 		const PROTO_NAME: &str = "/test/proto/1";
 		let (listener_addr_tx, listener_addr_rx) = oneshot::channel();
 
-		let client = async_std::task::spawn(async move {
+		let runtime = Runtime::new().unwrap();
+
+		let client = runtime.spawn(async move {
 			let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap();
 			let NotificationsOutOpen { handshake, mut substream, .. } = upgrade::apply_outbound(
-				socket,
+				socket.compat(),
 				NotificationsOut::new(PROTO_NAME, Vec::new(), vec![], 1024 * 1024),
 				upgrade::Version::V1,
 			)
@@ -548,13 +555,13 @@ mod tests {
 			substream.send(Default::default()).await.unwrap();
 		});
 
-		async_std::task::block_on(async move {
+		runtime.block_on(async move {
 			let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
 			listener_addr_tx.send(listener.local_addr().unwrap()).unwrap();
 
 			let (socket, _) = listener.accept().await.unwrap();
 			let NotificationsInOpen { handshake, mut substream, .. } = upgrade::apply_inbound(
-				socket,
+				socket.compat(),
 				NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024),
 			)
 			.await
@@ -567,7 +574,7 @@ mod tests {
 			assert!(msg.as_ref().is_empty());
 		});
 
-		async_std::task::block_on(client);
+		runtime.block_on(client).unwrap();
 	}
 
 	#[test]
@@ -575,10 +582,12 @@ mod tests {
 		const PROTO_NAME: &str = "/test/proto/1";
 		let (listener_addr_tx, listener_addr_rx) = oneshot::channel();
 
-		let client = async_std::task::spawn(async move {
+		let runtime = Runtime::new().unwrap();
+
+		let client = runtime.spawn(async move {
 			let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap();
 			let outcome = upgrade::apply_outbound(
-				socket,
+				socket.compat(),
 				NotificationsOut::new(PROTO_NAME, Vec::new(), &b"hello"[..], 1024 * 1024),
 				upgrade::Version::V1,
 			)
@@ -590,13 +599,13 @@ mod tests {
 			assert!(outcome.is_err());
 		});
 
-		async_std::task::block_on(async move {
+		runtime.block_on(async move {
 			let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
 			listener_addr_tx.send(listener.local_addr().unwrap()).unwrap();
 
 			let (socket, _) = listener.accept().await.unwrap();
 			let NotificationsInOpen { handshake, substream, .. } = upgrade::apply_inbound(
-				socket,
+				socket.compat(),
 				NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024),
 			)
 			.await
@@ -608,7 +617,7 @@ mod tests {
 			drop(substream);
 		});
 
-		async_std::task::block_on(client);
+		runtime.block_on(client).unwrap();
 	}
 
 	#[test]
@@ -616,10 +625,12 @@ mod tests {
 		const PROTO_NAME: &str = "/test/proto/1";
 		let (listener_addr_tx, listener_addr_rx) = oneshot::channel();
 
-		let client = async_std::task::spawn(async move {
+		let runtime = Runtime::new().unwrap();
+
+		let client = runtime.spawn(async move {
 			let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap();
 			let ret = upgrade::apply_outbound(
-				socket,
+				socket.compat(),
 				// We check that an initial message that is too large gets refused.
 				NotificationsOut::new(
 					PROTO_NAME,
@@ -633,20 +644,20 @@ mod tests {
 			assert!(ret.is_err());
 		});
 
-		async_std::task::block_on(async move {
+		runtime.block_on(async move {
 			let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
 			listener_addr_tx.send(listener.local_addr().unwrap()).unwrap();
 
 			let (socket, _) = listener.accept().await.unwrap();
 			let ret = upgrade::apply_inbound(
-				socket,
+				socket.compat(),
 				NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024),
 			)
 			.await;
 			assert!(ret.is_err());
 		});
 
-		async_std::task::block_on(client);
+		runtime.block_on(client).unwrap();
 	}
 
 	#[test]
@@ -654,10 +665,12 @@ mod tests {
 		const PROTO_NAME: &str = "/test/proto/1";
 		let (listener_addr_tx, listener_addr_rx) = oneshot::channel();
 
-		let client = async_std::task::spawn(async move {
+		let runtime = Runtime::new().unwrap();
+
+		let client = runtime.spawn(async move {
 			let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap();
 			let ret = upgrade::apply_outbound(
-				socket,
+				socket.compat(),
 				NotificationsOut::new(PROTO_NAME, Vec::new(), &b"initial message"[..], 1024 * 1024),
 				upgrade::Version::V1,
 			)
@@ -665,13 +678,13 @@ mod tests {
 			assert!(ret.is_err());
 		});
 
-		async_std::task::block_on(async move {
+		runtime.block_on(async move {
 			let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
 			listener_addr_tx.send(listener.local_addr().unwrap()).unwrap();
 
 			let (socket, _) = listener.accept().await.unwrap();
 			let NotificationsInOpen { handshake, mut substream, .. } = upgrade::apply_inbound(
-				socket,
+				socket.compat(),
 				NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024),
 			)
 			.await
@@ -683,6 +696,6 @@ mod tests {
 			let _ = substream.next().await;
 		});
 
-		async_std::task::block_on(client);
+		runtime.block_on(client).unwrap();
 	}
 }
diff --git a/substrate/client/network/src/service.rs b/substrate/client/network/src/service.rs
index 7d756ed2d1e88b87888b92521f0ba009b64864a3..d35594a07e38a07e8ffa0e597163737f8cc4c292 100644
--- a/substrate/client/network/src/service.rs
+++ b/substrate/client/network/src/service.rs
@@ -383,15 +383,15 @@ where
 				.notify_handler_buffer_size(NonZeroUsize::new(32).expect("32 != 0; qed"))
 				.connection_event_buffer_size(1024)
 				.max_negotiating_inbound_streams(2048);
-			if let Some(spawner) = params.executor {
-				struct SpawnImpl<F>(F);
-				impl<F: Fn(Pin<Box<dyn Future<Output = ()> + Send>>)> Executor for SpawnImpl<F> {
-					fn exec(&self, f: Pin<Box<dyn Future<Output = ()> + Send>>) {
-						(self.0)(f)
-					}
+
+			struct SpawnImpl<F>(F);
+			impl<F: Fn(Pin<Box<dyn Future<Output = ()> + Send>>)> Executor for SpawnImpl<F> {
+				fn exec(&self, f: Pin<Box<dyn Future<Output = ()> + Send>>) {
+					(self.0)(f)
 				}
-				builder = builder.executor(Box::new(SpawnImpl(spawner)));
 			}
+			builder = builder.executor(Box::new(SpawnImpl(params.executor)));
+
 			(builder.build(), bandwidth)
 		};
 
diff --git a/substrate/client/network/src/service/tests/chain_sync.rs b/substrate/client/network/src/service/tests/chain_sync.rs
index b62fb36461860e09e3ff0239be56850c319437bc..bd4967f25973acde75a5969308972c40a8969116 100644
--- a/substrate/client/network/src/service/tests/chain_sync.rs
+++ b/substrate/client/network/src/service/tests/chain_sync.rs
@@ -44,6 +44,7 @@ use std::{
 	time::Duration,
 };
 use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _};
+use tokio::runtime::Handle;
 
 fn set_default_expecations_no_peers(
 	chain_sync: &mut MockChainSync<substrate_test_runtime_client::runtime::Block>,
@@ -59,7 +60,7 @@ fn set_default_expecations_no_peers(
 	});
 }
 
-#[async_std::test]
+#[tokio::test]
 async fn normal_network_poll_no_peers() {
 	// build `ChainSync` and set default expectations for it
 	let mut chain_sync =
@@ -71,7 +72,7 @@ async fn normal_network_poll_no_peers() {
 	let chain_sync_service =
 		Box::new(MockChainSyncInterface::<substrate_test_runtime_client::runtime::Block>::new());
 
-	let mut network = TestNetworkBuilder::new()
+	let mut network = TestNetworkBuilder::new(Handle::current())
 		.with_chain_sync((chain_sync, chain_sync_service))
 		.build();
 
@@ -83,7 +84,7 @@ async fn normal_network_poll_no_peers() {
 	.await;
 }
 
-#[async_std::test]
+#[tokio::test]
 async fn request_justification() {
 	// build `ChainSyncInterface` provider and set no expecations for it (i.e., it cannot be
 	// called)
@@ -104,7 +105,7 @@ async fn request_justification() {
 		.returning(|_, _| ());
 
 	set_default_expecations_no_peers(&mut chain_sync);
-	let mut network = TestNetworkBuilder::new()
+	let mut network = TestNetworkBuilder::new(Handle::current())
 		.with_chain_sync((chain_sync, chain_sync_service))
 		.build();
 
@@ -118,7 +119,7 @@ async fn request_justification() {
 	.await;
 }
 
-#[async_std::test]
+#[tokio::test]
 async fn clear_justification_requests() {
 	// build `ChainSyncInterface` provider and set no expecations for it (i.e., it cannot be
 	// called)
@@ -132,7 +133,7 @@ async fn clear_justification_requests() {
 	chain_sync.expect_clear_justification_requests().once().returning(|| ());
 
 	set_default_expecations_no_peers(&mut chain_sync);
-	let mut network = TestNetworkBuilder::new()
+	let mut network = TestNetworkBuilder::new(Handle::current())
 		.with_chain_sync((chain_sync, chain_sync_service))
 		.build();
 
@@ -146,7 +147,7 @@ async fn clear_justification_requests() {
 	.await;
 }
 
-#[async_std::test]
+#[tokio::test]
 async fn set_sync_fork_request() {
 	// build `ChainSync` and set default expectations for it
 	let mut chain_sync =
@@ -171,7 +172,7 @@ async fn set_sync_fork_request() {
 		.once()
 		.returning(|_, _, _| ());
 
-	let mut network = TestNetworkBuilder::new()
+	let mut network = TestNetworkBuilder::new(Handle::current())
 		.with_chain_sync((chain_sync, Box::new(chain_sync_service)))
 		.build();
 
@@ -185,7 +186,7 @@ async fn set_sync_fork_request() {
 	.await;
 }
 
-#[async_std::test]
+#[tokio::test]
 async fn on_block_finalized() {
 	let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0);
 	// build `ChainSyncInterface` provider and set no expecations for it (i.e., it cannot be
@@ -215,7 +216,7 @@ async fn on_block_finalized() {
 		.returning(|_, _| ());
 
 	set_default_expecations_no_peers(&mut chain_sync);
-	let mut network = TestNetworkBuilder::new()
+	let mut network = TestNetworkBuilder::new(Handle::current())
 		.with_client(client)
 		.with_chain_sync((chain_sync, chain_sync_service))
 		.build();
@@ -232,7 +233,7 @@ async fn on_block_finalized() {
 
 // report from mock import queue that importing a justification was not successful
 // and verify that connection to the peer is closed
-#[async_std::test]
+#[tokio::test]
 async fn invalid_justification_imported() {
 	struct DummyImportQueue(
 		Arc<
@@ -279,13 +280,13 @@ async fn invalid_justification_imported() {
 	let justification_info = Arc::new(RwLock::new(None));
 	let listen_addr = config::build_multiaddr![Memory(rand::random::<u64>())];
 
-	let (service1, mut event_stream1) = TestNetworkBuilder::new()
+	let (service1, mut event_stream1) = TestNetworkBuilder::new(Handle::current())
 		.with_import_queue(Box::new(DummyImportQueue(justification_info.clone())))
 		.with_listen_addresses(vec![listen_addr.clone()])
 		.build()
 		.start_network();
 
-	let (service2, mut event_stream2) = TestNetworkBuilder::new()
+	let (service2, mut event_stream2) = TestNetworkBuilder::new(Handle::current())
 		.with_set_config(SetConfig {
 			reserved_nodes: vec![MultiaddrWithPeerId {
 				multiaddr: listen_addr,
@@ -320,15 +321,12 @@ async fn invalid_justification_imported() {
 		while !std::matches!(event_stream1.next().await, Some(Event::SyncDisconnected { .. })) {}
 	};
 
-	if async_std::future::timeout(Duration::from_secs(5), wait_disconnection)
-		.await
-		.is_err()
-	{
+	if tokio::time::timeout(Duration::from_secs(5), wait_disconnection).await.is_err() {
 		panic!("did not receive disconnection event in time");
 	}
 }
 
-#[async_std::test]
+#[tokio::test]
 async fn disconnect_peer_using_chain_sync_handle() {
 	let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0);
 	let listen_addr = config::build_multiaddr![Memory(rand::random::<u64>())];
@@ -353,7 +351,7 @@ async fn disconnect_peer_using_chain_sync_handle() {
 	)
 	.unwrap();
 
-	let (node1, mut event_stream1) = TestNetworkBuilder::new()
+	let (node1, mut event_stream1) = TestNetworkBuilder::new(Handle::current())
 		.with_listen_addresses(vec![listen_addr.clone()])
 		.with_chain_sync((Box::new(chain_sync), chain_sync_service))
 		.with_chain_sync_network((chain_sync_network_provider, chain_sync_network_handle))
@@ -361,7 +359,7 @@ async fn disconnect_peer_using_chain_sync_handle() {
 		.build()
 		.start_network();
 
-	let (node2, mut event_stream2) = TestNetworkBuilder::new()
+	let (node2, mut event_stream2) = TestNetworkBuilder::new(Handle::current())
 		.with_set_config(SetConfig {
 			reserved_nodes: vec![MultiaddrWithPeerId {
 				multiaddr: listen_addr,
@@ -394,10 +392,7 @@ async fn disconnect_peer_using_chain_sync_handle() {
 		while !std::matches!(event_stream1.next().await, Some(Event::SyncDisconnected { .. })) {}
 	};
 
-	if async_std::future::timeout(Duration::from_secs(5), wait_disconnection)
-		.await
-		.is_err()
-	{
+	if tokio::time::timeout(Duration::from_secs(5), wait_disconnection).await.is_err() {
 		panic!("did not receive disconnection event in time");
 	}
 }
diff --git a/substrate/client/network/src/service/tests/mod.rs b/substrate/client/network/src/service/tests/mod.rs
index 1d91fc142672fcbcee0247ff445d1e23c0f6a270..f8635e39e9da9893805648e140a7d5466477925c 100644
--- a/substrate/client/network/src/service/tests/mod.rs
+++ b/substrate/client/network/src/service/tests/mod.rs
@@ -44,6 +44,7 @@ use substrate_test_runtime_client::{
 	runtime::{Block as TestBlock, Hash as TestHash},
 	TestClient, TestClientBuilder, TestClientBuilderExt as _,
 };
+use tokio::runtime::Handle;
 
 #[cfg(test)]
 mod chain_sync;
@@ -58,11 +59,12 @@ const PROTOCOL_NAME: &str = "/foo";
 
 struct TestNetwork {
 	network: TestNetworkWorker,
+	rt_handle: Handle,
 }
 
 impl TestNetwork {
-	pub fn new(network: TestNetworkWorker) -> Self {
-		Self { network }
+	pub fn new(network: TestNetworkWorker, rt_handle: Handle) -> Self {
+		Self { network, rt_handle }
 	}
 
 	pub fn service(&self) -> &Arc<TestNetworkService> {
@@ -80,7 +82,7 @@ impl TestNetwork {
 		let service = worker.service().clone();
 		let event_stream = service.event_stream("test");
 
-		async_std::task::spawn(async move {
+		self.rt_handle.spawn(async move {
 			futures::pin_mut!(worker);
 			let _ = worker.await;
 		});
@@ -97,10 +99,11 @@ struct TestNetworkBuilder {
 	chain_sync: Option<(Box<dyn ChainSyncT<TestBlock>>, Box<dyn ChainSyncInterface<TestBlock>>)>,
 	chain_sync_network: Option<(NetworkServiceProvider, NetworkServiceHandle)>,
 	config: Option<config::NetworkConfiguration>,
+	rt_handle: Handle,
 }
 
 impl TestNetworkBuilder {
-	pub fn new() -> Self {
+	pub fn new(rt_handle: Handle) -> Self {
 		Self {
 			import_queue: None,
 			client: None,
@@ -109,6 +112,7 @@ impl TestNetworkBuilder {
 			chain_sync: None,
 			chain_sync_network: None,
 			config: None,
+			rt_handle,
 		}
 	}
 
@@ -222,21 +226,21 @@ impl TestNetworkBuilder {
 		let block_request_protocol_config = {
 			let (handler, protocol_config) =
 				BlockRequestHandler::new(&protocol_id, None, client.clone(), 50);
-			async_std::task::spawn(handler.run().boxed());
+			self.rt_handle.spawn(handler.run().boxed());
 			protocol_config
 		};
 
 		let state_request_protocol_config = {
 			let (handler, protocol_config) =
 				StateRequestHandler::new(&protocol_id, None, client.clone(), 50);
-			async_std::task::spawn(handler.run().boxed());
+			self.rt_handle.spawn(handler.run().boxed());
 			protocol_config
 		};
 
 		let light_client_request_protocol_config = {
 			let (handler, protocol_config) =
 				LightClientRequestHandler::new(&protocol_id, None, client.clone());
-			async_std::task::spawn(handler.run().boxed());
+			self.rt_handle.spawn(handler.run().boxed());
 			protocol_config
 		};
 
@@ -295,6 +299,11 @@ impl TestNetworkBuilder {
 			(Box::new(chain_sync), chain_sync_service)
 		});
 
+		let handle = self.rt_handle.clone();
+		let executor = move |f| {
+			handle.spawn(f);
+		};
+
 		let worker = NetworkWorker::<
 			substrate_test_runtime_client::runtime::Block,
 			substrate_test_runtime_client::runtime::Hash,
@@ -302,7 +311,7 @@ impl TestNetworkBuilder {
 		>::new(config::Params {
 			block_announce_config,
 			role: config::Role::Full,
-			executor: None,
+			executor: Box::new(executor),
 			network_config,
 			chain: client.clone(),
 			protocol_id,
@@ -321,10 +330,10 @@ impl TestNetworkBuilder {
 		.unwrap();
 
 		let service = worker.service().clone();
-		async_std::task::spawn(async move {
+		self.rt_handle.spawn(async move {
 			let _ = chain_sync_network_provider.run(service).await;
 		});
 
-		TestNetwork::new(worker)
+		TestNetwork::new(worker, self.rt_handle)
 	}
 }
diff --git a/substrate/client/network/src/service/tests/service.rs b/substrate/client/network/src/service/tests/service.rs
index 90945fdcef2cf7999cbed1c66d4b7ff2f4c5417b..aa74e595fff7ec23c2cb3d56686eb2c3d8a35016 100644
--- a/substrate/client/network/src/service/tests/service.rs
+++ b/substrate/client/network/src/service/tests/service.rs
@@ -26,6 +26,7 @@ use sc_network_common::{
 	service::{NetworkNotification, NetworkPeers, NetworkStateInfo},
 };
 use std::{sync::Arc, time::Duration};
+use tokio::runtime::Handle;
 
 type TestNetworkService = NetworkService<
 	substrate_test_runtime_client::runtime::Block,
@@ -37,7 +38,9 @@ const PROTOCOL_NAME: &str = "/foo";
 
 /// Builds two nodes and their associated events stream.
 /// The nodes are connected together and have the `PROTOCOL_NAME` protocol registered.
-fn build_nodes_one_proto() -> (
+fn build_nodes_one_proto(
+	rt_handle: &Handle,
+) -> (
 	Arc<TestNetworkService>,
 	impl Stream<Item = Event>,
 	Arc<TestNetworkService>,
@@ -45,12 +48,12 @@ fn build_nodes_one_proto() -> (
 ) {
 	let listen_addr = config::build_multiaddr![Memory(rand::random::<u64>())];
 
-	let (node1, events_stream1) = TestNetworkBuilder::new()
+	let (node1, events_stream1) = TestNetworkBuilder::new(rt_handle.clone())
 		.with_listen_addresses(vec![listen_addr.clone()])
 		.build()
 		.start_network();
 
-	let (node2, events_stream2) = TestNetworkBuilder::new()
+	let (node2, events_stream2) = TestNetworkBuilder::new(rt_handle.clone())
 		.with_set_config(SetConfig {
 			reserved_nodes: vec![MultiaddrWithPeerId {
 				multiaddr: listen_addr,
@@ -69,7 +72,10 @@ fn notifications_state_consistent() {
 	// Runs two nodes and ensures that events are propagated out of the API in a consistent
 	// correct order, which means no notification received on a closed substream.
 
-	let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto();
+	let runtime = tokio::runtime::Runtime::new().unwrap();
+
+	let (node1, mut events_stream1, node2, mut events_stream2) =
+		build_nodes_one_proto(runtime.handle());
 
 	// Write some initial notifications that shouldn't get through.
 	for _ in 0..(rand::random::<u8>() % 5) {
@@ -87,7 +93,7 @@ fn notifications_state_consistent() {
 		);
 	}
 
-	async_std::task::block_on(async move {
+	runtime.block_on(async move {
 		// True if we have an active substream from node1 to node2.
 		let mut node1_to_node2_open = false;
 		// True if we have an active substream from node2 to node1.
@@ -216,11 +222,11 @@ fn notifications_state_consistent() {
 	});
 }
 
-#[async_std::test]
+#[tokio::test]
 async fn lots_of_incoming_peers_works() {
 	let listen_addr = config::build_multiaddr![Memory(rand::random::<u64>())];
 
-	let (main_node, _) = TestNetworkBuilder::new()
+	let (main_node, _) = TestNetworkBuilder::new(Handle::current())
 		.with_listen_addresses(vec![listen_addr.clone()])
 		.with_set_config(SetConfig { in_peers: u32::MAX, ..Default::default() })
 		.build()
@@ -233,7 +239,7 @@ async fn lots_of_incoming_peers_works() {
 	let mut background_tasks_to_wait = Vec::new();
 
 	for _ in 0..32 {
-		let (_dialing_node, event_stream) = TestNetworkBuilder::new()
+		let (_dialing_node, event_stream) = TestNetworkBuilder::new(Handle::current())
 			.with_set_config(SetConfig {
 				reserved_nodes: vec![MultiaddrWithPeerId {
 					multiaddr: listen_addr.clone(),
@@ -244,7 +250,7 @@ async fn lots_of_incoming_peers_works() {
 			.build()
 			.start_network();
 
-		background_tasks_to_wait.push(async_std::task::spawn(async move {
+		background_tasks_to_wait.push(tokio::spawn(async move {
 			// Create a dummy timer that will "never" fire, and that will be overwritten when we
 			// actually need the timer. Using an Option would be technically cleaner, but it would
 			// make the code below way more complicated.
@@ -287,10 +293,13 @@ fn notifications_back_pressure() {
 
 	const TOTAL_NOTIFS: usize = 10_000;
 
-	let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto();
+	let runtime = tokio::runtime::Runtime::new().unwrap();
+
+	let (node1, mut events_stream1, node2, mut events_stream2) =
+		build_nodes_one_proto(runtime.handle());
 	let node2_id = node2.local_peer_id();
 
-	let receiver = async_std::task::spawn(async move {
+	let receiver = runtime.spawn(async move {
 		let mut received_notifications = 0;
 
 		while received_notifications < TOTAL_NOTIFS {
@@ -306,12 +315,12 @@ fn notifications_back_pressure() {
 			};
 
 			if rand::random::<u8>() < 2 {
-				async_std::task::sleep(Duration::from_millis(rand::random::<u64>() % 750)).await;
+				tokio::time::sleep(Duration::from_millis(rand::random::<u64>() % 750)).await;
 			}
 		}
 	});
 
-	async_std::task::block_on(async move {
+	runtime.block_on(async move {
 		// Wait for the `NotificationStreamOpened`.
 		loop {
 			match events_stream1.next().await.unwrap() {
@@ -331,7 +340,7 @@ fn notifications_back_pressure() {
 				.unwrap();
 		}
 
-		receiver.await;
+		receiver.await.unwrap();
 	});
 }
 
@@ -341,8 +350,10 @@ fn fallback_name_working() {
 	// they can connect.
 	const NEW_PROTOCOL_NAME: &str = "/new-shiny-protocol-that-isnt-PROTOCOL_NAME";
 
+	let runtime = tokio::runtime::Runtime::new().unwrap();
+
 	let listen_addr = config::build_multiaddr![Memory(rand::random::<u64>())];
-	let (node1, mut events_stream1) = TestNetworkBuilder::new()
+	let (node1, mut events_stream1) = TestNetworkBuilder::new(runtime.handle().clone())
 		.with_config(config::NetworkConfiguration {
 			extra_sets: vec![NonDefaultSetConfig {
 				notifications_protocol: NEW_PROTOCOL_NAME.into(),
@@ -358,7 +369,7 @@ fn fallback_name_working() {
 		.build()
 		.start_network();
 
-	let (_, mut events_stream2) = TestNetworkBuilder::new()
+	let (_, mut events_stream2) = TestNetworkBuilder::new(runtime.handle().clone())
 		.with_set_config(SetConfig {
 			reserved_nodes: vec![MultiaddrWithPeerId {
 				multiaddr: listen_addr,
@@ -369,7 +380,7 @@ fn fallback_name_working() {
 		.build()
 		.start_network();
 
-	let receiver = async_std::task::spawn(async move {
+	let receiver = runtime.spawn(async move {
 		// Wait for the `NotificationStreamOpened`.
 		loop {
 			match events_stream2.next().await.unwrap() {
@@ -383,7 +394,7 @@ fn fallback_name_working() {
 		}
 	});
 
-	async_std::task::block_on(async move {
+	runtime.block_on(async move {
 		// Wait for the `NotificationStreamOpened`.
 		loop {
 			match events_stream1.next().await.unwrap() {
@@ -397,15 +408,16 @@ fn fallback_name_working() {
 			};
 		}
 
-		receiver.await;
+		receiver.await.unwrap();
 	});
 }
 
 // Disconnect peer by calling `Protocol::disconnect_peer()` with the supplied block announcement
 // protocol name and verify that `SyncDisconnected` event is emitted
-#[async_std::test]
+#[tokio::test]
 async fn disconnect_sync_peer_using_block_announcement_protocol_name() {
-	let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto();
+	let (node1, mut events_stream1, node2, mut events_stream2) =
+		build_nodes_one_proto(&Handle::current());
 
 	async fn wait_for_events(stream: &mut (impl Stream<Item = Event> + std::marker::Unpin)) {
 		let mut notif_received = false;
@@ -437,12 +449,12 @@ async fn disconnect_sync_peer_using_block_announcement_protocol_name() {
 	assert!(std::matches!(events_stream2.next().await, Some(Event::SyncDisconnected { .. })));
 }
 
-#[test]
+#[tokio::test]
 #[should_panic(expected = "don't match the transport")]
-fn ensure_listen_addresses_consistent_with_transport_memory() {
+async fn ensure_listen_addresses_consistent_with_transport_memory() {
 	let listen_addr = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)];
 
-	let _ = TestNetworkBuilder::new()
+	let _ = TestNetworkBuilder::new(Handle::current())
 		.with_config(config::NetworkConfiguration {
 			listen_addresses: vec![listen_addr.clone()],
 			transport: TransportConfig::MemoryOnly,
@@ -457,12 +469,12 @@ fn ensure_listen_addresses_consistent_with_transport_memory() {
 		.start_network();
 }
 
-#[test]
+#[tokio::test]
 #[should_panic(expected = "don't match the transport")]
-fn ensure_listen_addresses_consistent_with_transport_not_memory() {
+async fn ensure_listen_addresses_consistent_with_transport_not_memory() {
 	let listen_addr = config::build_multiaddr![Memory(rand::random::<u64>())];
 
-	let _ = TestNetworkBuilder::new()
+	let _ = TestNetworkBuilder::new(Handle::current())
 		.with_config(config::NetworkConfiguration {
 			listen_addresses: vec![listen_addr.clone()],
 			..config::NetworkConfiguration::new(
@@ -476,16 +488,16 @@ fn ensure_listen_addresses_consistent_with_transport_not_memory() {
 		.start_network();
 }
 
-#[test]
+#[tokio::test]
 #[should_panic(expected = "don't match the transport")]
-fn ensure_boot_node_addresses_consistent_with_transport_memory() {
+async fn ensure_boot_node_addresses_consistent_with_transport_memory() {
 	let listen_addr = config::build_multiaddr![Memory(rand::random::<u64>())];
 	let boot_node = MultiaddrWithPeerId {
 		multiaddr: config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)],
 		peer_id: PeerId::random(),
 	};
 
-	let _ = TestNetworkBuilder::new()
+	let _ = TestNetworkBuilder::new(Handle::current())
 		.with_config(config::NetworkConfiguration {
 			listen_addresses: vec![listen_addr.clone()],
 			transport: TransportConfig::MemoryOnly,
@@ -501,16 +513,16 @@ fn ensure_boot_node_addresses_consistent_with_transport_memory() {
 		.start_network();
 }
 
-#[test]
+#[tokio::test]
 #[should_panic(expected = "don't match the transport")]
-fn ensure_boot_node_addresses_consistent_with_transport_not_memory() {
+async fn ensure_boot_node_addresses_consistent_with_transport_not_memory() {
 	let listen_addr = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)];
 	let boot_node = MultiaddrWithPeerId {
 		multiaddr: config::build_multiaddr![Memory(rand::random::<u64>())],
 		peer_id: PeerId::random(),
 	};
 
-	let _ = TestNetworkBuilder::new()
+	let _ = TestNetworkBuilder::new(Handle::current())
 		.with_config(config::NetworkConfiguration {
 			listen_addresses: vec![listen_addr.clone()],
 			boot_nodes: vec![boot_node],
@@ -525,16 +537,16 @@ fn ensure_boot_node_addresses_consistent_with_transport_not_memory() {
 		.start_network();
 }
 
-#[test]
+#[tokio::test]
 #[should_panic(expected = "don't match the transport")]
-fn ensure_reserved_node_addresses_consistent_with_transport_memory() {
+async fn ensure_reserved_node_addresses_consistent_with_transport_memory() {
 	let listen_addr = config::build_multiaddr![Memory(rand::random::<u64>())];
 	let reserved_node = MultiaddrWithPeerId {
 		multiaddr: config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)],
 		peer_id: PeerId::random(),
 	};
 
-	let _ = TestNetworkBuilder::new()
+	let _ = TestNetworkBuilder::new(Handle::current())
 		.with_config(config::NetworkConfiguration {
 			listen_addresses: vec![listen_addr.clone()],
 			transport: TransportConfig::MemoryOnly,
@@ -553,16 +565,16 @@ fn ensure_reserved_node_addresses_consistent_with_transport_memory() {
 		.start_network();
 }
 
-#[test]
+#[tokio::test]
 #[should_panic(expected = "don't match the transport")]
-fn ensure_reserved_node_addresses_consistent_with_transport_not_memory() {
+async fn ensure_reserved_node_addresses_consistent_with_transport_not_memory() {
 	let listen_addr = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)];
 	let reserved_node = MultiaddrWithPeerId {
 		multiaddr: config::build_multiaddr![Memory(rand::random::<u64>())],
 		peer_id: PeerId::random(),
 	};
 
-	let _ = TestNetworkBuilder::new()
+	let _ = TestNetworkBuilder::new(Handle::current())
 		.with_config(config::NetworkConfiguration {
 			listen_addresses: vec![listen_addr.clone()],
 			default_peers_set: SetConfig {
@@ -580,13 +592,13 @@ fn ensure_reserved_node_addresses_consistent_with_transport_not_memory() {
 		.start_network();
 }
 
-#[test]
+#[tokio::test]
 #[should_panic(expected = "don't match the transport")]
-fn ensure_public_addresses_consistent_with_transport_memory() {
+async fn ensure_public_addresses_consistent_with_transport_memory() {
 	let listen_addr = config::build_multiaddr![Memory(rand::random::<u64>())];
 	let public_address = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)];
 
-	let _ = TestNetworkBuilder::new()
+	let _ = TestNetworkBuilder::new(Handle::current())
 		.with_config(config::NetworkConfiguration {
 			listen_addresses: vec![listen_addr.clone()],
 			transport: TransportConfig::MemoryOnly,
@@ -602,13 +614,13 @@ fn ensure_public_addresses_consistent_with_transport_memory() {
 		.start_network();
 }
 
-#[test]
+#[tokio::test]
 #[should_panic(expected = "don't match the transport")]
-fn ensure_public_addresses_consistent_with_transport_not_memory() {
+async fn ensure_public_addresses_consistent_with_transport_not_memory() {
 	let listen_addr = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)];
 	let public_address = config::build_multiaddr![Memory(rand::random::<u64>())];
 
-	let _ = TestNetworkBuilder::new()
+	let _ = TestNetworkBuilder::new(Handle::current())
 		.with_config(config::NetworkConfiguration {
 			listen_addresses: vec![listen_addr.clone()],
 			public_addresses: vec![public_address],
diff --git a/substrate/client/network/src/transport.rs b/substrate/client/network/src/transport.rs
index 23645b11795c39c952759354e876692f947268a9..b0d5f0235b6e49f5acd38157fa0fec4c50be7cd4 100644
--- a/substrate/client/network/src/transport.rs
+++ b/substrate/client/network/src/transport.rs
@@ -55,16 +55,16 @@ pub fn build_transport(
 	// Build the base layer of the transport.
 	let transport = if !memory_only {
 		let tcp_config = tcp::GenTcpConfig::new().nodelay(true);
-		let desktop_trans = tcp::TcpTransport::new(tcp_config.clone());
+		let desktop_trans = tcp::TokioTcpTransport::new(tcp_config.clone());
 		let desktop_trans = websocket::WsConfig::new(desktop_trans)
-			.or_transport(tcp::TcpTransport::new(tcp_config.clone()));
-		let dns_init = futures::executor::block_on(dns::DnsConfig::system(desktop_trans));
+			.or_transport(tcp::TokioTcpTransport::new(tcp_config.clone()));
+		let dns_init = dns::TokioDnsConfig::system(desktop_trans);
 		EitherTransport::Left(if let Ok(dns) = dns_init {
 			EitherTransport::Left(dns)
 		} else {
-			let desktop_trans = tcp::TcpTransport::new(tcp_config.clone());
+			let desktop_trans = tcp::TokioTcpTransport::new(tcp_config.clone());
 			let desktop_trans = websocket::WsConfig::new(desktop_trans)
-				.or_transport(tcp::TcpTransport::new(tcp_config));
+				.or_transport(tcp::TokioTcpTransport::new(tcp_config));
 			EitherTransport::Right(desktop_trans.map_err(dns::DnsErr::Transport))
 		})
 	} else {
diff --git a/substrate/client/network/sync/Cargo.toml b/substrate/client/network/sync/Cargo.toml
index 841388c7a68ee6efe2f21f7736d59fd549cca0e1..263c2d40c22730279d15a748da030bf64596d31c 100644
--- a/substrate/client/network/sync/Cargo.toml
+++ b/substrate/client/network/sync/Cargo.toml
@@ -42,7 +42,7 @@ sp-finality-grandpa = { version = "4.0.0-dev", path = "../../../primitives/final
 sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" }
 
 [dev-dependencies]
-async-std = { version = "1.11.0", features = ["attributes"] }
+tokio = { version = "1.22.0", features = ["macros"] }
 quickcheck = { version = "1.0.3", default-features = false }
 sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" }
 sp-test-primitives = { version = "2.0.0", path = "../../../primitives/test-primitives" }
diff --git a/substrate/client/network/sync/src/service/network.rs b/substrate/client/network/sync/src/service/network.rs
index 43501baeec7be52eaab8dbf65b45b9a8c849a35c..c44398b0f1a9ec983bcfa1f34c322e46dda3c7e8 100644
--- a/substrate/client/network/sync/src/service/network.rs
+++ b/substrate/client/network/sync/src/service/network.rs
@@ -126,7 +126,7 @@ mod tests {
 
 	// typical pattern in `Protocol` code where peer is disconnected
 	// and then reported
-	#[async_std::test]
+	#[tokio::test]
 	async fn disconnect_and_report_peer() {
 		let (provider, handle) = NetworkServiceProvider::new();
 
@@ -147,7 +147,7 @@ mod tests {
 			.once()
 			.returning(|_, _| ());
 
-		async_std::task::spawn(async move {
+		tokio::spawn(async move {
 			provider.run(Arc::new(mock_network)).await;
 		});
 
diff --git a/substrate/client/network/sync/src/tests.rs b/substrate/client/network/sync/src/tests.rs
index bd78c3b45226d023c37bf7cfecfb23c71f7f3b08..a03e657f03ab215757b0d2a3acceb5dcddd5fe16 100644
--- a/substrate/client/network/sync/src/tests.rs
+++ b/substrate/client/network/sync/src/tests.rs
@@ -35,7 +35,7 @@ use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _
 
 // verify that the fork target map is empty, then submit a new sync fork request,
 // poll `ChainSync` and verify that a new sync fork request has been registered
-#[async_std::test]
+#[tokio::test]
 async fn delegate_to_chainsync() {
 	let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new();
 	let (mut chain_sync, chain_sync_service, _) = ChainSync::new(
diff --git a/substrate/client/network/test/Cargo.toml b/substrate/client/network/test/Cargo.toml
index eb4d54b9dc82df5ea5011158404a3993063bdbd9..86b5be37d256a125aa549239433d888705657a6e 100644
--- a/substrate/client/network/test/Cargo.toml
+++ b/substrate/client/network/test/Cargo.toml
@@ -13,7 +13,7 @@ repository = "https://github.com/paritytech/substrate/"
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-async-std = "1.11.0"
+tokio = "1.22.0"
 async-trait = "0.1.57"
 futures = "0.3.21"
 futures-timer = "3.0.1"
diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs
index 4eb93499d7435e262f0b8fc6fa3f5dea9d1851b1..d3642e69cb63268b68cc6ad52b003f288d64d9ad 100644
--- a/substrate/client/network/test/src/lib.rs
+++ b/substrate/client/network/test/src/lib.rs
@@ -31,7 +31,6 @@ use std::{
 	time::Duration,
 };
 
-use async_std::future::timeout;
 use futures::{future::BoxFuture, prelude::*};
 use libp2p::{build_multiaddr, PeerId};
 use log::trace;
@@ -85,6 +84,7 @@ pub use substrate_test_runtime_client::{
 	runtime::{Block, Extrinsic, Hash, Transfer},
 	TestClient, TestClientBuilder, TestClientBuilderExt,
 };
+use tokio::time::timeout;
 
 type AuthorityId = sp_consensus_babe::AuthorityId;
 
@@ -708,7 +708,16 @@ pub struct FullPeerConfig {
 	pub storage_chain: bool,
 }
 
-pub trait TestNetFactory: Default + Sized
+/// Trait for test fixtures that run on a tokio runtime.
+pub trait WithRuntime {
+	/// Construct with runtime handle.
+	fn with_runtime(rt_handle: tokio::runtime::Handle) -> Self;
+	/// Get runtime handle.
+	fn rt_handle(&self) -> &tokio::runtime::Handle;
+}
+
+#[async_trait::async_trait]
+pub trait TestNetFactory: WithRuntime + Sized
 where
 	<Self::BlockImport as BlockImport<Block>>::Transaction: Send,
 {
@@ -738,9 +747,9 @@ where
 	);
 
 	/// Create new test network with this many peers.
-	fn new(n: usize) -> Self {
+	fn new(rt_handle: tokio::runtime::Handle, n: usize) -> Self {
 		trace!(target: "test_network", "Creating test network");
-		let mut net = Self::default();
+		let mut net = Self::with_runtime(rt_handle);
 
 		for i in 0..n {
 			trace!(target: "test_network", "Adding peer {}", i);
@@ -894,9 +903,14 @@ where
 		)
 		.unwrap();
 
+		let handle = self.rt_handle().clone();
+		let executor = move |f| {
+			handle.spawn(f);
+		};
+
 		let network = NetworkWorker::new(sc_network::config::Params {
 			role: if config.is_authority { Role::Authority } else { Role::Full },
-			executor: None,
+			executor: Box::new(executor),
 			network_config,
 			chain: client.clone(),
 			protocol_id,
@@ -919,7 +933,7 @@ where
 		trace!(target: "test_network", "Peer identifier: {}", network.service().local_peer_id());
 
 		let service = network.service().clone();
-		async_std::task::spawn(async move {
+		self.rt_handle().spawn(async move {
 			chain_sync_network_provider.run(service).await;
 		});
 
@@ -950,7 +964,7 @@ where
 
 	/// Used to spawn background tasks, e.g. the block request protocol handler.
 	fn spawn_task(&self, f: BoxFuture<'static, ()>) {
-		async_std::task::spawn(f);
+		self.rt_handle().spawn(f);
 	}
 
 	/// Polls the testnet until all nodes are in sync.
@@ -1009,34 +1023,31 @@ where
 		Poll::Pending
 	}
 
-	/// Blocks the current thread until we are sync'ed.
+	/// Wait until we are sync'ed.
 	///
 	/// Calls `poll_until_sync` repeatedly.
 	/// (If we've not synced within 10 mins then panic rather than hang.)
-	fn block_until_sync(&mut self) {
-		futures::executor::block_on(timeout(
+	async fn wait_until_sync(&mut self) {
+		timeout(
 			Duration::from_secs(10 * 60),
 			futures::future::poll_fn::<(), _>(|cx| self.poll_until_sync(cx)),
-		))
+		)
+		.await
 		.expect("sync didn't happen within 10 mins");
 	}
 
-	/// Blocks the current thread until there are no pending packets.
+	/// Wait until there are no pending packets.
 	///
 	/// Calls `poll_until_idle` repeatedly with the runtime passed as parameter.
-	fn block_until_idle(&mut self) {
-		futures::executor::block_on(futures::future::poll_fn::<(), _>(|cx| {
-			self.poll_until_idle(cx)
-		}));
+	async fn wait_until_idle(&mut self) {
+		futures::future::poll_fn::<(), _>(|cx| self.poll_until_idle(cx)).await;
 	}
 
-	/// Blocks the current thread until all peers are connected to each other.
+	/// Wait until all peers are connected to each other.
 	///
 	/// Calls `poll_until_connected` repeatedly with the runtime passed as parameter.
-	fn block_until_connected(&mut self) {
-		futures::executor::block_on(futures::future::poll_fn::<(), _>(|cx| {
-			self.poll_until_connected(cx)
-		}));
+	async fn wait_until_connected(&mut self) {
+		futures::future::poll_fn::<(), _>(|cx| self.poll_until_connected(cx)).await;
 	}
 
 	/// Polls the testnet. Processes all the pending actions.
@@ -1067,11 +1078,20 @@ where
 	}
 }
 
-#[derive(Default)]
 pub struct TestNet {
+	rt_handle: tokio::runtime::Handle,
 	peers: Vec<Peer<(), PeersClient>>,
 }
 
+impl WithRuntime for TestNet {
+	fn with_runtime(rt_handle: tokio::runtime::Handle) -> Self {
+		TestNet { rt_handle, peers: Vec::new() }
+	}
+	fn rt_handle(&self) -> &tokio::runtime::Handle {
+		&self.rt_handle
+	}
+}
+
 impl TestNetFactory for TestNet {
 	type Verifier = PassThroughVerifier;
 	type PeerData = ();
@@ -1126,10 +1146,17 @@ impl JustificationImport<Block> for ForceFinalized {
 			.map_err(|_| ConsensusError::InvalidJustification)
 	}
 }
-
-#[derive(Default)]
 pub struct JustificationTestNet(TestNet);
 
+impl WithRuntime for JustificationTestNet {
+	fn with_runtime(rt_handle: tokio::runtime::Handle) -> Self {
+		JustificationTestNet(TestNet::with_runtime(rt_handle))
+	}
+	fn rt_handle(&self) -> &tokio::runtime::Handle {
+		&self.0.rt_handle()
+	}
+}
+
 impl TestNetFactory for JustificationTestNet {
 	type Verifier = PassThroughVerifier;
 	type PeerData = ();
diff --git a/substrate/client/network/test/src/sync.rs b/substrate/client/network/test/src/sync.rs
index ea3895cb4dfde8542c4f8ea64d17d8a604812d29..efe0e0577c11ef076701e957007816cdd4e4dd51 100644
--- a/substrate/client/network/test/src/sync.rs
+++ b/substrate/client/network/test/src/sync.rs
@@ -17,14 +17,16 @@
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 use super::*;
-use futures::{executor::block_on, Future};
+use futures::Future;
 use sp_consensus::{block_validation::Validation, BlockOrigin};
 use sp_runtime::Justifications;
 use substrate_test_runtime::Header;
+use tokio::runtime::Runtime;
 
 fn test_ancestor_search_when_common_is(n: usize) {
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(3);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 3);
 
 	net.peer(0).push_blocks(n, false);
 	net.peer(1).push_blocks(n, false);
@@ -34,7 +36,7 @@ fn test_ancestor_search_when_common_is(n: usize) {
 	net.peer(1).push_blocks(100, false);
 	net.peer(2).push_blocks(100, false);
 
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 	let peer1 = &net.peers()[1];
 	assert!(net.peers()[0].blockchain_canon_equals(peer1));
 }
@@ -42,9 +44,10 @@ fn test_ancestor_search_when_common_is(n: usize) {
 #[test]
 fn sync_peers_works() {
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(3);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 3);
 
-	block_on(futures::future::poll_fn::<(), _>(|cx| {
+	runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 		net.poll(cx);
 		for peer in 0..3 {
 			if net.peer(peer).num_peers() != 2 {
@@ -58,7 +61,8 @@ fn sync_peers_works() {
 #[test]
 fn sync_cycle_from_offline_to_syncing_to_offline() {
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(3);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 3);
 	for peer in 0..3 {
 		// Offline, and not major syncing.
 		assert!(net.peer(peer).is_offline());
@@ -69,7 +73,7 @@ fn sync_cycle_from_offline_to_syncing_to_offline() {
 	net.peer(2).push_blocks(100, false);
 
 	// Block until all nodes are online and nodes 0 and 1 and major syncing.
-	block_on(futures::future::poll_fn::<(), _>(|cx| {
+	runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 		net.poll(cx);
 		for peer in 0..3 {
 			// Online
@@ -87,7 +91,7 @@ fn sync_cycle_from_offline_to_syncing_to_offline() {
 	}));
 
 	// Block until all nodes are done syncing.
-	block_on(futures::future::poll_fn::<(), _>(|cx| {
+	runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 		net.poll(cx);
 		for peer in 0..3 {
 			if net.peer(peer).is_major_syncing() {
@@ -100,7 +104,7 @@ fn sync_cycle_from_offline_to_syncing_to_offline() {
 	// Now drop nodes 1 and 2, and check that node 0 is offline.
 	net.peers.remove(2);
 	net.peers.remove(1);
-	block_on(futures::future::poll_fn::<(), _>(|cx| {
+	runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 		net.poll(cx);
 		if !net.peer(0).is_offline() {
 			Poll::Pending
@@ -113,7 +117,8 @@ fn sync_cycle_from_offline_to_syncing_to_offline() {
 #[test]
 fn syncing_node_not_major_syncing_when_disconnected() {
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(3);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 3);
 
 	// Generate blocks.
 	net.peer(2).push_blocks(100, false);
@@ -122,7 +127,7 @@ fn syncing_node_not_major_syncing_when_disconnected() {
 	assert!(!net.peer(1).is_major_syncing());
 
 	// Check that we switch to major syncing.
-	block_on(futures::future::poll_fn::<(), _>(|cx| {
+	runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 		net.poll(cx);
 		if !net.peer(1).is_major_syncing() {
 			Poll::Pending
@@ -134,7 +139,7 @@ fn syncing_node_not_major_syncing_when_disconnected() {
 	// Destroy two nodes, and check that we switch to non-major syncing.
 	net.peers.remove(2);
 	net.peers.remove(0);
-	block_on(futures::future::poll_fn::<(), _>(|cx| {
+	runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 		net.poll(cx);
 		if net.peer(0).is_major_syncing() {
 			Poll::Pending
@@ -147,10 +152,11 @@ fn syncing_node_not_major_syncing_when_disconnected() {
 #[test]
 fn sync_from_two_peers_works() {
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(3);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 3);
 	net.peer(1).push_blocks(100, false);
 	net.peer(2).push_blocks(100, false);
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 	let peer1 = &net.peers()[1];
 	assert!(net.peers()[0].blockchain_canon_equals(peer1));
 	assert!(!net.peer(0).is_major_syncing());
@@ -159,11 +165,12 @@ fn sync_from_two_peers_works() {
 #[test]
 fn sync_from_two_peers_with_ancestry_search_works() {
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(3);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 3);
 	net.peer(0).push_blocks(10, true);
 	net.peer(1).push_blocks(100, false);
 	net.peer(2).push_blocks(100, false);
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 	let peer1 = &net.peers()[1];
 	assert!(net.peers()[0].blockchain_canon_equals(peer1));
 }
@@ -171,13 +178,14 @@ fn sync_from_two_peers_with_ancestry_search_works() {
 #[test]
 fn ancestry_search_works_when_backoff_is_one() {
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(3);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 3);
 
 	net.peer(0).push_blocks(1, false);
 	net.peer(1).push_blocks(2, false);
 	net.peer(2).push_blocks(2, false);
 
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 	let peer1 = &net.peers()[1];
 	assert!(net.peers()[0].blockchain_canon_equals(peer1));
 }
@@ -185,13 +193,14 @@ fn ancestry_search_works_when_backoff_is_one() {
 #[test]
 fn ancestry_search_works_when_ancestor_is_genesis() {
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(3);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 3);
 
 	net.peer(0).push_blocks(13, true);
 	net.peer(1).push_blocks(100, false);
 	net.peer(2).push_blocks(100, false);
 
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 	let peer1 = &net.peers()[1];
 	assert!(net.peers()[0].blockchain_canon_equals(peer1));
 }
@@ -214,9 +223,10 @@ fn ancestry_search_works_when_common_is_hundred() {
 #[test]
 fn sync_long_chain_works() {
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(2);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 2);
 	net.peer(1).push_blocks(500, false);
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 	let peer1 = &net.peers()[1];
 	assert!(net.peers()[0].blockchain_canon_equals(peer1));
 }
@@ -224,10 +234,11 @@ fn sync_long_chain_works() {
 #[test]
 fn sync_no_common_longer_chain_fails() {
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(3);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 3);
 	net.peer(0).push_blocks(20, true);
 	net.peer(1).push_blocks(20, false);
-	block_on(futures::future::poll_fn::<(), _>(|cx| {
+	runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 		net.poll(cx);
 		if net.peer(0).is_major_syncing() {
 			Poll::Pending
@@ -242,9 +253,10 @@ fn sync_no_common_longer_chain_fails() {
 #[test]
 fn sync_justifications() {
 	sp_tracing::try_init_simple();
-	let mut net = JustificationTestNet::new(3);
+	let runtime = Runtime::new().unwrap();
+	let mut net = JustificationTestNet::new(runtime.handle().clone(), 3);
 	net.peer(0).push_blocks(20, false);
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 
 	let backend = net.peer(0).client().as_backend();
 	let hashof10 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(10)).unwrap();
@@ -270,7 +282,7 @@ fn sync_justifications() {
 	net.peer(1).request_justification(&hashof15, 15);
 	net.peer(1).request_justification(&hashof20, 20);
 
-	block_on(futures::future::poll_fn::<(), _>(|cx| {
+	runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 		net.poll(cx);
 
 		for hash in [hashof10, hashof15, hashof20] {
@@ -293,7 +305,8 @@ fn sync_justifications() {
 #[test]
 fn sync_justifications_across_forks() {
 	sp_tracing::try_init_simple();
-	let mut net = JustificationTestNet::new(3);
+	let runtime = Runtime::new().unwrap();
+	let mut net = JustificationTestNet::new(runtime.handle().clone(), 3);
 	// we push 5 blocks
 	net.peer(0).push_blocks(5, false);
 	// and then two forks 5 and 6 blocks long
@@ -302,7 +315,7 @@ fn sync_justifications_across_forks() {
 
 	// peer 1 will only see the longer fork. but we'll request justifications
 	// for both and finalize the small fork instead.
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 
 	let just = (*b"FRNK", Vec::new());
 	net.peer(0).client().finalize_block(f1_best, Some(just), true).unwrap();
@@ -310,7 +323,7 @@ fn sync_justifications_across_forks() {
 	net.peer(1).request_justification(&f1_best, 10);
 	net.peer(1).request_justification(&f2_best, 11);
 
-	block_on(futures::future::poll_fn::<(), _>(|cx| {
+	runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 		net.poll(cx);
 
 		if net.peer(0).client().justifications(f1_best).unwrap() ==
@@ -328,7 +341,8 @@ fn sync_justifications_across_forks() {
 #[test]
 fn sync_after_fork_works() {
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(3);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 3);
 	net.peer(0).push_blocks(30, false);
 	net.peer(1).push_blocks(30, false);
 	net.peer(2).push_blocks(30, false);
@@ -341,7 +355,7 @@ fn sync_after_fork_works() {
 	net.peer(2).push_blocks(1, false);
 
 	// peer 1 has the best chain
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 	let peer1 = &net.peers()[1];
 	assert!(net.peers()[0].blockchain_canon_equals(peer1));
 	(net.peers()[1].blockchain_canon_equals(peer1));
@@ -351,14 +365,15 @@ fn sync_after_fork_works() {
 #[test]
 fn syncs_all_forks() {
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(4);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 4);
 	net.peer(0).push_blocks(2, false);
 	net.peer(1).push_blocks(2, false);
 
 	let b1 = net.peer(0).push_blocks(2, true);
 	let b2 = net.peer(1).push_blocks(4, false);
 
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 	// Check that all peers have all of the branches.
 	assert!(net.peer(0).has_block(b1));
 	assert!(net.peer(0).has_block(b2));
@@ -369,12 +384,13 @@ fn syncs_all_forks() {
 #[test]
 fn own_blocks_are_announced() {
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(3);
-	net.block_until_sync(); // connect'em
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 3);
+	runtime.block_on(net.wait_until_sync()); // connect'em
 	net.peer(0)
 		.generate_blocks(1, BlockOrigin::Own, |builder| builder.build().unwrap().block);
 
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 
 	assert_eq!(net.peer(0).client.info().best_number, 1);
 	assert_eq!(net.peer(1).client.info().best_number, 1);
@@ -386,7 +402,8 @@ fn own_blocks_are_announced() {
 #[test]
 fn can_sync_small_non_best_forks() {
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(2);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 2);
 	net.peer(0).push_blocks(30, false);
 	net.peer(1).push_blocks(30, false);
 
@@ -404,7 +421,7 @@ fn can_sync_small_non_best_forks() {
 	assert!(net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_none());
 
 	// poll until the two nodes connect, otherwise announcing the block will not work
-	block_on(futures::future::poll_fn::<(), _>(|cx| {
+	runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 		net.poll(cx);
 		if net.peer(0).num_peers() == 0 {
 			Poll::Pending
@@ -424,7 +441,7 @@ fn can_sync_small_non_best_forks() {
 
 	// after announcing, peer 1 downloads the block.
 
-	block_on(futures::future::poll_fn::<(), _>(|cx| {
+	runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 		net.poll(cx);
 
 		assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some());
@@ -433,11 +450,11 @@ fn can_sync_small_non_best_forks() {
 		}
 		Poll::Ready(())
 	}));
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 
 	let another_fork = net.peer(0).push_blocks_at(BlockId::Number(35), 2, true);
 	net.peer(0).announce_block(another_fork, None);
-	block_on(futures::future::poll_fn::<(), _>(|cx| {
+	runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 		net.poll(cx);
 		if net.peer(1).client().header(&BlockId::Hash(another_fork)).unwrap().is_none() {
 			return Poll::Pending
@@ -449,11 +466,12 @@ fn can_sync_small_non_best_forks() {
 #[test]
 fn can_sync_forks_ahead_of_the_best_chain() {
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(2);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 2);
 	net.peer(0).push_blocks(1, false);
 	net.peer(1).push_blocks(1, false);
 
-	net.block_until_connected();
+	runtime.block_on(net.wait_until_connected());
 	// Peer 0 is on 2-block fork which is announced with is_best=false
 	let fork_hash = net.peer(0).generate_blocks_with_fork_choice(
 		2,
@@ -468,7 +486,7 @@ fn can_sync_forks_ahead_of_the_best_chain() {
 	assert_eq!(net.peer(1).client().info().best_number, 2);
 
 	// after announcing, peer 1 downloads the block.
-	block_on(futures::future::poll_fn::<(), _>(|cx| {
+	runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 		net.poll(cx);
 
 		if net.peer(1).client().header(&BlockId::Hash(fork_hash)).unwrap().is_none() {
@@ -481,7 +499,8 @@ fn can_sync_forks_ahead_of_the_best_chain() {
 #[test]
 fn can_sync_explicit_forks() {
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(2);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 2);
 	net.peer(0).push_blocks(30, false);
 	net.peer(1).push_blocks(30, false);
 
@@ -500,7 +519,7 @@ fn can_sync_explicit_forks() {
 	assert!(net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_none());
 
 	// poll until the two nodes connect, otherwise announcing the block will not work
-	block_on(futures::future::poll_fn::<(), _>(|cx| {
+	runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 		net.poll(cx);
 		if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 {
 			Poll::Pending
@@ -521,7 +540,7 @@ fn can_sync_explicit_forks() {
 	net.peer(1).set_sync_fork_request(vec![first_peer_id], small_hash, small_number);
 
 	// peer 1 downloads the block.
-	block_on(futures::future::poll_fn::<(), _>(|cx| {
+	runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 		net.poll(cx);
 
 		assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some());
@@ -535,7 +554,8 @@ fn can_sync_explicit_forks() {
 #[test]
 fn syncs_header_only_forks() {
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(0);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 0);
 	net.add_full_peer_with_config(Default::default());
 	net.add_full_peer_with_config(FullPeerConfig { blocks_pruning: Some(3), ..Default::default() });
 	net.peer(0).push_blocks(2, false);
@@ -547,10 +567,10 @@ fn syncs_header_only_forks() {
 
 	// Peer 1 will sync the small fork even though common block state is missing
 	while !net.peer(1).has_block(small_hash) {
-		net.block_until_idle();
+		runtime.block_on(net.wait_until_idle());
 	}
 
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 	assert_eq!(net.peer(0).client().info().best_hash, net.peer(1).client().info().best_hash);
 	assert_ne!(small_hash, net.peer(0).client().info().best_hash);
 }
@@ -558,7 +578,8 @@ fn syncs_header_only_forks() {
 #[test]
 fn does_not_sync_announced_old_best_block() {
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(3);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 3);
 
 	let old_hash = net.peer(0).push_blocks(1, false);
 	let old_hash_with_parent = net.peer(0).push_blocks(1, false);
@@ -566,7 +587,7 @@ fn does_not_sync_announced_old_best_block() {
 	net.peer(1).push_blocks(20, true);
 
 	net.peer(0).announce_block(old_hash, None);
-	block_on(futures::future::poll_fn::<(), _>(|cx| {
+	runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 		// poll once to import announcement
 		net.poll(cx);
 		Poll::Ready(())
@@ -574,7 +595,7 @@ fn does_not_sync_announced_old_best_block() {
 	assert!(!net.peer(1).is_major_syncing());
 
 	net.peer(0).announce_block(old_hash_with_parent, None);
-	block_on(futures::future::poll_fn::<(), _>(|cx| {
+	runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 		// poll once to import announcement
 		net.poll(cx);
 		Poll::Ready(())
@@ -586,11 +607,12 @@ fn does_not_sync_announced_old_best_block() {
 fn full_sync_requires_block_body() {
 	// Check that we don't sync headers-only in full mode.
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(2);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 2);
 
 	net.peer(0).push_headers(1);
 	// Wait for nodes to connect
-	block_on(futures::future::poll_fn::<(), _>(|cx| {
+	runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 		net.poll(cx);
 		if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 {
 			Poll::Pending
@@ -598,20 +620,21 @@ fn full_sync_requires_block_body() {
 			Poll::Ready(())
 		}
 	}));
-	net.block_until_idle();
+	runtime.block_on(net.wait_until_idle());
 	assert_eq!(net.peer(1).client.info().best_number, 0);
 }
 
 #[test]
 fn imports_stale_once() {
 	sp_tracing::try_init_simple();
+	let runtime = Runtime::new().unwrap();
 
-	fn import_with_announce(net: &mut TestNet, hash: H256) {
+	fn import_with_announce(runtime: &Runtime, net: &mut TestNet, hash: H256) {
 		// Announce twice
 		net.peer(0).announce_block(hash, None);
 		net.peer(0).announce_block(hash, None);
 
-		block_on(futures::future::poll_fn::<(), _>(|cx| {
+		runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 			net.poll(cx);
 			if net.peer(1).client().header(&BlockId::Hash(hash)).unwrap().is_some() {
 				Poll::Ready(())
@@ -622,33 +645,34 @@ fn imports_stale_once() {
 	}
 
 	// given the network with 2 full nodes
-	let mut net = TestNet::new(2);
+	let mut net = TestNet::new(runtime.handle().clone(), 2);
 
 	// let them connect to each other
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 
 	// check that NEW block is imported from announce message
 	let new_hash = net.peer(0).push_blocks(1, false);
-	import_with_announce(&mut net, new_hash);
+	import_with_announce(&runtime, &mut net, new_hash);
 	assert_eq!(net.peer(1).num_downloaded_blocks(), 1);
 
 	// check that KNOWN STALE block is imported from announce message
 	let known_stale_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 1, true);
-	import_with_announce(&mut net, known_stale_hash);
+	import_with_announce(&runtime, &mut net, known_stale_hash);
 	assert_eq!(net.peer(1).num_downloaded_blocks(), 2);
 }
 
 #[test]
 fn can_sync_to_peers_with_wrong_common_block() {
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(2);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 2);
 
 	net.peer(0).push_blocks(2, true);
 	net.peer(1).push_blocks(2, true);
 	let fork_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 2, false);
 	net.peer(1).push_blocks_at(BlockId::Number(0), 2, false);
 	// wait for connection
-	net.block_until_connected();
+	runtime.block_on(net.wait_until_connected());
 
 	// both peers re-org to the same fork without notifying each other
 	let just = Some((*b"FRNK", Vec::new()));
@@ -656,7 +680,7 @@ fn can_sync_to_peers_with_wrong_common_block() {
 	net.peer(1).client().finalize_block(fork_hash, just, true).unwrap();
 	let final_hash = net.peer(0).push_blocks(1, false);
 
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 
 	assert!(net.peer(1).has_block(final_hash));
 }
@@ -701,7 +725,8 @@ impl BlockAnnounceValidator<Block> for FailingBlockAnnounceValidator {
 #[test]
 fn sync_blocks_when_block_announce_validator_says_it_is_new_best() {
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(0);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 0);
 	net.add_full_peer_with_config(Default::default());
 	net.add_full_peer_with_config(Default::default());
 	net.add_full_peer_with_config(FullPeerConfig {
@@ -709,7 +734,7 @@ fn sync_blocks_when_block_announce_validator_says_it_is_new_best() {
 		..Default::default()
 	});
 
-	net.block_until_connected();
+	runtime.block_on(net.wait_until_connected());
 
 	// Add blocks but don't set them as best
 	let block_hash = net.peer(0).generate_blocks_with_fork_choice(
@@ -720,7 +745,7 @@ fn sync_blocks_when_block_announce_validator_says_it_is_new_best() {
 	);
 
 	while !net.peer(2).has_block(block_hash) {
-		net.block_until_idle();
+		runtime.block_on(net.wait_until_idle());
 	}
 }
 
@@ -745,14 +770,15 @@ impl BlockAnnounceValidator<Block> for DeferredBlockAnnounceValidator {
 #[test]
 fn wait_until_deferred_block_announce_validation_is_ready() {
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(0);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 0);
 	net.add_full_peer_with_config(Default::default());
 	net.add_full_peer_with_config(FullPeerConfig {
 		block_announce_validator: Some(Box::new(NewBestBlockAnnounceValidator)),
 		..Default::default()
 	});
 
-	net.block_until_connected();
+	runtime.block_on(net.wait_until_connected());
 
 	// Add blocks but don't set them as best
 	let block_hash = net.peer(0).generate_blocks_with_fork_choice(
@@ -763,7 +789,7 @@ fn wait_until_deferred_block_announce_validation_is_ready() {
 	);
 
 	while !net.peer(1).has_block(block_hash) {
-		net.block_until_idle();
+		runtime.block_on(net.wait_until_idle());
 	}
 }
 
@@ -772,7 +798,8 @@ fn wait_until_deferred_block_announce_validation_is_ready() {
 #[test]
 fn sync_to_tip_requires_that_sync_protocol_is_informed_about_best_block() {
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(1);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 1);
 
 	// Produce some blocks
 	let block_hash =
@@ -780,16 +807,18 @@ fn sync_to_tip_requires_that_sync_protocol_is_informed_about_best_block() {
 			.push_blocks_at_without_informing_sync(BlockId::Number(0), 3, true, true);
 
 	// Add a node and wait until they are connected
-	net.add_full_peer_with_config(Default::default());
-	net.block_until_connected();
-	net.block_until_idle();
+	runtime.block_on(async {
+		net.add_full_peer_with_config(Default::default());
+		net.wait_until_connected().await;
+		net.wait_until_idle().await;
+	});
 
 	// The peer should not have synced the block.
 	assert!(!net.peer(1).has_block(block_hash));
 
 	// Make sync protocol aware of the best block
 	net.peer(0).network_service().new_best_block_imported(block_hash, 3);
-	net.block_until_idle();
+	runtime.block_on(net.wait_until_idle());
 
 	// Connect another node that should now sync to the tip
 	net.add_full_peer_with_config(FullPeerConfig {
@@ -797,7 +826,7 @@ fn sync_to_tip_requires_that_sync_protocol_is_informed_about_best_block() {
 		..Default::default()
 	});
 
-	block_on(futures::future::poll_fn::<(), _>(|cx| {
+	runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 		net.poll(cx);
 		if net.peer(2).has_block(block_hash) {
 			Poll::Ready(())
@@ -815,8 +844,9 @@ fn sync_to_tip_requires_that_sync_protocol_is_informed_about_best_block() {
 #[test]
 fn sync_to_tip_when_we_sync_together_with_multiple_peers() {
 	sp_tracing::try_init_simple();
+	let runtime = Runtime::new().unwrap();
 
-	let mut net = TestNet::new(3);
+	let mut net = TestNet::new(runtime.handle().clone(), 3);
 
 	let block_hash =
 		net.peer(0)
@@ -825,8 +855,10 @@ fn sync_to_tip_when_we_sync_together_with_multiple_peers() {
 	net.peer(1)
 		.push_blocks_at_without_informing_sync(BlockId::Number(0), 5_000, false, false);
 
-	net.block_until_connected();
-	net.block_until_idle();
+	runtime.block_on(async {
+		net.wait_until_connected().await;
+		net.wait_until_idle().await;
+	});
 
 	assert!(!net.peer(2).has_block(block_hash));
 
@@ -834,7 +866,7 @@ fn sync_to_tip_when_we_sync_together_with_multiple_peers() {
 	net.peer(0).network_service().announce_block(block_hash, None);
 
 	while !net.peer(2).has_block(block_hash) && !net.peer(1).has_block(block_hash) {
-		net.block_until_idle();
+		runtime.block_on(net.wait_until_idle());
 	}
 }
 
@@ -865,7 +897,8 @@ fn block_announce_data_is_propagated() {
 	}
 
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(1);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 1);
 
 	net.add_full_peer_with_config(FullPeerConfig {
 		block_announce_validator: Some(Box::new(TestBlockAnnounceValidator)),
@@ -879,7 +912,7 @@ fn block_announce_data_is_propagated() {
 	});
 
 	// Wait until peer 1 is connected to both nodes.
-	block_on(futures::future::poll_fn::<(), _>(|cx| {
+	runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 		net.poll(cx);
 		if net.peer(1).num_peers() == 2 &&
 			net.peer(0).num_peers() == 1 &&
@@ -895,7 +928,7 @@ fn block_announce_data_is_propagated() {
 	net.peer(0).announce_block(block_hash, Some(vec![137]));
 
 	while !net.peer(1).has_block(block_hash) || !net.peer(2).has_block(block_hash) {
-		net.block_until_idle();
+		runtime.block_on(net.wait_until_idle());
 	}
 }
 
@@ -925,19 +958,22 @@ fn continue_to_sync_after_some_block_announcement_verifications_failed() {
 	}
 
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(1);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 1);
 
 	net.add_full_peer_with_config(FullPeerConfig {
 		block_announce_validator: Some(Box::new(TestBlockAnnounceValidator)),
 		..Default::default()
 	});
 
-	net.block_until_connected();
-	net.block_until_idle();
+	runtime.block_on(async {
+		net.wait_until_connected().await;
+		net.wait_until_idle().await;
+	});
 
 	let block_hash = net.peer(0).push_blocks(500, true);
 
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 	assert!(net.peer(1).has_block(block_hash));
 }
 
@@ -948,9 +984,10 @@ fn continue_to_sync_after_some_block_announcement_verifications_failed() {
 #[test]
 fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() {
 	sp_tracing::try_init_simple();
-	let mut net = JustificationTestNet::new(2);
+	let runtime = Runtime::new().unwrap();
+	let mut net = JustificationTestNet::new(runtime.handle().clone(), 2);
 	net.peer(0).push_blocks(10, false);
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 
 	let hashof10 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap().hash();
 
@@ -967,7 +1004,7 @@ fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() {
 		// justification request.
 		std::thread::sleep(std::time::Duration::from_secs(10));
 		net.peer(0).push_blocks(1, false);
-		net.block_until_sync();
+		runtime.block_on(net.wait_until_sync());
 		assert_eq!(1, net.peer(0).num_peers());
 	}
 
@@ -984,7 +1021,7 @@ fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() {
 		.finalize_block(hashof10, Some((*b"FRNK", Vec::new())), true)
 		.unwrap();
 
-	block_on(futures::future::poll_fn::<(), _>(|cx| {
+	runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 		net.poll(cx);
 
 		if net.peer(1).client().justifications(hashof10).unwrap() !=
@@ -1000,18 +1037,19 @@ fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() {
 #[test]
 fn syncs_all_forks_from_single_peer() {
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(2);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 2);
 	net.peer(0).push_blocks(10, false);
 	net.peer(1).push_blocks(10, false);
 
 	// poll until the two nodes connect, otherwise announcing the block will not work
-	net.block_until_connected();
+	runtime.block_on(net.wait_until_connected());
 
 	// Peer 0 produces new blocks and announces.
 	let branch1 = net.peer(0).push_blocks_at(BlockId::Number(10), 2, true);
 
 	// Wait till peer 1 starts downloading
-	block_on(futures::future::poll_fn::<(), _>(|cx| {
+	runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 		net.poll(cx);
 		if net.peer(1).network().best_seen_block() != Some(12) {
 			return Poll::Pending
@@ -1022,7 +1060,7 @@ fn syncs_all_forks_from_single_peer() {
 	// Peer 0 produces and announces another fork
 	let branch2 = net.peer(0).push_blocks_at(BlockId::Number(10), 2, false);
 
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 
 	// Peer 1 should have both branches,
 	assert!(net.peer(1).client().header(&BlockId::Hash(branch1)).unwrap().is_some());
@@ -1032,7 +1070,8 @@ fn syncs_all_forks_from_single_peer() {
 #[test]
 fn syncs_after_missing_announcement() {
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(0);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 0);
 	net.add_full_peer_with_config(Default::default());
 	// Set peer 1 to ignore announcement
 	net.add_full_peer_with_config(FullPeerConfig {
@@ -1042,22 +1081,23 @@ fn syncs_after_missing_announcement() {
 	net.peer(0).push_blocks(10, false);
 	net.peer(1).push_blocks(10, false);
 
-	net.block_until_connected();
+	runtime.block_on(net.wait_until_connected());
 
 	// Peer 0 produces a new block and announces. Peer 1 ignores announcement.
 	net.peer(0).push_blocks_at(BlockId::Number(10), 1, false);
 	// Peer 0 produces another block and announces.
 	let final_block = net.peer(0).push_blocks_at(BlockId::Number(11), 1, false);
 	net.peer(1).push_blocks_at(BlockId::Number(10), 1, true);
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 	assert!(net.peer(1).client().header(&BlockId::Hash(final_block)).unwrap().is_some());
 }
 
 #[test]
 fn syncs_state() {
 	sp_tracing::try_init_simple();
+	let runtime = Runtime::new().unwrap();
 	for skip_proofs in &[false, true] {
-		let mut net = TestNet::new(0);
+		let mut net = TestNet::new(runtime.handle().clone(), 0);
 		let mut genesis_storage: sp_core::storage::Storage = Default::default();
 		genesis_storage.top.insert(b"additional_key".to_vec(), vec![1]);
 		let mut child_data: std::collections::BTreeMap<Vec<u8>, Vec<u8>> = Default::default();
@@ -1098,7 +1138,7 @@ fn syncs_state() {
 		net.add_full_peer_with_config(config_two);
 		net.peer(0).push_blocks(64, false);
 		// Wait for peer 1 to sync header chain.
-		net.block_until_sync();
+		runtime.block_on(net.wait_until_sync());
 		assert!(!net.peer(1).client().has_state_at(&BlockId::Number(64)));
 
 		let just = (*b"FRNK", Vec::new());
@@ -1111,7 +1151,7 @@ fn syncs_state() {
 			.unwrap();
 		net.peer(1).client().finalize_block(hashof60, Some(just), true).unwrap();
 		// Wait for state sync.
-		block_on(futures::future::poll_fn::<(), _>(|cx| {
+		runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 			net.poll(cx);
 			if net.peer(1).client.info().finalized_state.is_some() {
 				Poll::Ready(())
@@ -1121,7 +1161,7 @@ fn syncs_state() {
 		}));
 		assert!(!net.peer(1).client().has_state_at(&BlockId::Number(64)));
 		// Wait for the rest of the states to be imported.
-		block_on(futures::future::poll_fn::<(), _>(|cx| {
+		runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 			net.poll(cx);
 			if net.peer(1).client().has_state_at(&BlockId::Number(64)) {
 				Poll::Ready(())
@@ -1136,7 +1176,8 @@ fn syncs_state() {
 fn syncs_indexed_blocks() {
 	use sp_runtime::traits::Hash;
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(0);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 0);
 	let mut n: u64 = 0;
 	net.add_full_peer_with_config(FullPeerConfig { storage_chain: true, ..Default::default() });
 	net.add_full_peer_with_config(FullPeerConfig {
@@ -1175,7 +1216,7 @@ fn syncs_indexed_blocks() {
 		.unwrap()
 		.is_none());
 
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 	assert!(net
 		.peer(1)
 		.client()
@@ -1188,7 +1229,8 @@ fn syncs_indexed_blocks() {
 #[test]
 fn warp_sync() {
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(0);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 0);
 	// Create 3 synced peers and 1 peer trying to warp sync.
 	net.add_full_peer_with_config(Default::default());
 	net.add_full_peer_with_config(Default::default());
@@ -1202,12 +1244,12 @@ fn warp_sync() {
 	net.peer(1).push_blocks(64, false);
 	net.peer(2).push_blocks(64, false);
 	// Wait for peer 1 to sync state.
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 	assert!(!net.peer(3).client().has_state_at(&BlockId::Number(1)));
 	assert!(net.peer(3).client().has_state_at(&BlockId::Number(64)));
 
 	// Wait for peer 1 download block history
-	block_on(futures::future::poll_fn::<(), _>(|cx| {
+	runtime.block_on(futures::future::poll_fn::<(), _>(|cx| {
 		net.poll(cx);
 		if net.peer(3).has_body(gap_end) && net.peer(3).has_body(target) {
 			Poll::Ready(())
@@ -1224,7 +1266,8 @@ fn syncs_huge_blocks() {
 	use substrate_test_runtime_client::BlockBuilderExt;
 
 	sp_tracing::try_init_simple();
-	let mut net = TestNet::new(2);
+	let runtime = Runtime::new().unwrap();
+	let mut net = TestNet::new(runtime.handle().clone(), 2);
 
 	// Increase heap space for bigger blocks.
 	net.peer(0).generate_blocks(1, BlockOrigin::Own, |mut builder| {
@@ -1241,7 +1284,7 @@ fn syncs_huge_blocks() {
 		builder.build().unwrap().block
 	});
 
-	net.block_until_sync();
+	runtime.block_on(net.wait_until_sync());
 	assert_eq!(net.peer(0).client.info().best_number, 33);
 	assert_eq!(net.peer(1).client.info().best_number, 33);
 }
diff --git a/substrate/client/offchain/Cargo.toml b/substrate/client/offchain/Cargo.toml
index f23335ef97e3316a6428ce83af8f585fe10daf99..6406d7dc475a82f6c447d1e6c8fd114cf900bb47 100644
--- a/substrate/client/offchain/Cargo.toml
+++ b/substrate/client/offchain/Cargo.toml
@@ -39,7 +39,7 @@ sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" }
 
 [dev-dependencies]
 lazy_static = "1.4.0"
-tokio = "1.17.0"
+tokio = "1.22.0"
 sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" }
 sc-client-db = { version = "0.10.0-dev", default-features = true, path = "../db" }
 sc-transaction-pool = { version = "4.0.0-dev", path = "../transaction-pool" }
diff --git a/substrate/client/rpc-servers/Cargo.toml b/substrate/client/rpc-servers/Cargo.toml
index ef2e6bec4cdb0cb3a59dcc52e068fe462323d657..a3e64c367afb6923667a04c448ba3b506f6778d9 100644
--- a/substrate/client/rpc-servers/Cargo.toml
+++ b/substrate/client/rpc-servers/Cargo.toml
@@ -17,5 +17,5 @@ futures = "0.3.21"
 jsonrpsee = { version = "0.15.1", features = ["server"] }
 log = "0.4.17"
 serde_json = "1.0.85"
-tokio = { version = "1.17.0", features = ["parking_lot"] }
+tokio = { version = "1.22.0", features = ["parking_lot"] }
 prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" }
diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml
index 51f5516ecf9c80eea8480bc49f6bc7583f01eb93..a0ae3038378ff053e24b352a399231b9087f0a44 100644
--- a/substrate/client/rpc-spec-v2/Cargo.toml
+++ b/substrate/client/rpc-spec-v2/Cargo.toml
@@ -30,4 +30,4 @@ futures = "0.3.21"
 
 [dev-dependencies]
 serde_json = "1.0"
-tokio = { version = "1.17.0", features = ["macros"] }
+tokio = { version = "1.22.0", features = ["macros"] }
diff --git a/substrate/client/rpc/Cargo.toml b/substrate/client/rpc/Cargo.toml
index 0a420301826e1878300a99f88ac00acf2bea68b3..d690e2c7b4cf1bf32034dd02b8276d2651d2a51b 100644
--- a/substrate/client/rpc/Cargo.toml
+++ b/substrate/client/rpc/Cargo.toml
@@ -38,7 +38,7 @@ sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" }
 sp-session = { version = "4.0.0-dev", path = "../../primitives/session" }
 sp-version = { version = "5.0.0", path = "../../primitives/version" }
 
-tokio = { version = "1.17.0", optional = true }
+tokio = { version = "1.22.0", optional = true }
 
 [dev-dependencies]
 env_logger = "0.9"
@@ -49,7 +49,7 @@ sc-network = { version = "0.10.0-dev", path = "../network" }
 sc-network-common = { version = "0.10.0-dev", path = "../network/common" }
 sc-transaction-pool = { version = "4.0.0-dev", path = "../transaction-pool" }
 sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" }
-tokio = "1.17.0"
+tokio = "1.22.0"
 sp-io = { version = "7.0.0", path = "../../primitives/io" }
 substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" }
 
diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml
index c165d2d1288cd991490071c5f214ce9ed160041e..8b17a8287c27447ba360bfede01de96d5d9eaf09 100644
--- a/substrate/client/service/Cargo.toml
+++ b/substrate/client/service/Cargo.toml
@@ -84,7 +84,7 @@ parity-util-mem = { version = "0.12.0", default-features = false, features = [
 	"primitive-types",
 ] }
 async-trait = "0.1.57"
-tokio = { version = "1.17.0", features = ["time", "rt-multi-thread", "parking_lot"] }
+tokio = { version = "1.22.0", features = ["time", "rt-multi-thread", "parking_lot"] }
 tempfile = "3.1.0"
 directories = "4.0.1"
 static_init = "1.0.3"
@@ -92,4 +92,3 @@ static_init = "1.0.3"
 [dev-dependencies]
 substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" }
 substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime/" }
-async-std = { version = "1.11.0", default-features = false }
diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs
index 50b6825f0c707da5118957661a28696e7f882fb6..df20f2009ee095f6d761f628737b35462650563d 100644
--- a/substrate/client/service/src/builder.rs
+++ b/substrate/client/service/src/builder.rs
@@ -876,9 +876,9 @@ where
 		role: config.role.clone(),
 		executor: {
 			let spawn_handle = Clone::clone(&spawn_handle);
-			Some(Box::new(move |fut| {
+			Box::new(move |fut| {
 				spawn_handle.spawn("libp2p-node", Some("networking"), fut);
-			}))
+			})
 		},
 		network_config: config.network.clone(),
 		chain: client.clone(),
diff --git a/substrate/client/service/test/Cargo.toml b/substrate/client/service/test/Cargo.toml
index 8e6131cbb75de782d81f9bc5b41885c1fb5cb5ba..46ba1b33931aca6c9d130dbdc4e5927810f37f3a 100644
--- a/substrate/client/service/test/Cargo.toml
+++ b/substrate/client/service/test/Cargo.toml
@@ -19,7 +19,7 @@ log = "0.4.17"
 parity-scale-codec = "3.0.0"
 parking_lot = "0.12.1"
 tempfile = "3.1.0"
-tokio = { version = "1.17.0", features = ["time"] }
+tokio = { version = "1.22.0", features = ["time"] }
 sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" }
 sc-client-api = { version = "4.0.0-dev", path = "../../api" }
 sc-client-db = { version = "0.10.0-dev", default-features = false, path = "../../db" }
diff --git a/substrate/client/service/test/src/lib.rs b/substrate/client/service/test/src/lib.rs
index 5d29d34a3cbf21623bea7de855f80e1a5bbe0bb9..5f75e3521e23584c50b5c9e2966a49c7b075e9f9 100644
--- a/substrate/client/service/test/src/lib.rs
+++ b/substrate/client/service/test/src/lib.rs
@@ -302,48 +302,55 @@ where
 		full: impl Iterator<Item = impl FnOnce(Configuration) -> Result<(F, U), Error>>,
 		authorities: impl Iterator<Item = (String, impl FnOnce(Configuration) -> Result<(F, U), Error>)>,
 	) {
-		let handle = self.runtime.handle().clone();
-
-		for (key, authority) in authorities {
-			let node_config = node_config(
-				self.nodes,
-				&self.chain_spec,
-				Role::Authority,
-				handle.clone(),
-				Some(key),
-				self.base_port,
-				temp,
-			);
-			let addr = node_config.network.listen_addresses.first().unwrap().clone();
-			let (service, user_data) =
-				authority(node_config).expect("Error creating test node service");
-
-			handle.spawn(service.clone().map_err(|_| ()));
-			let addr =
-				MultiaddrWithPeerId { multiaddr: addr, peer_id: service.network().local_peer_id() };
-			self.authority_nodes.push((self.nodes, service, user_data, addr));
-			self.nodes += 1;
-		}
+		self.runtime.block_on(async {
+			let handle = self.runtime.handle().clone();
+
+			for (key, authority) in authorities {
+				let node_config = node_config(
+					self.nodes,
+					&self.chain_spec,
+					Role::Authority,
+					handle.clone(),
+					Some(key),
+					self.base_port,
+					temp,
+				);
+				let addr = node_config.network.listen_addresses.first().unwrap().clone();
+				let (service, user_data) =
+					authority(node_config).expect("Error creating test node service");
+
+				handle.spawn(service.clone().map_err(|_| ()));
+				let addr = MultiaddrWithPeerId {
+					multiaddr: addr,
+					peer_id: service.network().local_peer_id(),
+				};
+				self.authority_nodes.push((self.nodes, service, user_data, addr));
+				self.nodes += 1;
+			}
 
-		for full in full {
-			let node_config = node_config(
-				self.nodes,
-				&self.chain_spec,
-				Role::Full,
-				handle.clone(),
-				None,
-				self.base_port,
-				temp,
-			);
-			let addr = node_config.network.listen_addresses.first().unwrap().clone();
-			let (service, user_data) = full(node_config).expect("Error creating test node service");
-
-			handle.spawn(service.clone().map_err(|_| ()));
-			let addr =
-				MultiaddrWithPeerId { multiaddr: addr, peer_id: service.network().local_peer_id() };
-			self.full_nodes.push((self.nodes, service, user_data, addr));
-			self.nodes += 1;
-		}
+			for full in full {
+				let node_config = node_config(
+					self.nodes,
+					&self.chain_spec,
+					Role::Full,
+					handle.clone(),
+					None,
+					self.base_port,
+					temp,
+				);
+				let addr = node_config.network.listen_addresses.first().unwrap().clone();
+				let (service, user_data) =
+					full(node_config).expect("Error creating test node service");
+
+				handle.spawn(service.clone().map_err(|_| ()));
+				let addr = MultiaddrWithPeerId {
+					multiaddr: addr,
+					peer_id: service.network().local_peer_id(),
+				};
+				self.full_nodes.push((self.nodes, service, user_data, addr));
+				self.nodes += 1;
+			}
+		});
 	}
 }
 
diff --git a/substrate/client/telemetry/Cargo.toml b/substrate/client/telemetry/Cargo.toml
index f8c6f281546db06cc1a3e5a4e6f296d4e6d08d9b..0a0b9284efa24f1138458340940a47c90f549aee 100644
--- a/substrate/client/telemetry/Cargo.toml
+++ b/substrate/client/telemetry/Cargo.toml
@@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 chrono = "0.4.19"
 futures = "0.3.21"
-libp2p = { version = "0.49.0", default-features = false, features = ["dns-async-std", "tcp-async-io", "wasm-ext", "websocket"] }
+libp2p = { version = "0.49.0", default-features = false, features = ["dns", "tcp", "tokio", "wasm-ext", "websocket"] }
 log = "0.4.17"
 parking_lot = "0.12.1"
 pin-project = "1.0.12"
diff --git a/substrate/client/telemetry/src/transport.rs b/substrate/client/telemetry/src/transport.rs
index d64da44a83b6ba29ed0f08c6ec0b5023b713423d..d8bd138c5af252a9d4b24d09710a1c13afacb32d 100644
--- a/substrate/client/telemetry/src/transport.rs
+++ b/substrate/client/telemetry/src/transport.rs
@@ -17,7 +17,6 @@
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 use futures::{
-	executor::block_on,
 	prelude::*,
 	ready,
 	task::{Context, Poll},
@@ -31,8 +30,8 @@ const CONNECT_TIMEOUT: Duration = Duration::from_secs(20);
 
 pub(crate) fn initialize_transport() -> Result<WsTrans, io::Error> {
 	let transport = {
-		let tcp_transport = libp2p::tcp::TcpTransport::new(libp2p::tcp::GenTcpConfig::new());
-		let inner = block_on(libp2p::dns::DnsConfig::system(tcp_transport))?;
+		let tcp_transport = libp2p::tcp::TokioTcpTransport::new(libp2p::tcp::GenTcpConfig::new());
+		let inner = libp2p::dns::TokioDnsConfig::system(tcp_transport)?;
 		libp2p::websocket::framed::WsConfig::new(inner).and_then(|connec, _| {
 			let connec = connec
 				.with(|item| {
diff --git a/substrate/frame/state-trie-migration/Cargo.toml b/substrate/frame/state-trie-migration/Cargo.toml
index b803aad69263f2ed693bf42e6fb70d020f2ece96..30d90e8b862a7467a1a445a75b225bbf98f6c59b 100644
--- a/substrate/frame/state-trie-migration/Cargo.toml
+++ b/substrate/frame/state-trie-migration/Cargo.toml
@@ -31,7 +31,7 @@ substrate-state-trie-migration-rpc = { optional = true, path = "../../utils/fram
 
 [dev-dependencies]
 parking_lot = "0.12.1"
-tokio = { version = "1.17.0", features = ["macros"] }
+tokio = { version = "1.22.0", features = ["macros"] }
 pallet-balances = { path = "../balances" }
 sp-tracing = { path = "../../primitives/tracing" }
 
diff --git a/substrate/test-utils/Cargo.toml b/substrate/test-utils/Cargo.toml
index b60183c180b4a48400806668cb4464496ded7e31..c7201da23ab17eecb95f8d5d1f520f9b5195a4e4 100644
--- a/substrate/test-utils/Cargo.toml
+++ b/substrate/test-utils/Cargo.toml
@@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 futures = "0.3.16"
-tokio = { version = "1.17.0", features = ["macros", "time"] }
+tokio = { version = "1.22.0", features = ["macros", "time"] }
 substrate-test-utils-derive = { version = "0.10.0-dev", path = "./derive" }
 
 [dev-dependencies]
diff --git a/substrate/test-utils/test-crate/Cargo.toml b/substrate/test-utils/test-crate/Cargo.toml
index 2b66df6ae6513d1cb81a54e0e31abb669d6e2780..67966dd2b6015df041cec01e71571e6efa5620de 100644
--- a/substrate/test-utils/test-crate/Cargo.toml
+++ b/substrate/test-utils/test-crate/Cargo.toml
@@ -12,6 +12,6 @@ publish = false
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dev-dependencies]
-tokio = { version = "1.17.0", features = ["macros"] }
+tokio = { version = "1.22.0", features = ["macros"] }
 sc-service = { version = "0.10.0-dev", path = "../../client/service" }
 test-utils = { package = "substrate-test-utils", version = "4.0.0-dev", path = ".." }
diff --git a/substrate/utils/frame/remote-externalities/Cargo.toml b/substrate/utils/frame/remote-externalities/Cargo.toml
index e329b7f3f2c58205f593f7f15d334e9b9a551283..f24ab346c4084e54c0a8d2e4fc60d345f38d132b 100644
--- a/substrate/utils/frame/remote-externalities/Cargo.toml
+++ b/substrate/utils/frame/remote-externalities/Cargo.toml
@@ -26,7 +26,7 @@ sp-version = { version = "5.0.0", path = "../../../primitives/version" }
 substrate-rpc-client = { path = "../rpc/client" }
 
 [dev-dependencies]
-tokio = { version = "1.17.0", features = ["macros", "rt-multi-thread"] }
+tokio = { version = "1.22.0", features = ["macros", "rt-multi-thread"] }
 frame-support = { version = "4.0.0-dev", path = "../../../frame/support" }
 pallet-elections-phragmen = { version = "5.0.0-dev", path = "../../../frame/elections-phragmen" }
 
diff --git a/substrate/utils/frame/rpc/client/Cargo.toml b/substrate/utils/frame/rpc/client/Cargo.toml
index 78134a79bd0def8429e9afc890cc8059e320ec28..371996a4edfd339794e81c175d7d1e87da686472 100644
--- a/substrate/utils/frame/rpc/client/Cargo.toml
+++ b/substrate/utils/frame/rpc/client/Cargo.toml
@@ -21,5 +21,5 @@ sp-runtime = { version = "7.0.0", path = "../../../../primitives/runtime" }
 log = "0.4"
 
 [dev-dependencies]
-tokio = { version = "1.17.0", features = ["macros", "rt-multi-thread", "sync"] }
+tokio = { version = "1.22.0", features = ["macros", "rt-multi-thread", "sync"] }
 sp-core = { path = "../../../../primitives/core" }
diff --git a/substrate/utils/frame/rpc/support/Cargo.toml b/substrate/utils/frame/rpc/support/Cargo.toml
index 5b781c72056a280b030951bb0aea4815afe561d6..119acbd937c8aa5058123fe3a2d9cfdfea11e9f5 100644
--- a/substrate/utils/frame/rpc/support/Cargo.toml
+++ b/substrate/utils/frame/rpc/support/Cargo.toml
@@ -26,7 +26,7 @@ sp-storage = { version = "7.0.0", path = "../../../../primitives/storage" }
 [dev-dependencies]
 scale-info = "2.1.1"
 jsonrpsee = { version = "0.15.1", features = ["ws-client", "jsonrpsee-types"] }
-tokio = "1.17.0"
+tokio = "1.22.0"
 sp-core = { version = "7.0.0", path = "../../../../primitives/core" }
 sp-runtime = { version = "7.0.0", path = "../../../../primitives/runtime" }
 frame-system = { version = "4.0.0-dev", path = "../../../../frame/system" }
diff --git a/substrate/utils/frame/rpc/system/Cargo.toml b/substrate/utils/frame/rpc/system/Cargo.toml
index ddc52ffe56a533d421eebeec811da9a4ac277f58..56b8a79f8c0800d377bdde7addb37f527ee4214a 100644
--- a/substrate/utils/frame/rpc/system/Cargo.toml
+++ b/substrate/utils/frame/rpc/system/Cargo.toml
@@ -30,7 +30,7 @@ sp-runtime = { version = "7.0.0", path = "../../../../primitives/runtime" }
 
 [dev-dependencies]
 sc-transaction-pool = { version = "4.0.0-dev", path = "../../../../client/transaction-pool" }
-tokio = "1.17.0"
+tokio = "1.22.0"
 assert_matches = "1.3.0"
 sp-tracing = { version = "6.0.0", path = "../../../../primitives/tracing" }
 substrate-test-runtime-client = { version = "2.0.0", path = "../../../../test-utils/runtime/client" }
diff --git a/substrate/utils/frame/try-runtime/cli/Cargo.toml b/substrate/utils/frame/try-runtime/cli/Cargo.toml
index 725e3d565efbb4073f1cb5a25326a5fe836eeb29..e7bbba131155f33e9164e54958745d05cb47a9f2 100644
--- a/substrate/utils/frame/try-runtime/cli/Cargo.toml
+++ b/substrate/utils/frame/try-runtime/cli/Cargo.toml
@@ -35,7 +35,7 @@ frame-try-runtime = { optional = true, path = "../../../../frame/try-runtime" }
 substrate-rpc-client = { path = "../../rpc/client" }
 
 [dev-dependencies]
-tokio = "1.17.0"
+tokio = "1.22.0"
 
 [features]
 try-runtime = [
diff --git a/substrate/utils/prometheus/Cargo.toml b/substrate/utils/prometheus/Cargo.toml
index 3c2f8321befbe6844a42b3a8068fbb7da40f69ab..1371fe6f408c0e98da36ae09958c68aeb6747af2 100644
--- a/substrate/utils/prometheus/Cargo.toml
+++ b/substrate/utils/prometheus/Cargo.toml
@@ -18,8 +18,8 @@ hyper = { version = "0.14.16", default-features = false, features = ["http1", "s
 log = "0.4.17"
 prometheus = { version = "0.13.0", default-features = false }
 thiserror = "1.0"
-tokio = { version = "1.17.0", features = ["parking_lot"] }
+tokio = { version = "1.22.0", features = ["parking_lot"] }
 
 [dev-dependencies]
 hyper = { version = "0.14.16", features = ["client"] }
-tokio = { version = "1.17.0", features = ["rt-multi-thread"] }
+tokio = { version = "1.22.0", features = ["rt-multi-thread"] }